path: root/third-party/benchmark/test
Diffstat (limited to 'third-party/benchmark/test')
-rw-r--r--  third-party/benchmark/test/AssemblyTests.cmake | 46
-rw-r--r--  third-party/benchmark/test/BUILD | 74
-rw-r--r--  third-party/benchmark/test/CMakeLists.txt | 271
-rw-r--r--  third-party/benchmark/test/args_product_test.cc | 77
-rw-r--r--  third-party/benchmark/test/basic_test.cc | 151
-rw-r--r--  third-party/benchmark/test/benchmark_gtest.cc | 165
-rw-r--r--  third-party/benchmark/test/benchmark_name_gtest.cc | 74
-rw-r--r--  third-party/benchmark/test/benchmark_random_interleaving_gtest.cc | 126
-rw-r--r--  third-party/benchmark/test/benchmark_test.cc | 245
-rw-r--r--  third-party/benchmark/test/clobber_memory_assembly_test.cc | 64
-rw-r--r--  third-party/benchmark/test/commandlineflags_gtest.cc | 228
-rw-r--r--  third-party/benchmark/test/complexity_test.cc | 222
-rw-r--r--  third-party/benchmark/test/cxx03_test.cc | 63
-rw-r--r--  third-party/benchmark/test/diagnostics_test.cc | 80
-rw-r--r--  third-party/benchmark/test/display_aggregates_only_test.cc | 43
-rw-r--r--  third-party/benchmark/test/donotoptimize_assembly_test.cc | 163
-rw-r--r--  third-party/benchmark/test/donotoptimize_test.cc | 52
-rw-r--r--  third-party/benchmark/test/filter_test.cc | 118
-rw-r--r--  third-party/benchmark/test/fixture_test.cc | 51
-rw-r--r--  third-party/benchmark/test/internal_threading_test.cc | 184
-rw-r--r--  third-party/benchmark/test/link_main_test.cc | 8
-rw-r--r--  third-party/benchmark/test/map_test.cc | 57
-rw-r--r--  third-party/benchmark/test/memory_manager_test.cc | 46
-rw-r--r--  third-party/benchmark/test/multiple_ranges_test.cc | 96
-rw-r--r--  third-party/benchmark/test/options_test.cc | 76
-rw-r--r--  third-party/benchmark/test/output_test.h | 213
-rw-r--r--  third-party/benchmark/test/output_test_helper.cc | 520
-rw-r--r--  third-party/benchmark/test/perf_counters_gtest.cc | 145
-rw-r--r--  third-party/benchmark/test/perf_counters_test.cc | 27
-rw-r--r--  third-party/benchmark/test/register_benchmark_test.cc | 184
-rw-r--r--  third-party/benchmark/test/repetitions_test.cc | 208
-rw-r--r--  third-party/benchmark/test/report_aggregates_only_test.cc | 39
-rw-r--r--  third-party/benchmark/test/reporter_output_test.cc | 956
-rw-r--r--  third-party/benchmark/test/skip_with_error_test.cc | 195
-rw-r--r--  third-party/benchmark/test/state_assembly_test.cc | 68
-rw-r--r--  third-party/benchmark/test/statistics_gtest.cc | 28
-rw-r--r--  third-party/benchmark/test/string_util_gtest.cc | 161
-rw-r--r--  third-party/benchmark/test/templated_fixture_test.cc | 28
-rw-r--r--  third-party/benchmark/test/user_counters_tabular_test.cc | 500
-rw-r--r--  third-party/benchmark/test/user_counters_test.cc | 555
-rw-r--r--  third-party/benchmark/test/user_counters_thousands_test.cc | 183
41 files changed, 6790 insertions(+), 0 deletions(-)
diff --git a/third-party/benchmark/test/AssemblyTests.cmake b/third-party/benchmark/test/AssemblyTests.cmake
new file mode 100644
index 000000000000..3d078586f1de
--- /dev/null
+++ b/third-party/benchmark/test/AssemblyTests.cmake
@@ -0,0 +1,46 @@
+
+include(split_list)
+
+set(ASM_TEST_FLAGS "")
+check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
+if (BENCHMARK_HAS_O3_FLAG)
+ list(APPEND ASM_TEST_FLAGS -O3)
+endif()
+
+check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG)
+if (BENCHMARK_HAS_G0_FLAG)
+ list(APPEND ASM_TEST_FLAGS -g0)
+endif()
+
+check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
+if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
+ list(APPEND ASM_TEST_FLAGS -fno-stack-protector)
+endif()
+
+split_list(ASM_TEST_FLAGS)
+string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER)
+
+macro(add_filecheck_test name)
+ cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV})
+ add_library(${name} OBJECT ${name}.cc)
+ set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}")
+ set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s")
+ add_custom_target(copy_${name} ALL
+ COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py
+ $<TARGET_OBJECTS:${name}>
+ ${ASM_OUTPUT_FILE}
+ BYPRODUCTS ${ASM_OUTPUT_FILE})
+ add_dependencies(copy_${name} ${name})
+ if (NOT ARG_CHECK_PREFIXES)
+ set(ARG_CHECK_PREFIXES "CHECK")
+ endif()
+ foreach(prefix ${ARG_CHECK_PREFIXES})
+ add_test(NAME run_${name}_${prefix}
+ COMMAND
+ ${LLVM_FILECHECK_EXE} ${name}.cc
+ --input-file=${ASM_OUTPUT_FILE}
+ --check-prefixes=CHECK,CHECK-${ASM_TEST_COMPILER}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+ endforeach()
+endmacro()
+
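The add_filecheck_test macro defined above is the driver for the *_assembly_test.cc files in this directory; it is invoked from test/CMakeLists.txt further down in this diff. A minimal usage sketch (assuming the parent project has already located FileCheck as LLVM_FILECHECK_EXE):

  include(AssemblyTests.cmake)

  # Compiles clobber_memory_assembly_test.cc with -S, strips the assembly via
  # tools/strip_asm.py, and registers a test that runs FileCheck over the
  # stripped output using the CHECK and CHECK-<compiler-id> prefixes embedded
  # in the source file.
  add_filecheck_test(clobber_memory_assembly_test)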
diff --git a/third-party/benchmark/test/BUILD b/third-party/benchmark/test/BUILD
new file mode 100644
index 000000000000..1f27f99ede9f
--- /dev/null
+++ b/third-party/benchmark/test/BUILD
@@ -0,0 +1,74 @@
+TEST_COPTS = [
+ "-pedantic",
+ "-pedantic-errors",
+ "-std=c++11",
+ "-Wall",
+ "-Wextra",
+ "-Wshadow",
+ # "-Wshorten-64-to-32",
+ "-Wfloat-equal",
+ "-fstrict-aliasing",
+]
+
+PER_SRC_COPTS = ({
+ "cxx03_test.cc": ["-std=c++03"],
+ # Some of the issues with DoNotOptimize only occur when optimization is enabled
+ "donotoptimize_test.cc": ["-O3"],
+})
+
+TEST_ARGS = ["--benchmark_min_time=0.01"]
+
+PER_SRC_TEST_ARGS = ({
+ "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"],
+ "repetitions_test.cc": [" --benchmark_repetitions=3"],
+})
+
+load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
+
+cc_library(
+ name = "output_test_helper",
+ testonly = 1,
+ srcs = ["output_test_helper.cc"],
+ hdrs = ["output_test.h"],
+ copts = TEST_COPTS,
+ deps = [
+ "//:benchmark",
+ "//:benchmark_internal_headers",
+ ],
+)
+
+[
+ cc_test(
+ name = test_src[:-len(".cc")],
+ size = "small",
+ srcs = [test_src],
+ args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []),
+ copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []),
+ deps = [
+ ":output_test_helper",
+ "//:benchmark",
+ "//:benchmark_internal_headers",
+ "@com_google_googletest//:gtest",
+ ] + (
+ ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else []
+ ),
+ # FIXME: Add support for assembly tests to bazel.
+ # See Issue #556
+ # https://github.com/google/benchmark/issues/556
+ )
+ for test_src in glob(
+ ["*test.cc"],
+ exclude = [
+ "*_assembly_test.cc",
+ "link_main_test.cc",
+ ],
+ )
+]
+
+cc_test(
+ name = "link_main_test",
+ size = "small",
+ srcs = ["link_main_test.cc"],
+ copts = TEST_COPTS,
+ deps = ["//:benchmark_main"],
+)
diff --git a/third-party/benchmark/test/CMakeLists.txt b/third-party/benchmark/test/CMakeLists.txt
new file mode 100644
index 000000000000..79cdf53b402c
--- /dev/null
+++ b/third-party/benchmark/test/CMakeLists.txt
@@ -0,0 +1,271 @@
+# Enable the tests
+
+find_package(Threads REQUIRED)
+include(CheckCXXCompilerFlag)
+
+# NOTE: Some tests use `<cassert>` to perform the test. Therefore we must
+# strip -DNDEBUG from the default CMake flags in DEBUG mode.
+string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
+if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
+ add_definitions( -UNDEBUG )
+ add_definitions(-DTEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS)
+ # Also remove /D NDEBUG to avoid MSVC warnings about conflicting defines.
+ foreach (flags_var_to_scrub
+ CMAKE_CXX_FLAGS_RELEASE
+ CMAKE_CXX_FLAGS_RELWITHDEBINFO
+ CMAKE_CXX_FLAGS_MINSIZEREL
+ CMAKE_C_FLAGS_RELEASE
+ CMAKE_C_FLAGS_RELWITHDEBINFO
+ CMAKE_C_FLAGS_MINSIZEREL)
+ string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " "
+ "${flags_var_to_scrub}" "${${flags_var_to_scrub}}")
+ endforeach()
+endif()
+
+check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
+set(BENCHMARK_O3_FLAG "")
+if (BENCHMARK_HAS_O3_FLAG)
+ set(BENCHMARK_O3_FLAG "-O3")
+endif()
+
+# NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise
+# they will break the configuration check.
+if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
+ list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
+endif()
+
+add_library(output_test_helper STATIC output_test_helper.cc output_test.h)
+
+macro(compile_benchmark_test name)
+ add_executable(${name} "${name}.cc")
+ target_link_libraries(${name} benchmark::benchmark ${CMAKE_THREAD_LIBS_INIT})
+endmacro(compile_benchmark_test)
+
+macro(compile_benchmark_test_with_main name)
+ add_executable(${name} "${name}.cc")
+ target_link_libraries(${name} benchmark::benchmark_main)
+endmacro(compile_benchmark_test_with_main)
+
+macro(compile_output_test name)
+ add_executable(${name} "${name}.cc" output_test.h)
+ target_link_libraries(${name} output_test_helper benchmark::benchmark
+ ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+endmacro(compile_output_test)
+
+# Demonstration executable
+compile_benchmark_test(benchmark_test)
+add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(filter_test)
+macro(add_filter_test name filter expect)
+ add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
+ add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
+endmacro(add_filter_test)
+
+add_filter_test(filter_simple "Foo" 3)
+add_filter_test(filter_simple_negative "-Foo" 2)
+add_filter_test(filter_suffix "BM_.*" 4)
+add_filter_test(filter_suffix_negative "-BM_.*" 1)
+add_filter_test(filter_regex_all ".*" 5)
+add_filter_test(filter_regex_all_negative "-.*" 0)
+add_filter_test(filter_regex_blank "" 5)
+add_filter_test(filter_regex_blank_negative "-" 0)
+add_filter_test(filter_regex_none "monkey" 0)
+add_filter_test(filter_regex_none_negative "-monkey" 5)
+add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
+add_filter_test(filter_regex_wildcard_negative "-.*Foo.*" 2)
+add_filter_test(filter_regex_begin "^BM_.*" 4)
+add_filter_test(filter_regex_begin_negative "-^BM_.*" 1)
+add_filter_test(filter_regex_begin2 "^N" 1)
+add_filter_test(filter_regex_begin2_negative "-^N" 4)
+add_filter_test(filter_regex_end ".*Ba$" 1)
+add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
+
+compile_benchmark_test(options_test)
+add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(basic_test)
+add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01)
+
+compile_output_test(repetitions_test)
+add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01 --benchmark_repetitions=3)
+
+compile_benchmark_test(diagnostics_test)
+add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(skip_with_error_test)
+add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(donotoptimize_test)
+# Some of the issues with DoNotOptimize only occur when optimization is enabled
+check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
+if (BENCHMARK_HAS_O3_FLAG)
+ set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3")
+endif()
+add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(fixture_test)
+add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(register_benchmark_test)
+add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(map_test)
+add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(multiple_ranges_test)
+add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(args_product_test)
+add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01)
+
+compile_benchmark_test_with_main(link_main_test)
+add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01)
+
+compile_output_test(reporter_output_test)
+add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01)
+
+compile_output_test(templated_fixture_test)
+add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01)
+
+compile_output_test(user_counters_test)
+add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01)
+
+compile_output_test(perf_counters_test)
+add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01 --benchmark_perf_counters=CYCLES,BRANCHES)
+
+compile_output_test(internal_threading_test)
+add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01)
+
+compile_output_test(report_aggregates_only_test)
+add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01)
+
+compile_output_test(display_aggregates_only_test)
+add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01)
+
+compile_output_test(user_counters_tabular_test)
+add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
+
+compile_output_test(user_counters_thousands_test)
+add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01)
+
+compile_output_test(memory_manager_test)
+add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01)
+
+check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
+if (BENCHMARK_HAS_CXX03_FLAG)
+ compile_benchmark_test(cxx03_test)
+ set_target_properties(cxx03_test
+ PROPERTIES
+ CXX_STANDARD 98
+ CXX_STANDARD_REQUIRED YES)
+ # libstdc++ provides different definitions within <map> between dialects. When
+ # LTO is enabled and -Werror is specified GCC diagnoses this ODR violation
+ # causing the test to fail to compile. To prevent this we explicitly disable
+ # the warning.
+ check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR)
+ if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR)
+ set_target_properties(cxx03_test
+ PROPERTIES
+ LINK_FLAGS "-Wno-odr")
+ endif()
+ add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01)
+endif()
+
+# Attempt to work around flaky test failures when running on Appveyor servers.
+if (DEFINED ENV{APPVEYOR})
+ set(COMPLEXITY_MIN_TIME "0.5")
+else()
+ set(COMPLEXITY_MIN_TIME "0.01")
+endif()
+compile_output_test(complexity_test)
+add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
+
+###############################################################################
+# GoogleTest Unit Tests
+###############################################################################
+
+if (BENCHMARK_ENABLE_GTEST_TESTS)
+ macro(compile_gtest name)
+ add_executable(${name} "${name}.cc")
+ target_link_libraries(${name} benchmark::benchmark
+ gmock_main ${CMAKE_THREAD_LIBS_INIT})
+ endmacro(compile_gtest)
+
+ macro(add_gtest name)
+ compile_gtest(${name})
+ add_test(NAME ${name} COMMAND ${name})
+ endmacro()
+
+ add_gtest(benchmark_gtest)
+ add_gtest(benchmark_name_gtest)
+ add_gtest(benchmark_random_interleaving_gtest)
+ add_gtest(commandlineflags_gtest)
+ add_gtest(statistics_gtest)
+ add_gtest(string_util_gtest)
+ add_gtest(perf_counters_gtest)
+endif(BENCHMARK_ENABLE_GTEST_TESTS)
+
+###############################################################################
+# Assembly Unit Tests
+###############################################################################
+
+if (BENCHMARK_ENABLE_ASSEMBLY_TESTS)
+ if (NOT LLVM_FILECHECK_EXE)
+ message(FATAL_ERROR "LLVM FileCheck is required when including this file")
+ endif()
+ include(AssemblyTests.cmake)
+ add_filecheck_test(donotoptimize_assembly_test)
+ add_filecheck_test(state_assembly_test)
+ add_filecheck_test(clobber_memory_assembly_test)
+endif()
+
+
+
+###############################################################################
+# Code Coverage Configuration
+###############################################################################
+
+# Add the coverage command(s)
+if(CMAKE_BUILD_TYPE)
+ string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
+endif()
+if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
+ find_program(GCOV gcov)
+ find_program(LCOV lcov)
+ find_program(GENHTML genhtml)
+ find_program(CTEST ctest)
+ if (GCOV AND LCOV AND GENHTML AND CTEST AND HAVE_CXX_FLAG_COVERAGE)
+ add_custom_command(
+ OUTPUT ${CMAKE_BINARY_DIR}/lcov/index.html
+ COMMAND ${LCOV} -q -z -d .
+ COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o before.lcov -i
+ COMMAND ${CTEST} --force-new-ctest-process
+ COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o after.lcov
+ COMMAND ${LCOV} -q -a before.lcov -a after.lcov --output-file final.lcov
+ COMMAND ${LCOV} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov
+ COMMAND ${GENHTML} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_BINARY_DIR}" -t benchmark
+ DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test complexity_test
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ COMMENT "Running LCOV"
+ )
+ add_custom_target(coverage
+ DEPENDS ${CMAKE_BINARY_DIR}/lcov/index.html
+ COMMENT "LCOV report at lcov/index.html"
+ )
+ message(STATUS "Coverage command added")
+ else()
+ if (HAVE_CXX_FLAG_COVERAGE)
+ set(CXX_FLAG_COVERAGE_MESSAGE supported)
+ else()
+ set(CXX_FLAG_COVERAGE_MESSAGE unavailable)
+ endif()
+ message(WARNING
+ "Coverage not available:\n"
+ " gcov: ${GCOV}\n"
+ " lcov: ${LCOV}\n"
+ " genhtml: ${GENHTML}\n"
+ " ctest: ${CTEST}\n"
+ " --coverage flag: ${CXX_FLAG_COVERAGE_MESSAGE}")
+ endif()
+endif()
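Note that this CMakeLists.txt relies on several options and variables supplied by the parent project: BENCHMARK_ENABLE_GTEST_TESTS, BENCHMARK_ENABLE_ASSEMBLY_TESTS, BENCHMARK_ENABLE_LTO, LLVM_FILECHECK_EXE, HAVE_CXX_FLAG_COVERAGE and BENCHMARK_CXX_LINKER_FLAGS. A rough sketch of how a consuming parent might configure them before adding this directory; the concrete values below are placeholders, not part of this diff:

  include(CheckCXXCompilerFlag)
  set(BENCHMARK_ENABLE_GTEST_TESTS ON)      # build the *_gtest.cc unit tests
  set(BENCHMARK_ENABLE_ASSEMBLY_TESTS OFF)  # requires FileCheck when ON
  set(LLVM_FILECHECK_EXE "" CACHE FILEPATH "FileCheck binary for assembly tests")
  check_cxx_compiler_flag(--coverage HAVE_CXX_FLAG_COVERAGE)
  add_subdirectory(test)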
diff --git a/third-party/benchmark/test/args_product_test.cc b/third-party/benchmark/test/args_product_test.cc
new file mode 100644
index 000000000000..32a75d50dd9e
--- /dev/null
+++ b/third-party/benchmark/test/args_product_test.cc
@@ -0,0 +1,77 @@
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <iostream>
+#include <set>
+#include <vector>
+
+class ArgsProductFixture : public ::benchmark::Fixture {
+ public:
+ ArgsProductFixture()
+ : expectedValues({{0, 100, 2000, 30000},
+ {1, 15, 3, 8},
+ {1, 15, 3, 9},
+ {1, 15, 7, 8},
+ {1, 15, 7, 9},
+ {1, 15, 10, 8},
+ {1, 15, 10, 9},
+ {2, 15, 3, 8},
+ {2, 15, 3, 9},
+ {2, 15, 7, 8},
+ {2, 15, 7, 9},
+ {2, 15, 10, 8},
+ {2, 15, 10, 9},
+ {4, 5, 6, 11}}) {}
+
+ void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
+ std::vector<int64_t> ranges = {state.range(0), state.range(1),
+ state.range(2), state.range(3)};
+
+ assert(expectedValues.find(ranges) != expectedValues.end());
+
+ actualValues.insert(ranges);
+ }
+
+ // NOTE: This is not TearDown as we want to check after _all_ runs are
+ // complete.
+ virtual ~ArgsProductFixture() {
+ if (actualValues != expectedValues) {
+ std::cout << "EXPECTED\n";
+ for (auto v : expectedValues) {
+ std::cout << "{";
+ for (int64_t iv : v) {
+ std::cout << iv << ", ";
+ }
+ std::cout << "}\n";
+ }
+ std::cout << "ACTUAL\n";
+ for (auto v : actualValues) {
+ std::cout << "{";
+ for (int64_t iv : v) {
+ std::cout << iv << ", ";
+ }
+ std::cout << "}\n";
+ }
+ }
+ }
+
+ std::set<std::vector<int64_t>> expectedValues;
+ std::set<std::vector<int64_t>> actualValues;
+};
+
+BENCHMARK_DEFINE_F(ArgsProductFixture, Empty)(benchmark::State& state) {
+ for (auto _ : state) {
+ int64_t product =
+ state.range(0) * state.range(1) * state.range(2) * state.range(3);
+ for (int64_t x = 0; x < product; x++) {
+ benchmark::DoNotOptimize(x);
+ }
+ }
+}
+
+BENCHMARK_REGISTER_F(ArgsProductFixture, Empty)
+ ->Args({0, 100, 2000, 30000})
+ ->ArgsProduct({{1, 2}, {15}, {3, 7, 10}, {8, 9}})
+ ->Args({4, 5, 6, 11});
+
+BENCHMARK_MAIN();
diff --git a/third-party/benchmark/test/basic_test.cc b/third-party/benchmark/test/basic_test.cc
new file mode 100644
index 000000000000..33642211e205
--- /dev/null
+++ b/third-party/benchmark/test/basic_test.cc
@@ -0,0 +1,151 @@
+
+#include "benchmark/benchmark.h"
+
+#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
+
+void BM_empty(benchmark::State& state) {
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(state.iterations());
+ }
+}
+BENCHMARK(BM_empty);
+BENCHMARK(BM_empty)->ThreadPerCpu();
+
+void BM_spin_empty(benchmark::State& state) {
+ for (auto _ : state) {
+ for (int x = 0; x < state.range(0); ++x) {
+ benchmark::DoNotOptimize(x);
+ }
+ }
+}
+BASIC_BENCHMARK_TEST(BM_spin_empty);
+BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
+
+void BM_spin_pause_before(benchmark::State& state) {
+ for (int i = 0; i < state.range(0); ++i) {
+ benchmark::DoNotOptimize(i);
+ }
+ for (auto _ : state) {
+ for (int i = 0; i < state.range(0); ++i) {
+ benchmark::DoNotOptimize(i);
+ }
+ }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_before);
+BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
+
+void BM_spin_pause_during(benchmark::State& state) {
+ for (auto _ : state) {
+ state.PauseTiming();
+ for (int i = 0; i < state.range(0); ++i) {
+ benchmark::DoNotOptimize(i);
+ }
+ state.ResumeTiming();
+ for (int i = 0; i < state.range(0); ++i) {
+ benchmark::DoNotOptimize(i);
+ }
+ }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_during);
+BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
+
+void BM_pause_during(benchmark::State& state) {
+ for (auto _ : state) {
+ state.PauseTiming();
+ state.ResumeTiming();
+ }
+}
+BENCHMARK(BM_pause_during);
+BENCHMARK(BM_pause_during)->ThreadPerCpu();
+BENCHMARK(BM_pause_during)->UseRealTime();
+BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
+
+void BM_spin_pause_after(benchmark::State& state) {
+ for (auto _ : state) {
+ for (int i = 0; i < state.range(0); ++i) {
+ benchmark::DoNotOptimize(i);
+ }
+ }
+ for (int i = 0; i < state.range(0); ++i) {
+ benchmark::DoNotOptimize(i);
+ }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_after);
+BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
+
+void BM_spin_pause_before_and_after(benchmark::State& state) {
+ for (int i = 0; i < state.range(0); ++i) {
+ benchmark::DoNotOptimize(i);
+ }
+ for (auto _ : state) {
+ for (int i = 0; i < state.range(0); ++i) {
+ benchmark::DoNotOptimize(i);
+ }
+ }
+ for (int i = 0; i < state.range(0); ++i) {
+ benchmark::DoNotOptimize(i);
+ }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
+BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
+
+void BM_empty_stop_start(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_empty_stop_start);
+BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
+
+
+void BM_KeepRunning(benchmark::State& state) {
+ benchmark::IterationCount iter_count = 0;
+ assert(iter_count == state.iterations());
+ while (state.KeepRunning()) {
+ ++iter_count;
+ }
+ assert(iter_count == state.iterations());
+}
+BENCHMARK(BM_KeepRunning);
+
+void BM_KeepRunningBatch(benchmark::State& state) {
+ // Choose a batch size >1000 to skip the typical runs with iteration
+ // targets of 10, 100 and 1000. If these are not actually skipped the
+ // bug would be detectable as consecutive runs with the same iteration
+ // count. Below we assert that this does not happen.
+ const benchmark::IterationCount batch_size = 1009;
+
+ static benchmark::IterationCount prior_iter_count = 0;
+ benchmark::IterationCount iter_count = 0;
+ while (state.KeepRunningBatch(batch_size)) {
+ iter_count += batch_size;
+ }
+ assert(state.iterations() == iter_count);
+
+ // Verify that the iteration count always increases across runs (see
+ // comment above).
+ assert(iter_count == batch_size // max_iterations == 1
+ || iter_count > prior_iter_count); // max_iterations > batch_size
+ prior_iter_count = iter_count;
+}
+// Register with a fixed repetition count to establish the invariant that
+// the iteration count should always change across runs. This overrides
+// the --benchmark_repetitions command line flag, which would otherwise
+// cause this test to fail if set > 1.
+BENCHMARK(BM_KeepRunningBatch)->Repetitions(1);
+
+void BM_RangedFor(benchmark::State& state) {
+ benchmark::IterationCount iter_count = 0;
+ for (auto _ : state) {
+ ++iter_count;
+ }
+ assert(iter_count == state.max_iterations);
+}
+BENCHMARK(BM_RangedFor);
+
+// Ensure that StateIterator provides all the necessary typedefs required to
+// instantiate std::iterator_traits.
+static_assert(std::is_same<
+ typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
+ typename benchmark::State::StateIterator::value_type>::value, "");
+
+BENCHMARK_MAIN();
diff --git a/third-party/benchmark/test/benchmark_gtest.cc b/third-party/benchmark/test/benchmark_gtest.cc
new file mode 100644
index 000000000000..14a885ba46da
--- /dev/null
+++ b/third-party/benchmark/test/benchmark_gtest.cc
@@ -0,0 +1,165 @@
+#include <map>
+#include <string>
+#include <vector>
+
+#include "../src/benchmark_register.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace benchmark {
+namespace internal {
+extern std::map<std::string, std::string>* global_context;
+
+namespace {
+
+TEST(AddRangeTest, Simple) {
+ std::vector<int> dst;
+ AddRange(&dst, 1, 2, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(1, 2));
+}
+
+TEST(AddRangeTest, Simple64) {
+ std::vector<int64_t> dst;
+ AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
+ EXPECT_THAT(dst, testing::ElementsAre(1, 2));
+}
+
+TEST(AddRangeTest, Advanced) {
+ std::vector<int> dst;
+ AddRange(&dst, 5, 15, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
+}
+
+TEST(AddRangeTest, Advanced64) {
+ std::vector<int64_t> dst;
+ AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
+ EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
+}
+
+TEST(AddRangeTest, FullRange8) {
+ std::vector<int8_t> dst;
+ AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
+ EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
+}
+
+TEST(AddRangeTest, FullRange64) {
+ std::vector<int64_t> dst;
+ AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
+ EXPECT_THAT(
+ dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL,
+ 1099511627776LL, 1125899906842624LL,
+ 1152921504606846976LL, 9223372036854775807LL));
+}
+
+TEST(AddRangeTest, NegativeRanges) {
+ std::vector<int> dst;
+ AddRange(&dst, -8, 0, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
+}
+
+TEST(AddRangeTest, StrictlyNegative) {
+ std::vector<int> dst;
+ AddRange(&dst, -8, -1, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
+}
+
+TEST(AddRangeTest, SymmetricNegativeRanges) {
+ std::vector<int> dst;
+ AddRange(&dst, -8, 8, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
+}
+
+TEST(AddRangeTest, SymmetricNegativeRangesOddMult) {
+ std::vector<int> dst;
+ AddRange(&dst, -30, 32, 5);
+ EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
+}
+
+TEST(AddRangeTest, NegativeRangesAsymmetric) {
+ std::vector<int> dst;
+ AddRange(&dst, -3, 5, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
+}
+
+TEST(AddRangeTest, NegativeRangesLargeStep) {
+ // Always include -1, 0, 1 when crossing zero.
+ std::vector<int> dst;
+ AddRange(&dst, -8, 8, 10);
+ EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
+}
+
+TEST(AddRangeTest, ZeroOnlyRange) {
+ std::vector<int> dst;
+ AddRange(&dst, 0, 0, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(0));
+}
+
+TEST(AddRangeTest, ZeroStartingRange) {
+ std::vector<int> dst;
+ AddRange(&dst, 0, 2, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(0, 1, 2));
+}
+
+TEST(AddRangeTest, NegativeRange64) {
+ std::vector<int64_t> dst;
+ AddRange<int64_t>(&dst, -4, 4, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
+}
+
+TEST(AddRangeTest, NegativeRangePreservesExistingOrder) {
+ // If elements already exist in the range, ensure we don't change
+ // their ordering by adding negative values.
+ std::vector<int64_t> dst = {1, 2, 3};
+ AddRange<int64_t>(&dst, -2, 2, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2));
+}
+
+TEST(AddRangeTest, FullNegativeRange64) {
+ std::vector<int64_t> dst;
+ const auto min = std::numeric_limits<int64_t>::min();
+ const auto max = std::numeric_limits<int64_t>::max();
+ AddRange(&dst, min, max, 1024);
+ EXPECT_THAT(
+ dst, testing::ElementsAreArray(std::vector<int64_t>{
+ min, -1152921504606846976LL, -1125899906842624LL,
+ -1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL,
+ 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL,
+ 1125899906842624LL, 1152921504606846976LL, max}));
+}
+
+TEST(AddRangeTest, Simple8) {
+ std::vector<int8_t> dst;
+ AddRange<int8_t>(&dst, 1, 8, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));
+}
+
+TEST(AddCustomContext, Simple) {
+ EXPECT_THAT(global_context, nullptr);
+
+ AddCustomContext("foo", "bar");
+ AddCustomContext("baz", "qux");
+
+ EXPECT_THAT(*global_context,
+ testing::UnorderedElementsAre(testing::Pair("foo", "bar"),
+ testing::Pair("baz", "qux")));
+
+ delete global_context;
+ global_context = nullptr;
+}
+
+TEST(AddCustomContext, DuplicateKey) {
+ EXPECT_THAT(global_context, nullptr);
+
+ AddCustomContext("foo", "bar");
+ AddCustomContext("foo", "qux");
+
+ EXPECT_THAT(*global_context,
+ testing::UnorderedElementsAre(testing::Pair("foo", "bar")));
+
+ delete global_context;
+ global_context = nullptr;
+}
+
+} // namespace
+} // namespace internal
+} // namespace benchmark
diff --git a/third-party/benchmark/test/benchmark_name_gtest.cc b/third-party/benchmark/test/benchmark_name_gtest.cc
new file mode 100644
index 000000000000..afb401c1f532
--- /dev/null
+++ b/third-party/benchmark/test/benchmark_name_gtest.cc
@@ -0,0 +1,74 @@
+#include "benchmark/benchmark.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using namespace benchmark;
+using namespace benchmark::internal;
+
+TEST(BenchmarkNameTest, Empty) {
+ const auto name = BenchmarkName();
+ EXPECT_EQ(name.str(), std::string());
+}
+
+TEST(BenchmarkNameTest, FunctionName) {
+ auto name = BenchmarkName();
+ name.function_name = "function_name";
+ EXPECT_EQ(name.str(), "function_name");
+}
+
+TEST(BenchmarkNameTest, FunctionNameAndArgs) {
+ auto name = BenchmarkName();
+ name.function_name = "function_name";
+ name.args = "some_args:3/4/5";
+ EXPECT_EQ(name.str(), "function_name/some_args:3/4/5");
+}
+
+TEST(BenchmarkNameTest, MinTime) {
+ auto name = BenchmarkName();
+ name.function_name = "function_name";
+ name.args = "some_args:3/4";
+ name.min_time = "min_time:3.4s";
+ EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s");
+}
+
+TEST(BenchmarkNameTest, Iterations) {
+ auto name = BenchmarkName();
+ name.function_name = "function_name";
+ name.min_time = "min_time:3.4s";
+ name.iterations = "iterations:42";
+ EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42");
+}
+
+TEST(BenchmarkNameTest, Repetitions) {
+ auto name = BenchmarkName();
+ name.function_name = "function_name";
+ name.min_time = "min_time:3.4s";
+ name.repetitions = "repetitions:24";
+ EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24");
+}
+
+TEST(BenchmarkNameTest, TimeType) {
+ auto name = BenchmarkName();
+ name.function_name = "function_name";
+ name.min_time = "min_time:3.4s";
+ name.time_type = "hammer_time";
+ EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time");
+}
+
+TEST(BenchmarkNameTest, Threads) {
+ auto name = BenchmarkName();
+ name.function_name = "function_name";
+ name.min_time = "min_time:3.4s";
+ name.threads = "threads:256";
+ EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256");
+}
+
+TEST(BenchmarkNameTest, TestEmptyFunctionName) {
+ auto name = BenchmarkName();
+ name.args = "first:3/second:4";
+ name.threads = "threads:22";
+ EXPECT_EQ(name.str(), "first:3/second:4/threads:22");
+}
+
+} // end namespace
diff --git a/third-party/benchmark/test/benchmark_random_interleaving_gtest.cc b/third-party/benchmark/test/benchmark_random_interleaving_gtest.cc
new file mode 100644
index 000000000000..8e28dab3f41d
--- /dev/null
+++ b/third-party/benchmark/test/benchmark_random_interleaving_gtest.cc
@@ -0,0 +1,126 @@
+#include <queue>
+#include <string>
+#include <vector>
+
+#include "../src/commandlineflags.h"
+#include "../src/string_util.h"
+#include "benchmark/benchmark.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+DECLARE_bool(benchmark_enable_random_interleaving);
+DECLARE_string(benchmark_filter);
+DECLARE_int32(benchmark_repetitions);
+
+namespace benchmark {
+namespace internal {
+namespace {
+
+class EventQueue : public std::queue<std::string> {
+ public:
+ void Put(const std::string& event) { push(event); }
+
+ void Clear() {
+ while (!empty()) {
+ pop();
+ }
+ }
+
+ std::string Get() {
+ std::string event = front();
+ pop();
+ return event;
+ }
+};
+
+static EventQueue* queue = new EventQueue;
+
+class NullReporter : public BenchmarkReporter {
+ public:
+ bool ReportContext(const Context& /*context*/) override { return true; }
+ void ReportRuns(const std::vector<Run>& /* report */) override {}
+};
+
+class BenchmarkTest : public testing::Test {
+ public:
+ static void SetupHook(int /* num_threads */) { queue->push("Setup"); }
+
+ static void TeardownHook(int /* num_threads */) { queue->push("Teardown"); }
+
+ void Execute(const std::string& pattern) {
+ queue->Clear();
+
+ BenchmarkReporter* reporter = new NullReporter;
+ FLAGS_benchmark_filter = pattern;
+ RunSpecifiedBenchmarks(reporter);
+ delete reporter;
+
+ queue->Put("DONE"); // End marker
+ }
+};
+
+static void BM_Match1(benchmark::State& state) {
+ const int64_t arg = state.range(0);
+
+ for (auto _ : state) {
+ }
+ queue->Put(StrFormat("BM_Match1/%d", static_cast<int>(arg)));
+}
+BENCHMARK(BM_Match1)
+ ->Iterations(100)
+ ->Arg(1)
+ ->Arg(2)
+ ->Arg(3)
+ ->Range(10, 80)
+ ->Args({90})
+ ->Args({100});
+
+TEST_F(BenchmarkTest, Match1) {
+ Execute("BM_Match1");
+ ASSERT_EQ("BM_Match1/1", queue->Get());
+ ASSERT_EQ("BM_Match1/2", queue->Get());
+ ASSERT_EQ("BM_Match1/3", queue->Get());
+ ASSERT_EQ("BM_Match1/10", queue->Get());
+ ASSERT_EQ("BM_Match1/64", queue->Get());
+ ASSERT_EQ("BM_Match1/80", queue->Get());
+ ASSERT_EQ("BM_Match1/90", queue->Get());
+ ASSERT_EQ("BM_Match1/100", queue->Get());
+ ASSERT_EQ("DONE", queue->Get());
+}
+
+TEST_F(BenchmarkTest, Match1WithRepetition) {
+ FLAGS_benchmark_repetitions = 2;
+
+ Execute("BM_Match1/(64|80)");
+ ASSERT_EQ("BM_Match1/64", queue->Get());
+ ASSERT_EQ("BM_Match1/64", queue->Get());
+ ASSERT_EQ("BM_Match1/80", queue->Get());
+ ASSERT_EQ("BM_Match1/80", queue->Get());
+ ASSERT_EQ("DONE", queue->Get());
+}
+
+TEST_F(BenchmarkTest, Match1WithRandomInterleaving) {
+ FLAGS_benchmark_enable_random_interleaving = true;
+ FLAGS_benchmark_repetitions = 100;
+
+ std::map<std::string, int> element_count;
+ std::map<std::string, int> interleaving_count;
+ Execute("BM_Match1/(64|80)");
+ for (int i = 0; i < 100; ++i) {
+ std::vector<std::string> interleaving;
+ interleaving.push_back(queue->Get());
+ interleaving.push_back(queue->Get());
+ element_count[interleaving[0].c_str()]++;
+ element_count[interleaving[1].c_str()]++;
+ interleaving_count[StrFormat("%s,%s", interleaving[0].c_str(),
+ interleaving[1].c_str())]++;
+ }
+ EXPECT_EQ(element_count["BM_Match1/64"], 100) << "Unexpected repetitions.";
+ EXPECT_EQ(element_count["BM_Match1/80"], 100) << "Unexpected repetitions.";
+ EXPECT_GE(interleaving_count.size(), 2) << "Interleaving was not randomized.";
+ ASSERT_EQ("DONE", queue->Get());
+}
+
+} // namespace
+} // namespace internal
+} // namespace benchmark
diff --git a/third-party/benchmark/test/benchmark_test.cc b/third-party/benchmark/test/benchmark_test.cc
new file mode 100644
index 000000000000..3cd4f5565fa1
--- /dev/null
+++ b/third-party/benchmark/test/benchmark_test.cc
@@ -0,0 +1,245 @@
+#include "benchmark/benchmark.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdint.h>
+
+#include <chrono>
+#include <cstdlib>
+#include <iostream>
+#include <limits>
+#include <list>
+#include <map>
+#include <mutex>
+#include <set>
+#include <sstream>
+#include <string>
+#include <thread>
+#include <utility>
+#include <vector>
+
+#if defined(__GNUC__)
+#define BENCHMARK_NOINLINE __attribute__((noinline))
+#else
+#define BENCHMARK_NOINLINE
+#endif
+
+namespace {
+
+int BENCHMARK_NOINLINE Factorial(uint32_t n) {
+ return (n == 1) ? 1 : n * Factorial(n - 1);
+}
+
+double CalculatePi(int depth) {
+ double pi = 0.0;
+ for (int i = 0; i < depth; ++i) {
+ double numerator = static_cast<double>(((i % 2) * 2) - 1);
+ double denominator = static_cast<double>((2 * i) - 1);
+ pi += numerator / denominator;
+ }
+ return (pi - 1.0) * 4;
+}
+
+std::set<int64_t> ConstructRandomSet(int64_t size) {
+ std::set<int64_t> s;
+ for (int i = 0; i < size; ++i) s.insert(s.end(), i);
+ return s;
+}
+
+std::mutex test_vector_mu;
+std::vector<int>* test_vector = nullptr;
+
+} // end namespace
+
+static void BM_Factorial(benchmark::State& state) {
+ int fac_42 = 0;
+ for (auto _ : state) fac_42 = Factorial(8);
+ // Prevent compiler optimizations
+ std::stringstream ss;
+ ss << fac_42;
+ state.SetLabel(ss.str());
+}
+BENCHMARK(BM_Factorial);
+BENCHMARK(BM_Factorial)->UseRealTime();
+
+static void BM_CalculatePiRange(benchmark::State& state) {
+ double pi = 0.0;
+ for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0)));
+ std::stringstream ss;
+ ss << pi;
+ state.SetLabel(ss.str());
+}
+BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
+
+static void BM_CalculatePi(benchmark::State& state) {
+ static const int depth = 1024;
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
+ }
+}
+BENCHMARK(BM_CalculatePi)->Threads(8);
+BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
+BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
+
+static void BM_SetInsert(benchmark::State& state) {
+ std::set<int64_t> data;
+ for (auto _ : state) {
+ state.PauseTiming();
+ data = ConstructRandomSet(state.range(0));
+ state.ResumeTiming();
+ for (int j = 0; j < state.range(1); ++j) data.insert(rand());
+ }
+ state.SetItemsProcessed(state.iterations() * state.range(1));
+ state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
+}
+
+// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower,
+// non-timed part of each iteration will make the benchmark take forever.
+BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
+
+template <typename Container,
+ typename ValueType = typename Container::value_type>
+static void BM_Sequential(benchmark::State& state) {
+ ValueType v = 42;
+ for (auto _ : state) {
+ Container c;
+ for (int64_t i = state.range(0); --i;) c.push_back(v);
+ }
+ const int64_t items_processed = state.iterations() * state.range(0);
+ state.SetItemsProcessed(items_processed);
+ state.SetBytesProcessed(items_processed * sizeof(v));
+}
+BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
+ ->Range(1 << 0, 1 << 10);
+BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
+// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
+#ifdef BENCHMARK_HAS_CXX11
+BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
+#endif
+
+static void BM_StringCompare(benchmark::State& state) {
+ size_t len = static_cast<size_t>(state.range(0));
+ std::string s1(len, '-');
+ std::string s2(len, '-');
+ for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
+}
+BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
+
+static void BM_SetupTeardown(benchmark::State& state) {
+ if (state.thread_index == 0) {
+ // No need to lock test_vector_mu here as this is running single-threaded.
+ test_vector = new std::vector<int>();
+ }
+ int i = 0;
+ for (auto _ : state) {
+ std::lock_guard<std::mutex> l(test_vector_mu);
+ if (i % 2 == 0)
+ test_vector->push_back(i);
+ else
+ test_vector->pop_back();
+ ++i;
+ }
+ if (state.thread_index == 0) {
+ delete test_vector;
+ }
+}
+BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
+
+static void BM_LongTest(benchmark::State& state) {
+ double tracker = 0.0;
+ for (auto _ : state) {
+ for (int i = 0; i < state.range(0); ++i)
+ benchmark::DoNotOptimize(tracker += i);
+ }
+}
+BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
+
+static void BM_ParallelMemset(benchmark::State& state) {
+ int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
+ int thread_size = static_cast<int>(size) / state.threads;
+ int from = thread_size * state.thread_index;
+ int to = from + thread_size;
+
+ if (state.thread_index == 0) {
+ test_vector = new std::vector<int>(static_cast<size_t>(size));
+ }
+
+ for (auto _ : state) {
+ for (int i = from; i < to; i++) {
+ // No need to lock test_vector_mu as ranges
+ // do not overlap between threads.
+ benchmark::DoNotOptimize(test_vector->at(i) = 1);
+ }
+ }
+
+ if (state.thread_index == 0) {
+ delete test_vector;
+ }
+}
+BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
+
+static void BM_ManualTiming(benchmark::State& state) {
+ int64_t slept_for = 0;
+ int64_t microseconds = state.range(0);
+ std::chrono::duration<double, std::micro> sleep_duration{
+ static_cast<double>(microseconds)};
+
+ for (auto _ : state) {
+ auto start = std::chrono::high_resolution_clock::now();
+ // Simulate some useful workload with a sleep
+ std::this_thread::sleep_for(
+ std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
+ auto end = std::chrono::high_resolution_clock::now();
+
+ auto elapsed =
+ std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
+
+ state.SetIterationTime(elapsed.count());
+ slept_for += microseconds;
+ }
+ state.SetItemsProcessed(slept_for);
+}
+BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
+BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
+
+#ifdef BENCHMARK_HAS_CXX11
+
+template <class... Args>
+void BM_with_args(benchmark::State& state, Args&&...) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
+BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"),
+ std::pair<int, double>(42, 3.8));
+
+void BM_non_template_args(benchmark::State& state, int, double) {
+ while(state.KeepRunning()) {}
+}
+BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
+
+#endif // BENCHMARK_HAS_CXX11
+
+static void BM_DenseThreadRanges(benchmark::State& st) {
+ switch (st.range(0)) {
+ case 1:
+ assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
+ break;
+ case 2:
+ assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
+ break;
+ case 3:
+ assert(st.threads == 5 || st.threads == 8 || st.threads == 11 ||
+ st.threads == 14);
+ break;
+ default:
+ assert(false && "Invalid test case number");
+ }
+ while (st.KeepRunning()) {
+ }
+}
+BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
+BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2);
+BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3);
+
+BENCHMARK_MAIN();
diff --git a/third-party/benchmark/test/clobber_memory_assembly_test.cc b/third-party/benchmark/test/clobber_memory_assembly_test.cc
new file mode 100644
index 000000000000..f41911a39ce7
--- /dev/null
+++ b/third-party/benchmark/test/clobber_memory_assembly_test.cc
@@ -0,0 +1,64 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+extern "C" {
+
+extern int ExternInt;
+extern int ExternInt2;
+extern int ExternInt3;
+
+}
+
+// CHECK-LABEL: test_basic:
+extern "C" void test_basic() {
+ int x;
+ benchmark::DoNotOptimize(&x);
+ x = 101;
+ benchmark::ClobberMemory();
+ // CHECK: leaq [[DEST:[^,]+]], %rax
+ // CHECK: movl $101, [[DEST]]
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_redundant_store:
+extern "C" void test_redundant_store() {
+ ExternInt = 3;
+ benchmark::ClobberMemory();
+ ExternInt = 51;
+ // CHECK-DAG: ExternInt
+ // CHECK-DAG: movl $3
+ // CHECK: movl $51
+}
+
+// CHECK-LABEL: test_redundant_read:
+extern "C" void test_redundant_read() {
+ int x;
+ benchmark::DoNotOptimize(&x);
+ x = ExternInt;
+ benchmark::ClobberMemory();
+ x = ExternInt2;
+ // CHECK: leaq [[DEST:[^,]+]], %rax
+ // CHECK: ExternInt(%rip)
+ // CHECK: movl %eax, [[DEST]]
+ // CHECK-NOT: ExternInt2
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_redundant_read2:
+extern "C" void test_redundant_read2() {
+ int x;
+ benchmark::DoNotOptimize(&x);
+ x = ExternInt;
+ benchmark::ClobberMemory();
+ x = ExternInt2;
+ benchmark::ClobberMemory();
+ // CHECK: leaq [[DEST:[^,]+]], %rax
+ // CHECK: ExternInt(%rip)
+ // CHECK: movl %eax, [[DEST]]
+ // CHECK: ExternInt2(%rip)
+ // CHECK: movl %eax, [[DEST]]
+ // CHECK: ret
+}
diff --git a/third-party/benchmark/test/commandlineflags_gtest.cc b/third-party/benchmark/test/commandlineflags_gtest.cc
new file mode 100644
index 000000000000..8412008ffe35
--- /dev/null
+++ b/third-party/benchmark/test/commandlineflags_gtest.cc
@@ -0,0 +1,228 @@
+#include <cstdlib>
+
+#include "../src/commandlineflags.h"
+#include "../src/internal_macros.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace benchmark {
+namespace {
+
+#if defined(BENCHMARK_OS_WINDOWS)
+int setenv(const char* name, const char* value, int overwrite) {
+ if (!overwrite) {
+ // NOTE: getenv_s is far superior but not available under mingw.
+ char* env_value = getenv(name);
+ if (env_value == nullptr) {
+ return -1;
+ }
+ }
+ return _putenv_s(name, value);
+}
+
+int unsetenv(const char* name) { return _putenv_s(name, ""); }
+
+#endif // BENCHMARK_OS_WINDOWS
+
+TEST(BoolFromEnv, Default) {
+ ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
+ EXPECT_EQ(BoolFromEnv("not_in_env", true), true);
+}
+
+TEST(BoolFromEnv, False) {
+ ASSERT_EQ(setenv("IN_ENV", "0", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "N", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "n", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "NO", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "No", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "no", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "F", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "f", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "FALSE", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "False", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "false", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "OFF", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "Off", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "off", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", true), false);
+ unsetenv("IN_ENV");
+}
+
+TEST(BoolFromEnv, True) {
+ ASSERT_EQ(setenv("IN_ENV", "1", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "Y", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "y", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "YES", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "Yes", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "yes", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "T", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "t", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "TRUE", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "True", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "true", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "ON", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "On", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+ ASSERT_EQ(setenv("IN_ENV", "on", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+
+#ifndef BENCHMARK_OS_WINDOWS
+ ASSERT_EQ(setenv("IN_ENV", "", 1), 0);
+ EXPECT_EQ(BoolFromEnv("in_env", false), true);
+ unsetenv("IN_ENV");
+#endif
+}
+
+TEST(Int32FromEnv, NotInEnv) {
+ ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
+ EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42);
+}
+
+TEST(Int32FromEnv, InvalidInteger) {
+ ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
+ EXPECT_EQ(Int32FromEnv("in_env", 42), 42);
+ unsetenv("IN_ENV");
+}
+
+TEST(Int32FromEnv, ValidInteger) {
+ ASSERT_EQ(setenv("IN_ENV", "42", 1), 0);
+ EXPECT_EQ(Int32FromEnv("in_env", 64), 42);
+ unsetenv("IN_ENV");
+}
+
+TEST(DoubleFromEnv, NotInEnv) {
+ ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
+ EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51);
+}
+
+TEST(DoubleFromEnv, InvalidReal) {
+ ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
+ EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51);
+ unsetenv("IN_ENV");
+}
+
+TEST(DoubleFromEnv, ValidReal) {
+ ASSERT_EQ(setenv("IN_ENV", "0.51", 1), 0);
+ EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51);
+ unsetenv("IN_ENV");
+}
+
+TEST(StringFromEnv, Default) {
+ ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
+ EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo");
+}
+
+TEST(StringFromEnv, Valid) {
+ ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
+ EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo");
+ unsetenv("IN_ENV");
+}
+
+TEST(KvPairsFromEnv, Default) {
+ ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
+ EXPECT_THAT(KvPairsFromEnv("not_in_env", {{"foo", "bar"}}),
+ testing::ElementsAre(testing::Pair("foo", "bar")));
+}
+
+TEST(KvPairsFromEnv, MalformedReturnsDefault) {
+ ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
+ EXPECT_THAT(KvPairsFromEnv("in_env", {{"foo", "bar"}}),
+ testing::ElementsAre(testing::Pair("foo", "bar")));
+ unsetenv("IN_ENV");
+}
+
+TEST(KvPairsFromEnv, Single) {
+ ASSERT_EQ(setenv("IN_ENV", "foo=bar", 1), 0);
+ EXPECT_THAT(KvPairsFromEnv("in_env", {}),
+ testing::ElementsAre(testing::Pair("foo", "bar")));
+ unsetenv("IN_ENV");
+}
+
+TEST(KvPairsFromEnv, Multiple) {
+ ASSERT_EQ(setenv("IN_ENV", "foo=bar,baz=qux", 1), 0);
+ EXPECT_THAT(KvPairsFromEnv("in_env", {}),
+ testing::UnorderedElementsAre(testing::Pair("foo", "bar"),
+ testing::Pair("baz", "qux")));
+ unsetenv("IN_ENV");
+}
+
+} // namespace
+} // namespace benchmark
diff --git a/third-party/benchmark/test/complexity_test.cc b/third-party/benchmark/test/complexity_test.cc
new file mode 100644
index 000000000000..0de73c5722b5
--- /dev/null
+++ b/third-party/benchmark/test/complexity_test.cc
@@ -0,0 +1,222 @@
+#undef NDEBUG
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstdlib>
+#include <vector>
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+namespace {
+
+#define ADD_COMPLEXITY_CASES(...) \
+ int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
+
+int AddComplexityTest(std::string test_name, std::string big_o_test_name,
+ std::string rms_test_name, std::string big_o,
+ int family_index) {
+ SetSubstitutions({{"%name", test_name},
+ {"%bigo_name", big_o_test_name},
+ {"%rms_name", rms_test_name},
+ {"%bigo_str", "[ ]* %float " + big_o},
+ {"%bigo", big_o},
+ {"%rms", "[ ]*[0-9]+ %"}});
+ AddCases(
+ TC_ConsoleOut,
+ {{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
+       {"^%bigo_name", MR_Not},  // Assert that we didn't only match a name.
+ {"^%rms_name %rms %rms[ ]*$", MR_Next}});
+ AddCases(
+ TC_JSONOut,
+ {{"\"name\": \"%bigo_name\",$"},
+ {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"%name\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": %int,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"BigO\",$", MR_Next},
+ {"\"cpu_coefficient\": %float,$", MR_Next},
+ {"\"real_coefficient\": %float,$", MR_Next},
+ {"\"big_o\": \"%bigo\",$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next},
+ {"\"name\": \"%rms_name\",$"},
+ {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"%name\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": %int,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"RMS\",$", MR_Next},
+ {"\"rms\": %float$", MR_Next},
+ {"}", MR_Next}});
+ AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
+ {"^\"%bigo_name\"", MR_Not},
+ {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
+ return 0;
+}
+
+} // end namespace
+
+// ========================================================================= //
+// --------------------------- Testing BigO O(1) --------------------------- //
+// ========================================================================= //
+
+void BM_Complexity_O1(benchmark::State& state) {
+ for (auto _ : state) {
+ for (int i = 0; i < 1024; ++i) {
+ benchmark::DoNotOptimize(&i);
+ }
+ }
+ state.SetComplexityN(state.range(0));
+}
+BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
+BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
+BENCHMARK(BM_Complexity_O1)
+ ->Range(1, 1 << 18)
+ ->Complexity([](benchmark::IterationCount) { return 1.0; });
+
+const char *one_test_name = "BM_Complexity_O1";
+const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
+const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
+const char *enum_big_o_1 = "\\([0-9]+\\)";
+// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto
+// deduced.
+// See https://github.com/google/benchmark/issues/272
+const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
+const char *lambda_big_o_1 = "f\\(N\\)";
+
+// Add enum tests
+ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
+ enum_big_o_1, /*family_index=*/0);
+
+// Add auto enum tests
+ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
+ auto_big_o_1, /*family_index=*/1);
+
+// Add lambda tests
+ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
+ lambda_big_o_1, /*family_index=*/2);
+
+// ========================================================================= //
+// --------------------------- Testing BigO O(N) --------------------------- //
+// ========================================================================= //
+
+std::vector<int> ConstructRandomVector(int64_t size) {
+ std::vector<int> v;
+ v.reserve(static_cast<int>(size));
+ for (int i = 0; i < size; ++i) {
+ v.push_back(static_cast<int>(std::rand() % size));
+ }
+ return v;
+}
+
+void BM_Complexity_O_N(benchmark::State& state) {
+ auto v = ConstructRandomVector(state.range(0));
+ // Test worst case scenario (item not in vector)
+ const int64_t item_not_in_vector = state.range(0) * 2;
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
+ }
+ state.SetComplexityN(state.range(0));
+}
+BENCHMARK(BM_Complexity_O_N)
+ ->RangeMultiplier(2)
+ ->Range(1 << 10, 1 << 16)
+ ->Complexity(benchmark::oN);
+BENCHMARK(BM_Complexity_O_N)
+ ->RangeMultiplier(2)
+ ->Range(1 << 10, 1 << 16)
+ ->Complexity([](benchmark::IterationCount n) -> double {
+ return static_cast<double>(n);
+ });
+BENCHMARK(BM_Complexity_O_N)
+ ->RangeMultiplier(2)
+ ->Range(1 << 10, 1 << 16)
+ ->Complexity();
+
+const char *n_test_name = "BM_Complexity_O_N";
+const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
+const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
+const char *enum_auto_big_o_n = "N";
+const char *lambda_big_o_n = "f\\(N\\)";
+
+// Add enum tests
+ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
+ enum_auto_big_o_n, /*family_index=*/3);
+
+// Add lambda tests
+ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
+ lambda_big_o_n, /*family_index=*/4);
+
+// ========================================================================= //
+// ------------------------- Testing BigO O(N*lgN) ------------------------- //
+// ========================================================================= //
+
+static void BM_Complexity_O_N_log_N(benchmark::State& state) {
+ auto v = ConstructRandomVector(state.range(0));
+ for (auto _ : state) {
+ std::sort(v.begin(), v.end());
+ }
+ state.SetComplexityN(state.range(0));
+}
+static const double kLog2E = 1.44269504088896340736;
+BENCHMARK(BM_Complexity_O_N_log_N)
+ ->RangeMultiplier(2)
+ ->Range(1 << 10, 1 << 16)
+ ->Complexity(benchmark::oNLogN);
+BENCHMARK(BM_Complexity_O_N_log_N)
+ ->RangeMultiplier(2)
+ ->Range(1 << 10, 1 << 16)
+ ->Complexity([](benchmark::IterationCount n) {
+ return kLog2E * n * log(static_cast<double>(n));
+ });
+BENCHMARK(BM_Complexity_O_N_log_N)
+ ->RangeMultiplier(2)
+ ->Range(1 << 10, 1 << 16)
+ ->Complexity();
+
+const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
+const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
+const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
+const char *enum_auto_big_o_n_lg_n = "NlgN";
+const char *lambda_big_o_n_lg_n = "f\\(N\\)";
+
+// Add enum tests
+ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
+ rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
+ /*family_index=*/6);
+
+// Add lambda tests
+ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
+ rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
+ /*family_index=*/7);
+
+// ========================================================================= //
+// -------- Testing formatting of Complexity with captured args ------------ //
+// ========================================================================= //
+
+void BM_ComplexityCaptureArgs(benchmark::State& state, int n) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ state.SetComplexityN(n);
+}
+
+BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
+ ->Complexity(benchmark::oN)
+ ->Ranges({{1, 2}, {3, 4}});
+
+const std::string complexity_capture_name =
+ "BM_ComplexityCaptureArgs/capture_test";
+
+ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
+ complexity_capture_name + "_RMS", "N", /*family_index=*/9);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char *argv[]) { RunOutputTests(argc, argv); }
diff --git a/third-party/benchmark/test/cxx03_test.cc b/third-party/benchmark/test/cxx03_test.cc
new file mode 100644
index 000000000000..c4c9a52273e3
--- /dev/null
+++ b/third-party/benchmark/test/cxx03_test.cc
@@ -0,0 +1,63 @@
+#undef NDEBUG
+#include <cassert>
+#include <cstddef>
+
+#include "benchmark/benchmark.h"
+
+#if __cplusplus >= 201103L
+#error C++11 or greater detected. Should be C++03.
+#endif
+
+#ifdef BENCHMARK_HAS_CXX11
+#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
+#endif
+
+void BM_empty(benchmark::State& state) {
+ while (state.KeepRunning()) {
+ volatile benchmark::IterationCount x = state.iterations();
+ ((void)x);
+ }
+}
+BENCHMARK(BM_empty);
+
+// The new C++11 interface for args/ranges requires initializer list support.
+// Therefore we provide the old interface to support C++03.
+void BM_old_arg_range_interface(benchmark::State& state) {
+ assert((state.range(0) == 1 && state.range(1) == 2) ||
+ (state.range(0) == 5 && state.range(1) == 6));
+ while (state.KeepRunning()) {
+ }
+}
+BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6);
+
+template <class T, class U>
+void BM_template2(benchmark::State& state) {
+ BM_empty(state);
+}
+BENCHMARK_TEMPLATE2(BM_template2, int, long);
+
+template <class T>
+void BM_template1(benchmark::State& state) {
+ BM_empty(state);
+}
+BENCHMARK_TEMPLATE(BM_template1, long);
+BENCHMARK_TEMPLATE1(BM_template1, int);
+
+template <class T>
+struct BM_Fixture : public ::benchmark::Fixture {
+};
+
+BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) {
+ BM_empty(state);
+}
+BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) {
+ BM_empty(state);
+}
+
+void BM_counters(benchmark::State& state) {
+ BM_empty(state);
+ state.counters["Foo"] = 2;
+}
+BENCHMARK(BM_counters);
+
+BENCHMARK_MAIN();
diff --git a/third-party/benchmark/test/diagnostics_test.cc b/third-party/benchmark/test/diagnostics_test.cc
new file mode 100644
index 000000000000..dd64a3365531
--- /dev/null
+++ b/third-party/benchmark/test/diagnostics_test.cc
@@ -0,0 +1,80 @@
+// Testing:
+// State::PauseTiming()
+// State::ResumeTiming()
+// Test that the CHECKs within these functions diagnose when they are called
+// outside of the KeepRunning() loop.
+//
+// NOTE: Users should NOT include or use src/check.h. This is only done in
+// order to test library internals.
+
+#include <cstdlib>
+#include <stdexcept>
+
+#include "../src/check.h"
+#include "benchmark/benchmark.h"
+
+#if defined(__GNUC__) && !defined(__EXCEPTIONS)
+#define TEST_HAS_NO_EXCEPTIONS
+#endif
+
+void TestHandler() {
+#ifndef TEST_HAS_NO_EXCEPTIONS
+ throw std::logic_error("");
+#else
+ std::abort();
+#endif
+}
+
+void try_invalid_pause_resume(benchmark::State& state) {
+#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
+ try {
+ state.PauseTiming();
+ std::abort();
+ } catch (std::logic_error const&) {
+ }
+ try {
+ state.ResumeTiming();
+ std::abort();
+ } catch (std::logic_error const&) {
+ }
+#else
+ (void)state; // avoid unused warning
+#endif
+}
+
+void BM_diagnostic_test(benchmark::State& state) {
+ static bool called_once = false;
+
+ if (called_once == false) try_invalid_pause_resume(state);
+
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(state.iterations());
+ }
+
+ if (called_once == false) try_invalid_pause_resume(state);
+
+ called_once = true;
+}
+BENCHMARK(BM_diagnostic_test);
+
+void BM_diagnostic_test_keep_running(benchmark::State& state) {
+ static bool called_once = false;
+
+ if (called_once == false) try_invalid_pause_resume(state);
+
+  while (state.KeepRunning()) {
+ benchmark::DoNotOptimize(state.iterations());
+ }
+
+ if (called_once == false) try_invalid_pause_resume(state);
+
+ called_once = true;
+}
+BENCHMARK(BM_diagnostic_test_keep_running);
+
+int main(int argc, char* argv[]) {
+ benchmark::internal::GetAbortHandler() = &TestHandler;
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+}
diff --git a/third-party/benchmark/test/display_aggregates_only_test.cc b/third-party/benchmark/test/display_aggregates_only_test.cc
new file mode 100644
index 000000000000..3c36d3f03c11
--- /dev/null
+++ b/third-party/benchmark/test/display_aggregates_only_test.cc
@@ -0,0 +1,43 @@
+
+#undef NDEBUG
+#include <cstdio>
+#include <string>
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// OK, this test is admittedly ugly: we want to check what happens with the
+// file reporter in the presence of DisplayAggregatesOnly().
+// We do not care about console output; the normal tests already check that.
+
+void BM_SummaryRepeat(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly();
+
+int main(int argc, char* argv[]) {
+ const std::string output = GetFileReporterOutput(argc, argv);
+
+ if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
+ 1 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
+ 1) {
+ std::cout << "Precondition mismatch. Expected to only find 6 "
+ "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
+ "\"name\": \"BM_SummaryRepeat/repeats:3\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
+ "output:\n";
+ std::cout << output;
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/third-party/benchmark/test/donotoptimize_assembly_test.cc b/third-party/benchmark/test/donotoptimize_assembly_test.cc
new file mode 100644
index 000000000000..d4b0bab70e77
--- /dev/null
+++ b/third-party/benchmark/test/donotoptimize_assembly_test.cc
@@ -0,0 +1,163 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+extern "C" {
+
+extern int ExternInt;
+extern int ExternInt2;
+extern int ExternInt3;
+
+inline int Add42(int x) { return x + 42; }
+
+struct NotTriviallyCopyable {
+ NotTriviallyCopyable();
+ explicit NotTriviallyCopyable(int x) : value(x) {}
+ NotTriviallyCopyable(NotTriviallyCopyable const&);
+ int value;
+};
+
+struct Large {
+ int value;
+ int data[2];
+};
+
+}
+// CHECK-LABEL: test_with_rvalue:
+extern "C" void test_with_rvalue() {
+ benchmark::DoNotOptimize(Add42(0));
+ // CHECK: movl $42, %eax
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_large_rvalue:
+extern "C" void test_with_large_rvalue() {
+ benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
+ // CHECK: ExternInt(%rip)
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_non_trivial_rvalue:
+extern "C" void test_with_non_trivial_rvalue() {
+ benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
+ // CHECK: mov{{l|q}} ExternInt(%rip)
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_lvalue:
+extern "C" void test_with_lvalue() {
+ int x = 101;
+ benchmark::DoNotOptimize(x);
+ // CHECK-GNU: movl $101, %eax
+ // CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]])
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_large_lvalue:
+extern "C" void test_with_large_lvalue() {
+ Large L{ExternInt, {ExternInt, ExternInt}};
+ benchmark::DoNotOptimize(L);
+ // CHECK: ExternInt(%rip)
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_non_trivial_lvalue:
+extern "C" void test_with_non_trivial_lvalue() {
+ NotTriviallyCopyable NTC(ExternInt);
+ benchmark::DoNotOptimize(NTC);
+ // CHECK: ExternInt(%rip)
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_const_lvalue:
+extern "C" void test_with_const_lvalue() {
+ const int x = 123;
+ benchmark::DoNotOptimize(x);
+ // CHECK: movl $123, %eax
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_large_const_lvalue:
+extern "C" void test_with_large_const_lvalue() {
+ const Large L{ExternInt, {ExternInt, ExternInt}};
+ benchmark::DoNotOptimize(L);
+ // CHECK: ExternInt(%rip)
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+ // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_non_trivial_const_lvalue:
+extern "C" void test_with_non_trivial_const_lvalue() {
+ const NotTriviallyCopyable Obj(ExternInt);
+ benchmark::DoNotOptimize(Obj);
+ // CHECK: mov{{q|l}} ExternInt(%rip)
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_div_by_two:
+extern "C" int test_div_by_two(int input) {
+ int divisor = 2;
+ benchmark::DoNotOptimize(divisor);
+ return input / divisor;
+ // CHECK: movl $2, [[DEST:.*]]
+ // CHECK: idivl [[DEST]]
+ // CHECK: ret
+}
+
+// CHECK-LABEL: test_inc_integer:
+extern "C" int test_inc_integer() {
+ int x = 0;
+ for (int i=0; i < 5; ++i)
+ benchmark::DoNotOptimize(++x);
+ // CHECK: movl $1, [[DEST:.*]]
+ // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+ // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+ // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+ // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+ // CHECK-CLANG: movl [[DEST]], %eax
+ // CHECK: ret
+ return x;
+}
+
+// CHECK-LABEL: test_pointer_rvalue
+extern "C" void test_pointer_rvalue() {
+ // CHECK: movl $42, [[DEST:.*]]
+ // CHECK: leaq [[DEST]], %rax
+ // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+ // CHECK: ret
+ int x = 42;
+ benchmark::DoNotOptimize(&x);
+}
+
+// CHECK-LABEL: test_pointer_const_lvalue:
+extern "C" void test_pointer_const_lvalue() {
+ // CHECK: movl $42, [[DEST:.*]]
+ // CHECK: leaq [[DEST]], %rax
+ // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+ // CHECK: ret
+ int x = 42;
+ int * const xp = &x;
+ benchmark::DoNotOptimize(xp);
+}
+
+// CHECK-LABEL: test_pointer_lvalue:
+extern "C" void test_pointer_lvalue() {
+ // CHECK: movl $42, [[DEST:.*]]
+ // CHECK: leaq [[DEST]], %rax
+ // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]])
+ // CHECK: ret
+ int x = 42;
+ int *xp = &x;
+ benchmark::DoNotOptimize(xp);
+}
diff --git a/third-party/benchmark/test/donotoptimize_test.cc b/third-party/benchmark/test/donotoptimize_test.cc
new file mode 100644
index 000000000000..2ce92d1c72be
--- /dev/null
+++ b/third-party/benchmark/test/donotoptimize_test.cc
@@ -0,0 +1,52 @@
+#include "benchmark/benchmark.h"
+
+#include <cstdint>
+
+namespace {
+#if defined(__GNUC__)
+std::uint64_t double_up(const std::uint64_t x) __attribute__((const));
+#endif
+std::uint64_t double_up(const std::uint64_t x) { return x * 2; }
+}
+
+// Using DoNotOptimize on types like BitRef seems to cause a lot of problems
+// with the inline assembly on both GCC and Clang.
+struct BitRef {
+ int index;
+ unsigned char &byte;
+
+public:
+ static BitRef Make() {
+ static unsigned char arr[2] = {};
+ BitRef b(1, arr[0]);
+ return b;
+ }
+private:
+ BitRef(int i, unsigned char& b) : index(i), byte(b) {}
+};
+
+int main(int, char*[]) {
+ // this test verifies compilation of DoNotOptimize() for some types
+
+ char buffer8[8] = "";
+ benchmark::DoNotOptimize(buffer8);
+
+ char buffer20[20] = "";
+ benchmark::DoNotOptimize(buffer20);
+
+ char buffer1024[1024] = "";
+ benchmark::DoNotOptimize(buffer1024);
+ benchmark::DoNotOptimize(&buffer1024[0]);
+
+ int x = 123;
+ benchmark::DoNotOptimize(x);
+ benchmark::DoNotOptimize(&x);
+ benchmark::DoNotOptimize(x += 42);
+
+ benchmark::DoNotOptimize(double_up(x));
+
+  // These tests verify that DoNotOptimize() also compiles for awkward
+  // types such as BitRef.
+ benchmark::DoNotOptimize(BitRef::Make());
+ BitRef lval = BitRef::Make();
+ benchmark::DoNotOptimize(lval);
+}
diff --git a/third-party/benchmark/test/filter_test.cc b/third-party/benchmark/test/filter_test.cc
new file mode 100644
index 000000000000..1c198913b36a
--- /dev/null
+++ b/third-party/benchmark/test/filter_test.cc
@@ -0,0 +1,118 @@
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+#include <iostream>
+#include <limits>
+#include <sstream>
+#include <string>
+
+#include "benchmark/benchmark.h"
+
+namespace {
+
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+ virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
+ return ConsoleReporter::ReportContext(context);
+ };
+
+ virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+ ++count_;
+ max_family_index_ =
+ std::max<size_t>(max_family_index_, report[0].family_index);
+ ConsoleReporter::ReportRuns(report);
+ };
+
+ TestReporter() : count_(0), max_family_index_(0) {}
+
+ virtual ~TestReporter() {}
+
+ size_t GetCount() const { return count_; }
+
+ size_t GetMaxFamilyIndex() const { return max_family_index_; }
+
+ private:
+ mutable size_t count_;
+ mutable size_t max_family_index_;
+};
+
+} // end namespace
+
+static void NoPrefix(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(NoPrefix);
+
+static void BM_Foo(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_Foo);
+
+static void BM_Bar(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_Bar);
+
+static void BM_FooBar(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_FooBar);
+
+static void BM_FooBa(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_FooBa);
+
+int main(int argc, char **argv) {
+ bool list_only = false;
+ for (int i = 0; i < argc; ++i)
+ list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
+ std::string::npos;
+
+ benchmark::Initialize(&argc, argv);
+
+ TestReporter test_reporter;
+ const size_t returned_count =
+ benchmark::RunSpecifiedBenchmarks(&test_reporter);
+
+ if (argc == 2) {
+ // Make sure we ran all of the tests
+ std::stringstream ss(argv[1]);
+ size_t expected_return;
+ ss >> expected_return;
+
+ if (returned_count != expected_return) {
+ std::cerr << "ERROR: Expected " << expected_return
+ << " tests to match the filter but returned_count = "
+ << returned_count << std::endl;
+ return -1;
+ }
+
+ const size_t expected_reports = list_only ? 0 : expected_return;
+ const size_t reports_count = test_reporter.GetCount();
+ if (reports_count != expected_reports) {
+ std::cerr << "ERROR: Expected " << expected_reports
+ << " tests to be run but reported_count = " << reports_count
+ << std::endl;
+ return -1;
+ }
+
+ const size_t max_family_index = test_reporter.GetMaxFamilyIndex();
+ const size_t num_families = reports_count == 0 ? 0 : 1 + max_family_index;
+ if (num_families != expected_reports) {
+ std::cerr << "ERROR: Expected " << expected_reports
+ << " test families to be run but num_families = "
+ << num_families << std::endl;
+ return -1;
+ }
+ }
+
+ return 0;
+}
diff --git a/third-party/benchmark/test/fixture_test.cc b/third-party/benchmark/test/fixture_test.cc
new file mode 100644
index 000000000000..eba0a42d9cb0
--- /dev/null
+++ b/third-party/benchmark/test/fixture_test.cc
@@ -0,0 +1,51 @@
+
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <memory>
+
+#define FIXTURE_BECHMARK_NAME MyFixture
+
+class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture {
+ public:
+ void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
+ if (state.thread_index == 0) {
+ assert(data.get() == nullptr);
+ data.reset(new int(42));
+ }
+ }
+
+ void TearDown(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
+ if (state.thread_index == 0) {
+ assert(data.get() != nullptr);
+ data.reset();
+ }
+ }
+
+ ~FIXTURE_BECHMARK_NAME() { assert(data == nullptr); }
+
+ std::unique_ptr<int> data;
+};
+
+BENCHMARK_F(FIXTURE_BECHMARK_NAME, Foo)(benchmark::State &st) {
+ assert(data.get() != nullptr);
+ assert(*data == 42);
+ for (auto _ : st) {
+ }
+}
+
+BENCHMARK_DEFINE_F(FIXTURE_BECHMARK_NAME, Bar)(benchmark::State& st) {
+ if (st.thread_index == 0) {
+ assert(data.get() != nullptr);
+ assert(*data == 42);
+ }
+ for (auto _ : st) {
+ assert(data.get() != nullptr);
+ assert(*data == 42);
+ }
+ st.SetItemsProcessed(st.range(0));
+}
+BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, Bar)->Arg(42);
+BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, Bar)->Arg(42)->ThreadPerCpu();
+
+BENCHMARK_MAIN();
diff --git a/third-party/benchmark/test/internal_threading_test.cc b/third-party/benchmark/test/internal_threading_test.cc
new file mode 100644
index 000000000000..039d7c14a8c4
--- /dev/null
+++ b/third-party/benchmark/test/internal_threading_test.cc
@@ -0,0 +1,184 @@
+
+#undef NDEBUG
+
+#include <chrono>
+#include <thread>
+#include "../src/timers.h"
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+static const std::chrono::duration<double, std::milli> time_frame(50);
+static const double time_frame_in_sec(
+ std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>(
+ time_frame)
+ .count());
+
+void MyBusySpinwait() {
+ const auto start = benchmark::ChronoClockNow();
+
+ while (true) {
+ const auto now = benchmark::ChronoClockNow();
+ const auto elapsed = now - start;
+
+ if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >=
+ time_frame)
+ return;
+ }
+}
+
+// ========================================================================= //
+// --------------------------- TEST CASES BEGIN ---------------------------- //
+// ========================================================================= //
+
+// ========================================================================= //
+// BM_MainThread
+
+void BM_MainThread(benchmark::State& state) {
+ for (auto _ : state) {
+ MyBusySpinwait();
+ state.SetIterationTime(time_frame_in_sec);
+ }
+ state.counters["invtime"] =
+ benchmark::Counter{1, benchmark::Counter::kIsRate};
+}
+
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1);
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime();
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime();
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
+BENCHMARK(BM_MainThread)
+ ->Iterations(1)
+ ->Threads(1)
+ ->MeasureProcessCPUTime()
+ ->UseRealTime();
+BENCHMARK(BM_MainThread)
+ ->Iterations(1)
+ ->Threads(1)
+ ->MeasureProcessCPUTime()
+ ->UseManualTime();
+
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2);
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime();
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime();
+BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
+BENCHMARK(BM_MainThread)
+ ->Iterations(1)
+ ->Threads(2)
+ ->MeasureProcessCPUTime()
+ ->UseRealTime();
+BENCHMARK(BM_MainThread)
+ ->Iterations(1)
+ ->Threads(2)
+ ->MeasureProcessCPUTime()
+ ->UseManualTime();
+
+// ========================================================================= //
+// BM_WorkerThread
+
+void BM_WorkerThread(benchmark::State& state) {
+ for (auto _ : state) {
+ std::thread Worker(&MyBusySpinwait);
+ Worker.join();
+ state.SetIterationTime(time_frame_in_sec);
+ }
+ state.counters["invtime"] =
+ benchmark::Counter{1, benchmark::Counter::kIsRate};
+}
+
+BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1);
+BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
+BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
+BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
+BENCHMARK(BM_WorkerThread)
+ ->Iterations(1)
+ ->Threads(1)
+ ->MeasureProcessCPUTime()
+ ->UseRealTime();
+BENCHMARK(BM_WorkerThread)
+ ->Iterations(1)
+ ->Threads(1)
+ ->MeasureProcessCPUTime()
+ ->UseManualTime();
+
+BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2);
+BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
+BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
+BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
+BENCHMARK(BM_WorkerThread)
+ ->Iterations(1)
+ ->Threads(2)
+ ->MeasureProcessCPUTime()
+ ->UseRealTime();
+BENCHMARK(BM_WorkerThread)
+ ->Iterations(1)
+ ->Threads(2)
+ ->MeasureProcessCPUTime()
+ ->UseManualTime();
+
+// ========================================================================= //
+// BM_MainThreadAndWorkerThread
+
+void BM_MainThreadAndWorkerThread(benchmark::State& state) {
+ for (auto _ : state) {
+ std::thread Worker(&MyBusySpinwait);
+ MyBusySpinwait();
+ Worker.join();
+ state.SetIterationTime(time_frame_in_sec);
+ }
+ state.counters["invtime"] =
+ benchmark::Counter{1, benchmark::Counter::kIsRate};
+}
+
+BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1);
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(1)
+ ->UseRealTime();
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(1)
+ ->UseManualTime();
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(1)
+ ->MeasureProcessCPUTime();
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(1)
+ ->MeasureProcessCPUTime()
+ ->UseRealTime();
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(1)
+ ->MeasureProcessCPUTime()
+ ->UseManualTime();
+
+BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2);
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(2)
+ ->UseRealTime();
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(2)
+ ->UseManualTime();
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(2)
+ ->MeasureProcessCPUTime();
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(2)
+ ->MeasureProcessCPUTime()
+ ->UseRealTime();
+BENCHMARK(BM_MainThreadAndWorkerThread)
+ ->Iterations(1)
+ ->Threads(2)
+ ->MeasureProcessCPUTime()
+ ->UseManualTime();
+
+// ========================================================================= //
+// ---------------------------- TEST CASES END ----------------------------- //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
diff --git a/third-party/benchmark/test/link_main_test.cc b/third-party/benchmark/test/link_main_test.cc
new file mode 100644
index 000000000000..241ad5c3905e
--- /dev/null
+++ b/third-party/benchmark/test/link_main_test.cc
@@ -0,0 +1,8 @@
+#include "benchmark/benchmark.h"
+
+void BM_empty(benchmark::State& state) {
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(state.iterations());
+ }
+}
+BENCHMARK(BM_empty);
diff --git a/third-party/benchmark/test/map_test.cc b/third-party/benchmark/test/map_test.cc
new file mode 100644
index 000000000000..86391b36016f
--- /dev/null
+++ b/third-party/benchmark/test/map_test.cc
@@ -0,0 +1,57 @@
+#include "benchmark/benchmark.h"
+
+#include <cstdlib>
+#include <map>
+
+namespace {
+
+std::map<int, int> ConstructRandomMap(int size) {
+ std::map<int, int> m;
+ for (int i = 0; i < size; ++i) {
+ m.insert(std::make_pair(std::rand() % size, std::rand() % size));
+ }
+ return m;
+}
+
+} // namespace
+
+// Basic version.
+static void BM_MapLookup(benchmark::State& state) {
+ const int size = static_cast<int>(state.range(0));
+ std::map<int, int> m;
+ for (auto _ : state) {
+ state.PauseTiming();
+ m = ConstructRandomMap(size);
+ state.ResumeTiming();
+ for (int i = 0; i < size; ++i) {
+ benchmark::DoNotOptimize(m.find(std::rand() % size));
+ }
+ }
+ state.SetItemsProcessed(state.iterations() * size);
+}
+BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
+
+// Using fixtures.
+class MapFixture : public ::benchmark::Fixture {
+ public:
+ void SetUp(const ::benchmark::State& st) BENCHMARK_OVERRIDE {
+ m = ConstructRandomMap(static_cast<int>(st.range(0)));
+ }
+
+ void TearDown(const ::benchmark::State&) BENCHMARK_OVERRIDE { m.clear(); }
+
+ std::map<int, int> m;
+};
+
+BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
+ const int size = static_cast<int>(state.range(0));
+ for (auto _ : state) {
+ for (int i = 0; i < size; ++i) {
+ benchmark::DoNotOptimize(m.find(std::rand() % size));
+ }
+ }
+ state.SetItemsProcessed(state.iterations() * size);
+}
+BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
+
+BENCHMARK_MAIN();
diff --git a/third-party/benchmark/test/memory_manager_test.cc b/third-party/benchmark/test/memory_manager_test.cc
new file mode 100644
index 000000000000..f0c192fcbd00
--- /dev/null
+++ b/third-party/benchmark/test/memory_manager_test.cc
@@ -0,0 +1,46 @@
+#include <memory>
+
+#include "../src/check.h"
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+class TestMemoryManager : public benchmark::MemoryManager {
+ void Start() BENCHMARK_OVERRIDE {}
+ void Stop(Result* result) BENCHMARK_OVERRIDE {
+ result->num_allocs = 42;
+ result->max_bytes_used = 42000;
+ }
+};
+
+void BM_empty(benchmark::State& state) {
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(state.iterations());
+ }
+}
+BENCHMARK(BM_empty);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_empty\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"allocs_per_iter\": %float,$", MR_Next},
+ {"\"max_bytes_used\": 42000$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
+
+int main(int argc, char* argv[]) {
+ std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
+
+ benchmark::RegisterMemoryManager(mm.get());
+ RunOutputTests(argc, argv);
+ benchmark::RegisterMemoryManager(nullptr);
+}
diff --git a/third-party/benchmark/test/multiple_ranges_test.cc b/third-party/benchmark/test/multiple_ranges_test.cc
new file mode 100644
index 000000000000..6b61f3af47bb
--- /dev/null
+++ b/third-party/benchmark/test/multiple_ranges_test.cc
@@ -0,0 +1,96 @@
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <iostream>
+#include <set>
+#include <vector>
+
+class MultipleRangesFixture : public ::benchmark::Fixture {
+ public:
+ MultipleRangesFixture()
+ : expectedValues({{1, 3, 5},
+ {1, 3, 8},
+ {1, 3, 15},
+ {2, 3, 5},
+ {2, 3, 8},
+ {2, 3, 15},
+ {1, 4, 5},
+ {1, 4, 8},
+ {1, 4, 15},
+ {2, 4, 5},
+ {2, 4, 8},
+ {2, 4, 15},
+ {1, 7, 5},
+ {1, 7, 8},
+ {1, 7, 15},
+ {2, 7, 5},
+ {2, 7, 8},
+ {2, 7, 15},
+ {7, 6, 3}}) {}
+
+ void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
+ std::vector<int64_t> ranges = {state.range(0), state.range(1),
+ state.range(2)};
+
+ assert(expectedValues.find(ranges) != expectedValues.end());
+
+ actualValues.insert(ranges);
+ }
+
+ // NOTE: This is not TearDown as we want to check after _all_ runs are
+ // complete.
+ virtual ~MultipleRangesFixture() {
+ if (actualValues != expectedValues) {
+ std::cout << "EXPECTED\n";
+ for (auto v : expectedValues) {
+ std::cout << "{";
+ for (int64_t iv : v) {
+ std::cout << iv << ", ";
+ }
+ std::cout << "}\n";
+ }
+ std::cout << "ACTUAL\n";
+ for (auto v : actualValues) {
+ std::cout << "{";
+ for (int64_t iv : v) {
+ std::cout << iv << ", ";
+ }
+ std::cout << "}\n";
+ }
+ }
+ }
+
+ std::set<std::vector<int64_t>> expectedValues;
+ std::set<std::vector<int64_t>> actualValues;
+};
+
+BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
+ for (auto _ : state) {
+ int64_t product = state.range(0) * state.range(1) * state.range(2);
+ for (int64_t x = 0; x < product; x++) {
+ benchmark::DoNotOptimize(x);
+ }
+ }
+}
+
+BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
+ ->RangeMultiplier(2)
+ ->Ranges({{1, 2}, {3, 7}, {5, 15}})
+ ->Args({7, 6, 3});
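+
+// Note: with RangeMultiplier(2), Ranges({{1, 2}, {3, 7}, {5, 15}}) expands to
+// the cross product {1, 2} x {3, 4, 7} x {5, 8, 15} (18 argument triples),
+// and Args({7, 6, 3}) adds the 19th; these are exactly the triples listed in
+// expectedValues above.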
+
+void BM_CheckDefaultArgument(benchmark::State& state) {
+  // Test that 'range()' without an argument is the same as 'range(0)'.
+ assert(state.range() == state.range(0));
+ assert(state.range() != state.range(1));
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
+
+static void BM_MultipleRanges(benchmark::State& st) {
+ for (auto _ : st) {
+ }
+}
+BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
+
+BENCHMARK_MAIN();
diff --git a/third-party/benchmark/test/options_test.cc b/third-party/benchmark/test/options_test.cc
new file mode 100644
index 000000000000..9f9a78667c9e
--- /dev/null
+++ b/third-party/benchmark/test/options_test.cc
@@ -0,0 +1,76 @@
+#include "benchmark/benchmark.h"
+#include <chrono>
+#include <limits>
+#include <thread>
+
+#if defined(NDEBUG)
+#undef NDEBUG
+#endif
+#include <cassert>
+
+void BM_basic(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+
+void BM_basic_slow(benchmark::State& state) {
+ std::chrono::milliseconds sleep_duration(state.range(0));
+ for (auto _ : state) {
+ std::this_thread::sleep_for(
+ std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
+ }
+}
+
+BENCHMARK(BM_basic);
+BENCHMARK(BM_basic)->Arg(42);
+BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond);
+BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond);
+BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
+BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kSecond);
+BENCHMARK(BM_basic)->Range(1, 8);
+BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8);
+BENCHMARK(BM_basic)->DenseRange(10, 15);
+BENCHMARK(BM_basic)->Args({42, 42});
+BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}});
+BENCHMARK(BM_basic)->MinTime(0.7);
+BENCHMARK(BM_basic)->UseRealTime();
+BENCHMARK(BM_basic)->ThreadRange(2, 4);
+BENCHMARK(BM_basic)->ThreadPerCpu();
+BENCHMARK(BM_basic)->Repetitions(3);
+BENCHMARK(BM_basic)
+ ->RangeMultiplier(std::numeric_limits<int>::max())
+ ->Range(std::numeric_limits<int64_t>::min(),
+ std::numeric_limits<int64_t>::max());
+
+// Negative ranges
+BENCHMARK(BM_basic)->Range(-64, -1);
+BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8);
+BENCHMARK(BM_basic)->DenseRange(-2, 2, 1);
+BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}});
+
+void CustomArgs(benchmark::internal::Benchmark* b) {
+ for (int i = 0; i < 10; ++i) {
+ b->Arg(i);
+ }
+}
+
+BENCHMARK(BM_basic)->Apply(CustomArgs);
+
+void BM_explicit_iteration_count(benchmark::State& state) {
+ // Test that benchmarks specified with an explicit iteration count are
+ // only run once.
+ static bool invoked_before = false;
+ assert(!invoked_before);
+ invoked_before = true;
+
+ // Test that the requested iteration count is respected.
+ assert(state.max_iterations == 42);
+ size_t actual_iterations = 0;
+ for (auto _ : state)
+ ++actual_iterations;
+ assert(state.iterations() == state.max_iterations);
+ assert(state.iterations() == 42);
+}
+BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
+
+BENCHMARK_MAIN();
diff --git a/third-party/benchmark/test/output_test.h b/third-party/benchmark/test/output_test.h
new file mode 100644
index 000000000000..15368f9b6830
--- /dev/null
+++ b/third-party/benchmark/test/output_test.h
@@ -0,0 +1,213 @@
+#ifndef TEST_OUTPUT_TEST_H
+#define TEST_OUTPUT_TEST_H
+
+#undef NDEBUG
+#include <functional>
+#include <initializer_list>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "../src/re.h"
+#include "benchmark/benchmark.h"
+
+#define CONCAT2(x, y) x##y
+#define CONCAT(x, y) CONCAT2(x, y)
+
+#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
+
+#define SET_SUBSTITUTIONS(...) \
+ int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
+
+enum MatchRules {
+ MR_Default, // Skip non-matching lines until a match is found.
+ MR_Next, // Match must occur on the next line.
+ MR_Not // No line between the current position and the next match matches
+ // the regex
+};
+
+struct TestCase {
+ TestCase(std::string re, int rule = MR_Default);
+
+ std::string regex_str;
+ int match_rule;
+ std::string substituted_regex;
+ std::shared_ptr<benchmark::Regex> regex;
+};
+
+enum TestCaseID {
+ TC_ConsoleOut,
+ TC_ConsoleErr,
+ TC_JSONOut,
+ TC_JSONErr,
+ TC_CSVOut,
+ TC_CSVErr,
+
+ TC_NumID // PRIVATE
+};
+
+// Add a list of test cases to be run against the output specified by
+// 'ID'
+int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
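+
+// Example (illustrative; see memory_manager_test.cc in this directory for a
+// real use): expect a JSON "name" entry, and require the "family_index"
+// entry to appear on the very next line:
+//
+//   ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
+//                          {"\"family_index\": 0,$", MR_Next}});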
+
+// Add or set a list of substitutions to be performed on constructed regexes.
+// See 'output_test_helper.cc' for a list of default substitutions.
+int SetSubstitutions(
+ std::initializer_list<std::pair<std::string, std::string>> il);
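+
+// Example (illustrative; "%MyNum" is a made-up key): make "%MyNum" expand to
+// the default "%float" pattern in any test case added afterwards:
+//
+//   SET_SUBSTITUTIONS({{"%MyNum", "%float"}});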
+
+// Run all output tests.
+void RunOutputTests(int argc, char* argv[]);
+
+// Count the number of 'pat' substrings in the 'haystack' string.
+int SubstrCnt(const std::string& haystack, const std::string& pat);
+
+// Run registered benchmarks with file reporter enabled, and return the content
+// outputted by the file reporter.
+std::string GetFileReporterOutput(int argc, char* argv[]);
+
+// ========================================================================= //
+// ------------------------- Results checking ------------------------------ //
+// ========================================================================= //
+
+// Call this macro to register a benchmark for checking its results. This
+// should be all that's needed. It subscribes a function to check the (CSV)
+// results of a benchmark. This is done only after verifying that the output
+// strings are really as expected.
+// bm_name_pattern: a name or a regex pattern which will be matched against
+// all the benchmark names. Matching benchmarks
+// will be the subject of a call to checker_function
+// checker_function: should be of type ResultsCheckFn (see below)
+#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \
+ size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
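+
+// Example (illustrative; benchmark and counter names are made up): check that
+// every benchmark whose name starts with "BM_Counters" reports a user counter
+// "foo" equal to 1, using the helper macros defined further below:
+//
+//   void CheckFoo(Results const& e) {
+//     CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+//   }
+//   CHECK_BENCHMARK_RESULTS("BM_Counters.*", &CheckFoo);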
+
+struct Results;
+typedef std::function<void(Results const&)> ResultsCheckFn;
+
+size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);
+
+// Class holding the results of a benchmark.
+// It is passed in calls to checker functions.
+struct Results {
+ // the benchmark name
+ std::string name;
+ // the benchmark fields
+ std::map<std::string, std::string> values;
+
+ Results(const std::string& n) : name(n) {}
+
+ int NumThreads() const;
+
+ double NumIterations() const;
+
+ typedef enum { kCpuTime, kRealTime } BenchmarkTime;
+
+ // get cpu_time or real_time in seconds
+ double GetTime(BenchmarkTime which) const;
+
+ // get the real_time duration of the benchmark in seconds.
+ // it is better to use fuzzy float checks for this, as the float
+ // ASCII formatting is lossy.
+ double DurationRealTime() const {
+ return NumIterations() * GetTime(kRealTime);
+ }
+ // get the cpu_time duration of the benchmark in seconds
+ double DurationCPUTime() const {
+ return NumIterations() * GetTime(kCpuTime);
+ }
+
+ // get the string for a result by name, or nullptr if the name
+ // is not found
+ const std::string* Get(const char* entry_name) const {
+ auto it = values.find(entry_name);
+ if (it == values.end()) return nullptr;
+ return &it->second;
+ }
+
+ // get a result by name, parsed as a specific type.
+ // NOTE: for counters, use GetCounterAs instead.
+ template <class T>
+ T GetAs(const char* entry_name) const;
+
+ // counters are written as doubles, so they have to be read first
+  // as a double, and only then converted to the requested type.
+ template <class T>
+ T GetCounterAs(const char* entry_name) const {
+ double dval = GetAs<double>(entry_name);
+ T tval = static_cast<T>(dval);
+ return tval;
+ }
+};
+
+template <class T>
+T Results::GetAs(const char* entry_name) const {
+ auto* sv = Get(entry_name);
+ CHECK(sv != nullptr && !sv->empty());
+ std::stringstream ss;
+ ss << *sv;
+ T out;
+ ss >> out;
+ CHECK(!ss.fail());
+ return out;
+}
+
+//----------------------------------
+// Macros to help in result checking. Do not use them with arguments causing
+// side-effects.
+
+// clang-format off
+
+#define CHECK_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value) \
+ CONCAT(CHECK_, relationship) \
+ (entry.getfn< var_type >(var_name), (value)) << "\n" \
+ << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
+ << __FILE__ << ":" << __LINE__ << ": " \
+ << "expected (" << #var_type << ")" << (var_name) \
+ << "=" << (entry).getfn< var_type >(var_name) \
+ << " to be " #relationship " to " << (value) << "\n"
+
+// check with tolerance. eps_factor is the tolerance window, which is
+// interpreted relative to value (eg, 0.1 means 10% of value).
+#define CHECK_FLOAT_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value, eps_factor) \
+ CONCAT(CHECK_FLOAT_, relationship) \
+ (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \
+ << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
+ << __FILE__ << ":" << __LINE__ << ": " \
+ << "expected (" << #var_type << ")" << (var_name) \
+ << "=" << (entry).getfn< var_type >(var_name) \
+ << " to be " #relationship " to " << (value) << "\n" \
+ << __FILE__ << ":" << __LINE__ << ": " \
+ << "with tolerance of " << (eps_factor) * (value) \
+ << " (" << (eps_factor)*100. << "%), " \
+ << "but delta was " << ((entry).getfn< var_type >(var_name) - (value)) \
+ << " (" << (((entry).getfn< var_type >(var_name) - (value)) \
+ / \
+ ((value) > 1.e-5 || value < -1.e-5 ? value : 1.e-5)*100.) \
+ << "%)"
+
+#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \
+ CHECK_RESULT_VALUE_IMPL(entry, GetAs, var_type, var_name, relationship, value)
+
+#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \
+ CHECK_RESULT_VALUE_IMPL(entry, GetCounterAs, var_type, var_name, relationship, value)
+
+#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \
+ CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetAs, double, var_name, relationship, value, eps_factor)
+
+#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \
+ CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetCounterAs, double, var_name, relationship, value, eps_factor)
+
+// clang-format on
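+
+// Example (illustrative; the counter name is made up): inside a checker
+// function taking 'Results const& e', require the user counter "bar" to be
+// within 10% of 0.15:
+//
+//   CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.15, 0.1);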
+
+// ========================================================================= //
+// --------------------------- Misc Utilities ------------------------------ //
+// ========================================================================= //
+
+namespace {
+
+const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
+
+} // end namespace
+
+#endif // TEST_OUTPUT_TEST_H
diff --git a/third-party/benchmark/test/output_test_helper.cc b/third-party/benchmark/test/output_test_helper.cc
new file mode 100644
index 000000000000..b8ef1205744a
--- /dev/null
+++ b/third-party/benchmark/test/output_test_helper.cc
@@ -0,0 +1,520 @@
+#include <cstdio>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <map>
+#include <memory>
+#include <random>
+#include <sstream>
+#include <streambuf>
+
+#include "../src/benchmark_api_internal.h"
+#include "../src/check.h" // NOTE: check.h is for internal use only!
+#include "../src/re.h" // NOTE: re.h is for internal use only
+#include "output_test.h"
+
+// ========================================================================= //
+// ------------------------------ Internals -------------------------------- //
+// ========================================================================= //
+namespace internal {
+namespace {
+
+using TestCaseList = std::vector<TestCase>;
+
+// Use a vector because the order in which elements are added matters during
+// iteration. std::map/unordered_map don't guarantee that.
+// For example:
+// SetSubstitutions({{"%HelloWorld", "Hello"}, {"%Hello", "Hi"}});
+// Substitute("%HelloWorld") // Always expands to Hello.
+using SubMap = std::vector<std::pair<std::string, std::string>>;
+
+TestCaseList& GetTestCaseList(TestCaseID ID) {
+ // Uses function-local statics to ensure initialization occurs
+ // before first use.
+ static TestCaseList lists[TC_NumID];
+ return lists[ID];
+}
+
+SubMap& GetSubstitutions() {
+ // Don't use 'dec_re' from header because it may not yet be initialized.
+ // clang-format off
+ static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
+ static std::string time_re = "([0-9]+[.])?[0-9]+";
+ static SubMap map = {
+ {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"},
+ // human-readable float
+ {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"},
+ {"%int", "[ ]*[0-9]+"},
+ {" %s ", "[ ]+"},
+ {"%time", "[ ]*" + time_re + "[ ]+ns"},
+ {"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"},
+ {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
+ {"%console_ms_report", "[ ]*" + time_re + "[ ]+ms [ ]*" + time_re + "[ ]+ms [ ]*[0-9]+"},
+ {"%console_s_report", "[ ]*" + time_re + "[ ]+s [ ]*" + time_re + "[ ]+s [ ]*[0-9]+"},
+ {"%console_time_only_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns"},
+ {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
+ {"%console_us_time_only_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us"},
+ {"%csv_header",
+ "name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
+ "items_per_second,label,error_occurred,error_message"},
+ {"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"},
+ {"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"},
+ {"%csv_ms_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ms,,,,,"},
+ {"%csv_s_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",s,,,,,"},
+ {"%csv_bytes_report",
+ "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"},
+ {"%csv_items_report",
+ "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"},
+ {"%csv_bytes_items_report",
+ "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re +
+ "," + safe_dec_re + ",,,"},
+ {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"},
+ {"%csv_label_report_end", ",,"}};
+ // clang-format on
+ return map;
+}
+
+std::string PerformSubstitutions(std::string source) {
+ SubMap const& subs = GetSubstitutions();
+ using SizeT = std::string::size_type;
+ for (auto const& KV : subs) {
+ SizeT pos;
+ SizeT next_start = 0;
+ while ((pos = source.find(KV.first, next_start)) != std::string::npos) {
+ next_start = pos + KV.second.size();
+ source.replace(pos, KV.first.size(), KV.second);
+ }
+ }
+ return source;
+}
+
+void CheckCase(std::stringstream& remaining_output, TestCase const& TC,
+ TestCaseList const& not_checks) {
+ std::string first_line;
+ bool on_first = true;
+ std::string line;
+ while (remaining_output.eof() == false) {
+ CHECK(remaining_output.good());
+ std::getline(remaining_output, line);
+ if (on_first) {
+ first_line = line;
+ on_first = false;
+ }
+ for (const auto& NC : not_checks) {
+ CHECK(!NC.regex->Match(line))
+ << "Unexpected match for line \"" << line << "\" for MR_Not regex \""
+ << NC.regex_str << "\""
+ << "\n actual regex string \"" << TC.substituted_regex << "\""
+ << "\n started matching near: " << first_line;
+ }
+ if (TC.regex->Match(line)) return;
+ CHECK(TC.match_rule != MR_Next)
+ << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str
+ << "\""
+ << "\n actual regex string \"" << TC.substituted_regex << "\""
+ << "\n started matching near: " << first_line;
+ }
+ CHECK(remaining_output.eof() == false)
+ << "End of output reached before match for regex \"" << TC.regex_str
+ << "\" was found"
+ << "\n actual regex string \"" << TC.substituted_regex << "\""
+ << "\n started matching near: " << first_line;
+}
+
+void CheckCases(TestCaseList const& checks, std::stringstream& output) {
+ std::vector<TestCase> not_checks;
+ for (size_t i = 0; i < checks.size(); ++i) {
+ const auto& TC = checks[i];
+ if (TC.match_rule == MR_Not) {
+ not_checks.push_back(TC);
+ continue;
+ }
+ CheckCase(output, TC, not_checks);
+ not_checks.clear();
+ }
+}
+
+class TestReporter : public benchmark::BenchmarkReporter {
+ public:
+ TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
+ : reporters_(reps) {}
+
+ virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
+ bool last_ret = false;
+ bool first = true;
+ for (auto rep : reporters_) {
+ bool new_ret = rep->ReportContext(context);
+ CHECK(first || new_ret == last_ret)
+ << "Reports return different values for ReportContext";
+ first = false;
+ last_ret = new_ret;
+ }
+ (void)first;
+ return last_ret;
+ }
+
+ void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+ for (auto rep : reporters_) rep->ReportRuns(report);
+ }
+ void Finalize() BENCHMARK_OVERRIDE {
+ for (auto rep : reporters_) rep->Finalize();
+ }
+
+ private:
+ std::vector<benchmark::BenchmarkReporter*> reporters_;
+};
+} // namespace
+
+} // end namespace internal
+
+// ========================================================================= //
+// -------------------------- Results checking ----------------------------- //
+// ========================================================================= //
+
+namespace internal {
+
+// Utility class to manage subscribers for checking benchmark results.
+// It works by parsing the CSV output to read the results.
+class ResultsChecker {
+ public:
+ struct PatternAndFn : public TestCase { // reusing TestCase for its regexes
+ PatternAndFn(const std::string& rx, ResultsCheckFn fn_)
+ : TestCase(rx), fn(fn_) {}
+ ResultsCheckFn fn;
+ };
+
+ std::vector<PatternAndFn> check_patterns;
+ std::vector<Results> results;
+ std::vector<std::string> field_names;
+
+ void Add(const std::string& entry_pattern, ResultsCheckFn fn);
+
+ void CheckResults(std::stringstream& output);
+
+ private:
+ void SetHeader_(const std::string& csv_header);
+ void SetValues_(const std::string& entry_csv_line);
+
+ std::vector<std::string> SplitCsv_(const std::string& line);
+};
+
+// store the static ResultsChecker in a function to prevent initialization
+// order problems
+ResultsChecker& GetResultsChecker() {
+ static ResultsChecker rc;
+ return rc;
+}
+
+// add a results checker for a benchmark
+void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
+ check_patterns.emplace_back(entry_pattern, fn);
+}
+
+// check the results of all subscribed benchmarks
+void ResultsChecker::CheckResults(std::stringstream& output) {
+ // first reset the stream to the start
+ {
+ auto start = std::stringstream::pos_type(0);
+ // clear before calling tellg()
+ output.clear();
+ // seek to zero only when needed
+ if (output.tellg() > start) output.seekg(start);
+ // and just in case
+ output.clear();
+ }
+ // now go over every line and publish it to the ResultsChecker
+ std::string line;
+ bool on_first = true;
+ while (output.eof() == false) {
+ CHECK(output.good());
+ std::getline(output, line);
+ if (on_first) {
+ SetHeader_(line); // this is important
+ on_first = false;
+ continue;
+ }
+ SetValues_(line);
+ }
+ // finally we can call the subscribed check functions
+ for (const auto& p : check_patterns) {
+ VLOG(2) << "--------------------------------\n";
+ VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
+ for (const auto& r : results) {
+ if (!p.regex->Match(r.name)) {
+ VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
+ continue;
+ } else {
+ VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
+ }
+ VLOG(1) << "Checking results of " << r.name << ": ... \n";
+ p.fn(r);
+ VLOG(1) << "Checking results of " << r.name << ": OK.\n";
+ }
+ }
+}
+
+// record the field names from the csv header
+void ResultsChecker::SetHeader_(const std::string& csv_header) {
+ field_names = SplitCsv_(csv_header);
+}
+
+// set the values for a benchmark
+void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
+ if (entry_csv_line.empty()) return; // some lines are empty
+ CHECK(!field_names.empty());
+ auto vals = SplitCsv_(entry_csv_line);
+ CHECK_EQ(vals.size(), field_names.size());
+ results.emplace_back(vals[0]); // vals[0] is the benchmark name
+ auto& entry = results.back();
+ for (size_t i = 1, e = vals.size(); i < e; ++i) {
+ entry.values[field_names[i]] = vals[i];
+ }
+}
+
+// a quick'n'dirty csv splitter (eliminating quotes)
+std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) {
+ std::vector<std::string> out;
+ if (line.empty()) return out;
+ if (!field_names.empty()) out.reserve(field_names.size());
+ size_t prev = 0, pos = line.find_first_of(','), curr = pos;
+ while (pos != line.npos) {
+ CHECK(curr > 0);
+ if (line[prev] == '"') ++prev;
+ if (line[curr - 1] == '"') --curr;
+ out.push_back(line.substr(prev, curr - prev));
+ prev = pos + 1;
+ pos = line.find_first_of(',', pos + 1);
+ curr = pos;
+ }
+ curr = line.size();
+ if (line[prev] == '"') ++prev;
+ if (line[curr - 1] == '"') --curr;
+ out.push_back(line.substr(prev, curr - prev));
+ return out;
+}
+
+} // end namespace internal
+
+size_t AddChecker(const char* bm_name, ResultsCheckFn fn) {
+ auto& rc = internal::GetResultsChecker();
+ rc.Add(bm_name, fn);
+ return rc.results.size();
+}
+
+int Results::NumThreads() const {
+ auto pos = name.find("/threads:");
+ if (pos == name.npos) return 1;
+ auto end = name.find('/', pos + 9);
+ std::stringstream ss;
+  ss << name.substr(pos + 9, end - (pos + 9));
+ int num = 1;
+ ss >> num;
+ CHECK(!ss.fail());
+ return num;
+}
+
+double Results::NumIterations() const {
+ return GetAs<double>("iterations");
+}
+
+double Results::GetTime(BenchmarkTime which) const {
+ CHECK(which == kCpuTime || which == kRealTime);
+ const char* which_str = which == kCpuTime ? "cpu_time" : "real_time";
+ double val = GetAs<double>(which_str);
+ auto unit = Get("time_unit");
+ CHECK(unit);
+ if (*unit == "ns") {
+ return val * 1.e-9;
+ } else if (*unit == "us") {
+ return val * 1.e-6;
+ } else if (*unit == "ms") {
+ return val * 1.e-3;
+ } else if (*unit == "s") {
+ return val;
+ } else {
+ CHECK(1 == 0) << "unknown time unit: " << *unit;
+ return 0;
+ }
+}
+
+// ========================================================================= //
+// -------------------------- Public API Definitions------------------------ //
+// ========================================================================= //
+
+TestCase::TestCase(std::string re, int rule)
+ : regex_str(std::move(re)),
+ match_rule(rule),
+ substituted_regex(internal::PerformSubstitutions(regex_str)),
+ regex(std::make_shared<benchmark::Regex>()) {
+ std::string err_str;
+ regex->Init(substituted_regex, &err_str);
+ CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex
+ << "\""
+ << "\n originally \"" << regex_str << "\""
+ << "\n got error: " << err_str;
+}
+
+int AddCases(TestCaseID ID, std::initializer_list<TestCase> il) {
+ auto& L = internal::GetTestCaseList(ID);
+ L.insert(L.end(), il);
+ return 0;
+}
+
+int SetSubstitutions(
+ std::initializer_list<std::pair<std::string, std::string>> il) {
+ auto& subs = internal::GetSubstitutions();
+ for (auto KV : il) {
+ bool exists = false;
+ KV.second = internal::PerformSubstitutions(KV.second);
+ for (auto& EKV : subs) {
+ if (EKV.first == KV.first) {
+ EKV.second = std::move(KV.second);
+ exists = true;
+ break;
+ }
+ }
+ if (!exists) subs.push_back(std::move(KV));
+ }
+ return 0;
+}
+
+// Disable deprecated warnings temporarily because we need to reference
+// CSVReporter but don't want to trigger -Wdeprecated-declarations errors.
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+void RunOutputTests(int argc, char* argv[]) {
+ using internal::GetTestCaseList;
+ benchmark::Initialize(&argc, argv);
+ auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true);
+ benchmark::ConsoleReporter CR(options);
+ benchmark::JSONReporter JR;
+ benchmark::CSVReporter CSVR;
+ struct ReporterTest {
+ const char* name;
+ std::vector<TestCase>& output_cases;
+ std::vector<TestCase>& error_cases;
+ benchmark::BenchmarkReporter& reporter;
+ std::stringstream out_stream;
+ std::stringstream err_stream;
+
+ ReporterTest(const char* n, std::vector<TestCase>& out_tc,
+ std::vector<TestCase>& err_tc,
+ benchmark::BenchmarkReporter& br)
+ : name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) {
+ reporter.SetOutputStream(&out_stream);
+ reporter.SetErrorStream(&err_stream);
+ }
+ } TestCases[] = {
+ {"ConsoleReporter", GetTestCaseList(TC_ConsoleOut),
+ GetTestCaseList(TC_ConsoleErr), CR},
+ {"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr),
+ JR},
+ {"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr),
+ CSVR},
+ };
+
+ // Create the test reporter and run the benchmarks.
+ std::cout << "Running benchmarks...\n";
+ internal::TestReporter test_rep({&CR, &JR, &CSVR});
+ benchmark::RunSpecifiedBenchmarks(&test_rep);
+
+ for (auto& rep_test : TestCases) {
+ std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
+ std::string banner(msg.size() - 1, '-');
+ std::cout << banner << msg << banner << "\n";
+
+ std::cerr << rep_test.err_stream.str();
+ std::cout << rep_test.out_stream.str();
+
+ internal::CheckCases(rep_test.error_cases, rep_test.err_stream);
+ internal::CheckCases(rep_test.output_cases, rep_test.out_stream);
+
+ std::cout << "\n";
+ }
+
+  // now that we know the output is as expected, we can dispatch
+  // the checks to the subscribed checker functions.
+ auto& csv = TestCases[2];
+ // would use == but gcc spits a warning
+ CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
+ internal::GetResultsChecker().CheckResults(csv.out_stream);
+}
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
+
+int SubstrCnt(const std::string& haystack, const std::string& pat) {
+ if (pat.length() == 0) return 0;
+ int count = 0;
+ for (size_t offset = haystack.find(pat); offset != std::string::npos;
+ offset = haystack.find(pat, offset + pat.length()))
+ ++count;
+ return count;
+}
+
+static char ToHex(int ch) {
+ return ch < 10 ? static_cast<char>('0' + ch)
+ : static_cast<char>('a' + (ch - 10));
+}
+
+static char RandomHexChar() {
+ static std::mt19937 rd{std::random_device{}()};
+ static std::uniform_int_distribution<int> mrand{0, 15};
+ return ToHex(mrand(rd));
+}
+
+static std::string GetRandomFileName() {
+ std::string model = "test.%%%%%%";
+  for (auto& ch : model) {
+ if (ch == '%')
+ ch = RandomHexChar();
+ }
+ return model;
+}
+
+static bool FileExists(std::string const& name) {
+ std::ifstream in(name.c_str());
+ return in.good();
+}
+
+static std::string GetTempFileName() {
+ // This function attempts to avoid race conditions where two tests
+ // create the same file at the same time. However, it still introduces races
+ // similar to tmpnam.
+ int retries = 3;
+ while (--retries) {
+ std::string name = GetRandomFileName();
+ if (!FileExists(name))
+ return name;
+ }
+ std::cerr << "Failed to create unique temporary file name" << std::endl;
+ std::abort();
+}
+
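+// Runs the registered benchmarks with --benchmark_out pointing at a fresh
+// temporary file, then returns the file's contents and removes the file.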
+std::string GetFileReporterOutput(int argc, char* argv[]) {
+ std::vector<char*> new_argv(argv, argv + argc);
+ assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());
+
+ std::string tmp_file_name = GetTempFileName();
+ std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n';
+
+ std::string tmp = "--benchmark_out=";
+ tmp += tmp_file_name;
+ new_argv.emplace_back(const_cast<char*>(tmp.c_str()));
+
+ argc = int(new_argv.size());
+
+ benchmark::Initialize(&argc, new_argv.data());
+ benchmark::RunSpecifiedBenchmarks();
+
+ // Read the output back from the file, and delete the file.
+ std::ifstream tmp_stream(tmp_file_name);
+ std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)),
+ std::istreambuf_iterator<char>());
+ std::remove(tmp_file_name.c_str());
+
+ return output;
+}
diff --git a/third-party/benchmark/test/perf_counters_gtest.cc b/third-party/benchmark/test/perf_counters_gtest.cc
new file mode 100644
index 000000000000..2a2868a71536
--- /dev/null
+++ b/third-party/benchmark/test/perf_counters_gtest.cc
@@ -0,0 +1,145 @@
+#include <thread>
+
+#include "../src/perf_counters.h"
+#include "gtest/gtest.h"
+
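+// Fallback for GoogleTest versions that lack GTEST_SKIP: stream the message
+// to stdout and return early from the test body.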
+#ifndef GTEST_SKIP
+struct MsgHandler {
+ void operator=(std::ostream&){}
+};
+#define GTEST_SKIP() return MsgHandler() = std::cout
+#endif
+
+using benchmark::internal::PerfCounters;
+using benchmark::internal::PerfCounterValues;
+
+namespace {
+const char kGenericPerfEvent1[] = "CYCLES";
+const char kGenericPerfEvent2[] = "BRANCHES";
+const char kGenericPerfEvent3[] = "INSTRUCTIONS";
+
+TEST(PerfCountersTest, Init) {
+ EXPECT_EQ(PerfCounters::Initialize(), PerfCounters::kSupported);
+}
+
+TEST(PerfCountersTest, OneCounter) {
+ if (!PerfCounters::kSupported) {
+ GTEST_SKIP() << "Performance counters not supported.\n";
+ }
+ EXPECT_TRUE(PerfCounters::Initialize());
+ EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1}).IsValid());
+}
+
+TEST(PerfCountersTest, NegativeTest) {
+ if (!PerfCounters::kSupported) {
+ EXPECT_FALSE(PerfCounters::Initialize());
+ return;
+ }
+ EXPECT_TRUE(PerfCounters::Initialize());
+ EXPECT_FALSE(PerfCounters::Create({}).IsValid());
+ EXPECT_FALSE(PerfCounters::Create({""}).IsValid());
+ EXPECT_FALSE(PerfCounters::Create({"not a counter name"}).IsValid());
+ {
+ EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2,
+ kGenericPerfEvent3})
+ .IsValid());
+ }
+ EXPECT_FALSE(
+ PerfCounters::Create({kGenericPerfEvent2, "", kGenericPerfEvent1})
+ .IsValid());
+ EXPECT_FALSE(PerfCounters::Create({kGenericPerfEvent3, "not a counter name",
+ kGenericPerfEvent1})
+ .IsValid());
+ {
+ EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2,
+ kGenericPerfEvent3})
+ .IsValid());
+ }
+ EXPECT_FALSE(
+ PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2,
+ kGenericPerfEvent3, "MISPREDICTED_BRANCH_RETIRED"})
+ .IsValid());
+}
+
+TEST(PerfCountersTest, Read1Counter) {
+ if (!PerfCounters::kSupported) {
+ GTEST_SKIP() << "Test skipped because libpfm is not supported.\n";
+ }
+ EXPECT_TRUE(PerfCounters::Initialize());
+ auto counters = PerfCounters::Create({kGenericPerfEvent1});
+ EXPECT_TRUE(counters.IsValid());
+ PerfCounterValues values1(1);
+ EXPECT_TRUE(counters.Snapshot(&values1));
+ EXPECT_GT(values1[0], 0);
+ PerfCounterValues values2(1);
+ EXPECT_TRUE(counters.Snapshot(&values2));
+ EXPECT_GT(values2[0], 0);
+ EXPECT_GT(values2[0], values1[0]);
+}
+
+TEST(PerfCountersTest, Read2Counters) {
+ if (!PerfCounters::kSupported) {
+ GTEST_SKIP() << "Test skipped because libpfm is not supported.\n";
+ }
+ EXPECT_TRUE(PerfCounters::Initialize());
+ auto counters =
+ PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2});
+ EXPECT_TRUE(counters.IsValid());
+ PerfCounterValues values1(2);
+ EXPECT_TRUE(counters.Snapshot(&values1));
+ EXPECT_GT(values1[0], 0);
+ EXPECT_GT(values1[1], 0);
+ PerfCounterValues values2(2);
+ EXPECT_TRUE(counters.Snapshot(&values2));
+ EXPECT_GT(values2[0], 0);
+ EXPECT_GT(values2[1], 0);
+}
+
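+// CPU-bound busy loop; produces enough work for the counters to register
+// clearly non-zero values.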
+size_t do_work() {
+ size_t res = 0;
+ for (size_t i = 0; i < 100000000; ++i) res += i * i;
+ return res;
+}
+
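+// Spawns `threadcount` worker threads that inherit the perf counters, taking
+// one snapshot right after the threads start and another after they join.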
+void measure(size_t threadcount, PerfCounterValues* values1,
+ PerfCounterValues* values2) {
+ CHECK_NE(values1, nullptr);
+ CHECK_NE(values2, nullptr);
+ std::vector<std::thread> threads(threadcount);
+ auto work = [&]() { CHECK(do_work() > 1000); };
+
+  // We need to set up the counters first and only then start the threads, so
+  // that the threads inherit the counters. Later, we need to join the threads
+  // (so all the work finishes) before taking the final measurement. The scopes
+  // overlap, so we control the lifetime of the thread pool explicitly.
+ auto counters =
+ PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent3});
+ for (auto& t : threads) t = std::thread(work);
+ counters.Snapshot(values1);
+ for (auto& t : threads) t.join();
+ counters.Snapshot(values2);
+}
+
+TEST(PerfCountersTest, MultiThreaded) {
+ if (!PerfCounters::kSupported) {
+ GTEST_SKIP() << "Test skipped because libpfm is not supported.";
+ }
+ EXPECT_TRUE(PerfCounters::Initialize());
+ PerfCounterValues values1(2);
+ PerfCounterValues values2(2);
+
+ measure(2, &values1, &values2);
+ std::vector<double> D1{static_cast<double>(values2[0] - values1[0]),
+ static_cast<double>(values2[1] - values1[1])};
+
+ measure(4, &values1, &values2);
+ std::vector<double> D2{static_cast<double>(values2[0] - values1[0]),
+ static_cast<double>(values2[1] - values1[1])};
+
+ // Some extra work will happen on the main thread - like joining the threads
+ // - so the ratio won't be quite 2.0, but very close.
+ EXPECT_GE(D2[0], 1.9 * D1[0]);
+ EXPECT_GE(D2[1], 1.9 * D1[1]);
+}
+} // namespace
diff --git a/third-party/benchmark/test/perf_counters_test.cc b/third-party/benchmark/test/perf_counters_test.cc
new file mode 100644
index 000000000000..d6e0284d4d4b
--- /dev/null
+++ b/third-party/benchmark/test/perf_counters_test.cc
@@ -0,0 +1,27 @@
+#undef NDEBUG
+
+#include "../src/perf_counters.h"
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+void BM_Simple(benchmark::State& state) {
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(state.iterations());
+ }
+}
+BENCHMARK(BM_Simple);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Simple\",$"}});
+
+void CheckSimple(Results const& e) {
+ CHECK_COUNTER_VALUE(e, double, "CYCLES", GT, 0);
+ CHECK_COUNTER_VALUE(e, double, "BRANCHES", GT, 0.0);
+}
+CHECK_BENCHMARK_RESULTS("BM_Simple", &CheckSimple);
+
+int main(int argc, char* argv[]) {
+ if (!benchmark::internal::PerfCounters::kSupported) {
+ return 0;
+ }
+ RunOutputTests(argc, argv);
+}
diff --git a/third-party/benchmark/test/register_benchmark_test.cc b/third-party/benchmark/test/register_benchmark_test.cc
new file mode 100644
index 000000000000..c027eabacae0
--- /dev/null
+++ b/third-party/benchmark/test/register_benchmark_test.cc
@@ -0,0 +1,184 @@
+
+#undef NDEBUG
+#include <cassert>
+#include <vector>
+
+#include "../src/check.h" // NOTE: check.h is for internal use only!
+#include "benchmark/benchmark.h"
+
+namespace {
+
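+// Console reporter that additionally records every Run it receives so the
+// results can be compared against ExpectedResults.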
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+ virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+ all_runs_.insert(all_runs_.end(), begin(report), end(report));
+ ConsoleReporter::ReportRuns(report);
+ }
+
+ std::vector<Run> all_runs_;
+};
+
+struct TestCase {
+ std::string name;
+ const char* label;
+ // Note: not explicit as we rely on it being converted through ADD_CASES.
+ TestCase(const char* xname) : TestCase(xname, nullptr) {}
+ TestCase(const char* xname, const char* xlabel)
+ : name(xname), label(xlabel) {}
+
+ typedef benchmark::BenchmarkReporter::Run Run;
+
+ void CheckRun(Run const& run) const {
+ // clang-format off
+ CHECK(name == run.benchmark_name()) << "expected " << name << " got "
+ << run.benchmark_name();
+ if (label) {
+ CHECK(run.report_label == label) << "expected " << label << " got "
+ << run.report_label;
+ } else {
+ CHECK(run.report_label == "");
+ }
+ // clang-format on
+ }
+};
+
+std::vector<TestCase> ExpectedResults;
+
+int AddCases(std::initializer_list<TestCase> const& v) {
+ for (auto N : v) {
+ ExpectedResults.push_back(N);
+ }
+ return 0;
+}
+
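+// ADD_CASES appends the expected results during static initialization via a
+// uniquely named dummy global.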
+#define CONCAT(x, y) CONCAT2(x, y)
+#define CONCAT2(x, y) x##y
+#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases({__VA_ARGS__})
+
+} // end namespace
+
+typedef benchmark::internal::Benchmark* ReturnVal;
+
+//----------------------------------------------------------------------------//
+// Test RegisterBenchmark with no additional arguments
+//----------------------------------------------------------------------------//
+void BM_function(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_function);
+ReturnVal dummy = benchmark::RegisterBenchmark(
+ "BM_function_manual_registration", BM_function);
+ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
+
+//----------------------------------------------------------------------------//
+// Test RegisterBenchmark with additional arguments
+// Note: GCC <= 4.8 do not support this form of RegisterBenchmark because they
+// reject the variadic pack expansion of lambda captures.
+//----------------------------------------------------------------------------//
+#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
+
+void BM_extra_args(benchmark::State& st, const char* label) {
+ for (auto _ : st) {
+ }
+ st.SetLabel(label);
+}
+int RegisterFromFunction() {
+ std::pair<const char*, const char*> cases[] = {
+ {"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
+ for (auto const& c : cases)
+ benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
+ return 0;
+}
+int dummy2 = RegisterFromFunction();
+ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
+
+#endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
+
+//----------------------------------------------------------------------------//
+// Test RegisterBenchmark with different callable types
+//----------------------------------------------------------------------------//
+
+struct CustomFixture {
+ void operator()(benchmark::State& st) {
+ for (auto _ : st) {
+ }
+ }
+};
+
+void TestRegistrationAtRuntime() {
+#ifdef BENCHMARK_HAS_CXX11
+ {
+ CustomFixture fx;
+ benchmark::RegisterBenchmark("custom_fixture", fx);
+ AddCases({"custom_fixture"});
+ }
+#endif
+#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
+ {
+ const char* x = "42";
+ auto capturing_lam = [=](benchmark::State& st) {
+ for (auto _ : st) {
+ }
+ st.SetLabel(x);
+ };
+ benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
+ AddCases({{"lambda_benchmark", x}});
+ }
+#endif
+}
+
+// Test that all benchmarks, whether registered during static init or at
+// runtime, are run and that the results are passed to the reporter.
+void RunTestOne() {
+ TestRegistrationAtRuntime();
+
+ TestReporter test_reporter;
+ benchmark::RunSpecifiedBenchmarks(&test_reporter);
+
+ typedef benchmark::BenchmarkReporter::Run Run;
+ auto EB = ExpectedResults.begin();
+
+ for (Run const& run : test_reporter.all_runs_) {
+ assert(EB != ExpectedResults.end());
+ EB->CheckRun(run);
+ ++EB;
+ }
+ assert(EB == ExpectedResults.end());
+}
+
+// Test that ClearRegisteredBenchmarks() clears all previously registered
+// benchmarks.
+// Also test that new benchmarks can be registered and run afterwards.
+void RunTestTwo() {
+ assert(ExpectedResults.size() != 0 &&
+ "must have at least one registered benchmark");
+ ExpectedResults.clear();
+ benchmark::ClearRegisteredBenchmarks();
+
+ TestReporter test_reporter;
+ size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
+ assert(num_ran == 0);
+ assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end());
+
+ TestRegistrationAtRuntime();
+ num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
+ assert(num_ran == ExpectedResults.size());
+
+ typedef benchmark::BenchmarkReporter::Run Run;
+ auto EB = ExpectedResults.begin();
+
+ for (Run const& run : test_reporter.all_runs_) {
+ assert(EB != ExpectedResults.end());
+ EB->CheckRun(run);
+ ++EB;
+ }
+ assert(EB == ExpectedResults.end());
+}
+
+int main(int argc, char* argv[]) {
+ benchmark::Initialize(&argc, argv);
+
+ RunTestOne();
+ RunTestTwo();
+}
diff --git a/third-party/benchmark/test/repetitions_test.cc b/third-party/benchmark/test/repetitions_test.cc
new file mode 100644
index 000000000000..f93de502a35a
--- /dev/null
+++ b/third-party/benchmark/test/repetitions_test.cc
@@ -0,0 +1,208 @@
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// ========================================================================= //
+// ------------------- Testing Explicit Repetitions Output ------------------ //
+// ========================================================================= //
+
+void BM_ExplicitRepetitions(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_ExplicitRepetitions)->Repetitions(2);
+
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_ExplicitRepetitions/repeats:2 %console_report$"}});
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_ExplicitRepetitions/repeats:2 %console_report$"}});
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_ExplicitRepetitions/repeats:2_mean %console_report$"}});
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_ExplicitRepetitions/repeats:2_median %console_report$"}});
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_ExplicitRepetitions/repeats:2_stddev %console_report$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_ExplicitRepetitions/repeats:2\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_ExplicitRepetitions/repeats:2\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 1,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_ExplicitRepetitions/repeats:2_mean\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_ExplicitRepetitions/repeats:2_median\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_ExplicitRepetitions/repeats:2_stddev\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_ExplicitRepetitions/repeats:2\",%csv_report$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_ExplicitRepetitions/repeats:2\",%csv_report$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_ExplicitRepetitions/repeats:2_mean\",%csv_report$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_ExplicitRepetitions/repeats:2_median\",%csv_report$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_ExplicitRepetitions/repeats:2_stddev\",%csv_report$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Basic Output --------------------------- //
+// ========================================================================= //
+
+void BM_ImplicitRepetitions(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_ImplicitRepetitions);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_mean %console_report$"}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_median %console_report$"}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_stddev %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"},
+ {"\"family_index\": 1,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"},
+ {"\"family_index\": 1,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"repetition_index\": 1,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"},
+ {"\"family_index\": 1,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"repetition_index\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_mean\",$"},
+ {"\"family_index\": 1,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_median\",$"},
+ {"\"family_index\": 1,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_stddev\",$"},
+ {"\"family_index\": 1,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions\",%csv_report$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions\",%csv_report$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_mean\",%csv_report$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_median\",%csv_report$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_stddev\",%csv_report$"}});
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
diff --git a/third-party/benchmark/test/report_aggregates_only_test.cc b/third-party/benchmark/test/report_aggregates_only_test.cc
new file mode 100644
index 000000000000..9646b9be534d
--- /dev/null
+++ b/third-party/benchmark/test/report_aggregates_only_test.cc
@@ -0,0 +1,39 @@
+
+#undef NDEBUG
+#include <cstdio>
+#include <string>
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// Ok this test is super ugly. We want to check what happens with the file
+// reporter in the presence of ReportAggregatesOnly().
+// We do not care about console output, the normal tests check that already.
+
+void BM_SummaryRepeat(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
+
+int main(int argc, char* argv[]) {
+ const std::string output = GetFileReporterOutput(argc, argv);
+
+ if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
+ 1 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
+ 1) {
+ std::cout << "Precondition mismatch. Expected to only find three "
+ "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
+ "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
+ "output:\n";
+ std::cout << output;
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/third-party/benchmark/test/reporter_output_test.cc b/third-party/benchmark/test/reporter_output_test.cc
new file mode 100644
index 000000000000..989eb48ecc81
--- /dev/null
+++ b/third-party/benchmark/test/reporter_output_test.cc
@@ -0,0 +1,956 @@
+
+#undef NDEBUG
+#include <utility>
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// ========================================================================= //
+// ---------------------- Testing Prologue Output -------------------------- //
+// ========================================================================= //
+
+ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
+ {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
+ {"^[-]+$", MR_Next}});
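+// Registers checks for the reported context: timestamp, executable name, CPU
+// info, cache hierarchy and load average.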
+static int AddContextCases() {
+ AddCases(TC_ConsoleErr,
+ {
+ {"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default},
+ {"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
+ {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
+ });
+ AddCases(TC_JSONOut,
+ {{"^\\{", MR_Default},
+ {"\"context\":", MR_Next},
+ {"\"date\": \"", MR_Next},
+ {"\"host_name\":", MR_Next},
+ {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
+ MR_Next},
+ {"\"num_cpus\": %int,$", MR_Next},
+ {"\"mhz_per_cpu\": %float,$", MR_Next},
+ {"\"caches\": \\[$", MR_Default}});
+ auto const& Info = benchmark::CPUInfo::Get();
+ auto const& Caches = Info.caches;
+ if (!Caches.empty()) {
+ AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
+ }
+ for (size_t I = 0; I < Caches.size(); ++I) {
+ std::string num_caches_str =
+ Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
+ AddCases(TC_ConsoleErr,
+ {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str,
+ MR_Next}});
+ AddCases(TC_JSONOut, {{"\\{$", MR_Next},
+ {"\"type\": \"", MR_Next},
+ {"\"level\": %int,$", MR_Next},
+ {"\"size\": %int,$", MR_Next},
+ {"\"num_sharing\": %int$", MR_Next},
+ {"}[,]{0,1}$", MR_Next}});
+ }
+ AddCases(TC_JSONOut, {{"],$"}});
+ auto const& LoadAvg = Info.load_avg;
+ if (!LoadAvg.empty()) {
+ AddCases(TC_ConsoleErr,
+ {{"Load Average: (%float, ){0,2}%float$", MR_Next}});
+ }
+ AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
+ return 0;
+}
+int dummy_register = AddContextCases();
+ADD_CASES(TC_CSVOut, {{"%csv_header"}});
+
+// ========================================================================= //
+// ------------------------ Testing Basic Output --------------------------- //
+// ========================================================================= //
+
+void BM_basic(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_basic);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_basic\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Bytes per Second Output ---------------- //
+// ========================================================================= //
+
+void BM_bytes_per_second(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ state.SetBytesProcessed(1);
+}
+BENCHMARK(BM_bytes_per_second);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report "
+ "bytes_per_second=%float[kM]{0,1}/s$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
+ {"\"family_index\": 1,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_bytes_per_second\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bytes_per_second\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Items per Second Output ---------------- //
+// ========================================================================= //
+
+void BM_items_per_second(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ state.SetItemsProcessed(1);
+}
+BENCHMARK(BM_items_per_second);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report "
+ "items_per_second=%float[kM]{0,1}/s$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
+ {"\"family_index\": 2,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_items_per_second\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"items_per_second\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Label Output --------------------------- //
+// ========================================================================= //
+
+void BM_label(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ state.SetLabel("some label");
+}
+BENCHMARK(BM_label);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
+ {"\"family_index\": 3,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_label\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"label\": \"some label\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
+ "label\"%csv_label_report_end$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Time Label Output ---------------------- //
+// ========================================================================= //
+
+void BM_time_label_nanosecond(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_time_label_nanosecond)->Unit(benchmark::kNanosecond);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_nanosecond %console_report$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_time_label_nanosecond\",$"},
+ {"\"family_index\": 4,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_time_label_nanosecond\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_nanosecond\",%csv_report$"}});
+
+void BM_time_label_microsecond(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_time_label_microsecond)->Unit(benchmark::kMicrosecond);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_microsecond %console_us_report$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_time_label_microsecond\",$"},
+ {"\"family_index\": 5,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_time_label_microsecond\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"us\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_microsecond\",%csv_us_report$"}});
+
+void BM_time_label_millisecond(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_time_label_millisecond)->Unit(benchmark::kMillisecond);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_millisecond %console_ms_report$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_time_label_millisecond\",$"},
+ {"\"family_index\": 6,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_time_label_millisecond\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ms\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_millisecond\",%csv_ms_report$"}});
+
+void BM_time_label_second(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_time_label_second)->Unit(benchmark::kSecond);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_second %console_s_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_second\",$"},
+ {"\"family_index\": 7,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_time_label_second\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"s\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_second\",%csv_s_report$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Error Output --------------------------- //
+// ========================================================================= //
+
+void BM_error(benchmark::State& state) {
+ state.SkipWithError("message");
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_error);
+ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"},
+ {"\"family_index\": 8,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_error\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"error_occurred\": true,$", MR_Next},
+ {"\"error_message\": \"message\",$", MR_Next}});
+
+ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
+
+// ========================================================================= //
+// ---------------------- Testing No Arg Name Output ------------------------ //
+// ========================================================================= //
+
+void BM_no_arg_name(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_no_arg_name)->Arg(3);
+ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"},
+ {"\"family_index\": 9,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Arg Name Output ----------------------- //
+// ========================================================================= //
+
+void BM_arg_name(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
+ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"},
+ {"\"family_index\": 10,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Arg Names Output ----------------------- //
+// ========================================================================= //
+
+void BM_arg_names(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
+ {"\"family_index\": 11,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Name Output ---------------------------- //
+// ========================================================================= //
+
+void BM_name(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_name)->Name("BM_custom_name");
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_custom_name %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_custom_name\",$"},
+ {"\"family_index\": 12,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_custom_name\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_custom_name\",%csv_report$"}});
+
+// ========================================================================= //
+// ------------------------ Testing Big Args Output ------------------------ //
+// ========================================================================= //
+
+void BM_BigArgs(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
+ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
+ {"^BM_BigArgs/2147483648 %console_report$"}});
+
+// ========================================================================= //
+// ----------------------- Testing Complexity Output ----------------------- //
+// ========================================================================= //
+
+void BM_Complexity_O1(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ state.SetComplexityN(state.range(0));
+}
+BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
+SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"},
+ {"%RMS", "[ ]*[0-9]+ %"}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
+ {"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
+
+// ========================================================================= //
+// ----------------------- Testing Aggregate Output ------------------------ //
+// ========================================================================= //
+
+// Test that non-aggregate data is printed by default
+void BM_Repeat(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+// At least two repetitions are needed to produce any aggregate output.
+BENCHMARK(BM_Repeat)->Repetitions(2);
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_Repeat/repeats:2 %console_report$"},
+ {"^BM_Repeat/repeats:2 %console_report$"},
+ {"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
+ {"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
+ {"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
+ {"\"family_index\": 15,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:2\",$"},
+ {"\"family_index\": 15,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 1,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:2_mean\",$"},
+ {"\"family_index\": 15,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:2_median\",$"},
+ {"\"family_index\": 15,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"},
+ {"\"family_index\": 15,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:2\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:2_mean\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:2_median\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
+// But for two repetitions the mean and median are the same, so repeat again.
+BENCHMARK(BM_Repeat)->Repetitions(3);
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_Repeat/repeats:3 %console_report$"},
+ {"^BM_Repeat/repeats:3 %console_report$"},
+ {"^BM_Repeat/repeats:3 %console_report$"},
+ {"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
+ {"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
+ {"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
+ {"\"family_index\": 16,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:3\",$"},
+ {"\"family_index\": 16,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"repetition_index\": 1,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:3\",$"},
+ {"\"family_index\": 16,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"repetition_index\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
+ {"\"family_index\": 16,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:3_median\",$"},
+ {"\"family_index\": 16,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"},
+ {"\"family_index\": 16,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:3\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:3\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:3_mean\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:3_median\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
+// Median differs between even and odd repetition counts, so also check four.
+BENCHMARK(BM_Repeat)->Repetitions(4);
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"},
+ {"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
+ {"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"family_index\": 17,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 4,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"family_index\": 17,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 4,$", MR_Next},
+ {"\"repetition_index\": 1,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"family_index\": 17,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 4,$", MR_Next},
+ {"\"repetition_index\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"family_index\": 17,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 4,$", MR_Next},
+ {"\"repetition_index\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:4_mean\",$"},
+ {"\"family_index\": 17,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 4,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 4,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:4_median\",$"},
+ {"\"family_index\": 17,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 4,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 4,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"},
+ {"\"family_index\": 17,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 4,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 4,$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4_mean\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4_median\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}});
+
+// Test that a non-repeated test still prints non-aggregate results even when
+// only-aggregate reports have been requested
+void BM_RepeatOnce(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
+ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
+ {"\"family_index\": 18,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
+
+// Test that non-aggregate data is not reported
+void BM_SummaryRepeat(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
+ADD_CASES(
+ TC_ConsoleOut,
+ {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
+ {"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
+ {"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
+ {"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
+ADD_CASES(TC_JSONOut,
+ {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
+ {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
+ {"\"family_index\": 19,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
+ {"\"family_index\": 19,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
+ {"\"family_index\": 19,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
+ {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
+ {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
+ {"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});
+
+// Test that non-aggregate data is not displayed.
+// NOTE: this test is kinda bad. We only check the display output here,
+// and we don't verify that the file output still contains everything...
+void BM_SummaryDisplay(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
+ADD_CASES(
+ TC_ConsoleOut,
+ {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
+ {"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
+ {"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
+ {"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
+ADD_CASES(TC_JSONOut,
+ {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
+ {"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
+ {"\"family_index\": 20,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
+ {"\"family_index\": 20,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
+ {"\"family_index\": 20,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next}});
+ADD_CASES(TC_CSVOut,
+ {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
+ {"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
+ {"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
+ {"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});
+
+// Test repeats with custom time unit.
+void BM_RepeatTimeUnit(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_RepeatTimeUnit)
+ ->Repetitions(3)
+ ->ReportAggregatesOnly()
+ ->Unit(benchmark::kMicrosecond);
+ADD_CASES(
+ TC_ConsoleOut,
+ {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
+ {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
+ {"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
+ "]*3$"},
+ {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
+ "]*3$"}});
+ADD_CASES(TC_JSONOut,
+ {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
+ {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
+ {"\"family_index\": 21,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"time_unit\": \"us\",?$"},
+ {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
+ {"\"family_index\": 21,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"time_unit\": \"us\",?$"},
+ {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
+ {"\"family_index\": 21,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"time_unit\": \"us\",?$"}});
+ADD_CASES(TC_CSVOut,
+ {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
+ {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
+ {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
+ {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
+
+// ========================================================================= //
+// -------------------- Testing user-provided statistics ------------------- //
+// ========================================================================= //
+
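+// A user-provided statistic that simply returns the last sample; it is
+// registered below via ComputeStatistics() in addition to the built-in
+// mean/median/stddev aggregates.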
+const auto UserStatistics = [](const std::vector<double>& v) {
+ return v.back();
+};
+void BM_UserStats(benchmark::State& state) {
+ for (auto _ : state) {
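+    // Report a fixed manual time: 150 / 10e8 == 1.5e-7 s == 150 ns per iteration.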
+ state.SetIterationTime(150 / 10e8);
+ }
+}
+// clang-format off
+BENCHMARK(BM_UserStats)
+ ->Repetitions(3)
+ ->Iterations(5)
+ ->UseManualTime()
+ ->ComputeStatistics("", UserStatistics);
+// clang-format on
+
+// Check that the user-provided statistic is calculated and reported after the
+// default ones. The empty string as its name is intentional: it sorts before
+// anything else.
+ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
+ "]* 150 ns %time [ ]*5$"},
+ {"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
+ "]* 150 ns %time [ ]*5$"},
+ {"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
+ "]* 150 ns %time [ ]*5$"},
+ {"^BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_mean [ ]* 150 ns %time [ ]*3$"},
+ {"^BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_median [ ]* 150 ns %time [ ]*3$"},
+ {"^BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
+ {"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
+ "[ ]* 150 ns %time [ ]*3$"}});
+ADD_CASES(
+ TC_JSONOut,
+ {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
+ {"\"family_index\": 22,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": 5,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
+ {"\"family_index\": 22,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"repetition_index\": 1,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": 5,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
+ {"\"family_index\": 22,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"repetition_index\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": 5,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
+ {"\"family_index\": 22,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
+ {"\"family_index\": 22,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
+ {"\"family_index\": 22,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
+ {"\"family_index\": 22,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 3,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
+ADD_CASES(
+ TC_CSVOut,
+ {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_median\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_stddev\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});
+
+// ========================================================================= //
+// ------------------------- Testing StrEscape JSON ------------------------ //
+// ========================================================================= //
+#if 0 // enable when csv testing code correctly handles multi-line fields
+void BM_JSON_Format(benchmark::State& state) {
+ state.SkipWithError("val\b\f\n\r\t\\\"with\"es,capes");
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_JSON_Format);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
+ {"\"family_index\": 23,$", MR_Next},
+{"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_JSON_Format\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"error_occurred\": true,$", MR_Next},
+ {R"("error_message": "val\\b\\f\\n\\r\\t\\\\\\"with\\"es,capes",$)", MR_Next}});
+#endif
+// ========================================================================= //
+// -------------------------- Testing CsvEscape ---------------------------- //
+// ========================================================================= //
+
+void BM_CSV_Format(benchmark::State& state) {
+ state.SkipWithError("\"freedom\"");
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_CSV_Format);
+ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
diff --git a/third-party/benchmark/test/skip_with_error_test.cc b/third-party/benchmark/test/skip_with_error_test.cc
new file mode 100644
index 000000000000..827966e9dfe3
--- /dev/null
+++ b/third-party/benchmark/test/skip_with_error_test.cc
@@ -0,0 +1,195 @@
+
+#undef NDEBUG
+#include <cassert>
+#include <vector>
+
+#include "../src/check.h" // NOTE: check.h is for internal use only!
+#include "benchmark/benchmark.h"
+
+namespace {
+
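+// A ConsoleReporter that additionally records every Run it receives so that
+// main() can compare them against ExpectedResults.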
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+ virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
+ return ConsoleReporter::ReportContext(context);
+  }
+
+ virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+ all_runs_.insert(all_runs_.end(), begin(report), end(report));
+ ConsoleReporter::ReportRuns(report);
+ }
+
+ TestReporter() {}
+ virtual ~TestReporter() {}
+
+ mutable std::vector<Run> all_runs_;
+};
+
+struct TestCase {
+ std::string name;
+ bool error_occurred;
+ std::string error_message;
+
+ typedef benchmark::BenchmarkReporter::Run Run;
+
+ void CheckRun(Run const& run) const {
+ CHECK(name == run.benchmark_name())
+ << "expected " << name << " got " << run.benchmark_name();
+ CHECK(error_occurred == run.error_occurred);
+ CHECK(error_message == run.error_message);
+ if (error_occurred) {
+ // CHECK(run.iterations == 0);
+ } else {
+ CHECK(run.iterations != 0);
+ }
+ }
+};
+
+std::vector<TestCase> ExpectedResults;
+
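+// Registers the expected results for a benchmark; each TestCase::name holds
+// the suffix that is appended to base_name (e.g. "/1/threads:2").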
+int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
+ for (auto TC : v) {
+ TC.name = base_name + TC.name;
+ ExpectedResults.push_back(std::move(TC));
+ }
+ return 0;
+}
+
+#define CONCAT(x, y) CONCAT2(x, y)
+#define CONCAT2(x, y) x##y
+#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)
+
+} // end namespace
+
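+// The benchmarks below call SkipWithError() at different points in the
+// benchmark lifecycle (before, during, and after the measurement loop) and
+// verify the reported error state for each case.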
+void BM_error_no_running(benchmark::State& state) {
+ state.SkipWithError("error message");
+}
+BENCHMARK(BM_error_no_running);
+ADD_CASES("BM_error_no_running", {{"", true, "error message"}});
+
+void BM_error_before_running(benchmark::State& state) {
+ state.SkipWithError("error message");
+ while (state.KeepRunning()) {
+ assert(false);
+ }
+}
+BENCHMARK(BM_error_before_running);
+ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
+
+void BM_error_before_running_batch(benchmark::State& state) {
+ state.SkipWithError("error message");
+ while (state.KeepRunningBatch(17)) {
+ assert(false);
+ }
+}
+BENCHMARK(BM_error_before_running_batch);
+ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});
+
+void BM_error_before_running_range_for(benchmark::State& state) {
+ state.SkipWithError("error message");
+ for (auto _ : state) {
+ assert(false);
+ }
+}
+BENCHMARK(BM_error_before_running_range_for);
+ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
+
+void BM_error_during_running(benchmark::State& state) {
+  bool first_iter = true;
+ while (state.KeepRunning()) {
+ if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
+ assert(first_iter);
+ first_iter = false;
+ state.SkipWithError("error message");
+ } else {
+ state.PauseTiming();
+ state.ResumeTiming();
+ }
+ }
+}
+BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8);
+ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
+ {"/1/threads:2", true, "error message"},
+ {"/1/threads:4", true, "error message"},
+ {"/1/threads:8", true, "error message"},
+ {"/2/threads:1", false, ""},
+ {"/2/threads:2", false, ""},
+ {"/2/threads:4", false, ""},
+ {"/2/threads:8", false, ""}});
+
+void BM_error_during_running_ranged_for(benchmark::State& state) {
+ assert(state.max_iterations > 3 && "test requires at least a few iterations");
+  bool first_iter = true;
+ // NOTE: Users should not write the for loop explicitly.
+ for (auto It = state.begin(), End = state.end(); It != End; ++It) {
+ if (state.range(0) == 1) {
+ assert(first_iter);
+ first_iter = false;
+ state.SkipWithError("error message");
+ // Test the unfortunate but documented behavior that the ranged-for loop
+ // doesn't automatically terminate when SkipWithError is set.
+ assert(++It != End);
+ break; // Required behavior
+ }
+ }
+}
+BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
+ADD_CASES("BM_error_during_running_ranged_for",
+ {{"/1/iterations:5", true, "error message"},
+ {"/2/iterations:5", false, ""}});
+
+void BM_error_after_running(benchmark::State& state) {
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ if (state.thread_index <= (state.threads / 2))
+ state.SkipWithError("error message");
+}
+BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
+ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
+ {"/threads:2", true, "error message"},
+ {"/threads:4", true, "error message"},
+ {"/threads:8", true, "error message"}});
+
+void BM_error_while_paused(benchmark::State& state) {
+ bool first_iter = true;
+ while (state.KeepRunning()) {
+ if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
+ assert(first_iter);
+ first_iter = false;
+ state.PauseTiming();
+ state.SkipWithError("error message");
+ } else {
+ state.PauseTiming();
+ state.ResumeTiming();
+ }
+ }
+}
+BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8);
+ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
+ {"/1/threads:2", true, "error message"},
+ {"/1/threads:4", true, "error message"},
+ {"/1/threads:8", true, "error message"},
+ {"/2/threads:1", false, ""},
+ {"/2/threads:2", false, ""},
+ {"/2/threads:4", false, ""},
+ {"/2/threads:8", false, ""}});
+
+int main(int argc, char* argv[]) {
+ benchmark::Initialize(&argc, argv);
+
+ TestReporter test_reporter;
+ benchmark::RunSpecifiedBenchmarks(&test_reporter);
+
+ typedef benchmark::BenchmarkReporter::Run Run;
+ auto EB = ExpectedResults.begin();
+
+ for (Run const& run : test_reporter.all_runs_) {
+ assert(EB != ExpectedResults.end());
+ EB->CheckRun(run);
+ ++EB;
+ }
+ assert(EB == ExpectedResults.end());
+
+ return 0;
+}
diff --git a/third-party/benchmark/test/state_assembly_test.cc b/third-party/benchmark/test/state_assembly_test.cc
new file mode 100644
index 000000000000..7ddbb3b2a92c
--- /dev/null
+++ b/third-party/benchmark/test/state_assembly_test.cc
@@ -0,0 +1,68 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+// clang-format off
+extern "C" {
+ extern int ExternInt;
+ benchmark::State& GetState();
+ void Fn();
+}
+// clang-format on
+
+using benchmark::State;
+
+// CHECK-LABEL: test_for_auto_loop:
+extern "C" int test_for_auto_loop() {
+ State& S = GetState();
+ int x = 42;
+ // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
+ // CHECK-NEXT: testq %rbx, %rbx
+ // CHECK-NEXT: je [[LOOP_END:.*]]
+
+ for (auto _ : S) {
+ // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
+ // CHECK-GNU-NEXT: subq $1, %rbx
+ // CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}}
+ // CHECK-NEXT: jne .L[[LOOP_HEAD]]
+ benchmark::DoNotOptimize(x);
+ }
+ // CHECK: [[LOOP_END]]:
+ // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
+
+ // CHECK: movl $101, %eax
+ // CHECK: ret
+ return 101;
+}
+
+// CHECK-LABEL: test_while_loop:
+extern "C" int test_while_loop() {
+ State& S = GetState();
+ int x = 42;
+
+ // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
+ // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
+ while (S.KeepRunning()) {
+ // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
+ // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
+ // CHECK: movq %[[IREG]], [[DEST:.*]]
+ benchmark::DoNotOptimize(x);
+ }
+ // CHECK-DAG: movq [[DEST]], %[[IREG]]
+ // CHECK-DAG: testq %[[IREG]], %[[IREG]]
+ // CHECK-DAG: jne .L[[LOOP_BODY]]
+ // CHECK-DAG: .L[[LOOP_HEADER]]:
+
+ // CHECK: cmpb $0
+ // CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]]
+ // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
+
+ // CHECK: .L[[LOOP_END]]:
+ // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
+
+ // CHECK: movl $101, %eax
+ // CHECK: ret
+ return 101;
+}
diff --git a/third-party/benchmark/test/statistics_gtest.cc b/third-party/benchmark/test/statistics_gtest.cc
new file mode 100644
index 000000000000..3ddc72dd7ac6
--- /dev/null
+++ b/third-party/benchmark/test/statistics_gtest.cc
@@ -0,0 +1,28 @@
+//===---------------------------------------------------------------------===//
+// statistics_test - Unit tests for src/statistics.cc
+//===---------------------------------------------------------------------===//
+
+#include "../src/statistics.h"
+#include "gtest/gtest.h"
+
+namespace {
+TEST(StatisticsTest, Mean) {
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
+}
+
+TEST(StatisticsTest, Median) {
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
+}
+
+TEST(StatisticsTest, StdDev) {
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}),
+ 1.151086443322134);
+}
+
+} // end namespace
diff --git a/third-party/benchmark/test/string_util_gtest.cc b/third-party/benchmark/test/string_util_gtest.cc
new file mode 100644
index 000000000000..c7061b409e91
--- /dev/null
+++ b/third-party/benchmark/test/string_util_gtest.cc
@@ -0,0 +1,161 @@
+//===---------------------------------------------------------------------===//
+// string_util_test - Unit tests for src/string_util.cc
+//===---------------------------------------------------------------------===//
+
+#include "../src/string_util.h"
+#include "../src/internal_macros.h"
+#include "gtest/gtest.h"
+
+namespace {
+TEST(StringUtilTest, stoul) {
+ {
+ size_t pos = 0;
+ EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
+ EXPECT_EQ(1ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(7ul, benchmark::stoul("7", &pos));
+ EXPECT_EQ(1ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(135ul, benchmark::stoul("135", &pos));
+ EXPECT_EQ(3ul, pos);
+ }
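+  // The largest parseable value depends on whether unsigned long is 32- or 64-bit.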
+#if ULONG_MAX == 0xFFFFFFFFul
+ {
+ size_t pos = 0;
+ EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
+ EXPECT_EQ(10ul, pos);
+ }
+#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
+ {
+ size_t pos = 0;
+ EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos));
+ EXPECT_EQ(20ul, pos);
+ }
+#endif
+ {
+ size_t pos = 0;
+ EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
+ EXPECT_EQ(4ul, pos);
+ }
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+ {
+ ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
+ }
+#endif
+}
+
+TEST(StringUtilTest, stoi) {
+ {
+ size_t pos = 0;
+ EXPECT_EQ(0, benchmark::stoi("0", &pos));
+ EXPECT_EQ(1ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
+ EXPECT_EQ(3ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
+ EXPECT_EQ(4ul, pos);
+ }
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+ {
+ ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
+ }
+#endif
+}
+
+TEST(StringUtilTest, stod) {
+ {
+ size_t pos = 0;
+ EXPECT_EQ(0.0, benchmark::stod("0", &pos));
+ EXPECT_EQ(1ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
+ EXPECT_EQ(3ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
+ EXPECT_EQ(4ul, pos);
+ }
+ {
+ size_t pos = 0;
+ EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
+ EXPECT_EQ(3ul, pos);
+ }
+ {
+ size_t pos = 0;
+ /* Note: exactly representable as double */
+ EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
+ EXPECT_EQ(8ul, pos);
+ }
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+ {
+ ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
+ }
+#endif
+}
+
+TEST(StringUtilTest, StrSplit) {
+ EXPECT_EQ(benchmark::StrSplit("", ','), std::vector<std::string>{});
+ EXPECT_EQ(benchmark::StrSplit("hello", ','),
+ std::vector<std::string>({"hello"}));
+ EXPECT_EQ(benchmark::StrSplit("hello,there,is,more", ','),
+ std::vector<std::string>({"hello", "there", "is", "more"}));
+}
+
+} // end namespace
diff --git a/third-party/benchmark/test/templated_fixture_test.cc b/third-party/benchmark/test/templated_fixture_test.cc
new file mode 100644
index 000000000000..fe9865cc776f
--- /dev/null
+++ b/third-party/benchmark/test/templated_fixture_test.cc
@@ -0,0 +1,28 @@
+
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <memory>
+
+template <typename T>
+class MyFixture : public ::benchmark::Fixture {
+ public:
+ MyFixture() : data(0) {}
+
+ T data;
+};
+
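+// BENCHMARK_TEMPLATE_F defines and registers the benchmark in one step;
+// BENCHMARK_TEMPLATE_DEFINE_F (below) only defines it and requires a separate
+// BENCHMARK_REGISTER_F call.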
+BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) {
+ for (auto _ : st) {
+ data += 1;
+ }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) {
+ for (auto _ : st) {
+ data += 1.0;
+ }
+}
+BENCHMARK_REGISTER_F(MyFixture, Bar);
+
+BENCHMARK_MAIN();
diff --git a/third-party/benchmark/test/user_counters_tabular_test.cc b/third-party/benchmark/test/user_counters_tabular_test.cc
new file mode 100644
index 000000000000..421f27b5cb8b
--- /dev/null
+++ b/third-party/benchmark/test/user_counters_tabular_test.cc
@@ -0,0 +1,500 @@
+
+#undef NDEBUG
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// @todo: <jpmag> this checks the full output at once; the rule for
+// CounterSet1 was failing because it was not matching "^[-]+$".
+// @todo: <jpmag> check that the counters are vertically aligned.
+ADD_CASES(TC_ConsoleOut,
+ {
+ // keeping these lines long improves readability, so:
+ // clang-format off
+ {"^[-]+$", MR_Next},
+ {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
+ {"^[-]+$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:1_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:1_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:1_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:2_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:2_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:2_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+ {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+ {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+ {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+ {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+ {"^[-]+$", MR_Next},
+ {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next},
+ {"^[-]+$", MR_Next},
+ {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^[-]+$", MR_Next},
+ {"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next},
+ {"^[-]+$", MR_Next},
+ {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
+ // clang-format on
+ });
+ADD_CASES(TC_CSVOut, {{"%csv_header,"
+ "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});
+
+// ========================================================================= //
+// ------------------------- Tabular Counters Output ----------------------- //
+// ========================================================================= //
+
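+// All counters use Counter::kAvgThreads, so the reported value is averaged
+// over threads; CheckTabular below therefore expects the same constants
+// regardless of the thread count.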
+void BM_Counters_Tabular(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ namespace bm = benchmark;
+ state.counters.insert({
+ {"Foo", {1, bm::Counter::kAvgThreads}},
+ {"Bar", {2, bm::Counter::kAvgThreads}},
+ {"Baz", {4, bm::Counter::kAvgThreads}},
+ {"Bat", {8, bm::Counter::kAvgThreads}},
+ {"Frob", {16, bm::Counter::kAvgThreads}},
+ {"Lob", {32, bm::Counter::kAvgThreads}},
+ });
+}
+BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 2)->Repetitions(2);
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 1,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_mean\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_median\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_stddev\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 1,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 2,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 1,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 1,$", MR_Next},
+ {"\"threads\": 2,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_median\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 1,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 2,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_stddev\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 1,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 2,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:1_mean\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:1_median\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:2_mean\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:2_median\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckTabular(Results const& e) {
+ CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
+ CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
+ CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
+ CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
+ CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
+ CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:1$",
+ &CheckTabular);
+CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$",
+ &CheckTabular);
+
+// ========================================================================= //
+// -------------------- Tabular+Rate Counters Output ----------------------- //
+// ========================================================================= //
+
+void BM_CounterRates_Tabular(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ namespace bm = benchmark;
+ state.counters.insert({
+ {"Foo", {1, bm::Counter::kAvgThreadsRate}},
+ {"Bar", {2, bm::Counter::kAvgThreadsRate}},
+ {"Baz", {4, bm::Counter::kAvgThreadsRate}},
+ {"Bat", {8, bm::Counter::kAvgThreadsRate}},
+ {"Frob", {16, bm::Counter::kAvgThreadsRate}},
+ {"Lob", {32, bm::Counter::kAvgThreadsRate}},
+ });
+}
+BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
+ {"\"family_index\": 1,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckTabularRate(Results const& e) {
+ double t = e.DurationCPUTime();
+ CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
+ &CheckTabularRate);
+
+// ========================================================================= //
+// ------------------------- Tabular Counters Output ----------------------- //
+// ========================================================================= //
+
+// set only some of the counters
+void BM_CounterSet0_Tabular(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ namespace bm = benchmark;
+ state.counters.insert({
+ {"Foo", {10, bm::Counter::kAvgThreads}},
+ {"Bar", {20, bm::Counter::kAvgThreads}},
+ {"Baz", {40, bm::Counter::kAvgThreads}},
+ });
+}
+BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
+ {"\"family_index\": 2,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
+ "%float,,%float,%float,,"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSet0(Results const& e) {
+ CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
+ CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
+ CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
+
+// Again, set only a subset of the counters.
+void BM_CounterSet1_Tabular(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ namespace bm = benchmark;
+ state.counters.insert({
+ {"Foo", {15, bm::Counter::kAvgThreads}},
+ {"Bar", {25, bm::Counter::kAvgThreads}},
+ {"Baz", {45, bm::Counter::kAvgThreads}},
+ });
+}
+BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
+ {"\"family_index\": 3,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
+ "%float,,%float,%float,,"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSet1(Results const& e) {
+ CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
+ CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
+ CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
+
+// ========================================================================= //
+// ------------------------- Tabular Counters Output ----------------------- //
+// ========================================================================= //
+
+// set only some of the counters, different set now.
+void BM_CounterSet2_Tabular(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ namespace bm = benchmark;
+ state.counters.insert({
+ {"Foo", {10, bm::Counter::kAvgThreads}},
+ {"Bat", {30, bm::Counter::kAvgThreads}},
+ {"Baz", {40, bm::Counter::kAvgThreads}},
+ });
+}
+BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
+ {"\"family_index\": 4,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
+ ",%float,%float,%float,,"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSet2(Results const& e) {
+ CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
+ CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
+ CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
diff --git a/third-party/benchmark/test/user_counters_test.cc b/third-party/benchmark/test/user_counters_test.cc
new file mode 100644
index 000000000000..377bb32ca948
--- /dev/null
+++ b/third-party/benchmark/test/user_counters_test.cc
@@ -0,0 +1,555 @@
+
+#undef NDEBUG
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// ========================================================================= //
+// ---------------------- Testing Prologue Output -------------------------- //
+// ========================================================================= //
+
+// clang-format off
+
+ADD_CASES(TC_ConsoleOut,
+ {{"^[-]+$", MR_Next},
+ {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next},
+ {"^[-]+$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
+
+// clang-format on
+
+// ========================================================================= //
+// ------------------------- Simple Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_Simple(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ state.counters["foo"] = 1;
+ state.counters["bar"] = 2 * (double)state.iterations();
+}
+BENCHMARK(BM_Counters_Simple);
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSimple(Results const& e) {
+ double its = e.NumIterations();
+ CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+ // check that the value of bar is within 0.1% of the expected value
+ CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
+
+// ========================================================================= //
+// --------------------- Counters+Items+Bytes/s Output --------------------- //
+// ========================================================================= //
+
+namespace {
+int num_calls1 = 0;
+}
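+// Tracks how many times the benchmark function has been invoked so that the
+// final value of the "bar" counter can be checked against it.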
+void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ state.counters["foo"] = 1;
+ state.counters["bar"] = ++num_calls1;
+ state.SetBytesProcessed(364);
+ state.SetItemsProcessed(150);
+}
+BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
+ "bar=%hrfloat bytes_per_second=%hrfloat/s "
+ "foo=%hrfloat items_per_second=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
+ {"\"family_index\": 1,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"bytes_per_second\": %float,$", MR_Next},
+ {"\"foo\": %float,$", MR_Next},
+ {"\"items_per_second\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
+ "%csv_bytes_items_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckBytesAndItemsPSec(Results const& e) {
+ double t = e.DurationCPUTime(); // this (and not real time) is the time used
+ CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+ CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
+ // check that the values are within 0.1% of the expected values
+ CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
+ CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
+ &CheckBytesAndItemsPSec);
+
+// ========================================================================= //
+// ------------------------- Rate Counters Output -------------------------- //
+// ========================================================================= //
+
+void BM_Counters_Rate(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ namespace bm = benchmark;
+ state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
+ state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
+}
+BENCHMARK(BM_Counters_Rate);
+ADD_CASES(
+ TC_ConsoleOut,
+ {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
+ {"\"family_index\": 2,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckRate(Results const& e) {
+ double t = e.DurationCPUTime(); // this (and not real time) is the time used
+ // check that the values are within 0.1% of the expected values
+ CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
+
+// ========================================================================= //
+// ----------------------- Inverted Counters Output ------------------------ //
+// ========================================================================= //
+
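+// Counter::kInvert reports 1/value, so foo (1e-4) is shown as 10k and
+// bar (10000) as 100u, as CheckInvert verifies below.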
+void BM_Invert(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ namespace bm = benchmark;
+ state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
+ state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
+}
+BENCHMARK(BM_Invert);
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
+ {"\"family_index\": 3,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Invert\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckInvert(Results const& e) {
+ CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
+
+// ========================================================================= //
+// --------------------- InvertedRate Counters Output ---------------------- //
+// ========================================================================= //
+
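+// Combining kIsRate with kInvert reports time divided by the counter value,
+// i.e. seconds per unit of work, as CheckInvertedRate verifies below.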
+void BM_Counters_InvertedRate(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ namespace bm = benchmark;
+ state.counters["foo"] =
+ bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
+ state.counters["bar"] =
+ bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
+}
+BENCHMARK(BM_Counters_InvertedRate);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report "
+ "bar=%hrfloats foo=%hrfloats$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_InvertedRate\",$"},
+ {"\"family_index\": 4,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckInvertedRate(Results const& e) {
+ double t = e.DurationCPUTime(); // this (and not real time) is the time used
+ // check that the values are within 0.1% of the expected values
+ CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);
+
+// ========================================================================= //
+// ------------------------- Thread Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_Threads(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ state.counters["foo"] = 1;
+ state.counters["bar"] = 2;
+}
+BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
+ "bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
+ {"\"family_index\": 5,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(
+ TC_CSVOut,
+ {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckThreads(Results const& e) {
+ CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
+ CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
+
+// ========================================================================= //
+// ---------------------- ThreadAvg Counters Output ------------------------ //
+// ========================================================================= //
+
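+// kAvgThreads divides the summed counter by the number of threads, so the
+// per-thread values 1 and 2 are reported unchanged regardless of ThreadRange.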
+void BM_Counters_AvgThreads(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ namespace bm = benchmark;
+ state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
+ state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
+}
+BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
+ "%console_report bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
+ {"\"family_index\": 6,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(
+ TC_CSVOut,
+ {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgThreads(Results const& e) {
+ CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+ CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
+ &CheckAvgThreads);
+
+// ========================================================================= //
+// -------------------- ThreadAvgRate Counters Output ---------------------- //
+// ========================================================================= //
+
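+// kAvgThreadsRate (kAvgThreads|kIsRate) additionally divides by the elapsed
+// CPU time, giving 1/t and 2/t; CheckAvgThreadsRate verifies this below.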
+void BM_Counters_AvgThreadsRate(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ namespace bm = benchmark;
+ state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
+ state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
+}
+BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
+ "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
+ {"\"family_index\": 7,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
+ "threads:%int\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgThreadsRate(Results const& e) {
+ CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int",
+ &CheckAvgThreadsRate);
+
+// ========================================================================= //
+// ------------------- IterationInvariant Counters Output ------------------ //
+// ========================================================================= //
+
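+// kIsIterationInvariant multiplies the counter by the iteration count, so
+// foo=1 and bar=2 are reported as its and 2*its (see CheckIterationInvariant).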
+void BM_Counters_IterationInvariant(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ namespace bm = benchmark;
+ state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
+ state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant};
+}
+BENCHMARK(BM_Counters_IterationInvariant);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
+ "bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
+ {"\"family_index\": 8,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckIterationInvariant(Results const& e) {
+ double its = e.NumIterations();
+ // check that the values are within 0.1% of the expected value
+ CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
+ &CheckIterationInvariant);
+
+// ========================================================================= //
+// ----------------- IterationInvariantRate Counters Output ---------------- //
+// ========================================================================= //
+
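+// kIsIterationInvariantRate (kIsIterationInvariant|kIsRate) reports
+// value * iterations / CPU time, i.e. its/t and 2*its/t below.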
+void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ namespace bm = benchmark;
+ state.counters["foo"] =
+ bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
+ state.counters["bar"] =
+ bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
+}
+BENCHMARK(BM_Counters_kIsIterationInvariantRate);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
+ "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
+ {"\"family_index\": 9,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
+ "%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckIsIterationInvariantRate(Results const& e) {
+ double its = e.NumIterations();
+ double t = e.DurationCPUTime(); // this (and not real time) is the time used
+ // check that the values are within 0.1% of the expected values
+ CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate",
+ &CheckIsIterationInvariantRate);
+
+// ========================================================================= //
+// ------------------- AvgIterations Counters Output ------------------ //
+// ========================================================================= //
+
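+// kAvgIterations divides the counter by the iteration count, so foo=1 and
+// bar=2 are reported as 1/its and 2/its (see CheckAvgIterations).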
+void BM_Counters_AvgIterations(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ namespace bm = benchmark;
+ state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
+ state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations};
+}
+BENCHMARK(BM_Counters_AvgIterations);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
+ "bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_AvgIterations\",$"},
+ {"\"family_index\": 10,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut,
+ {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgIterations(Results const& e) {
+ double its = e.NumIterations();
+ // check that the values are within 0.1% of the expected value
+ CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
+
+// ========================================================================= //
+// ----------------- AvgIterationsRate Counters Output ---------------- //
+// ========================================================================= //
+
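+// kAvgIterationsRate (kAvgIterations|kIsRate) reports
+// value / iterations / CPU time, i.e. 1/(its*t) and 2/(its*t) below.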
+void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
+ for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ }
+ namespace bm = benchmark;
+ state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
+ state.counters["bar"] =
+ bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
+}
+BENCHMARK(BM_Counters_kAvgIterationsRate);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
+ "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
+ {"\"family_index\": 11,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 1,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
+ "%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgIterationsRate(Results const& e) {
+ double its = e.NumIterations();
+ double t = e.DurationCPUTime(); // this (and not real time) is the time used
+ // check that the values are within 0.1% of the expected values
+ CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate",
+ &CheckAvgIterationsRate);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
diff --git a/third-party/benchmark/test/user_counters_thousands_test.cc b/third-party/benchmark/test/user_counters_thousands_test.cc
new file mode 100644
index 000000000000..bbe194264ed4
--- /dev/null
+++ b/third-party/benchmark/test/user_counters_thousands_test.cc
@@ -0,0 +1,183 @@
+
+#undef NDEBUG
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// ========================================================================= //
+// ------------------------ Thousands Customisation ------------------------ //
+// ========================================================================= //
+
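+// Counter::OneK only affects the human-readable console output: kIs1000
+// scales by multiples of 1000 (1048576 -> 1048.58k) and kIs1024 by multiples
+// of 1024 (1000000 -> ~976.56k). JSON and CSV always carry the raw value.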
+void BM_Counters_Thousands(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ namespace bm = benchmark;
+ state.counters.insert({
+ {"t0_1000000DefaultBase",
+ bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
+ {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
+ benchmark::Counter::OneK::kIs1000)},
+ {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
+ benchmark::Counter::OneK::kIs1024)},
+ {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
+ benchmark::Counter::OneK::kIs1000)},
+ {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
+ benchmark::Counter::OneK::kIs1024)},
+ });
+}
+BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
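+// Repetitions(2) produces two per-repetition rows plus mean/median/stddev
+// aggregates; since both repetitions report identical counters, the stddev
+// entries below expect 0.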
+ADD_CASES(
+ TC_ConsoleOut,
+ {
+ {"^BM_Counters_Thousands/repeats:2 %console_report "
+ "t0_1000000DefaultBase=1000k "
+ "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
+ "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
+ {"^BM_Counters_Thousands/repeats:2 %console_report "
+ "t0_1000000DefaultBase=1000k "
+ "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
+ "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
+ {"^BM_Counters_Thousands/repeats:2_mean %console_report "
+ "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
+ "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
+ "t4_1048576Base1024=1024k$"},
+ {"^BM_Counters_Thousands/repeats:2_median %console_report "
+ "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
+ "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
+ "t4_1048576Base1024=1024k$"},
+ {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
+ "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
+ "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
+ });
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"repetition_index\": 1,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
+ {"\"family_index\": 0,$", MR_Next},
+ {"\"per_family_instance_index\": 0,$", MR_Next},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"repetitions\": 2,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next},
+ {"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
+ {"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next},
+ {"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
+ {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
+ {"}", MR_Next}});
+
+ADD_CASES(
+ TC_CSVOut,
+ {{"^\"BM_Counters_Thousands/"
+ "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
+ "0)*6,1\\.04858e\\+(0)*6$"},
+ {"^\"BM_Counters_Thousands/"
+ "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
+ "0)*6,1\\.04858e\\+(0)*6$"},
+ {"^\"BM_Counters_Thousands/"
+ "repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
+ "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
+ {"^\"BM_Counters_Thousands/"
+ "repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
+ "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
+ {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckThousands(Results const& e) {
+ if (e.name != "BM_Counters_Thousands/repeats:2")
+ return; // Do not check the aggregates!
+
+ // check that the values are within 0.01% of the expected values
+ CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,
+ 0.0001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }