Diffstat (limited to 'src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set_test.cc')
-rw-r--r-- src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set_test.cc | 520
1 file changed, 338 insertions, 182 deletions
diff --git a/src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set_test.cc b/src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set_test.cc index 5ad4904f971..7dac65a03be 100644 --- a/src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set_test.cc +++ b/src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set_test.cc @@ -4,7 +4,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -14,6 +14,7 @@ #include "absl/container/internal/raw_hash_set.h" +#include <atomic> #include <cmath> #include <cstdint> #include <deque> @@ -22,10 +23,13 @@ #include <numeric> #include <random> #include <string> +#include <unordered_map> +#include <unordered_set> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/internal/cycleclock.h" #include "absl/base/internal/raw_logging.h" #include "absl/container/internal/container_memory.h" @@ -35,6 +39,7 @@ #include "absl/strings/string_view.h" namespace absl { +ABSL_NAMESPACE_BEGIN namespace container_internal { struct RawHashSetTestOnlyAccess { @@ -46,20 +51,54 @@ struct RawHashSetTestOnlyAccess { namespace { -using ::testing::DoubleNear; using ::testing::ElementsAre; -using ::testing::Optional; +using ::testing::Eq; +using ::testing::Ge; +using ::testing::Lt; using ::testing::Pair; using ::testing::UnorderedElementsAre; TEST(Util, NormalizeCapacity) { - constexpr size_t kMinCapacity = Group::kWidth - 1; - EXPECT_EQ(kMinCapacity, NormalizeCapacity(0)); - EXPECT_EQ(kMinCapacity, NormalizeCapacity(1)); - EXPECT_EQ(kMinCapacity, NormalizeCapacity(2)); - EXPECT_EQ(kMinCapacity, NormalizeCapacity(kMinCapacity)); - EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 1)); - EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2)); + EXPECT_EQ(1, NormalizeCapacity(0)); + EXPECT_EQ(1, NormalizeCapacity(1)); + EXPECT_EQ(3, NormalizeCapacity(2)); + EXPECT_EQ(3, NormalizeCapacity(3)); + EXPECT_EQ(7, NormalizeCapacity(4)); + EXPECT_EQ(7, NormalizeCapacity(7)); + EXPECT_EQ(15, NormalizeCapacity(8)); + EXPECT_EQ(15, NormalizeCapacity(15)); + EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1)); + EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2)); +} + +TEST(Util, GrowthAndCapacity) { + // Verify that GrowthToCapacity gives the minimum capacity that has enough + // growth. + for (size_t growth = 0; growth < 10000; ++growth) { + SCOPED_TRACE(growth); + size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth)); + // The capacity is large enough for `growth`. + EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth)); + // For (capacity+1) < kWidth, growth should equal capacity. + if (capacity + 1 < Group::kWidth) { + EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity)); + } else { + EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity)); + } + if (growth != 0 && capacity > 1) { + // There is no smaller capacity that works. 
+ EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth)); + } + } + + for (size_t capacity = Group::kWidth - 1; capacity < 10000; + capacity = 2 * capacity + 1) { + SCOPED_TRACE(capacity); + size_t growth = CapacityToGrowth(capacity); + EXPECT_THAT(growth, Lt(capacity)); + EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity); + EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity); + } } TEST(Util, probe_seq) { @@ -106,14 +145,14 @@ TEST(BitMask, WithShift) { } TEST(BitMask, LeadingTrailing) { - EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).LeadingZeros()), 3); - EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).TrailingZeros()), 6); + EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3); + EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6); - EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).LeadingZeros()), 15); - EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).TrailingZeros()), 0); + EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15); + EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0); - EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).LeadingZeros()), 0); - EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).TrailingZeros()), 15); + EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0); + EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15); EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3); EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1); @@ -219,25 +258,43 @@ TEST(Group, CountLeadingEmptyOrDeleted) { } } -struct IntPolicy { - using slot_type = int64_t; - using key_type = int64_t; - using init_type = int64_t; +template <class T> +struct ValuePolicy { + using slot_type = T; + using key_type = T; + using init_type = T; - static void construct(void*, int64_t* slot, int64_t v) { *slot = v; } - static void destroy(void*, int64_t*) {} - static void transfer(void*, int64_t* new_slot, int64_t* old_slot) { - *new_slot = *old_slot; + template <class Allocator, class... Args> + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + absl::allocator_traits<Allocator>::construct(*alloc, slot, + std::forward<Args>(args)...); } - static int64_t& element(slot_type* slot) { return *slot; } + template <class Allocator> + static void destroy(Allocator* alloc, slot_type* slot) { + absl::allocator_traits<Allocator>::destroy(*alloc, slot); + } - template <class F> - static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) { - return std::forward<F>(f)(x, x); + template <class Allocator> + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + static T& element(slot_type* slot) { return *slot; } + + template <class F, class... Args> + static decltype(absl::container_internal::DecomposeValue( + std::declval<F>(), std::declval<Args>()...)) + apply(F&& f, Args&&... 
args) { + return absl::container_internal::DecomposeValue( + std::forward<F>(f), std::forward<Args>(args)...); } }; +using IntPolicy = ValuePolicy<int64_t>; + class StringPolicy { template <class F, class K, class V, class = typename std::enable_if< @@ -314,7 +371,25 @@ struct IntTable : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>, std::equal_to<int64_t>, std::allocator<int64_t>> { using Base = typename IntTable::raw_hash_set; - IntTable() {} + using Base::Base; +}; + +template <typename T> +struct CustomAlloc : std::allocator<T> { + CustomAlloc() {} + + template <typename U> + CustomAlloc(const CustomAlloc<U>& other) {} + + template<class U> struct rebind { + using other = CustomAlloc<U>; + }; +}; + +struct CustomAllocIntTable + : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>, + std::equal_to<int64_t>, CustomAlloc<int64_t>> { + using Base = typename CustomAllocIntTable::raw_hash_set; using Base::Base; }; @@ -342,6 +417,14 @@ TEST(Table, EmptyFunctorOptimization) { size_t size; size_t capacity; size_t growth_left; + void* infoz; + }; + struct MockTableInfozDisabled { + void* ctrl; + void* slots; + size_t size; + size_t capacity; + size_t growth_left; }; struct StatelessHash { size_t operator()(absl::string_view) const { return 0; } @@ -350,17 +433,27 @@ TEST(Table, EmptyFunctorOptimization) { size_t dummy; }; - EXPECT_EQ( - sizeof(MockTable), - sizeof( - raw_hash_set<StringPolicy, StatelessHash, - std::equal_to<absl::string_view>, std::allocator<int>>)); + if (std::is_empty<HashtablezInfoHandle>::value) { + EXPECT_EQ(sizeof(MockTableInfozDisabled), + sizeof(raw_hash_set<StringPolicy, StatelessHash, + std::equal_to<absl::string_view>, + std::allocator<int>>)); - EXPECT_EQ( - sizeof(MockTable) + sizeof(StatefulHash), - sizeof( - raw_hash_set<StringPolicy, StatefulHash, - std::equal_to<absl::string_view>, std::allocator<int>>)); + EXPECT_EQ(sizeof(MockTableInfozDisabled) + sizeof(StatefulHash), + sizeof(raw_hash_set<StringPolicy, StatefulHash, + std::equal_to<absl::string_view>, + std::allocator<int>>)); + } else { + EXPECT_EQ(sizeof(MockTable), + sizeof(raw_hash_set<StringPolicy, StatelessHash, + std::equal_to<absl::string_view>, + std::allocator<int>>)); + + EXPECT_EQ(sizeof(MockTable) + sizeof(StatefulHash), + sizeof(raw_hash_set<StringPolicy, StatefulHash, + std::equal_to<absl::string_view>, + std::allocator<int>>)); + } } TEST(Table, Empty) { @@ -369,52 +462,6 @@ TEST(Table, Empty) { EXPECT_TRUE(t.empty()); } -#ifdef __GNUC__ -template <class T> -ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) { - asm volatile("" : : "r,m"(v) : "memory"); -} -#endif - -TEST(Table, Prefetch) { - IntTable t; - t.emplace(1); - // Works for both present and absent keys. - t.prefetch(1); - t.prefetch(2); - - // Do not run in debug mode, when prefetch is not implemented, or when - // sanitizers are enabled. 
-#if defined(NDEBUG) && defined(__GNUC__) && !defined(ADDRESS_SANITIZER) && \ - !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER) && \ - !defined(UNDEFINED_BEHAVIOR_SANITIZER) - const auto now = [] { return absl::base_internal::CycleClock::Now(); }; - - // Make size enough to not fit in L2 cache (16.7 Mb) - static constexpr int size = 1 << 22; - for (int i = 0; i < size; ++i) t.insert(i); - - int64_t no_prefetch = 0, prefetch = 0; - for (int iter = 0; iter < 10; ++iter) { - int64_t time = now(); - for (int i = 0; i < size; ++i) { - DoNotOptimize(t.find(i)); - } - no_prefetch += now() - time; - - time = now(); - for (int i = 0; i < size; ++i) { - t.prefetch(i + 20); - DoNotOptimize(t.find(i)); - } - prefetch += now() - time; - } - - // no_prefetch is at least 30% slower. - EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3); -#endif -} - TEST(Table, LookupEmpty) { IntTable t; auto it = t.find(0); @@ -784,7 +831,7 @@ TEST(Table, EnsureNonQuadraticAsInRust) { TEST(Table, ClearBug) { IntTable t; constexpr size_t capacity = container_internal::Group::kWidth - 1; - constexpr size_t max_size = capacity / 2; + constexpr size_t max_size = capacity / 2 + 1; for (size_t i = 0; i < max_size; ++i) { t.insert(i); } @@ -815,6 +862,25 @@ TEST(Table, Erase) { EXPECT_TRUE(t.find(0) == t.end()); } +TEST(Table, EraseMaintainsValidIterator) { + IntTable t; + const int kNumElements = 100; + for (int i = 0; i < kNumElements; i ++) { + EXPECT_TRUE(t.emplace(i).second); + } + EXPECT_EQ(t.size(), kNumElements); + + int num_erase_calls = 0; + auto it = t.begin(); + while (it != t.end()) { + t.erase(it++); + num_erase_calls++; + } + + EXPECT_TRUE(t.empty()); + EXPECT_EQ(num_erase_calls, kNumElements); +} + // Collect N bad keys by following algorithm: // 1. Create an empty table and reserve it to 2 * N. // 2. Insert N random elements. @@ -824,7 +890,8 @@ TEST(Table, Erase) { std::vector<int64_t> CollectBadMergeKeys(size_t N) { static constexpr int kGroupSize = Group::kWidth - 1; - auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> { + auto topk_range = [](size_t b, size_t e, + IntTable* t) -> std::vector<int64_t> { for (size_t i = b; i != e; ++i) { t->emplace(i); } @@ -978,8 +1045,8 @@ using ProbeStatsPerSize = std::map<size_t, ProbeStats>; // 1. Create new table and reserve it to keys.size() * 2 // 2. Insert all keys xored with seed // 3. Collect ProbeStats from final table. 
-ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys, - size_t num_iters) { +ProbeStats CollectProbeStatsOnKeysXoredWithSeed( + const std::vector<int64_t>& keys, size_t num_iters) { const size_t reserve_size = keys.size() * 2; ProbeStats stats; @@ -1013,7 +1080,7 @@ ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys ExpectedStats XorSeedExpectedStats() { constexpr bool kRandomizesInserts = -#if NDEBUG +#ifdef NDEBUG false; #else // NDEBUG true; @@ -1050,6 +1117,7 @@ ExpectedStats XorSeedExpectedStats() { ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width"); return {}; } + TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) { ProbeStatsPerSize stats; std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10}; @@ -1106,7 +1174,7 @@ ProbeStats CollectProbeStatsOnLinearlyTransformedKeys( ExpectedStats LinearTransformExpectedStats() { constexpr bool kRandomizesInserts = -#if NDEBUG +#ifdef NDEBUG false; #else // NDEBUG true; @@ -1143,6 +1211,7 @@ ExpectedStats LinearTransformExpectedStats() { ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width"); return {}; } + TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) { ProbeStatsPerSize stats; std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10}; @@ -1295,37 +1364,31 @@ TEST(Table, ConstructFromInitList) { TEST(Table, CopyConstruct) { IntTable t; - t.max_load_factor(.321f); t.emplace(0); EXPECT_EQ(1, t.size()); { IntTable u(t); EXPECT_EQ(1, u.size()); - EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); EXPECT_THAT(*u.find(0), 0); } { IntTable u{t}; EXPECT_EQ(1, u.size()); - EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); EXPECT_THAT(*u.find(0), 0); } { IntTable u = t; EXPECT_EQ(1, u.size()); - EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); EXPECT_THAT(*u.find(0), 0); } } TEST(Table, CopyConstructWithAlloc) { StringTable t; - t.max_load_factor(.321f); t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u(t, Alloc<std::pair<std::string, std::string>>()); EXPECT_EQ(1, u.size()); - EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } @@ -1343,94 +1406,75 @@ TEST(Table, AllocWithExplicitCtor) { TEST(Table, MoveConstruct) { { StringTable t; - t.max_load_factor(.321f); - const float lf = t.max_load_factor(); t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u(std::move(t)); EXPECT_EQ(1, u.size()); - EXPECT_EQ(lf, u.max_load_factor()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } { StringTable t; - t.max_load_factor(.321f); - const float lf = t.max_load_factor(); t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u{std::move(t)}; EXPECT_EQ(1, u.size()); - EXPECT_EQ(lf, u.max_load_factor()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } { StringTable t; - t.max_load_factor(.321f); - const float lf = t.max_load_factor(); t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u = std::move(t); EXPECT_EQ(1, u.size()); - EXPECT_EQ(lf, u.max_load_factor()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } } TEST(Table, MoveConstructWithAlloc) { StringTable t; - t.max_load_factor(.321f); - const float lf = t.max_load_factor(); t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>()); EXPECT_EQ(1, u.size()); - EXPECT_EQ(lf, u.max_load_factor()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } TEST(Table, CopyAssign) { StringTable t; - t.max_load_factor(.321f); t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u; 
u = t; EXPECT_EQ(1, u.size()); - EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } TEST(Table, CopySelfAssign) { StringTable t; - t.max_load_factor(.321f); - const float lf = t.max_load_factor(); t.emplace("a", "b"); EXPECT_EQ(1, t.size()); t = *&t; EXPECT_EQ(1, t.size()); - EXPECT_EQ(lf, t.max_load_factor()); EXPECT_THAT(*t.find("a"), Pair("a", "b")); } TEST(Table, MoveAssign) { StringTable t; - t.max_load_factor(.321f); - const float lf = t.max_load_factor(); t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u; u = std::move(t); EXPECT_EQ(1, u.size()); - EXPECT_EQ(lf, u.max_load_factor()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } TEST(Table, Equality) { StringTable t; - std::vector<std::pair<std::string, std::string>> v = {{"a", "b"}, {"aa", "bb"}}; + std::vector<std::pair<std::string, std::string>> v = {{"a", "b"}, + {"aa", "bb"}}; t.insert(std::begin(v), std::end(v)); StringTable u = t; EXPECT_EQ(u, t); @@ -1438,20 +1482,24 @@ TEST(Table, Equality) { TEST(Table, Equality2) { StringTable t; - std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"}, {"aa", "bb"}}; + std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"}, + {"aa", "bb"}}; t.insert(std::begin(v1), std::end(v1)); StringTable u; - std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}}; + std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, + {"aa", "aa"}}; u.insert(std::begin(v2), std::end(v2)); EXPECT_NE(u, t); } TEST(Table, Equality3) { StringTable t; - std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"}, {"bb", "bb"}}; + std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"}, + {"bb", "bb"}}; t.insert(std::begin(v1), std::end(v1)); StringTable u; - std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}}; + std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, + {"aa", "aa"}}; u.insert(std::begin(v2), std::end(v2)); EXPECT_NE(u, t); } @@ -1652,6 +1700,38 @@ TEST(Table, Merge) { EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"))); } +TEST(Table, IteratorEmplaceConstructibleRequirement) { + struct Value { + explicit Value(absl::string_view view) : value(view) {} + std::string value; + + bool operator==(const Value& other) const { return value == other.value; } + }; + struct H { + size_t operator()(const Value& v) const { + return absl::Hash<std::string>{}(v.value); + } + }; + + struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>, + std::allocator<Value>> { + using Base = typename Table::raw_hash_set; + using Base::Base; + }; + + std::string input[3]{"A", "B", "C"}; + + Table t(std::begin(input), std::end(input)); + EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"})); + + input[0] = "D"; + input[1] = "E"; + input[2] = "F"; + t.insert(std::begin(input), std::end(input)); + EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}, + Value{"D"}, Value{"E"}, Value{"F"})); +} + TEST(Nodes, EmptyNodeType) { using node_type = StringTable::node_type; node_type n; @@ -1663,9 +1743,9 @@ TEST(Nodes, EmptyNodeType) { } TEST(Nodes, ExtractInsert) { - constexpr char k0[] = "Very long std::string zero."; - constexpr char k1[] = "Very long std::string one."; - constexpr char k2[] = "Very long std::string two."; + constexpr char k0[] = "Very long string zero."; + constexpr char k1[] = "Very long string one."; + constexpr char k2[] = "Very long string two."; StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}}; 
EXPECT_THAT(t, UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, ""))); @@ -1676,7 +1756,7 @@ TEST(Nodes, ExtractInsert) { EXPECT_FALSE(node.empty()); StringTable t2; - auto res = t2.insert(std::move(node)); + StringTable::insert_return_type res = t2.insert(std::move(node)); EXPECT_TRUE(res.inserted); EXPECT_THAT(*res.position, Pair(k0, "")); EXPECT_FALSE(res.node); @@ -1706,80 +1786,94 @@ TEST(Nodes, ExtractInsert) { EXPECT_FALSE(node); } -StringTable MakeSimpleTable(size_t size) { - StringTable t; - for (size_t i = 0; i < size; ++i) t.emplace(std::string(1, 'A' + i), ""); +TEST(Nodes, HintInsert) { + IntTable t = {1, 2, 3}; + auto node = t.extract(1); + EXPECT_THAT(t, UnorderedElementsAre(2, 3)); + auto it = t.insert(t.begin(), std::move(node)); + EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3)); + EXPECT_EQ(*it, 1); + EXPECT_FALSE(node); + + node = t.extract(2); + EXPECT_THAT(t, UnorderedElementsAre(1, 3)); + // reinsert 2 to make the next insert fail. + t.insert(2); + EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3)); + it = t.insert(t.begin(), std::move(node)); + EXPECT_EQ(*it, 2); + // The node was not emptied by the insert call. + EXPECT_TRUE(node); +} + +IntTable MakeSimpleTable(size_t size) { + IntTable t; + while (t.size() < size) t.insert(t.size()); return t; } -std::string OrderOfIteration(const StringTable& t) { - std::string order; - for (auto& p : t) order += p.first; - return order; +std::vector<int> OrderOfIteration(const IntTable& t) { + return {t.begin(), t.end()}; } +// These IterationOrderChanges tests depend on non-deterministic behavior. +// We are injecting non-determinism from the pointer of the table, but do so in +// a way that only the page matters. We have to retry enough times to make sure +// we are touching different memory pages to cause the ordering to change. +// We also need to keep the old tables around to avoid getting the same memory +// blocks over and over. TEST(Table, IterationOrderChangesByInstance) { - // Needs to be more than kWidth elements to be able to affect order. - const StringTable reference = MakeSimpleTable(20); - - // Since order is non-deterministic we can't just try once and verify. - // We'll try until we find that order changed. It should not take many tries - // for that. - // Important: we have to keep the old tables around. Otherwise tcmalloc will - // just give us the same blocks and we would be doing the same order again. - std::vector<StringTable> garbage; - for (int i = 0; i < 10; ++i) { - auto trial = MakeSimpleTable(20); - if (OrderOfIteration(trial) != OrderOfIteration(reference)) { - // We are done. - return; + for (size_t size : {2, 6, 12, 20}) { + const auto reference_table = MakeSimpleTable(size); + const auto reference = OrderOfIteration(reference_table); + + std::vector<IntTable> tables; + bool found_difference = false; + for (int i = 0; !found_difference && i < 5000; ++i) { + tables.push_back(MakeSimpleTable(size)); + found_difference = OrderOfIteration(tables.back()) != reference; + } + if (!found_difference) { + FAIL() + << "Iteration order remained the same across many attempts with size " + << size; } - garbage.push_back(std::move(trial)); } - FAIL(); } TEST(Table, IterationOrderChangesOnRehash) { - // Since order is non-deterministic we can't just try once and verify. - // We'll try until we find that order changed. It should not take many tries - // for that. - // Important: we have to keep the old tables around. 
Otherwise tcmalloc will - // just give us the same blocks and we would be doing the same order again. - std::vector<StringTable> garbage; - for (int i = 0; i < 10; ++i) { - // Needs to be more than kWidth elements to be able to affect order. - StringTable t = MakeSimpleTable(20); - const std::string reference = OrderOfIteration(t); + std::vector<IntTable> garbage; + for (int i = 0; i < 5000; ++i) { + auto t = MakeSimpleTable(20); + const auto reference = OrderOfIteration(t); // Force rehash to the same size. t.rehash(0); - std::string trial = OrderOfIteration(t); + auto trial = OrderOfIteration(t); if (trial != reference) { // We are done. return; } garbage.push_back(std::move(t)); } - FAIL(); + FAIL() << "Iteration order remained the same across many attempts."; } -TEST(Table, IterationOrderChangesForSmallTables) { - // Since order is non-deterministic we can't just try once and verify. - // We'll try until we find that order changed. - // Important: we have to keep the old tables around. Otherwise tcmalloc will - // just give us the same blocks and we would be doing the same order again. - StringTable reference_table = MakeSimpleTable(5); - const std::string reference = OrderOfIteration(reference_table); - std::vector<StringTable> garbage; - for (int i = 0; i < 50; ++i) { - StringTable t = MakeSimpleTable(5); - std::string trial = OrderOfIteration(t); - if (trial != reference) { - // We are done. - return; - } - garbage.push_back(std::move(t)); - } - FAIL() << "Iteration order remained the same across many attempts."; +// Verify that pointers are invalidated as soon as a second element is inserted. +// This prevents dependency on pointer stability on small tables. +TEST(Table, UnstablePointers) { + IntTable table; + + const auto addr = [&](int i) { + return reinterpret_cast<uintptr_t>(&*table.find(i)); + }; + + table.insert(0); + const uintptr_t old_ptr = addr(0); + + // This causes a rehash. + table.insert(1); + + EXPECT_NE(old_ptr, addr(0)); } // Confirm that we assert if we try to erase() end(). @@ -1794,13 +1888,74 @@ TEST(TableDeathTest, EraseOfEndAsserts) { IntTable t; // Extra simple "regexp" as regexp support is highly varied across platforms. - constexpr char kDeathMsg[] = "it != end"; + constexpr char kDeathMsg[] = "Invalid operation on iterator"; EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg); } -#ifdef ADDRESS_SANITIZER +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +TEST(RawHashSamplerTest, Sample) { + // Enable the feature even if the prod default is off. 
+ SetHashtablezEnabled(true); + SetHashtablezSampleParameter(100); + + auto& sampler = HashtablezSampler::Global(); + size_t start_size = 0; + std::unordered_set<const HashtablezInfo*> preexisting_info; + start_size += sampler.Iterate([&](const HashtablezInfo& info) { + preexisting_info.insert(&info); + ++start_size; + }); + + std::vector<IntTable> tables; + for (int i = 0; i < 1000000; ++i) { + tables.emplace_back(); + tables.back().insert(1); + tables.back().insert(i % 5); + } + size_t end_size = 0; + std::unordered_map<size_t, int> observed_checksums; + end_size += sampler.Iterate([&](const HashtablezInfo& info) { + if (preexisting_info.count(&info) == 0) { + observed_checksums[info.hashes_bitwise_xor.load( + std::memory_order_relaxed)]++; + } + ++end_size; + }); + + EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()), + 0.01, 0.005); + EXPECT_EQ(observed_checksums.size(), 5); + for (const auto& [_, count] : observed_checksums) { + EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05); + } +} +#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE + +TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) { + // Enable the feature even if the prod default is off. + SetHashtablezEnabled(true); + SetHashtablezSampleParameter(100); + + auto& sampler = HashtablezSampler::Global(); + size_t start_size = 0; + start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; }); + + std::vector<CustomAllocIntTable> tables; + for (int i = 0; i < 1000000; ++i) { + tables.emplace_back(); + tables.back().insert(1); + } + size_t end_size = 0; + end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; }); + + EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()), + 0.00, 0.001); +} + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER TEST(Sanitizer, PoisoningUnused) { IntTable t; + t.reserve(5); // Insert something to force an allocation. int64_t& v1 = *t.insert(0).first; @@ -1821,8 +1976,9 @@ TEST(Sanitizer, PoisoningOnErase) { t.erase(0); EXPECT_TRUE(__asan_address_is_poisoned(&v)); } -#endif // ADDRESS_SANITIZER +#endif // ABSL_HAVE_ADDRESS_SANITIZER } // namespace } // namespace container_internal +ABSL_NAMESPACE_END } // namespace absl |
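The updated Util.NormalizeCapacity expectations encode the invariant that SwissTable capacities are always of the form 2^k - 1 (an all-ones bit pattern), with NormalizeCapacity rounding a requested size up to the smallest such value. A minimal standalone sketch of that rounding rule (an illustrative re-implementation for clarity, not the library's internal code):

#include <cassert>
#include <cstddef>

// Illustrative sketch only: mirrors the expectations in Util.NormalizeCapacity.
size_t NormalizeCapacitySketch(size_t n) {
  size_t c = 1;                 // smallest non-empty capacity
  while (c < n) c = c * 2 + 1;  // 1, 3, 7, 15, 31, ... (2^k - 1)
  return c;
}

int main() {
  assert(NormalizeCapacitySketch(0) == 1);
  assert(NormalizeCapacitySketch(2) == 3);
  assert(NormalizeCapacitySketch(8) == 15);
  assert(NormalizeCapacitySketch(15) == 15);
  assert(NormalizeCapacitySketch(16) == 31);
  return 0;
}

Keeping capacities one less than a power of two lets the probe sequence mask with the capacity directly, which is what the companion GrowthAndCapacity test then checks against CapacityToGrowth and GrowthToLowerboundCapacity.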
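The new Nodes.HintInsert coverage exercises node-handle extraction and re-insertion. A small usage sketch against the public absl::flat_hash_set API, relying only on standard node-handle semantics (a failed non-hinted insert hands the element back via the returned node):

#include <cassert>
#include <utility>

#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int> a = {1, 2, 3};
  absl::flat_hash_set<int> b = {2};

  // Move element 1 from `a` to `b` without copying it.
  auto node = a.extract(1);
  assert(!node.empty());
  b.insert(std::move(node));  // succeeds; the node handle is consumed

  // Inserting a key that `b` already holds fails, and the
  // insert_return_type hands the element back through its `node` member.
  node = a.extract(2);
  auto res = b.insert(std::move(node));
  assert(!res.inserted);
  assert(!res.node.empty());
  return 0;
}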
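The reworked iteration-order tests retry across many freshly allocated tables because per-instance order randomization is probabilistic. A hedged sketch of what callers should (not) expect from iteration order:

#include <cstdio>
#include <vector>

#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int> a, b;
  for (int i = 0; i < 20; ++i) {
    a.insert(i);
    b.insert(i);
  }
  // Same elements, but iteration order is unspecified and usually differs
  // between instances; nothing guarantees it differs on any single run,
  // which is why the tests above retry many times before failing.
  std::vector<int> order_a(a.begin(), a.end());
  std::vector<int> order_b(b.begin(), b.end());
  std::printf("orders %s\n", order_a == order_b ? "matched" : "differed");
  return 0;
}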