Diffstat (limited to 'src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h')
-rw-r--r--  src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h  648
1 file changed, 316 insertions, 332 deletions
diff --git a/src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h b/src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h
index b7b5ef8c7b4..8615de8bdfa 100644
--- a/src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -102,23 +102,36 @@
#include <type_traits>
#include <utility>
-#include "absl/base/internal/bits.h"
#include "absl/base/internal/endian.h"
+#include "absl/base/optimization.h"
#include "absl/base/port.h"
+#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
+#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/container/internal/have_sse.h"
#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
-#include "absl/types/optional.h"
+#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"
namespace absl {
+ABSL_NAMESPACE_BEGIN
namespace container_internal {
+template <typename AllocType>
+void SwapAlloc(AllocType& lhs, AllocType& rhs,
+ std::true_type /* propagate_on_container_swap */) {
+ using std::swap;
+ swap(lhs, rhs);
+}
+template <typename AllocType>
+void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
+ std::false_type /* propagate_on_container_swap */) {}
+
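Note: the two SwapAlloc overloads above are tag dispatch on propagate_on_container_swap. A hedged sketch of the intended call shape (DemoSwap is an illustrative name, and SwapAlloc as defined above is assumed in scope; raw_hash_set::swap later in this diff makes the same call):

    #include <memory>

    template <typename Alloc>
    void DemoSwap(Alloc& a, Alloc& b) {
      // The traits' true_type/false_type tag selects the swapping or the
      // no-op overload of SwapAlloc at compile time.
      SwapAlloc(a, b,
                typename std::allocator_traits<
                    Alloc>::propagate_on_container_swap{});
    }
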
template <size_t Width>
class probe_seq {
public:
@@ -164,32 +177,21 @@ struct IsDecomposable<
std::declval<Ts>()...))>,
Policy, Hash, Eq, Ts...> : std::true_type {};
-template <class, class = void>
-struct IsTransparent : std::false_type {};
-template <class T>
-struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
- : std::true_type {};
-
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
-constexpr bool IsNoThrowSwappable() {
+constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
using std::swap;
return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
-
-template <typename T>
-int TrailingZeros(T x) {
- return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
- static_cast<uint64_t>(x))
- : base_internal::CountTrailingZerosNonZero32(
- static_cast<uint32_t>(x));
+template <class T>
+constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
+ return false;
}
template <typename T>
-int LeadingZeros(T x) {
- return sizeof(T) == 8
- ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
- : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
+uint32_t TrailingZeros(T x) {
+ ABSL_INTERNAL_ASSUME(x != 0);
+ return countr_zero(x);
}
// An abstraction over a bitmask. It provides an easy way to iterate through the
@@ -219,26 +221,24 @@ class BitMask {
}
explicit operator bool() const { return mask_ != 0; }
int operator*() const { return LowestBitSet(); }
- int LowestBitSet() const {
+ uint32_t LowestBitSet() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- int HighestBitSet() const {
- return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
- 1) >>
- Shift;
+ uint32_t HighestBitSet() const {
+ return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
}
BitMask begin() const { return *this; }
BitMask end() const { return BitMask(0); }
- int TrailingZeros() const {
+ uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- int LeadingZeros() const {
+ uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
+ return countl_zero(mask_ << extra_bits) >> Shift;
}
private:
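Note: the hunk above cuts off mid-class, so for orientation: a minimal standalone sketch of the iteration BitMask provides, for the SSE2 case (Shift == 0, one matching bit per slot). It assumes the wrapper's increment clears the lowest set bit, and uses C++20 <bit> in place of absl/numeric/bits.h; the mask value is illustrative.

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical Match() result: slots 3, 6 and 9 of a 16-wide group hit.
      uint32_t mask = (1u << 3) | (1u << 6) | (1u << 9);
      std::printf("lowest %d highest %d\n", std::countr_zero(mask),
                  static_cast<int>(std::bit_width(mask)) - 1);
      while (mask != 0) {  // what `for (int i : bitmask)` boils down to
        std::printf("candidate slot %d\n", std::countr_zero(mask));
        mask &= mask - 1;  // advance: clear the lowest set bit
      }
    }
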
@@ -316,7 +316,7 @@ inline bool IsFull(ctrl_t c) { return c >= 0; }
inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -350,12 +350,12 @@ struct GroupSse2Impl {
// Returns a bitmask representing the positions of empty slots.
BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
// This only works because kEmpty is -128.
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
#else
- return Match(kEmpty);
+ return Match(static_cast<h2_t>(kEmpty));
#endif
}
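Note: a scalar sketch of why the _mm_sign_epi8 trick isolates kEmpty. Per byte, "sign" zeroes a zero byte, negates a negative byte, and keeps a positive byte; only -128 is its own negation in int8_t, so only kEmpty keeps its sign bit for _mm_movemask_epi8 to pick up. Values below are illustrative.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int8_t kEmpty = -128, kDeleted = -2, kSentinel = -1, kFull = 0x42;
      for (int8_t c : {kEmpty, kDeleted, kSentinel, kFull}) {
        // Per-byte _mm_sign_epi8(c, c): negate if negative, keep otherwise
        // (a zero byte stays zero either way).
        int8_t signed_c = c < 0 ? static_cast<int8_t>(-c) : c;
        std::printf("ctrl %4d -> movemask bit %d\n", c,
                    (signed_c & 0x80) ? 1 : 0);
      }
    }
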
@@ -369,14 +369,14 @@ struct GroupSse2Impl {
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
auto special = _mm_set1_epi8(kSentinel);
- return TrailingZeros(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+ return TrailingZeros(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
auto x126 = _mm_set1_epi8(126);
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
@@ -388,7 +388,7 @@ struct GroupSse2Impl {
__m128i ctrl;
};
-#endif // SWISSTABLE_HAVE_SSE2
+#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
struct GroupPortableImpl {
static constexpr size_t kWidth = 8;
@@ -442,7 +442,7 @@ struct GroupPortableImpl {
uint64_t ctrl;
};
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
using Group = GroupSse2Impl;
#else
using Group = GroupPortableImpl;
@@ -451,9 +451,7 @@ using Group = GroupPortableImpl;
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
-inline bool IsValidCapacity(size_t n) {
- return ((n + 1) & n) == 0 && n >= Group::kWidth - 1;
-}
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// PRECONDITION:
// IsValidCapacity(capacity)
@@ -463,164 +461,112 @@ inline bool IsValidCapacity(size_t n) {
// DELETED -> EMPTY
// EMPTY -> EMPTY
// FULL -> DELETED
-inline void ConvertDeletedToEmptyAndFullToDeleted(
- ctrl_t* ctrl, size_t capacity) {
- assert(ctrl[capacity] == kSentinel);
- assert(IsValidCapacity(capacity));
- for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
- Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
- }
- // Copy the cloned ctrl bytes.
- std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
- ctrl[capacity] = kSentinel;
-}
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
-// Rounds up the capacity to the next power of 2 minus 1 and ensures it is
-// greater or equal to Group::kWidth - 1.
+// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
inline size_t NormalizeCapacity(size_t n) {
- constexpr size_t kMinCapacity = Group::kWidth - 1;
- return n <= kMinCapacity
- ? kMinCapacity
- : (std::numeric_limits<size_t>::max)() >> LeadingZeros(n);
+ return n ? ~size_t{} >> countl_zero(n) : 1;
}
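Note: a worked check of the new one-liner. countl_zero(n) is the number of leading zero bits of n, so ~size_t{} >> countl_zero(n) is exactly the all-ones pattern 2^k - 1 of the same bit width as n. A hedged standalone sketch using C++20 <bit> in place of absl/numeric/bits.h:

    #include <bit>
    #include <cstddef>
    #include <cstdio>

    size_t NormalizeCapacitySketch(size_t n) {
      return n ? ~size_t{} >> std::countl_zero(n) : 1;
    }

    int main() {
      // 0 -> 1, 5 -> 7, 7 -> 7, 8 -> 15, 1000 -> 1023
      for (size_t n : {0, 5, 7, 8, 1000})
        std::printf("%zu -> %zu\n", n, NormalizeCapacitySketch(n));
    }
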
-// The node_handle concept from C++17.
-// We specialize node_handle for sets and maps. node_handle_base holds the
-// common API of both.
-template <typename Policy, typename Alloc>
-class node_handle_base {
- protected:
- using PolicyTraits = hash_policy_traits<Policy>;
- using slot_type = typename PolicyTraits::slot_type;
-
- public:
- using allocator_type = Alloc;
-
- constexpr node_handle_base() {}
- node_handle_base(node_handle_base&& other) noexcept {
- *this = std::move(other);
- }
- ~node_handle_base() { destroy(); }
- node_handle_base& operator=(node_handle_base&& other) {
- destroy();
- if (!other.empty()) {
- alloc_ = other.alloc_;
- PolicyTraits::transfer(alloc(), slot(), other.slot());
- other.reset();
- }
- return *this;
- }
-
- bool empty() const noexcept { return !alloc_; }
- explicit operator bool() const noexcept { return !empty(); }
- allocator_type get_allocator() const { return *alloc_; }
-
- protected:
- template <typename, typename, typename, typename>
- friend class raw_hash_set;
-
- node_handle_base(const allocator_type& a, slot_type* s) : alloc_(a) {
- PolicyTraits::transfer(alloc(), slot(), s);
- }
-
- void destroy() {
- if (!empty()) {
- PolicyTraits::destroy(alloc(), slot());
- reset();
- }
- }
-
- void reset() {
- assert(alloc_.has_value());
- alloc_ = absl::nullopt;
- }
-
- slot_type* slot() const {
- assert(!empty());
- return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+// General notes on capacity/growth methods below:
+// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
+// average of two empty slots per group.
+// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
+// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
+// never need to probe (the whole table fits in one group) so we don't need a
+// load factor less than 1.
+
+// Given `capacity` of the table, returns the size (i.e. number of full slots)
+// at which we should grow the capacity.
+inline size_t CapacityToGrowth(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ // `capacity*7/8`
+ if (Group::kWidth == 8 && capacity == 7) {
+ // x-x/8 does not work when x==7.
+ return 6;
}
- allocator_type* alloc() { return std::addressof(*alloc_); }
-
- private:
- absl::optional<allocator_type> alloc_;
- mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)>
- slot_space_;
-};
-
-// For sets.
-template <typename Policy, typename Alloc, typename = void>
-class node_handle : public node_handle_base<Policy, Alloc> {
- using Base = typename node_handle::node_handle_base;
-
- public:
- using value_type = typename Base::PolicyTraits::value_type;
-
- constexpr node_handle() {}
+ return capacity - capacity / 8;
+}
+// From desired "growth" to a lower bound of the necessary capacity. The
+// result might not be a valid capacity and may require NormalizeCapacity().
+inline size_t GrowthToLowerboundCapacity(size_t growth) {
+ // `growth*8/7`
+ if (Group::kWidth == 8 && growth == 7) {
+ // x+(x-1)/7 does not work when x==7.
+ return 8;
+ }
+ return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+}
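Note: a worked round trip through the two helpers above, assuming Group::kWidth == 16 so neither kWidth == 8 special case fires. A hedged standalone sketch (the Sketch suffix marks these as copies for illustration):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    size_t CapacityToGrowthSketch(size_t capacity) {
      return capacity - capacity / 8;  // keep roughly 1/8 of the slots free
    }
    size_t GrowthToLowerboundCapacitySketch(size_t growth) {
      return growth +
             static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
    }

    int main() {
      // A 15-slot table holds up to 14 elements before it must grow, and to
      // hold 14 elements we need at least 15 slots (already a valid 2^k-1).
      assert(CapacityToGrowthSketch(15) == 14);
      assert(GrowthToLowerboundCapacitySketch(14) == 15);
      // Same round trip for a larger table.
      assert(CapacityToGrowthSketch(127) == 112);
      assert(GrowthToLowerboundCapacitySketch(112) == 127);
      return 0;
    }
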
- value_type& value() const {
- return Base::PolicyTraits::element(this->slot());
- }
+inline void AssertIsFull(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
- private:
- template <typename, typename, typename, typename>
- friend class raw_hash_set;
+inline void AssertIsValid(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
- node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
+struct FindInfo {
+ size_t offset;
+ size_t probe_length;
};
-// For maps.
-template <typename Policy, typename Alloc>
-class node_handle<Policy, Alloc, absl::void_t<typename Policy::mapped_type>>
- : public node_handle_base<Policy, Alloc> {
- using Base = typename node_handle::node_handle_base;
-
- public:
- using key_type = typename Policy::key_type;
- using mapped_type = typename Policy::mapped_type;
-
- constexpr node_handle() {}
-
- auto key() const -> decltype(Base::PolicyTraits::key(this->slot())) {
- return Base::PolicyTraits::key(this->slot());
- }
+// The representation of the object has two modes:
+// - small: For capacities < kWidth-1
+// - large: For the rest.
+//
+// Differences:
+// - In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
+//
+// - In small mode only the first `capacity()` control bytes after the
+// sentinel are valid. The rest contain dummy kEmpty values that do not
+// represent a real slot. This is important to take into account on
+// find_first_non_full(), where we never try ShouldInsertBackwards() for
+// small tables.
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+
+inline probe_seq<Group::kWidth> probe(ctrl_t* ctrl, size_t hash,
+ size_t capacity) {
+ return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
- mapped_type& mapped() const {
- return Base::PolicyTraits::value(
- &Base::PolicyTraits::element(this->slot()));
+// Probes the raw_hash_set with the probe sequence for hash and returns the
+// pointer to the first empty or deleted slot.
+// NOTE: this function must work with tables having both kEmpty and kDeleted
+// in one group. Such tables appear during drop_deletes_without_resize.
+//
+// This function is very useful when insertions happen and:
+// - the input is already a set
+// - there are enough slots
+// - the element with the hash is not in the table
+inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
+ size_t capacity) {
+ auto seq = probe(ctrl, hash, capacity);
+ while (true) {
+ Group g{ctrl + seq.offset()};
+ auto mask = g.MatchEmptyOrDeleted();
+ if (mask) {
+#if !defined(NDEBUG)
+ // We want to add entropy even when ASLR is not enabled.
+ // In debug build we will randomly insert in either the front or back of
+ // the group.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
+ return {seq.offset(mask.HighestBitSet()), seq.index()};
+ }
+#endif
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
+ }
+ seq.next();
+ assert(seq.index() < capacity && "full table!");
}
-
- private:
- template <typename, typename, typename, typename>
- friend class raw_hash_set;
-
- node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
-};
-
-// Implement the insert_return_type<> concept of C++17.
-template <class Iterator, class NodeType>
-struct insert_return_type {
- Iterator position;
- bool inserted;
- NodeType node;
-};
-
-// Helper trait to allow or disallow arbitrary keys when the hash and
-// eq functions are transparent.
-// It is very important that the inner template is an alias and that the type it
-// produces is not a dependent type. Otherwise, type deduction would fail.
-template <bool is_transparent>
-struct KeyArg {
- // Transparent. Forward `K`.
- template <typename K, typename key_type>
- using type = K;
-};
-
-template <>
-struct KeyArg<false> {
- // Not transparent. Always use `key_type`.
- template <typename K, typename key_type>
- using type = key_type;
-};
+}
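Note: the "full table!" assertion relies on the probe sequence visiting every group before seq.index() exceeds capacity. A hedged sketch of the stepping, assuming the triangular probe_seq<Width> declared earlier in this header (the offset advances by one extra group width per step, which cycles through all groups exactly once when their count is a power of two); the starting offset is illustrative.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t mask = 63;         // capacity 63: four 16-wide groups
      size_t offset = 32, index = 0;  // offset starts at H1(hash) & mask
      for (int step = 0; step < 4; ++step) {
        std::printf("visit group at offset %zu\n", offset & ~size_t{15});
        index += 16;                  // assumed shape of probe_seq::next()
        offset = (offset + index) & mask;
      }
      // Prints offsets 32, 48, 16, 0: each group exactly once.
    }
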
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
@@ -636,14 +582,15 @@ struct KeyArg<false> {
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
-// Allocator: an Allocator [http://devdocs.io/cpp/concept/allocator] with which
+// Allocator: an Allocator
+// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set {
using PolicyTraits = hash_policy_traits<Policy>;
- using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
- IsTransparent<Hash>::value>;
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
public:
using init_type = typename PolicyTraits::init_type;
@@ -741,13 +688,17 @@ class raw_hash_set {
iterator() {}
// PRECONDITION: not an end() iterator.
- reference operator*() const { return PolicyTraits::element(slot_); }
+ reference operator*() const {
+ AssertIsFull(ctrl_);
+ return PolicyTraits::element(slot_);
+ }
// PRECONDITION: not an end() iterator.
pointer operator->() const { return &operator*(); }
// PRECONDITION: not an end() iterator.
iterator& operator++() {
+ AssertIsFull(ctrl_);
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -761,6 +712,8 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
+ AssertIsValid(a.ctrl_);
+ AssertIsValid(b.ctrl_);
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -768,23 +721,27 @@ class raw_hash_set {
}
private:
- iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end()
- iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
+ iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
+ // This assumption helps the compiler know that any non-end iterator is
+ // not equal to any end iterator.
+ ABSL_INTERNAL_ASSUME(ctrl != nullptr);
+ }
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
- // ctrl is not necessarily aligned to Group::kWidth. It is also likely
- // to read past the space for ctrl bytes and into slots. This is ok
- // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
- // is no way to read outside the combined slot array.
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
ctrl_ += shift;
slot_ += shift;
}
+ if (ABSL_PREDICT_FALSE(*ctrl_ == kSentinel)) ctrl_ = nullptr;
}
ctrl_t* ctrl_ = nullptr;
- slot_type* slot_;
+ // To avoid uninitialized member warnings, put slot_ in an anonymous union.
+ // The member is not initialized on singleton and end iterators.
+ union {
+ slot_type* slot_;
+ };
};
class const_iterator {
@@ -824,7 +781,8 @@ class raw_hash_set {
iterator inner_;
};
- using node_type = container_internal::node_handle<Policy, Alloc>;
+ using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
+ using insert_return_type = InsertReturnType<iterator, node_type>;
raw_hash_set() noexcept(
std::is_nothrow_default_constructible<hasher>::value&&
@@ -834,10 +792,10 @@ class raw_hash_set {
explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
- : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
+ : ctrl_(EmptyGroup()),
+ settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count);
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
initialize_slots();
}
}
@@ -879,8 +837,8 @@ class raw_hash_set {
// that accept std::initializer_list<T> and std::initializer_list<init_type>.
// This is advantageous for performance.
//
- // // Turns {"abc", "def"} into std::initializer_list<std::string>, then copies
- // // the strings into the set.
+ // // Turns {"abc", "def"} into std::initializer_list<std::string>, then
+ // // copies the strings into the set.
// std::unordered_set<std::string> s = {"abc", "def"};
//
// // Turns {"abc", "def"} into std::initializer_list<const char*>, then
@@ -943,9 +901,10 @@ class raw_hash_set {
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- const size_t i = find_first_non_full(hash);
- set_ctrl(i, H2(hash));
- emplace_at(i, v);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
+ set_ctrl(target.offset, H2(hash));
+ emplace_at(target.offset, v);
+ infoz().RecordInsert(hash, target.probe_length);
}
size_ = that.size();
growth_left() -= that.size();
@@ -962,23 +921,24 @@ class raw_hash_set {
// Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
- settings_(that.settings_) {
- // growth_left was copied above, reset the one from `that`.
- that.growth_left() = 0;
- }
+ settings_(absl::exchange(that.growth_left(), 0),
+ absl::exchange(that.infoz(), HashtablezInfoHandle()),
+ that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: ctrl_(EmptyGroup()),
slots_(nullptr),
size_(0),
capacity_(0),
- settings_(0, that.hash_ref(), that.eq_ref(), a) {
+ settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
+ a) {
if (a == that.alloc_ref()) {
std::swap(ctrl_, that.ctrl_);
std::swap(slots_, that.slots_);
std::swap(size_, that.size_);
std::swap(capacity_, that.capacity_);
std::swap(growth_left(), that.growth_left());
+ std::swap(infoz(), that.infoz());
} else {
reserve(that.size());
// Note: this will copy elements of dense_set and unordered_set instead of
@@ -1014,12 +974,12 @@ class raw_hash_set {
it.skip_empty_or_deleted();
return it;
}
- iterator end() { return {ctrl_ + capacity_}; }
+ iterator end() { return {}; }
const_iterator begin() const {
return const_cast<raw_hash_set*>(this)->begin();
}
- const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
+ const_iterator end() const { return {}; }
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }
@@ -1028,7 +988,7 @@ class raw_hash_set {
size_t capacity() const { return capacity_; }
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
- void clear() {
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() {
// Iterating over this container is O(bucket_count()). When bucket_count()
// is much greater than size(), iteration becomes prohibitively expensive.
// For clear() it is more important to reuse the allocated array when the
@@ -1046,9 +1006,10 @@ class raw_hash_set {
}
size_ = 0;
reset_ctrl();
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+ reset_growth_left();
}
assert(empty());
+ infoz().RecordStorageChanged(0, capacity_);
}
// This overload kicks in when the argument is an rvalue of insertable and
@@ -1056,8 +1017,11 @@ class raw_hash_set {
//
// flat_hash_map<std::string, int> m;
// m.insert(std::make_pair("abc", 42));
+ // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+ // bug.
template <class T, RequiresInsertable<T> = 0,
- typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+ class T2 = T,
+ typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
std::pair<iterator, bool> insert(T&& value) {
return emplace(std::forward<T>(value));
@@ -1087,14 +1051,16 @@ class raw_hash_set {
// This overload kicks in when the argument is an rvalue of init_type. Its
// purpose is to handle brace-init-list arguments.
//
- // flat_hash_set<std::string, int> s;
+ // flat_hash_map<std::string, int> s;
// s.insert({"abc", 42});
std::pair<iterator, bool> insert(init_type&& value) {
return emplace(std::move(value));
}
- template <class T, RequiresInsertable<T> = 0,
- typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+ // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+ // bug.
+ template <class T, RequiresInsertable<T> = 0, class T2 = T,
+ typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
iterator insert(const_iterator, T&& value) {
return insert(std::forward<T>(value)).first;
@@ -1116,7 +1082,7 @@ class raw_hash_set {
template <class InputIt>
void insert(InputIt first, InputIt last) {
- for (; first != last; ++first) insert(*first);
+ for (; first != last; ++first) emplace(*first);
}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
@@ -1128,13 +1094,14 @@ class raw_hash_set {
insert(ilist.begin(), ilist.end());
}
- insert_return_type<iterator, node_type> insert(node_type&& node) {
+ insert_return_type insert(node_type&& node) {
if (!node) return {end(), false, node_type()};
- const auto& elem = PolicyTraits::element(node.slot());
+ const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
auto res = PolicyTraits::apply(
- InsertSlot<false>{*this, std::move(*node.slot())}, elem);
+ InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
+ elem);
if (res.second) {
- node.reset();
+ CommonAccess::Reset(&node);
return {res.first, true, node_type()};
} else {
return {res.first, false, std::move(node)};
@@ -1142,7 +1109,9 @@ class raw_hash_set {
}
iterator insert(const_iterator, node_type&& node) {
- return insert(std::move(node)).first;
+ auto res = insert(std::move(node));
+ node = std::move(res.node);
+ return res.position;
}
// This overload kicks in if we can deduce the key from args. This enables us
@@ -1167,8 +1136,7 @@ class raw_hash_set {
template <class... Args, typename std::enable_if<
!IsDecomposable<Args...>::value, int>::type = 0>
std::pair<iterator, bool> emplace(Args&&... args) {
- typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
- raw;
+ alignas(slot_type) unsigned char raw[sizeof(slot_type)];
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
@@ -1184,10 +1152,15 @@ class raw_hash_set {
// Extension API: support for lazy emplace.
//
// Looks up key in the table. If found, returns the iterator to the element.
- // Otherwise calls f with one argument of type raw_hash_set::constructor. f
- // MUST call raw_hash_set::constructor with arguments as if a
- // raw_hash_set::value_type is constructed, otherwise the behavior is
- // undefined.
+ // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
+ //
+ // `f` must abide by several restrictions:
+ // - it MUST call `raw_hash_set::constructor` with arguments as if a
+ // `raw_hash_set::value_type` is constructed,
+ // - it MUST NOT access the container before the call to
+ // `raw_hash_set::constructor`, and
+ // - it MUST NOT erase the lazily emplaced element.
+ // Doing any of these is undefined behavior.
//
// For example:
//
@@ -1250,15 +1223,16 @@ class raw_hash_set {
}
// Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
- // this method returns void to reduce algorithmic complexity to O(1). In
- // order to erase while iterating across a map, use the following idiom (which
- // also works for standard containers):
+ // this method returns void to reduce algorithmic complexity to O(1). The
+ // iterator is invalidated, so any increment should be done before calling
+ // erase. In order to erase while iterating across a map, use the following
+ // idiom (which also works for standard containers):
//
// for (auto it = m.begin(), end = m.end(); it != end;) {
+ // // `erase()` will invalidate `it`, so advance `it` first.
+ // auto copy_it = it++;
// if (<pred>) {
- // m.erase(it++);
- // } else {
- // ++it;
+ // m.erase(copy_it);
// }
// }
void erase(const_iterator cit) { erase(cit.inner_); }
@@ -1266,7 +1240,7 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- assert(it != end());
+ AssertIsFull(it.ctrl_);
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1283,12 +1257,14 @@ class raw_hash_set {
template <typename H, typename E>
void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
assert(this != &src);
- for (auto it = src.begin(), e = src.end(); it != e; ++it) {
+ for (auto it = src.begin(), e = src.end(); it != e;) {
+ auto next = std::next(it);
if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
PolicyTraits::element(it.slot_))
.second) {
src.erase_meta_only(it);
}
+ it = next;
}
}
@@ -1298,7 +1274,9 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- node_type node(alloc_ref(), position.inner_.slot_);
+ AssertIsFull(position.inner_.ctrl_);
+ auto node =
+ CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
return node;
}
@@ -1313,8 +1291,8 @@ class raw_hash_set {
void swap(raw_hash_set& that) noexcept(
IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
- (!AllocTraits::propagate_on_container_swap::value ||
- IsNoThrowSwappable<allocator_type>())) {
+ IsNoThrowSwappable<allocator_type>(
+ typename AllocTraits::propagate_on_container_swap{})) {
using std::swap;
swap(ctrl_, that.ctrl_);
swap(slots_, that.slots_);
@@ -1323,18 +1301,21 @@ class raw_hash_set {
swap(growth_left(), that.growth_left());
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
- if (AllocTraits::propagate_on_container_swap::value) {
- swap(alloc_ref(), that.alloc_ref());
- } else {
- // If the allocators do not compare equal it is officially undefined
- // behavior. We choose to do nothing.
- }
+ swap(infoz(), that.infoz());
+ SwapAlloc(alloc_ref(), that.alloc_ref(),
+ typename AllocTraits::propagate_on_container_swap{});
}
void rehash(size_t n) {
if (n == 0 && capacity_ == 0) return;
- if (n == 0 && size_ == 0) return destroy_slots();
- auto m = NormalizeCapacity((std::max)(n, NumSlotsFast(size())));
+ if (n == 0 && size_ == 0) {
+ destroy_slots();
+ infoz().RecordStorageChanged(0, 0);
+ return;
+ }
+ // bitor is a faster way of doing `max` here. We will round up to the next
+ // power-of-2-minus-1, so bitor is good enough.
+ auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
if (n == 0 || m > capacity_) {
resize(m);
@@ -1342,7 +1323,10 @@ class raw_hash_set {
}
void reserve(size_t n) {
- rehash(NumSlotsFast(n));
+ size_t m = GrowthToLowerboundCapacity(n);
+ if (m > capacity_) {
+ resize(NormalizeCapacity(m));
+ }
}
// Extension API: support for heterogeneous keys.
@@ -1368,7 +1352,7 @@ class raw_hash_set {
void prefetch(const key_arg<K>& key) const {
(void)key;
#if defined(__GNUC__)
- auto seq = probe(hash_ref()(key));
+ auto seq = probe(ctrl_, hash_ref()(key), capacity_);
__builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
__builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
#endif // __GNUC__
@@ -1383,7 +1367,7 @@ class raw_hash_set {
// called heterogeneous key support.
template <class K = key_type>
iterator find(const key_arg<K>& key, size_t hash) {
- auto seq = probe(hash);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1394,6 +1378,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next();
+ assert(seq.index() < capacity_ && "full table!");
}
}
template <class K = key_type>
@@ -1521,13 +1506,6 @@ class raw_hash_set {
slot_type&& slot;
};
- // Computes std::ceil(n / kMaxLoadFactor). Faster than calling std::ceil.
- static inline size_t NumSlotsFast(size_t n) {
- return static_cast<size_t>(
- (n * kMaxLoadFactorDenominator + (kMaxLoadFactorNumerator - 1)) /
- kMaxLoadFactorNumerator);
- }
-
// "erases" the object from the container, except that it doesn't actually
// destroy the object. It only updates all the metadata of the class.
// This can be used in conjunction with Policy::transfer to move the object to
@@ -1550,17 +1528,34 @@ class raw_hash_set {
set_ctrl(index, was_never_full ? kEmpty : kDeleted);
growth_left() += was_never_full;
+ infoz().RecordErase();
}
void initialize_slots() {
assert(capacity_);
+ // Folks with custom allocators often make unwarranted assumptions about the
+ // behavior of their classes vis-a-vis trivial destructibility and what
+ // calls they will or won't make. Avoid sampling for people with custom
+ // allocators to get us out of this mess. This is not a hard guarantee but
+ // a workaround while we plan the exact guarantee we want to provide.
+ //
+ // People are often sloppy with the exact type of their allocator (sometimes
+ // it has an extra const or is missing the pair, but rebinds made it work
+ // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
+ // bound more carefully.
+ if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
+ slots_ == nullptr) {
+ infoz() = Sample();
+ }
+
auto layout = MakeLayout(capacity_);
char* mem = static_cast<char*>(
Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
slots_ = layout.template Pointer<1>(mem);
reset_ctrl();
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+ reset_growth_left();
+ infoz().RecordStorageChanged(size_, capacity_);
}
void destroy_slots() {
@@ -1589,11 +1584,14 @@ class raw_hash_set {
capacity_ = new_capacity;
initialize_slots();
+ size_t total_probe_length = 0;
for (size_t i = 0; i != old_capacity; ++i) {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
- size_t new_i = find_first_non_full(hash);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
set_ctrl(new_i, H2(hash));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
}
@@ -1605,10 +1603,12 @@ class raw_hash_set {
Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
layout.AllocSize());
}
+ infoz().RecordRehash(total_probe_length);
}
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
+ assert(!is_small(capacity_));
// Algorithm:
// - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED
@@ -1626,20 +1626,23 @@ class raw_hash_set {
// mark target as FULL
// repeat procedure for current slot with moved from element (target)
ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
- typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
- raw;
+ alignas(slot_type) unsigned char raw[sizeof(slot_type)];
+ size_t total_probe_length = 0;
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
for (size_t i = 0; i != capacity_; ++i) {
if (!IsDeleted(ctrl_[i])) continue;
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(slots_ + i));
- size_t new_i = find_first_non_full(hash);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
// Verify if the old and new i fall within the same group wrt the hash.
// If they do, we don't need to move the object as it falls already in the
// best probe we can.
const auto probe_index = [&](size_t pos) {
- return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
+ return ((pos - probe(ctrl_, hash, capacity_).offset()) & capacity_) /
+ Group::kWidth;
};
// Element doesn't move.
@@ -1665,13 +1668,14 @@ class raw_hash_set {
--i; // repeat
}
}
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+ reset_growth_left();
+ infoz().RecordRehash(total_probe_length);
}
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
- resize(Group::kWidth - 1);
- } else if (size() <= kMaxLoadFactor / 2 * capacity_) {
+ resize(1);
+ } else if (size() <= CapacityToGrowth(capacity()) / 2) {
// Squash DELETED without growing if there is enough capacity.
drop_deletes_without_resize();
} else {
@@ -1682,7 +1686,7 @@ class raw_hash_set {
bool has_element(const value_type& elem) const {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
- auto seq = probe(hash);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1697,39 +1701,6 @@ class raw_hash_set {
return false;
}
- // Probes the raw_hash_set with the probe sequence for hash and returns the
- // pointer to the first empty or deleted slot.
- // NOTE: this function must work with tables having both kEmpty and kDelete
- // in one group. Such tables appears during drop_deletes_without_resize.
- //
- // This function is very useful when insertions happen and:
- // - the input is already a set
- // - there are enough slots
- // - the element with the hash is not in the table
- size_t find_first_non_full(size_t hash) {
- auto seq = probe(hash);
- while (true) {
- Group g{ctrl_ + seq.offset()};
- auto mask = g.MatchEmptyOrDeleted();
- if (mask) {
-#if !defined(NDEBUG)
- // We want to force small tables to have random entries too, so
- // in debug build we will randomly insert in either the front or back of
- // the group.
- // TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (ShouldInsertBackwards(hash, ctrl_))
- return seq.offset(mask.HighestBitSet());
- else
- return seq.offset(mask.LowestBitSet());
-#else
- return seq.offset(mask.LowestBitSet());
-#endif
- }
- assert(seq.index() < capacity_ && "full table!");
- seq.next();
- }
- }
-
// TODO(alkis): Optimize this assuming *this and that don't overlap.
raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
raw_hash_set tmp(std::move(that));
@@ -1746,7 +1717,7 @@ class raw_hash_set {
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
auto hash = hash_ref()(key);
- auto seq = probe(hash);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1757,20 +1728,23 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
+ assert(seq.index() < capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
- size_t target = find_first_non_full(hash);
- if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target]))) {
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
+ if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
+ !IsDeleted(ctrl_[target.offset]))) {
rehash_and_grow_if_necessary();
- target = find_first_non_full(hash);
+ target = find_first_non_full(ctrl_, hash, capacity_);
}
++size_;
- growth_left() -= IsEmpty(ctrl_[target]);
- set_ctrl(target, H2(hash));
- return target;
+ growth_left() -= IsEmpty(ctrl_[target.offset]);
+ set_ctrl(target.offset, H2(hash));
+ infoz().RecordInsert(hash, target.probe_length);
+ return target.offset;
}
// Constructs the value in the space pointed by the iterator. This only works
@@ -1797,10 +1771,6 @@ class raw_hash_set {
private:
friend struct RawHashSetTestOnlyAccess;
- probe_seq<Group::kWidth> probe(size_t hash) const {
- return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
- }
-
// Reset all ctrl bytes back to kEmpty, except the sentinel.
void reset_ctrl() {
std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
@@ -1808,6 +1778,10 @@ class raw_hash_set {
SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
}
+ void reset_growth_left() {
+ growth_left() = CapacityToGrowth(capacity()) - size_;
+ }
+
// Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
// the end too.
void set_ctrl(size_t i, ctrl_t h) {
@@ -1820,26 +1794,23 @@ class raw_hash_set {
}
ctrl_[i] = h;
- ctrl_[((i - Group::kWidth) & capacity_) + Group::kWidth] = h;
+ ctrl_[((i - Group::kWidth) & capacity_) + 1 +
+ ((Group::kWidth - 1) & capacity_)] = h;
}
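Note: the extra "+ 1 + ((Group::kWidth - 1) & capacity_)" accounts for the sentinel byte and for tables smaller than a group. A worked check assuming Group::kWidth == 16: with capacity_ == 1 the control array is [slot 0, sentinel, clones...], so the clone of slot 0 must land at index 2, whereas the old "+ Group::kWidth" variant placed it at 16, past the single-group table.

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kWidth = 16, capacity = 1, i = 0;
      size_t clone_new =
          ((i - kWidth) & capacity) + 1 + ((kWidth - 1) & capacity);
      size_t clone_old = ((i - kWidth) & capacity) + kWidth;
      assert(clone_new == 2);   // just past the sentinel at index 1
      assert(clone_old == 16);  // beyond a capacity-1 table's cloned bytes
      return 0;
    }
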
size_t& growth_left() { return settings_.template get<0>(); }
- hasher& hash_ref() { return settings_.template get<1>(); }
- const hasher& hash_ref() const { return settings_.template get<1>(); }
- key_equal& eq_ref() { return settings_.template get<2>(); }
- const key_equal& eq_ref() const { return settings_.template get<2>(); }
- allocator_type& alloc_ref() { return settings_.template get<3>(); }
+ HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
+
+ hasher& hash_ref() { return settings_.template get<2>(); }
+ const hasher& hash_ref() const { return settings_.template get<2>(); }
+ key_equal& eq_ref() { return settings_.template get<3>(); }
+ const key_equal& eq_ref() const { return settings_.template get<3>(); }
+ allocator_type& alloc_ref() { return settings_.template get<4>(); }
const allocator_type& alloc_ref() const {
- return settings_.template get<3>();
+ return settings_.template get<4>();
}
- // On average each group has 2 empty slot (for the vectorized case).
- static constexpr int64_t kMaxLoadFactorNumerator = 14;
- static constexpr int64_t kMaxLoadFactorDenominator = 16;
- static constexpr float kMaxLoadFactor =
- 1.0 * kMaxLoadFactorNumerator / kMaxLoadFactorDenominator;
-
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
@@ -1847,11 +1818,24 @@ class raw_hash_set {
slot_type* slots_ = nullptr; // [capacity * slot_type]
size_t size_ = 0; // number of full slots
size_t capacity_ = 0; // total number of slots
- absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
+ absl::container_internal::CompressedTuple<size_t /* growth_left */,
+ HashtablezInfoHandle, hasher,
key_equal, allocator_type>
- settings_{0, hasher{}, key_equal{}, allocator_type{}};
+ settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
+ allocator_type{}};
};
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename P, typename H, typename E, typename A, typename Predicate>
+void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
+ for (auto it = c->begin(), last = c->end(); it != last;) {
+ auto copy_it = it++;
+ if (pred(*copy_it)) {
+ c->erase(copy_it);
+ }
+ }
+}
+
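Note: a hedged usage sketch. The public absl::erase_if overloads for the swisstable containers forward to this EraseIf helper (an assumption about the surrounding headers, which are not shown in this diff); the key type and predicate are illustrative.

    #include <utility>
    #include "absl/container/flat_hash_map.h"

    void DropEvenKeys(absl::flat_hash_map<int, int>& m) {
      // Safe while iterating: EraseIf advances the iterator before erasing.
      absl::erase_if(m, [](const std::pair<const int, int>& kv) {
        return kv.first % 2 == 0;
      });
    }
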
namespace hashtable_debug_internal {
template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
@@ -1862,7 +1846,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
const typename Set::key_type& key) {
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
- auto seq = set.probe(hash);
+ auto seq = probe(set.ctrl_, hash, set.capacity_);
while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()};
for (int i : g.Match(container_internal::H2(hash))) {
@@ -1899,10 +1883,9 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
}
static size_t LowerBoundAllocatedByteSize(size_t size) {
- size_t capacity = container_internal::NormalizeCapacity(
- std::ceil(size / Set::kMaxLoadFactor));
+ size_t capacity = GrowthToLowerboundCapacity(size);
if (capacity == 0) return 0;
- auto layout = Set::MakeLayout(capacity);
+ auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
size_t m = layout.AllocSize();
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
@@ -1914,6 +1897,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
} // namespace hashtable_debug_internal
} // namespace container_internal
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_