// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SLOT_SET_H_
#define V8_HEAP_SLOT_SET_H_

#include <map>
#include <memory>
#include <stack>
#include <vector>

#include "src/base/bit-field.h"
#include "src/heap/base/basic-slot-set.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {
namespace internal {

using ::heap::base::KEEP_SLOT;
using ::heap::base::REMOVE_SLOT;
using ::heap::base::SlotCallbackResult;

// Possibly empty buckets (buckets that do not contain any slots) are discovered
// by the scavenger. Buckets might become non-empty when promoting objects later
// or in another thread, so all those buckets need to be revisited.
// Track possibly empty buckets within a SlotSet in this data structure. The
// class contains a word-sized bitmap; in case more bits are needed, the
// bitmap is replaced with a pointer to a malloc-allocated bitmap.
class PossiblyEmptyBuckets {
 public:
  PossiblyEmptyBuckets() : bitmap_(kNullAddress) {}
  PossiblyEmptyBuckets(PossiblyEmptyBuckets&& other) V8_NOEXCEPT
      : bitmap_(other.bitmap_) {
    other.bitmap_ = kNullAddress;
  }

  ~PossiblyEmptyBuckets() { Release(); }

  PossiblyEmptyBuckets(const PossiblyEmptyBuckets&) = delete;
  PossiblyEmptyBuckets& operator=(const PossiblyEmptyBuckets&) = delete;

  void Initialize() {
    bitmap_ = kNullAddress;
    DCHECK(!IsAllocated());
  }

  void Release() {
    if (IsAllocated()) {
      AlignedFree(BitmapArray());
    }
    bitmap_ = kNullAddress;
    DCHECK(!IsAllocated());
  }

  void Insert(size_t bucket_index, size_t buckets) {
    if (IsAllocated()) {
      InsertAllocated(bucket_index);
    } else if (bucket_index + 1 < kBitsPerWord) {
      bitmap_ |= static_cast<uintptr_t>(1) << (bucket_index + 1);
    } else {
      Allocate(buckets);
      InsertAllocated(bucket_index);
    }
  }

  bool Contains(size_t bucket_index) {
    if (IsAllocated()) {
      size_t word_idx = bucket_index / kBitsPerWord;
      uintptr_t* word = BitmapArray() + word_idx;
      return *word &
             (static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord));
    } else if (bucket_index + 1 < kBitsPerWord) {
      return bitmap_ & (static_cast<uintptr_t>(1) << (bucket_index + 1));
    } else {
      return false;
    }
  }

  bool IsEmpty() { return bitmap_ == kNullAddress; }

 private:
  Address bitmap_;
  static const Address kPointerTag = 1;
  static const int kWordSize = sizeof(uintptr_t);
  static const int kBitsPerWord = kWordSize * kBitsPerByte;

  bool IsAllocated() { return bitmap_ & kPointerTag; }

  void Allocate(size_t buckets) {
    DCHECK(!IsAllocated());
    size_t words = WordsForBuckets(buckets);
    uintptr_t* ptr = reinterpret_cast<uintptr_t*>(
        AlignedAllocWithRetry(words * kWordSize, kSystemPointerSize));
    ptr[0] = bitmap_ >> 1;

    for (size_t word_idx = 1; word_idx < words; word_idx++) {
      ptr[word_idx] = 0;
    }
    bitmap_ = reinterpret_cast<Address>(ptr) + kPointerTag;
    DCHECK(IsAllocated());
  }

  void InsertAllocated(size_t bucket_index) {
    DCHECK(IsAllocated());
    size_t word_idx = bucket_index / kBitsPerWord;
    uintptr_t* word = BitmapArray() + word_idx;
    *word |= static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord);
  }

  static size_t WordsForBuckets(size_t buckets) {
    return (buckets + kBitsPerWord - 1) / kBitsPerWord;
  }

  uintptr_t* BitmapArray() {
    DCHECK(IsAllocated());
    return reinterpret_cast<uintptr_t*>(bitmap_ & ~kPointerTag);
  }

  FRIEND_TEST(PossiblyEmptyBucketsTest, WordsForBuckets);
};

static_assert(std::is_standard_layout<PossiblyEmptyBuckets>::value);
static_assert(sizeof(PossiblyEmptyBuckets) == kSystemPointerSize);
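
// Sample usage (an illustrative sketch, not part of this header's API
// surface; the bucket count 128 is a made-up example value):
//
//   PossiblyEmptyBuckets buckets;
//   buckets.Insert(3, 128);    // Small index: stored inline in the tagged
//                              // word at bit 3 + 1 (bit 0 is the tag).
//   buckets.Insert(100, 128);  // Does not fit inline: the word is replaced
//                              // with a malloc-allocated bitmap.
//   DCHECK(buckets.Contains(3));
//   DCHECK(buckets.Contains(100));
//   buckets.Release();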

class SlotSet final : public ::heap::base::BasicSlotSet<kTaggedSize> {
  using BasicSlotSet = ::heap::base::BasicSlotSet<kTaggedSize>;

 public:
  static const int kBucketsRegularPage =
      (1 << kPageSizeBits) / kTaggedSize / kCellsPerBucket / kBitsPerCell;

  static SlotSet* Allocate(size_t buckets) {
    return static_cast<SlotSet*>(BasicSlotSet::Allocate(buckets));
  }

  // Similar to BasicSlotSet::Iterate() but the callback takes a parameter of
  // type MaybeObjectSlot.
  template <typename Callback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketMode mode) {
    return BasicSlotSet::Iterate(
        chunk_start, start_bucket, end_bucket,
        [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
        [this, mode](size_t bucket_index) {
          if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
            ReleaseBucket(bucket_index);
          }
        });
  }

  // Similar to SlotSet::Iterate() but additionally records potentially empty
  // buckets in the given PossiblyEmptyBuckets set. Assumes that the set was
  // already cleared by CheckPossiblyEmptyBuckets().
  template <typename Callback>
  size_t IterateAndTrackEmptyBuckets(
      Address chunk_start, size_t start_bucket, size_t end_bucket,
      Callback callback, PossiblyEmptyBuckets* possibly_empty_buckets) {
    return BasicSlotSet::Iterate(
        chunk_start, start_bucket, end_bucket,
        [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
        [possibly_empty_buckets, end_bucket](size_t bucket_index) {
          possibly_empty_buckets->Insert(bucket_index, end_bucket);
        });
  }

  // Check whether possibly empty buckets are really empty. Empty buckets are
  // freed and the possibly empty state is cleared for all buckets.
  bool CheckPossiblyEmptyBuckets(size_t buckets,
                                 PossiblyEmptyBuckets* possibly_empty_buckets) {
    bool empty = true;
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
      if (bucket) {
        if (possibly_empty_buckets->Contains(bucket_index)) {
          if (bucket->IsEmpty()) {
            ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
          } else {
            empty = false;
          }
        } else {
          empty = false;
        }
      } else {
        // Unfortunately we cannot DCHECK here that the corresponding bit in
        // possibly_empty_buckets is not set. After scavenge, the
        // MergeOldToNewRememberedSets operation might remove a recorded bucket.
      }
    }

    possibly_empty_buckets->Release();

    return empty;
  }
};

static_assert(std::is_standard_layout<SlotSet>::value);
static_assert(std::is_standard_layout<SlotSet::Bucket>::value);
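
// Sample usage (a sketch; `chunk_start` stands for the page base address and
// `UpdateSlot` for a hypothetical callback deciding whether a slot is kept):
//
//   SlotSet* slots = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
//   PossiblyEmptyBuckets possibly_empty;
//   slots->IterateAndTrackEmptyBuckets(
//       chunk_start, 0, SlotSet::kBucketsRegularPage,
//       [](MaybeObjectSlot slot) {
//         return UpdateSlot(slot) ? KEEP_SLOT : REMOVE_SLOT;
//       },
//       &possibly_empty);
//   // Later, free the buckets that really became empty and clear the state:
//   slots->CheckPossiblyEmptyBuckets(SlotSet::kBucketsRegularPage,
//                                    &possibly_empty);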

enum class SlotType : uint8_t {
  // Full pointer sized slot storing an object start address.
  // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
  // accessing. Used when pointer is stored in the instruction stream.
  kEmbeddedObjectFull,

  // Tagged sized slot storing an object start address.
  // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
  // accessing. Used when pointer is stored in the instruction stream.
  kEmbeddedObjectCompressed,

  // Full pointer sized slot storing instruction start of Code object.
  // RelocInfo::target_address/RelocInfo::set_target_address methods are used
  // for accessing. Used when pointer is stored in the instruction stream.
  kCodeEntry,

  // Raw full pointer sized slot. Slot is accessed directly. Used when pointer
  // is stored in constant pool.
  kConstPoolEmbeddedObjectFull,

  // Raw tagged sized slot. Slot is accessed directly. Used when pointer is
  // stored in constant pool.
  kConstPoolEmbeddedObjectCompressed,

  // Raw full pointer sized slot storing instruction start of Code object. Slot
  // is accessed directly. Used when pointer is stored in constant pool.
  kConstPoolCodeEntry,

  // Slot got cleared but has not been removed from the slot set.
  kCleared,

  kLast = kCleared
};

// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
class V8_EXPORT_PRIVATE TypedSlots {
 public:
  static const int kMaxOffset = 1 << 29;
  TypedSlots() = default;
  virtual ~TypedSlots();
  void Insert(SlotType type, uint32_t offset);
  void Merge(TypedSlots* other);

 protected:
  using OffsetField = base::BitField<int, 0, 29>;
  using TypeField = base::BitField<SlotType, 29, 3>;
  struct TypedSlot {
    uint32_t type_and_offset;
  };
  struct Chunk {
    Chunk* next;
    std::vector<TypedSlot> buffer;
  };
  static const size_t kInitialBufferSize = 100;
  static const size_t kMaxBufferSize = 16 * KB;
  static size_t NextCapacity(size_t capacity) {
    return std::min({kMaxBufferSize, capacity * 2});
  }
  Chunk* EnsureChunk();
  Chunk* NewChunk(Chunk* next, size_t capacity);
  Chunk* head_ = nullptr;
  Chunk* tail_ = nullptr;
};
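
// The (type, offset) encoding packs both values into a single uint32_t: the
// low 29 bits hold the offset (hence kMaxOffset == 1 << 29) and the top 3
// bits hold the SlotType. An illustrative round trip (the bit fields are
// protected, so this is only valid inside TypedSlots and its subclasses):
//
//   uint32_t encoded = TypeField::encode(SlotType::kCodeEntry) |
//                      OffsetField::encode(0x1234);
//   SlotType type = TypeField::decode(encoded);  // SlotType::kCodeEntry
//   int offset = OffsetField::decode(encoded);   // 0x1234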

// A multiset of per-page typed slots that allows concurrent iteration and
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
 public:
  using FreeRangesMap = std::map<uint32_t, uint32_t>;

  enum IterationMode { FREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address slot_address) {
  //    if (good(slot_type, slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  // This can run concurrently to ClearInvalidSlots().
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    static_assert(static_cast<uint8_t>(SlotType::kLast) < 8);
    Chunk* chunk = head_;
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      bool empty = true;
      for (TypedSlot& slot : chunk->buffer) {
        SlotType type = TypeField::decode(slot.type_and_offset);
        if (type != SlotType::kCleared) {
          uint32_t offset = OffsetField::decode(slot.type_and_offset);
          Address addr = page_start_ + offset;
          if (callback(type, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            slot = ClearedTypedSlot();
          }
        }
      }
      Chunk* next = chunk->next;
      if (mode == FREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point to its
        // next chunk to allow concurrent iteration.
        if (previous) {
          StoreNext(previous, next);
        } else {
          StoreHead(next);
        }

        delete chunk;
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  // Clears all slots that have the offset in the specified ranges.
  // This can run concurrently to Iterate().
  void ClearInvalidSlots(const FreeRangesMap& invalid_ranges);

  // Asserts that there are no recorded slots in the specified ranges.
  void AssertNoInvalidSlots(const FreeRangesMap& invalid_ranges);

  // Frees empty chunks that were accumulated for deferred freeing.
  void FreeToBeFreedChunks();

 private:
  template <typename Callback>
  void IterateSlotsInRanges(Callback callback,
                            const FreeRangesMap& invalid_ranges);

  // Atomic operations used by Iterate() and ClearInvalidSlots().
  Chunk* LoadNext(Chunk* chunk) {
    return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
  }
  void StoreNext(Chunk* chunk, Chunk* next) {
    base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
  }
  Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
  void StoreHead(Chunk* chunk) {
    base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
  }
  static TypedSlot ClearedTypedSlot() {
    return TypedSlot{TypeField::encode(SlotType::kCleared) |
                     OffsetField::encode(0)};
  }

  Address page_start_;
};
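
// Sample usage (a sketch; assumes each FreeRangesMap entry describes a free
// range as a [start, end) pair of offsets from the page start, and
// `IsStillValid` is a hypothetical predicate):
//
//   TypedSlotSet::FreeRangesMap invalid_ranges;
//   invalid_ranges[0x100] = 0x180;  // Slots in [0x100, 0x180) are invalid.
//   typed_slot_set->ClearInvalidSlots(invalid_ranges);
//   // Concurrently, another thread may iterate and prune the set:
//   typed_slot_set->Iterate(
//       [](SlotType type, Address addr) {
//         return IsStillValid(type, addr) ? KEEP_SLOT : REMOVE_SLOT;
//       },
//       TypedSlotSet::FREE_EMPTY_CHUNKS);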

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SLOT_SET_H_