path: root/deps/v8/src/heap/cppgc/object-allocator.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/object-allocator.h"

#include "include/cppgc/allocation.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/platform.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"

namespace cppgc {
namespace internal {

namespace {

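// Marks the address range [begin, end) as young in the caged heap's age table
// when young-generation GC is enabled and supported; no-op otherwise.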
void MarkRangeAsYoung(BasePage& page, Address begin, Address end) {
#if defined(CPPGC_YOUNG_GENERATION)
  DCHECK_LT(begin, end);

  if (!page.heap().generational_gc_supported()) return;

  // If the page is newly allocated, force the first and last cards to be
  // marked as young.
  const bool new_page =
      (begin == page.PayloadStart()) && (end == page.PayloadEnd());

  auto& age_table = CagedHeapLocalData::Get().age_table;
  age_table.SetAgeForRange(CagedHeap::OffsetFromAddress(begin),
                           CagedHeap::OffsetFromAddress(end),
                           AgeTable::Age::kYoung,
                           new_page ? AgeTable::AdjacentCardsPolicy::kIgnore
                                    : AgeTable::AdjacentCardsPolicy::kConsider);
  page.set_as_containing_young_objects(true);
#endif  // defined(CPPGC_YOUNG_GENERATION)
}

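// Returns the memory range [start, start + size) to the space's free list and
// records `start` in the page's object-start bitmap.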
void AddToFreeList(NormalPageSpace& space, Address start, size_t size) {
  // No need for SetMemoryInaccessible() as LAB memory is retrieved as free
  // inaccessible memory.
  space.free_list().Add({start, size});
  // Concurrent marking may be running while the LAB is set up next to a live
  // object sharing the same cell in the bitmap.
  NormalPage::From(BasePage::FromPayload(start))
      ->object_start_bitmap()
      .SetBit<AccessMode::kAtomic>(start);
}

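// Retires the current linear allocation buffer (LAB), returning any leftover
// bytes to the free list, and installs [new_buffer, new_buffer + new_size) as
// the new LAB, updating allocation statistics, the object-start bitmap, and
// the young-generation age table.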
void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
                                   StatsCollector& stats_collector,
                                   Address new_buffer, size_t new_size) {
  auto& lab = space.linear_allocation_buffer();
  if (lab.size()) {
    AddToFreeList(space, lab.start(), lab.size());
    stats_collector.NotifyExplicitFree(lab.size());
  }

  lab.Set(new_buffer, new_size);
  if (new_size) {
    DCHECK_NOT_NULL(new_buffer);
    stats_collector.NotifyAllocation(new_size);
    auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
    // Concurrent marking may be running while the LAB is set up next to a live
    // object sharing the same cell in the bitmap.
    page->object_start_bitmap().ClearBit<AccessMode::kAtomic>(new_buffer);
    MarkRangeAsYoung(*page, new_buffer, new_buffer + new_size);
  }
}

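// Tries to create a new LargePage for an object of `size` bytes. Returns the
// object start on success and nullptr if the backing memory could not be
// reserved.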
void* TryAllocateLargeObject(PageBackend& page_backend, LargePageSpace& space,
                             StatsCollector& stats_collector, size_t size,
                             GCInfoIndex gcinfo) {
  LargePage* page = LargePage::TryCreate(page_backend, space, size);
  if (!page) return nullptr;

  space.AddPage(page);

  auto* header = new (page->ObjectHeader())
      HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);

  stats_collector.NotifyAllocation(size);
  MarkRangeAsYoung(*page, page->PayloadStart(), page->PayloadEnd());

  return header->ObjectStart();
}

}  // namespace

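// Out-of-line definition so the constant can be ODR-used (needed before C++17
// made static constexpr data members implicitly inline).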
constexpr size_t ObjectAllocator::kSmallestSpaceSize;

ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
                                 StatsCollector& stats_collector,
                                 PreFinalizerHandler& prefinalizer_handler,
                                 FatalOutOfMemoryHandler& oom_handler,
                                 GarbageCollector& garbage_collector)
    : raw_heap_(heap),
      page_backend_(page_backend),
      stats_collector_(stats_collector),
      prefinalizer_handler_(prefinalizer_handler),
      oom_handler_(oom_handler),
      garbage_collector_(garbage_collector) {}

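// Slow-path allocation. Also reports a safepoint for conservative garbage
// collection and handles allocation requests issued from within prefinalizers.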
void ObjectAllocator::OutOfLineAllocateGCSafePoint(NormalPageSpace& space,
                                                   size_t size,
                                                   AlignVal alignment,
                                                   GCInfoIndex gcinfo,
                                                   void** object) {
  *object = OutOfLineAllocateImpl(space, size, alignment, gcinfo);
  stats_collector_.NotifySafePointForConservativeCollection();
  if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
    // Objects allocated during prefinalizers should be allocated as black
    // since marking is already done. Atomics are not needed because there is
    // no concurrent marking in the background.
    HeapObjectHeader::FromObject(*object).MarkNonAtomic();
    // Resetting the allocation buffer forces all further allocations in
    // prefinalizers to go through this slow path.
    ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
    prefinalizer_handler_.NotifyAllocationInPrefinalizer(size);
  }
}

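// Dispatches large allocations to the large object space and refills the LAB
// for normal-sized allocations, triggering a garbage collection and, as a last
// resort, the out-of-memory handler when memory cannot be obtained.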
void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
                                             size_t size, AlignVal alignment,
                                             GCInfoIndex gcinfo) {
  DCHECK_EQ(0, size & kAllocationMask);
  DCHECK_LE(kFreeListEntrySize, size);
  // Out-of-line allocation allows for checking this in all situations.
  CHECK(!in_disallow_gc_scope());

  // If this allocation is big enough, allocate a large object.
  if (size >= kLargeObjectSizeThreshold) {
    auto& large_space = LargePageSpace::From(
        *raw_heap_.Space(RawHeap::RegularSpaceType::kLarge));
    // LargePage has a natural alignment that already satisfies
    // `kMaxSupportedAlignment`.
    void* result = TryAllocateLargeObject(page_backend_, large_space,
                                          stats_collector_, size, gcinfo);
    if (!result) {
      auto config = GCConfig::ConservativeAtomicConfig();
      config.free_memory_handling =
          GCConfig::FreeMemoryHandling::kDiscardWherePossible;
      garbage_collector_.CollectGarbage(config);
      result = TryAllocateLargeObject(page_backend_, large_space,
                                      stats_collector_, size, gcinfo);
      if (!result) {
        oom_handler_("Oilpan: Large allocation.");
      }
    }
    return result;
  }

  size_t request_size = size;
  // Adjust size to be able to accommodate alignment.
  const size_t dynamic_alignment = static_cast<size_t>(alignment);
  if (dynamic_alignment != kAllocationGranularity) {
    CHECK_EQ(2 * sizeof(HeapObjectHeader), dynamic_alignment);
    request_size += kAllocationGranularity;
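    // Example, assuming an 8-byte allocation granularity and a requested
    // alignment of 2 * sizeof(HeapObjectHeader) = 16 bytes: one extra
    // granularity is reserved so that padding can be inserted in front of the
    // object if the LAB position is not already 16-byte aligned.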
  }

  if (!TryRefillLinearAllocationBuffer(space, request_size)) {
    auto config = GCConfig::ConservativeAtomicConfig();
    config.free_memory_handling =
        GCConfig::FreeMemoryHandling::kDiscardWherePossible;
    garbage_collector_.CollectGarbage(config);
    if (!TryRefillLinearAllocationBuffer(space, request_size)) {
      oom_handler_("Oilpan: Normal allocation.");
    }
  }

  // The allocation must succeed, as we just refilled the LAB.
  void* result = (dynamic_alignment == kAllocationGranularity)
                     ? AllocateObjectOnSpace(space, size, gcinfo)
                     : AllocateObjectOnSpace(space, size, alignment, gcinfo);
  CHECK(result);
  return result;
}

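// Tries to grow the space by one NormalPage and, on success, moves the LAB to
// the new page's full payload.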
bool ObjectAllocator::TryExpandAndRefillLinearAllocationBuffer(
    NormalPageSpace& space) {
  auto* const new_page = NormalPage::TryCreate(page_backend_, space);
  if (!new_page) return false;

  space.AddPage(new_page);
  // Set linear allocation buffer to new page.
  ReplaceLinearAllocationBuffer(space, stats_collector_,
                                new_page->PayloadStart(),
                                new_page->PayloadSize());
  return true;
}

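// Refills the LAB with at least `size` bytes using progressively more
// expensive strategies: free list, time-bounded lazy sweeping, expansion by a
// new page, sweeping this space to completion, and finally finishing all
// sweeping.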
bool ObjectAllocator::TryRefillLinearAllocationBuffer(NormalPageSpace& space,
                                                      size_t size) {
  // Try to allocate from the freelist.
  if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;

  Sweeper& sweeper = raw_heap_.heap()->sweeper();
  // Lazily sweep pages of this heap. This is not exhaustive, to limit jank on
  // allocation. Allocation from the free list may still fail afterwards, as
  // buckets are not exhaustively searched for a suitable block. Instead,
  // buckets are probed from larger sizes that are guaranteed to fit the block
  // down to smaller bucket sizes that may only potentially fit it. For the
  // bucket that may exactly fit the allocation of `size` bytes (no
  // overallocation), only the first entry is checked.
  if (sweeper.SweepForAllocationIfRunning(
          &space, size, v8::base::TimeDelta::FromMicroseconds(500)) &&
      TryRefillLinearAllocationBufferFromFreeList(space, size)) {
    return true;
  }

  // Sweeping was off or did not yield any memory within the limited time
  // budget above. Expand at this point, as that is cheaper than possibly
  // continuing to sweep the whole heap.
  if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;

  // Expansion failed. Before finishing all sweeping, finish sweeping of just
  // this space, which is cheaper.
  if (sweeper.SweepForAllocationIfRunning(&space, size,
                                          v8::base::TimeDelta::Max()) &&
      TryRefillLinearAllocationBufferFromFreeList(space, size)) {
    return true;
  }

  // Heap expansion and sweeping of a space failed. At this point the caller
  // could report OOM or do a full GC, which would need to finish sweeping
  // anyway if it is running. Hence, we may as well finish sweeping here. Note
  // that this is possibly very expensive, but not more expensive than running
  // a full GC, as the alternative is OOM.
  if (sweeper.FinishIfRunning()) {
    // Sweeping may have added memory to the free list.
    if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;

    // Sweeping may have freed pages completely.
    if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;
  }
  return false;
}

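// Tries to service the refill from the space's free list. On success, the LAB
// is replaced with the returned block and the page's discarded-memory
// accounting is reset.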
bool ObjectAllocator::TryRefillLinearAllocationBufferFromFreeList(
    NormalPageSpace& space, size_t size) {
  const FreeList::Block entry = space.free_list().Allocate(size);
  if (!entry.address) return false;

  // Assume discarded memory on that page is now zero.
  auto& page = *NormalPage::From(BasePage::FromPayload(entry.address));
  if (page.discarded_memory()) {
    stats_collector_.DecrementDiscardedMemory(page.discarded_memory());
    page.ResetDiscardedMemory();
  }

  ReplaceLinearAllocationBuffer(
      space, stats_collector_, static_cast<Address>(entry.address), entry.size);
  return true;
}

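// Retires the LABs of all normal page spaces, returning their remaining
// memory to the corresponding free lists.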
void ObjectAllocator::ResetLinearAllocationBuffers() {
  class Resetter : public HeapVisitor<Resetter> {
   public:
    explicit Resetter(StatsCollector& stats) : stats_collector_(stats) {}

    bool VisitLargePageSpace(LargePageSpace&) { return true; }

    bool VisitNormalPageSpace(NormalPageSpace& space) {
      ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
      return true;
    }

   private:
    StatsCollector& stats_collector_;
  } visitor(stats_collector_);

  visitor.Traverse(raw_heap_);
}

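// Marks the payload of every normal and large page as young. Only has an
// effect in CPPGC_YOUNG_GENERATION builds.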
void ObjectAllocator::MarkAllPagesAsYoung() {
  class YoungMarker : public HeapVisitor<YoungMarker> {
   public:
    bool VisitNormalPage(NormalPage& page) {
      MarkRangeAsYoung(page, page.PayloadStart(), page.PayloadEnd());
      return true;
    }

    bool VisitLargePage(LargePage& page) {
      MarkRangeAsYoung(page, page.PayloadStart(), page.PayloadEnd());
      return true;
    }
  } visitor;
  USE(visitor);

#if defined(CPPGC_YOUNG_GENERATION)
  visitor.Traverse(raw_heap_);
#endif  // defined(CPPGC_YOUNG_GENERATION)
}

bool ObjectAllocator::in_disallow_gc_scope() const {
  return raw_heap_.heap()->in_disallow_gc_scope();
}

}  // namespace internal
}  // namespace cppgc