Diffstat (limited to 'chromium/v8/src/heap/off-thread-heap.cc')
-rw-r--r--  chromium/v8/src/heap/off-thread-heap.cc  191
1 file changed, 139 insertions(+), 52 deletions(-)
diff --git a/chromium/v8/src/heap/off-thread-heap.cc b/chromium/v8/src/heap/off-thread-heap.cc
index fec93f80685..584fe349717 100644
--- a/chromium/v8/src/heap/off-thread-heap.cc
+++ b/chromium/v8/src/heap/off-thread-heap.cc
@@ -4,10 +4,14 @@
#include "src/heap/off-thread-heap.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
+#include "src/handles/off-thread-transfer-handle-storage-inl.h"
+#include "src/heap/paged-spaces-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap/spaces.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/roots/roots.h"
+#include "src/snapshot/references.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -15,7 +19,16 @@
namespace v8 {
namespace internal {
-OffThreadHeap::OffThreadHeap(Heap* heap) : space_(heap), lo_space_(heap) {}
+OffThreadHeap::~OffThreadHeap() = default;
+
+OffThreadHeap::OffThreadHeap(Heap* heap)
+ : space_(heap),
+ lo_space_(heap),
+ off_thread_transfer_handles_head_(nullptr) {}
+
+bool OffThreadHeap::Contains(HeapObject obj) {
+ return space_.Contains(obj) || lo_space_.Contains(obj);
+}
class OffThreadHeap::StringSlotCollectingVisitor : public ObjectVisitor {
public:
@@ -74,6 +87,13 @@ void OffThreadHeap::FinishOffThread() {
string_slots_ = std::move(string_slot_collector.string_slots);
+ OffThreadTransferHandleStorage* storage =
+ off_thread_transfer_handles_head_.get();
+ while (storage != nullptr) {
+ storage->ConvertFromOffThreadHandleOnFinish();
+ storage = storage->next();
+ }
+
is_finished = true;
}
@@ -82,25 +102,70 @@ void OffThreadHeap::Publish(Heap* heap) {
Isolate* isolate = heap->isolate();
ReadOnlyRoots roots(isolate);
+ // Before we do anything else, ensure that the old-space can expand to the
+ // size needed for the off-thread objects. Use capacity rather than size since
+ // we're adding entire pages.
+ size_t off_thread_size = space_.Capacity() + lo_space_.Size();
+ if (!heap->CanExpandOldGeneration(off_thread_size)) {
+ heap->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+ if (!heap->CanExpandOldGeneration(off_thread_size)) {
+ heap->FatalProcessOutOfMemory(
+ "Can't expand old-space enough to merge off-thread pages.");
+ }
+ }
+
+ // Merging and transferring handles should be atomic from the point of view
+ // of the GC, since we neither want the GC to walk main-thread handles that
+ // point into off-thread pages, nor do we want the GC to move the raw
+ // pointers we have into off-thread pages before we've had a chance to turn
+ // them into real handles.
+ // TODO(leszeks): This could be a stronger assertion, that we don't GC at
+ // all.
+ DisallowHeapAllocation no_gc;
+
+ // Merge the spaces.
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish.Merge");
+
+ heap->old_space()->MergeLocalSpace(&space_);
+ heap->lo_space()->MergeOffThreadSpace(&lo_space_);
+
+ DCHECK(heap->CanExpandOldGeneration(0));
+ }
+
+ // Transfer all the transfer handles to be real handles. Make sure to do this
+ // before creating any handle scopes, to allow these handles to live in the
+ // caller's handle scope.
+ OffThreadTransferHandleStorage* storage =
+ off_thread_transfer_handles_head_.get();
+ while (storage != nullptr) {
+ storage->ConvertToHandleOnPublish(isolate, &no_gc);
+ storage = storage->next();
+ }
+
+ // Create a new handle scope after transferring handles, for the slot holder
+ // handles below.
HandleScope handle_scope(isolate);
- // First, handlify all the string slot holder objects, so that we can keep
- // track of them if they move.
+ // Handlify all the string slot holder objects, so that we can keep track of
+ // them if they move.
//
// TODO(leszeks): We might be able to create a HandleScope-compatible
- // structure off-thread and merge it into the current handle scope all in one
- // go (DeferredHandles maybe?).
- std::vector<Handle<HeapObject>> heap_object_handles;
+ // structure off-thread and merge it into the current handle scope all in
+ // one go (DeferredHandles maybe?).
+ std::vector<std::pair<Handle<HeapObject>, Handle<Map>>> heap_object_handles;
std::vector<Handle<Script>> script_handles;
{
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish.CollectHandles");
heap_object_handles.reserve(string_slots_.size());
for (RelativeSlot relative_slot : string_slots_) {
- // TODO(leszeks): Group slots in the same parent object to avoid creating
- // multiple duplicate handles.
+ // TODO(leszeks): Group slots in the same parent object to avoid
+ // creating multiple duplicate handles.
HeapObject obj = HeapObject::FromAddress(relative_slot.object_address);
- heap_object_handles.push_back(handle(obj, isolate));
+ heap_object_handles.push_back(
+ {handle(obj, isolate), handle(obj.map(), isolate)});
// De-internalize the string so that we can re-internalize it later.
String string =
@@ -116,46 +181,20 @@ void OffThreadHeap::Publish(Heap* heap) {
}
}
- // Then merge the spaces. At this point, we are allowed to point between (no
- // longer) off-thread pages and main-thread heap pages, and objects in the
- // previously off-thread page can move.
- {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.OffThreadFinalization.Publish.Merge");
- Heap* heap = isolate->heap();
+ // After this point, all objects are transferred and all handles are valid,
+ // so we can GC again.
+ no_gc.Release();
- // Ensure that the old-space can expand do the size needed for the
- // off-thread objects. Use capacity rather than size since we're adding
- // entire pages.
- size_t off_thread_size = space_.Capacity() + lo_space_.Size();
- if (!heap->CanExpandOldGeneration(off_thread_size)) {
- heap->InvokeNearHeapLimitCallback();
- if (!heap->CanExpandOldGeneration(off_thread_size)) {
- heap->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
- if (!heap->CanExpandOldGeneration(off_thread_size)) {
- heap->FatalProcessOutOfMemory(
- "Can't expand old-space enough to merge off-thread pages.");
- }
- }
- }
+ // Possibly trigger a GC if we're close to exhausting the old generation.
+ // TODO(leszeks): Adjust the heuristics here.
+ heap->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
- heap->old_space()->MergeLocalSpace(&space_);
- heap->lo_space()->MergeOffThreadSpace(&lo_space_);
-
- DCHECK(heap->CanExpandOldGeneration(0));
- heap->NotifyOldGenerationExpansion();
-
- // Possibly trigger a GC if we're close to exhausting the old generation.
- // TODO(leszeks): Adjust the heuristics here.
- heap->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
-
- if (!heap->ShouldExpandOldGenerationOnSlowAllocation() ||
- !heap->CanExpandOldGeneration(1 * MB)) {
- heap->CollectGarbage(OLD_SPACE,
- GarbageCollectionReason::kAllocationFailure);
- }
+ if (!heap->ShouldExpandOldGenerationOnSlowAllocation() ||
+ !heap->CanExpandOldGeneration(1 * MB)) {
+ heap->CollectGarbage(OLD_SPACE,
+ GarbageCollectionReason::kAllocationFailure);
}
// Iterate the string slots, as an offset from the holders we have handles to.
@@ -163,12 +202,13 @@ void OffThreadHeap::Publish(Heap* heap) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish.UpdateHandles");
for (size_t i = 0; i < string_slots_.size(); ++i) {
- HeapObject obj = *heap_object_handles[i];
+ HeapObject obj = *heap_object_handles[i].first;
int slot_offset = string_slots_[i].slot_offset;
// There's currently no cases where the holder object could have been
// resized.
- DCHECK_LT(slot_offset, obj.Size());
+ CHECK_EQ(obj.map(), *heap_object_handles[i].second);
+ CHECK_LT(slot_offset, obj.Size());
String string = String::cast(RELAXED_READ_FIELD(obj, slot_offset));
if (string.IsThinString()) {
@@ -188,8 +228,14 @@ void OffThreadHeap::Publish(Heap* heap) {
if (*string_handle != *internalized_string) {
// Re-read the object from the handle in case there was GC during
// internalization and it moved.
- HeapObject obj = *heap_object_handles[i];
+ HeapObject obj = *heap_object_handles[i].first;
String value = *internalized_string;
+
+ // Sanity checks that the object or string slot value hasn't changed.
+ CHECK_EQ(obj.map(), *heap_object_handles[i].second);
+ CHECK_LT(slot_offset, obj.Size());
+ CHECK_EQ(RELAXED_READ_FIELD(obj, slot_offset), *string_handle);
+
RELAXED_WRITE_FIELD(obj, slot_offset, value);
WRITE_BARRIER(obj, slot_offset, value);
}
@@ -223,7 +269,37 @@ HeapObject OffThreadHeap::AllocateRaw(int size, AllocationType allocation,
} else {
result = space_.AllocateRaw(size, alignment);
}
- return result.ToObjectChecked();
+ HeapObject obj = result.ToObjectChecked();
+ OnAllocationEvent(obj, size);
+ return obj;
+}
+
+bool OffThreadHeap::ReserveSpace(Heap::Reservation* reservations) {
+#ifdef DEBUG
+ for (int space = FIRST_SPACE;
+ space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); space++) {
+ if (space == OLD_SPACE || space == LO_SPACE) continue;
+ Heap::Reservation* reservation = &reservations[space];
+ DCHECK_EQ(reservation->size(), 1);
+ DCHECK_EQ(reservation->at(0).size, 0);
+ }
+#endif
+
+ for (auto& chunk : reservations[OLD_SPACE]) {
+ int size = chunk.size;
+ AllocationResult allocation = space_.AllocateRawUnaligned(size);
+ HeapObject free_space = allocation.ToObjectChecked();
+
+ // Mark with a free list node, in case we have a GC before
+ // deserializing.
+ Address free_space_address = free_space.address();
+ CreateFillerObjectAt(free_space_address, size,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
+ chunk.start = free_space_address;
+ chunk.end = free_space_address + size;
+ }
+
+ return true;
}
HeapObject OffThreadHeap::CreateFillerObjectAt(
@@ -234,6 +310,17 @@ HeapObject OffThreadHeap::CreateFillerObjectAt(
return filler;
}
+OffThreadTransferHandleStorage* OffThreadHeap::AddTransferHandleStorage(
+ HandleBase handle) {
+ DCHECK_IMPLIES(off_thread_transfer_handles_head_ != nullptr,
+ off_thread_transfer_handles_head_->state() ==
+ OffThreadTransferHandleStorage::kOffThreadHandle);
+ off_thread_transfer_handles_head_ =
+ std::make_unique<OffThreadTransferHandleStorage>(
+ handle.location(), std::move(off_thread_transfer_handles_head_));
+ return off_thread_transfer_handles_head_.get();
+}
+
} // namespace internal
} // namespace v8
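
For readers unfamiliar with the transfer-handle plumbing this patch adds, the following is a minimal, standalone sketch of the same pattern: AddTransferHandleStorage() pushes nodes onto a unique_ptr-owned singly linked list, FinishOffThread() walks it once to convert off-thread handles, and Publish() walks it again to turn them into main-thread handles. The TransferNode/TransferList types, int slots, and State enum below are invented for illustration only; they are not V8's OffThreadTransferHandleStorage API.

#include <cassert>
#include <iostream>
#include <memory>

// Stand-in for OffThreadTransferHandleStorage: each node owns the next one
// via unique_ptr and wraps a "slot" whose value is handed over in two phases.
class TransferNode {
 public:
  enum class State { kOffThreadHandle, kRawValue, kMainThreadHandle };

  TransferNode(int* slot, std::unique_ptr<TransferNode> next)
      : slot_(slot), next_(std::move(next)) {}

  // Analogue of ConvertFromOffThreadHandleOnFinish(): snapshot the value
  // behind the off-thread location before the off-thread heap goes away.
  void ConvertOnFinish() {
    assert(state_ == State::kOffThreadHandle);
    raw_value_ = *slot_;
    state_ = State::kRawValue;
  }

  // Analogue of ConvertToHandleOnPublish(): re-expose the raw value through
  // a main-thread location while GC is disallowed.
  void ConvertOnPublish() {
    assert(state_ == State::kRawValue);
    *slot_ = raw_value_;
    state_ = State::kMainThreadHandle;
  }

  TransferNode* next() const { return next_.get(); }
  State state() const { return state_; }

 private:
  int* slot_;
  int raw_value_ = 0;
  std::unique_ptr<TransferNode> next_;
  State state_ = State::kOffThreadHandle;
};

// Stand-in for the off_thread_transfer_handles_head_ list on OffThreadHeap.
class TransferList {
 public:
  // Analogue of AddTransferHandleStorage(): push a node onto the head and
  // return a non-owning pointer for the caller to keep.
  TransferNode* Add(int* slot) {
    head_ = std::make_unique<TransferNode>(slot, std::move(head_));
    return head_.get();
  }

  // Walk the list the same way FinishOffThread() and Publish() do.
  template <typename Fn>
  void ForEach(Fn fn) {
    for (TransferNode* node = head_.get(); node != nullptr;
         node = node->next()) {
      fn(node);
    }
  }

 private:
  std::unique_ptr<TransferNode> head_;
};

int main() {
  int slot_a = 1, slot_b = 2;
  TransferList list;
  list.Add(&slot_a);
  list.Add(&slot_b);

  // FinishOffThread(): first pass, still on the background thread.
  list.ForEach([](TransferNode* n) { n->ConvertOnFinish(); });
  // Publish(): second pass, on the main thread, before any handle scope.
  list.ForEach([](TransferNode* n) { n->ConvertOnPublish(); });

  list.ForEach([](TransferNode* n) {
    std::cout << (n->state() == TransferNode::State::kMainThreadHandle)
              << "\n";  // prints 1 twice
  });
  return 0;
}

Pushing at the head keeps Add() O(1), and both conversion passes visit every node with the same simple pointer walk the patch uses in FinishOffThread() and Publish().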