1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
|
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MARKING_BARRIER_INL_H_
#define V8_HEAP_MARKING_BARRIER_INL_H_
#include "src/base/logging.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/marking-barrier.h"
namespace v8 {
namespace internal {
// Write-barrier slow path for a store of `value` into `host`: marks `value`
// via the local or shared marking machinery as appropriate. Read-only objects
// are never marked. Defined in this -inl.h header, so it must be `inline`
// (matching MarkRange below) to avoid multiple-definition/ODR issues when the
// header is included from more than one translation unit.
inline void MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
  if (value.InReadOnlySpace()) return;

  DCHECK(IsCurrentMarkingBarrier(host));
  DCHECK(is_activated_ || shared_heap_worklist_.has_value());

  DCHECK_IMPLIES(!value.InWritableSharedSpace() || is_shared_space_isolate_,
                 !marking_state_.IsImpossible(value));

  // Host may have an impossible markbit pattern if manual allocation folding
  // is performed and host happens to be the last word of an allocated region.
  // In that case host has only one markbit and the second markbit belongs to
  // another object. We can detect that case by checking if value is a one word
  // filler map.
  DCHECK(!marking_state_.IsImpossible(host) ||
         value == ReadOnlyRoots(heap_->isolate()).one_pointer_filler_map());

  // When shared heap isn't enabled all objects are local, we can just run the
  // local marking barrier. Also from the point-of-view of the shared space
  // isolate (= main isolate) also shared objects are considered local.
  if (V8_UNLIKELY(uses_shared_heap_) && !is_shared_space_isolate_) {
    // Check whether incremental marking is enabled for that object's space.
    if (!MemoryChunk::FromHeapObject(host)->IsMarking()) {
      return;
    }

    if (host.InWritableSharedSpace()) {
      // Invoking shared marking barrier when storing into shared objects.
      MarkValueShared(value);
      return;
    } else if (value.InWritableSharedSpace()) {
      // No marking needed when storing shared objects in local objects.
      return;
    }
  }

  // From here on both host and value are local (or we are the shared space
  // isolate, for which shared objects count as local).
  DCHECK_IMPLIES(host.InWritableSharedSpace(), is_shared_space_isolate_);
  DCHECK_IMPLIES(value.InWritableSharedSpace(), is_shared_space_isolate_);

  DCHECK(is_activated_);
  MarkValueLocal(value);
}
// Marks a shared-heap object from a client (worker) isolate and, on the
// white->grey transition, pushes it onto the shared heap worklist. Marked
// `inline` because it is defined in this -inl.h header (consistent with
// MarkRange below); otherwise inclusion from multiple translation units
// would violate the ODR.
inline void MarkingBarrier::MarkValueShared(HeapObject value) {
  // Value is either in read-only space or shared heap.
  DCHECK(value.InAnySharedSpace());

  // We should only reach this on client isolates (= worker isolates).
  DCHECK(!is_shared_space_isolate_);
  DCHECK(shared_heap_worklist_.has_value());

  // Mark shared object and push it onto shared heap worklist. TryMark
  // succeeds only for the first marker, so each object is pushed once.
  if (marking_state_.TryMark(value)) {
    shared_heap_worklist_->Push(value);
  }
}
// Marks a local (non-shared) object. Minor marking only marks young objects;
// major marking marks unconditionally and optionally records the write
// barrier as a retaining root. Marked `inline` since it is defined in this
// -inl.h header (consistent with MarkRange below), avoiding ODR violations
// when included from multiple translation units.
inline void MarkingBarrier::MarkValueLocal(HeapObject value) {
  DCHECK(!value.InReadOnlySpace());
  if (is_minor()) {
    // We do not need to insert into RememberedSet<OLD_TO_NEW> here because the
    // C++ marking barrier already does this for us.
    if (Heap::InYoungGeneration(value)) {
      WhiteToGreyAndPush(value);  // NEW->NEW
    }
  } else {
    if (WhiteToGreyAndPush(value)) {
      // Only the marker that performed the white->grey transition reports the
      // retaining root, so each object is attributed at most once.
      if (V8_UNLIKELY(v8_flags.track_retaining_path)) {
        heap_->AddRetainingRoot(Root::kWriteBarrier, value);
      }
    }
  }
}
// Runs the marking barrier for every slot in [start, end) of `host`. Both
// strong and weak references are marked; slots are additionally recorded for
// the major collector when the host's space is being compacted and its chunk
// has not opted out of evacuation slot recording.
template <typename TSlot>
inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
  auto* const isolate = heap_->isolate();
  const bool should_record_slots =
      IsCompacting(host) &&
      !MemoryChunk::FromHeapObject(host)->ShouldSkipEvacuationSlotRecording();

  for (TSlot current = start; current < end; ++current) {
    typename TSlot::TObject target = current.Relaxed_Load();
    HeapObject target_object;
    // Skip slots that do not hold a heap object (e.g. Smis / cleared weak
    // references); everything else — weak or strong — is marked.
    if (!target.GetHeapObject(isolate, &target_object)) continue;
    MarkValue(host, target_object);
    if (should_record_slots) {
      major_collector_->RecordSlot(host, HeapObjectSlot(current),
                                   target_object);
    }
  }
}
// Returns true when slots referencing `object`'s space should be recorded for
// compaction: either this barrier is compacting (major GC only), or a shared
// heap worklist is attached and the object lives in writable shared space.
// Marked `inline` because it is defined in this -inl.h header (consistent
// with MarkRange above), avoiding ODR violations across translation units.
inline bool MarkingBarrier::IsCompacting(HeapObject object) const {
  if (is_compacting_) {
    // Compaction is only performed by the major collector.
    DCHECK(is_major());
    return true;
  }

  return shared_heap_worklist_.has_value() && object.InWritableSharedSpace();
}
// Attempts the white->grey transition on `obj`; on success pushes it onto the
// current worklist and returns true. Returns false if the object was already
// marked (some other marker won the race). Marked `inline` since it is
// defined in this -inl.h header (consistent with MarkRange above), avoiding
// ODR violations when included from multiple translation units.
inline bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
  if (!marking_state_.TryMark(obj)) return false;
  current_worklist_->Push(obj);
  return true;
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MARKING_BARRIER_INL_H_
|