// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/sequential-marking-deque.h"
#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
namespace v8 {
namespace internal {
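
// Reserves the maximal backing store (kMaxSize) up front; the memory is only
// committed lazily in EnsureCommitted().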
void SequentialMarkingDeque::SetUp() {
  backing_store_ = new base::VirtualMemory(kMaxSize);
  backing_store_committed_size_ = 0;
  if (backing_store_ == nullptr) {
    V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
  }
}
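
// Releases the backing store reservation.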
void SequentialMarkingDeque::TearDown() { delete backing_store_; }
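
// Commits the backing store and initializes the deque as a power-of-two-sized
// ring buffer. A no-op if the deque is already in use.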
void SequentialMarkingDeque::StartUsing() {
  base::LockGuard<base::Mutex> guard(&mutex_);
  if (in_use_) {
    // This can happen in mark-compact GC if the incremental marker already
    // started using the marking deque.
    return;
  }
  in_use_ = true;
  EnsureCommitted();
  array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
  size_t size = FLAG_force_marking_deque_overflows
                    ? 64 * kPointerSize
                    : backing_store_committed_size_;
  DCHECK(
      base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
  mask_ = static_cast<int>((size / kPointerSize) - 1);
  top_ = bottom_ = 0;
  overflowed_ = false;
}
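
// Resets the (empty, non-overflowed) deque and returns its memory, either
// directly or via a background uncommit task when concurrent sweeping is on.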
void SequentialMarkingDeque::StopUsing() {
  base::LockGuard<base::Mutex> guard(&mutex_);
  if (!in_use_) return;
  DCHECK(IsEmpty());
  DCHECK(!overflowed_);
  top_ = bottom_ = mask_ = 0;
  in_use_ = false;
  if (FLAG_concurrent_sweeping) {
    StartUncommitTask();
  } else {
    Uncommit();
  }
}
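
// Drops all entries without releasing memory; the deque must be in use.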
void SequentialMarkingDeque::Clear() {
  DCHECK(in_use_);
  top_ = bottom_ = 0;
  overflowed_ = false;
}
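
// Uncommits the whole committed region; only valid while the deque is not in
// use.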
void SequentialMarkingDeque::Uncommit() {
  DCHECK(!in_use_);
  bool success = backing_store_->Uncommit(backing_store_->address(),
                                          backing_store_committed_size_);
  backing_store_committed_size_ = 0;
  CHECK(success);
}
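
// Commits the largest region between kMaxSize and kMinSize that the OS will
// grant, halving the request on each failure; failing even at kMinSize is
// fatal.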
void SequentialMarkingDeque::EnsureCommitted() {
  DCHECK(in_use_);
  if (backing_store_committed_size_ > 0) return;
  for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
    if (backing_store_->Commit(backing_store_->address(), size, false)) {
      backing_store_committed_size_ = size;
      break;
    }
  }
  if (backing_store_committed_size_ == 0) {
    V8::FatalProcessOutOfMemory("SequentialMarkingDeque::EnsureCommitted");
  }
}
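
// Posts a background task that uncommits the backing store, unless one is
// already pending.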
void SequentialMarkingDeque::StartUncommitTask() {
  if (!uncommit_task_pending_) {
    uncommit_task_pending_ = true;
    UncommitTask* task = new UncommitTask(heap_->isolate(), this);
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        task, v8::Platform::kShortRunningTask);
  }
}

}  // namespace internal
}  // namespace v8