Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/array-buffer-collector.cc   |    1
-rw-r--r--  deps/v8/src/heap/array-buffer-collector.h    |    2
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker-inl.h  |    1
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.cc     |   11
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.h      |    2
-rw-r--r--  deps/v8/src/heap/code-stats.cc               |    2
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc       |   22
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc         |   19
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h          |   21
-rw-r--r--  deps/v8/src/heap/factory-inl.h               |   12
-rw-r--r--  deps/v8/src/heap/factory.cc                  |  287
-rw-r--r--  deps/v8/src/heap/factory.h                   |   40
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc                |    2
-rw-r--r--  deps/v8/src/heap/heap-controller.cc          |   75
-rw-r--r--  deps/v8/src/heap/heap-controller.h           |   71
-rw-r--r--  deps/v8/src/heap/heap-inl.h                  |   84
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h    |  157
-rw-r--r--  deps/v8/src/heap/heap-write-barrier.h        |   51
-rw-r--r--  deps/v8/src/heap/heap.cc                     |  448
-rw-r--r--  deps/v8/src/heap/heap.h                      |   69
-rw-r--r--  deps/v8/src/heap/incremental-marking-inl.h   |   16
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc      |   19
-rw-r--r--  deps/v8/src/heap/incremental-marking.h       |    8
-rw-r--r--  deps/v8/src/heap/invalidated-slots-inl.h     |    6
-rw-r--r--  deps/v8/src/heap/invalidated-slots.cc        |    9
-rw-r--r--  deps/v8/src/heap/invalidated-slots.h         |    1
-rw-r--r--  deps/v8/src/heap/local-allocator-inl.h       |  109
-rw-r--r--  deps/v8/src/heap/local-allocator.h           |   98
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h          |   26
-rw-r--r--  deps/v8/src/heap/mark-compact.cc             |   94
-rw-r--r--  deps/v8/src/heap/mark-compact.h              |   11
-rw-r--r--  deps/v8/src/heap/object-stats.cc             |   75
-rw-r--r--  deps/v8/src/heap/object-stats.h              |    8
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h      |    1
-rw-r--r--  deps/v8/src/heap/objects-visiting.h          |   79
-rw-r--r--  deps/v8/src/heap/remembered-set.h            |    5
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h             |    5
-rw-r--r--  deps/v8/src/heap/scavenger.cc                |    6
-rw-r--r--  deps/v8/src/heap/scavenger.h                 |    4
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc      |   59
-rw-r--r--  deps/v8/src/heap/spaces-inl.h                |    7
-rw-r--r--  deps/v8/src/heap/spaces.cc                   |   49
-rw-r--r--  deps/v8/src/heap/spaces.h                    |   30
-rw-r--r--  deps/v8/src/heap/store-buffer-inl.h          |   36
-rw-r--r--  deps/v8/src/heap/store-buffer.cc             |   71
-rw-r--r--  deps/v8/src/heap/store-buffer.h              |   86
-rw-r--r--  deps/v8/src/heap/sweeper.cc                  |    4
47 files changed, 1432 insertions, 867 deletions
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index bce22c39ba..2c28f46a85 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -6,6 +6,7 @@
#include "src/base/template-utils.h"
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
namespace v8 {
diff --git a/deps/v8/src/heap/array-buffer-collector.h b/deps/v8/src/heap/array-buffer-collector.h
index b44af2f2ad..74a28c3d06 100644
--- a/deps/v8/src/heap/array-buffer-collector.h
+++ b/deps/v8/src/heap/array-buffer-collector.h
@@ -8,7 +8,7 @@
#include <vector>
#include "src/base/platform/mutex.h"
-#include "src/objects/js-array.h"
+#include "src/objects/js-array-buffer.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 697d4405d8..e0d862aed7 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -10,6 +10,7 @@
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/objects.h"
+#include "src/objects/js-array-buffer-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 4f92e7e17c..0a158e3543 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -45,24 +45,25 @@ void LocalArrayBufferTracker::Process(Callback callback) {
tracker = target_page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
- const size_t size = NumberToSize(new_buffer->byte_length());
+ const size_t length = it->second.length;
// We should decrement before adding to avoid potential overflows in
// the external memory counters.
DCHECK_EQ(it->first->is_wasm_memory(), it->second.is_wasm_memory);
old_page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, it->second.length);
- tracker->Add(new_buffer, size);
+ ExternalBackingStoreType::kArrayBuffer, length);
+ tracker->Add(new_buffer, length);
}
moved_memory += it->second.length;
} else if (result == kRemoveEntry) {
- freed_memory += it->second.length;
+ const size_t length = it->second.length;
+ freed_memory += length;
// We pass backing_store() and stored length to the collector for freeing
// the backing store. Wasm allocations will go through their own tracker
// based on the backing store.
backing_stores_to_free.push_back(it->second);
old_page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, it->second.length);
+ ExternalBackingStoreType::kArrayBuffer, length);
} else {
UNREACHABLE();
}
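
[Editor's note] The hunk above snapshots it->second.length into a local before touching the counters, so the decrement on the old page and the increment on the new page use the same value, and the decrement happens first. A minimal standalone model of that decrement-before-increment pattern; the Counter and MoveExternalBytes names are illustrative stand-ins, not V8 API:

#include <cassert>
#include <cstddef>

// Per-page external backing-store byte counter (simplified stand-in).
struct Counter {
  size_t bytes = 0;
  void Increment(size_t delta) { bytes += delta; }
  void Decrement(size_t delta) { assert(bytes >= delta); bytes -= delta; }
};

// Move `length` bytes of accounting from `old_page` to `new_page`.
// Decrementing first keeps the combined total from momentarily exceeding its
// true value, which matters because these per-page counters feed unsigned
// external-memory totals that must not overflow.
void MoveExternalBytes(Counter& old_page, Counter& new_page, size_t length) {
  old_page.Decrement(length);
  new_page.Increment(length);
}
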
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 347260dde0..e60fe6c6c0 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -10,7 +10,7 @@
#include "src/allocation.h"
#include "src/base/platform/mutex.h"
#include "src/globals.h"
-#include "src/objects/js-array.h"
+#include "src/objects/js-array-buffer.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index 1753e29afd..5d8c2ab527 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/heap/code-stats.h"
+
#include "src/objects-inl.h"
+#include "src/reloc-info.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 4d41df88af..f6eabbb021 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -310,7 +310,6 @@ class ConcurrentMarkingVisitor final
VisitPointer(map, HeapObject::RawMaybeWeakField(
map, Map::kTransitionsOrPrototypeInfoOffset));
VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
- VisitPointer(map, HeapObject::RawField(map, Map::kWeakCellCacheOffset));
bailout_.Push(map);
}
return 0;
@@ -333,26 +332,6 @@ class ConcurrentMarkingVisitor final
return size;
}
- int VisitWeakCell(Map* map, WeakCell* object) {
- if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object->map_slot());
- if (!object->cleared()) {
- HeapObject* value = HeapObject::cast(object->value());
- if (marking_state_.IsBlackOrGrey(value)) {
- // Weak cells with live values are directly processed here to reduce
- // the processing time of weak cells during the main GC pause.
- Object** slot = HeapObject::RawField(object, WeakCell::kValueOffset);
- MarkCompactCollector::RecordSlot(object, slot, value);
- } else {
- // If we do not know about liveness of values of weak cells, we have to
- // process them when we know the liveness of the whole transitive
- // closure.
- weak_objects_->weak_cells.Push(task_id_, object);
- }
- }
- return WeakCell::BodyDescriptor::SizeOf(map, object);
- }
-
int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
return VisitJSObjectSubclass(map, object);
}
@@ -648,7 +627,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
bailout_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
- weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
weak_objects_->current_ephemerons.FlushToGlobal(task_id);
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index d8659ec889..bf6d5f3b90 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -34,15 +34,17 @@ void LocalEmbedderHeapTracer::AbortTracing() {
void LocalEmbedderHeapTracer::EnterFinalPause() {
if (!InUse()) return;
- remote_tracer_->EnterFinalPause();
+ remote_tracer_->EnterFinalPause(embedder_stack_state_);
+ // Resetting to state unknown as there may be follow up garbage collections
+ // triggered from callbacks that have a different stack state.
+ embedder_stack_state_ = EmbedderHeapTracer::kUnknown;
}
-bool LocalEmbedderHeapTracer::Trace(
- double deadline, EmbedderHeapTracer::AdvanceTracingActions actions) {
- if (!InUse()) return false;
+bool LocalEmbedderHeapTracer::Trace(double deadline) {
+ if (!InUse()) return true;
DCHECK_EQ(0, NumberOfCachedWrappersToTrace());
- return remote_tracer_->AdvanceTracing(deadline, actions);
+ return remote_tracer_->AdvanceTracing(deadline);
}
bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
@@ -67,5 +69,12 @@ bool LocalEmbedderHeapTracer::RequiresImmediateWrapperProcessing() {
return cached_wrappers_to_trace_.size() > kTooManyWrappers;
}
+void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::EmbedderStackState stack_state) {
+ if (!InUse()) return;
+
+ embedder_stack_state_ = stack_state;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 994695942b..ab8a46bb53 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -18,10 +18,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
public:
typedef std::pair<void*, void*> WrapperInfo;
- explicit LocalEmbedderHeapTracer(Isolate* isolate)
- : isolate_(isolate),
- remote_tracer_(nullptr),
- num_v8_marking_worklist_was_empty_(0) {}
+ explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
~LocalEmbedderHeapTracer() {
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
@@ -35,14 +32,13 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
}
- bool InUse() { return remote_tracer_ != nullptr; }
+ bool InUse() const { return remote_tracer_ != nullptr; }
void TracePrologue();
void TraceEpilogue();
void AbortTracing();
void EnterFinalPause();
- bool Trace(double deadline,
- EmbedderHeapTracer::AdvanceTracingActions actions);
+ bool Trace(double deadline);
bool IsRemoteTracingDone();
size_t NumberOfCachedWrappersToTrace() {
@@ -68,13 +64,20 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
num_v8_marking_worklist_was_empty_ > kMaxIncrementalFixpointRounds;
}
+ void SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+
private:
typedef std::vector<WrapperInfo> WrapperCache;
Isolate* const isolate_;
- EmbedderHeapTracer* remote_tracer_;
WrapperCache cached_wrappers_to_trace_;
- size_t num_v8_marking_worklist_was_empty_;
+ EmbedderHeapTracer* remote_tracer_ = nullptr;
+ size_t num_v8_marking_worklist_was_empty_ = 0;
+ EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
+ EmbedderHeapTracer::kUnknown;
+
+ friend class EmbedderStackStateScope;
};
} // namespace internal
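
[Editor's note] The embedder-tracing changes above let an embedder record a stack-state hint that applies only to the next finalization: EnterFinalPause consumes it and immediately resets to kUnknown so a follow-up GC triggered from a callback does not reuse a stale hint. A small self-contained sketch of that hand-off, using simplified stand-in types (StackState, Tracer) rather than the V8 API:

#include <iostream>

enum class StackState { kUnknown, kNoHeapPointers, kMayContainHeapPointers };

class Tracer {
 public:
  // The recorded hint applies only to the *next* finalization.
  void SetStackStateForNextFinalization(StackState state) { state_ = state; }

  void EnterFinalPause() {
    std::cout << "final pause, stack state = " << static_cast<int>(state_) << "\n";
    // Reset so a follow-up GC triggered from a callback does not inherit a
    // hint that described a different stack.
    state_ = StackState::kUnknown;
  }

 private:
  StackState state_ = StackState::kUnknown;
};

int main() {
  Tracer tracer;
  tracer.SetStackStateForNextFinalization(StackState::kNoHeapPointers);
  tracer.EnterFinalPause();  // uses the hint
  tracer.EnterFinalPause();  // back to kUnknown
}
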
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 85f2679b3f..614c6ec174 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -7,6 +7,8 @@
#include "src/heap/factory.h"
+// Clients of this interface shouldn't depend on lots of heap internals.
+// Do not include anything from src/heap here!
#include "src/handles-inl.h"
#include "src/objects-inl.h"
#include "src/string-hasher.h"
@@ -162,8 +164,14 @@ Handle<Object> Factory::NewURIError() {
MessageTemplate::kURIMalformed);
}
-Handle<String> Factory::Uint32ToString(uint32_t value) {
- Handle<String> result = NumberToString(NewNumberFromUint(value));
+Handle<String> Factory::Uint32ToString(uint32_t value, bool check_cache) {
+ Handle<String> result;
+ int32_t int32v = static_cast<int32_t>(value);
+ if (int32v >= 0 && Smi::IsValid(int32v)) {
+ result = NumberToString(Smi::FromInt(int32v), check_cache);
+ } else {
+ result = NumberToString(NewNumberFromUint(value), check_cache);
+ }
if (result->length() <= String::kMaxArrayIndexSize &&
result->hash_field() == String::kEmptyHashField) {
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index a04e2e734b..c8528f9fdb 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -21,7 +21,9 @@
#include "src/objects/bigint.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
@@ -208,6 +210,7 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
result->set_prototype_users(*empty_weak_array_list());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_bit_field(0);
+ result->set_module_namespace(*undefined_value());
return result;
}
@@ -926,7 +929,7 @@ Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
isolate());
external_string->set_length(cast_string->length());
external_string->set_hash_field(cast_string->hash_field());
- external_string->set_resource(nullptr);
+ external_string->SetResource(isolate(), nullptr);
isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
}
@@ -1250,7 +1253,7 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
ExternalOneByteString::cast(New(map, TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
+ external_string->SetResource(isolate(), resource);
isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
@@ -1283,7 +1286,7 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
ExternalTwoByteString::cast(New(map, TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
+ external_string->SetResource(isolate(), resource);
isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
@@ -1299,7 +1302,7 @@ Handle<ExternalOneByteString> Factory::NewNativeSourceString(
ExternalOneByteString::cast(New(map, TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
+ external_string->SetResource(isolate(), resource);
isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
@@ -1350,23 +1353,19 @@ Handle<Symbol> Factory::NewPrivateFieldSymbol() {
return symbol;
}
-Handle<Context> Factory::NewNativeContext() {
- Handle<Context> context = NewFixedArrayWithMap<Context>(
+Handle<NativeContext> Factory::NewNativeContext() {
+ Handle<NativeContext> context = NewFixedArrayWithMap<NativeContext>(
Heap::kNativeContextMapRootIndex, Context::NATIVE_CONTEXT_SLOTS, TENURED);
context->set_native_context(*context);
context->set_errors_thrown(Smi::kZero);
context->set_math_random_index(Smi::kZero);
- Handle<WeakCell> weak_cell = NewWeakCell(context);
- context->set_self_weak_cell(*weak_cell);
context->set_serialized_objects(*empty_fixed_array());
- DCHECK(context->IsNativeContext());
return context;
}
-Handle<Context> Factory::NewScriptContext(Handle<Context> outer,
+Handle<Context> Factory::NewScriptContext(Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
- DCHECK(outer->IsNativeContext());
Handle<Context> context = NewFixedArrayWithMap<Context>(
Heap::kScriptContextMapRootIndex, scope_info->ContextLength(), TENURED);
context->set_scope_info(*scope_info);
@@ -1387,7 +1386,7 @@ Handle<ScriptContextTable> Factory::NewScriptContextTable() {
}
Handle<Context> Factory::NewModuleContext(Handle<Module> module,
- Handle<Context> outer,
+ Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
Handle<Context> context = NewFixedArrayWithMap<Context>(
@@ -1482,7 +1481,7 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
return context;
}
-Handle<Context> Factory::NewBuiltinContext(Handle<Context> native_context,
+Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
int length) {
DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
Handle<Context> context =
@@ -1555,8 +1554,10 @@ Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id,
SKIP_WRITE_BARRIER);
script->set_flags(0);
script->set_host_defined_options(*empty_fixed_array());
- heap->set_script_list(
- *FixedArrayOfWeakCells::Add(isolate(), script_list(), script));
+ Handle<WeakArrayList> scripts = script_list();
+ scripts = WeakArrayList::AddToEnd(isolate(), scripts,
+ MaybeObjectHandle::Weak(script));
+ heap->set_script_list(*scripts);
LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
return script;
}
@@ -1581,8 +1582,10 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
new_script->set_eval_from_position(script->eval_from_position());
new_script->set_flags(script->flags());
new_script->set_host_defined_options(script->host_defined_options());
- heap->set_script_list(
- *FixedArrayOfWeakCells::Add(isolate(), script_list(), new_script));
+ Handle<WeakArrayList> scripts = script_list();
+ scripts = WeakArrayList::AddToEnd(isolate(), scripts,
+ MaybeObjectHandle::Weak(new_script));
+ heap->set_script_list(*scripts);
LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
return new_script;
}
@@ -1680,6 +1683,7 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
int length, ExternalArrayType array_type, void* external_pointer,
PretenureFlag pretenure) {
+ // TODO(7881): Smi length check
DCHECK(0 <= length && length <= Smi::kMaxValue);
int size = FixedTypedArrayBase::kHeaderSize;
HeapObject* result = AllocateRawWithImmortalMap(
@@ -1695,6 +1699,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
size_t length, size_t byte_length, ExternalArrayType array_type,
bool initialize, PretenureFlag pretenure) {
+ // TODO(7881): Smi length check
DCHECK(0 <= length && length <= Smi::kMaxValue);
CHECK(byte_length <= kMaxInt - FixedTypedArrayBase::kDataOffset);
size_t size =
@@ -1761,7 +1766,7 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
HeapObject* result = AllocateRawWithImmortalMap(
PropertyCell::kSize, pretenure, *global_property_cell_map());
Handle<PropertyCell> cell(PropertyCell::cast(result), isolate());
- cell->set_dependent_code(DependentCode::cast(*empty_fixed_array()),
+ cell->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
cell->set_property_details(PropertyDetails(Smi::kZero));
cell->set_name(*name);
@@ -1769,19 +1774,6 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
return cell;
}
-Handle<WeakCell> Factory::NewWeakCell(Handle<HeapObject> value,
- PretenureFlag pretenure) {
- // It is safe to dereference the value because we are embedding it
- // in cell and not inspecting its fields.
- AllowDeferredHandleDereference convert_to_cell;
- STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject* result =
- AllocateRawWithImmortalMap(WeakCell::kSize, pretenure, *weak_cell_map());
- Handle<WeakCell> cell(WeakCell::cast(result), isolate());
- cell->initialize(*value);
- return cell;
-}
-
Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
int slack) {
int capacity = TransitionArray::LengthFor(number_of_transitions + slack);
@@ -1850,9 +1842,8 @@ Map* Factory::InitializeMap(Map* map, InstanceType type, int instance_size,
map->set_inobject_properties_start_or_constructor_function_index(0);
map->set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
}
- map->set_dependent_code(DependentCode::cast(*empty_fixed_array()),
+ map->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
- map->set_weak_cell_cache(Smi::kZero);
map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
map->SetInObjectUnusedPropertyFields(inobject_properties);
map->set_instance_descriptors(*empty_descriptor_array());
@@ -2311,7 +2302,7 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
DCHECK(!args.name_.is_null());
// Create the SharedFunctionInfo.
- Handle<Context> context(isolate()->native_context());
+ Handle<NativeContext> context(isolate()->native_context());
Handle<Map> map = args.GetMap(isolate());
Handle<SharedFunctionInfo> info =
NewSharedFunctionInfo(args.name_, args.maybe_exported_function_data_,
@@ -2392,8 +2383,8 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
- Handle<Context> native_context(function->context()->native_context(),
- isolate());
+ Handle<NativeContext> native_context(function->context()->native_context(),
+ isolate());
Handle<Map> new_map;
if (V8_UNLIKELY(IsAsyncGeneratorFunction(function->shared()->kind()))) {
new_map = handle(native_context->async_generator_object_prototype_map(),
@@ -2513,13 +2504,15 @@ Handle<PreParsedScopeData> Factory::NewPreParsedScopeData(int length) {
}
Handle<UncompiledDataWithoutPreParsedScope>
-Factory::NewUncompiledDataWithoutPreParsedScope(int32_t start_position,
+Factory::NewUncompiledDataWithoutPreParsedScope(Handle<String> inferred_name,
+ int32_t start_position,
int32_t end_position,
int32_t function_literal_id) {
Handle<UncompiledDataWithoutPreParsedScope> result(
UncompiledDataWithoutPreParsedScope::cast(
New(uncompiled_data_without_pre_parsed_scope_map(), TENURED)),
isolate());
+ result->set_inferred_name(*inferred_name);
result->set_start_position(start_position);
result->set_end_position(end_position);
result->set_function_literal_id(function_literal_id);
@@ -2530,12 +2523,14 @@ Factory::NewUncompiledDataWithoutPreParsedScope(int32_t start_position,
Handle<UncompiledDataWithPreParsedScope>
Factory::NewUncompiledDataWithPreParsedScope(
- int32_t start_position, int32_t end_position, int32_t function_literal_id,
+ Handle<String> inferred_name, int32_t start_position, int32_t end_position,
+ int32_t function_literal_id,
Handle<PreParsedScopeData> pre_parsed_scope_data) {
Handle<UncompiledDataWithPreParsedScope> result(
UncompiledDataWithPreParsedScope::cast(
New(uncompiled_data_with_pre_parsed_scope_map(), TENURED)),
isolate());
+ result->set_inferred_name(*inferred_name);
result->set_start_position(start_position);
result->set_end_position(end_position);
result->set_function_literal_id(function_literal_id);
@@ -2728,7 +2723,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
// allocation is on.
heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
// Record all references to embedded objects in the new code object.
- heap->RecordWritesIntoCode(*new_code);
+ WriteBarrierForCode(*new_code);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) new_code->ObjectVerify(isolate());
@@ -2918,7 +2913,7 @@ Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure) {
- Context* native_context = isolate()->raw_native_context();
+ NativeContext* native_context = isolate()->raw_native_context();
Map* map = native_context->GetInitialJSArrayMap(elements_kind);
if (map == nullptr) {
JSFunction* array_function = native_context->array_function();
@@ -2985,7 +2980,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
}
Handle<JSWeakMap> Factory::NewJSWeakMap() {
- Context* native_context = isolate()->raw_native_context();
+ NativeContext* native_context = isolate()->raw_native_context();
Handle<Map> map(native_context->js_weak_map_fun()->initial_map(), isolate());
Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)),
isolate());
@@ -3124,10 +3119,10 @@ void Factory::TypeAndSizeForElementsKind(ElementsKind kind,
ExternalArrayType* array_type,
size_t* element_size) {
switch (kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- *array_type = kExternal##Type##Array; \
- *element_size = size; \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ *array_type = kExternal##Type##Array; \
+ *element_size = sizeof(ctype); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -3143,10 +3138,10 @@ static void ForFixedTypedArray(ExternalArrayType array_type,
size_t* element_size,
ElementsKind* element_kind) {
switch (array_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- *element_size = size; \
- *element_kind = TYPE##_ELEMENTS; \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
+ *element_size = sizeof(ctype); \
+ *element_kind = TYPE##_ELEMENTS; \
return;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -3156,10 +3151,10 @@ static void ForFixedTypedArray(ExternalArrayType array_type,
}
JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
- Context* native_context = isolate->context()->native_context();
+ NativeContext* native_context = isolate->context()->native_context();
switch (type) {
-#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
return native_context->type##_array_fun();
TYPED_ARRAYS(TYPED_ARRAY_FUN)
@@ -3169,10 +3164,10 @@ JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
}
JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
- Context* native_context = isolate->context()->native_context();
+ NativeContext* native_context = isolate->context()->native_context();
switch (elements_kind) {
-#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return native_context->type##_array_fun();
TYPED_ARRAYS(TYPED_ARRAY_FUN)
@@ -3239,6 +3234,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
CHECK_EQ(byte_offset % element_size, 0);
CHECK(length <= (std::numeric_limits<size_t>::max() / element_size));
+ // TODO(7881): Smi length check
CHECK(length <= static_cast<size_t>(Smi::kMaxValue));
size_t byte_length = length * element_size;
SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length,
@@ -3271,6 +3267,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
CHECK(number_of_elements <=
(std::numeric_limits<size_t>::max() / element_size));
+ // TODO(7881): Smi length check
CHECK(number_of_elements <= static_cast<size_t>(Smi::kMaxValue));
size_t byte_length = number_of_elements * element_size;
@@ -3509,9 +3506,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_raw_outer_scope_info_or_feedback_metadata(
*empty_feedback_metadata(), SKIP_WRITE_BARRIER);
}
- share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
- share->set_function_identifier_or_debug_info(*undefined_value(),
- SKIP_WRITE_BARRIER);
+ share->set_script_or_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
#if V8_SFI_HAS_UNIQUE_ID
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
@@ -3520,6 +3515,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_length(0);
share->set_internal_formal_parameter_count(0);
share->set_expected_nof_properties(0);
+ share->set_builtin_function_id(
+ BuiltinFunctionId::kInvalidBuiltinFunctionId);
share->set_raw_function_token_offset(0);
// All flags default to false or 0.
share->set_flags(0);
@@ -3529,9 +3526,10 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->clear_padding();
}
// Link into the list.
- Handle<Object> new_noscript_list = FixedArrayOfWeakCells::Add(
- isolate(), noscript_shared_function_infos(), share);
- isolate()->heap()->set_noscript_shared_function_infos(*new_noscript_list);
+ Handle<WeakArrayList> noscript_list = noscript_shared_function_infos();
+ noscript_list = WeakArrayList::AddToEnd(isolate(), noscript_list,
+ MaybeObjectHandle::Weak(share));
+ isolate()->heap()->set_noscript_shared_function_infos(*noscript_list);
#ifdef VERIFY_HEAP
share->SharedFunctionInfoVerify(isolate());
@@ -3539,68 +3537,90 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
return share;
}
-static inline int NumberCacheHash(Handle<FixedArray> cache,
- Handle<Object> number) {
+namespace {
+inline int NumberToStringCacheHash(Handle<FixedArray> cache, Smi* number) {
int mask = (cache->length() >> 1) - 1;
- if (number->IsSmi()) {
- return Handle<Smi>::cast(number)->value() & mask;
- } else {
- int64_t bits = bit_cast<int64_t>(number->Number());
- return (static_cast<int>(bits) ^ static_cast<int>(bits >> 32)) & mask;
+ return number->value() & mask;
+}
+inline int NumberToStringCacheHash(Handle<FixedArray> cache, double number) {
+ int mask = (cache->length() >> 1) - 1;
+ int64_t bits = bit_cast<int64_t>(number);
+ return (static_cast<int>(bits) ^ static_cast<int>(bits >> 32)) & mask;
+}
+} // namespace
+
+Handle<String> Factory::NumberToStringCacheSet(Handle<Object> number, int hash,
+ const char* string,
+ bool check_cache) {
+ // We tenure the allocated string since it is referenced from the
+ // number-string cache which lives in the old space.
+ Handle<String> js_string =
+ NewStringFromAsciiChecked(string, check_cache ? TENURED : NOT_TENURED);
+ if (!check_cache) return js_string;
+
+ if (!number_string_cache()->get(hash * 2)->IsUndefined(isolate())) {
+ int full_size = isolate()->heap()->MaxNumberToStringCacheSize();
+ if (number_string_cache()->length() != full_size) {
+ Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
+ isolate()->heap()->set_number_string_cache(*new_cache);
+ return js_string;
+ }
}
+ number_string_cache()->set(hash * 2, *number);
+ number_string_cache()->set(hash * 2 + 1, *js_string);
+ return js_string;
}
-Handle<Object> Factory::GetNumberStringCache(Handle<Object> number) {
+Handle<Object> Factory::NumberToStringCacheGet(Object* number, int hash) {
DisallowHeapAllocation no_gc;
- int hash = NumberCacheHash(number_string_cache(), number);
Object* key = number_string_cache()->get(hash * 2);
- if (key == *number || (key->IsHeapNumber() && number->IsHeapNumber() &&
- key->Number() == number->Number())) {
+ if (key == number || (key->IsHeapNumber() && number->IsHeapNumber() &&
+ key->Number() == number->Number())) {
return Handle<String>(
String::cast(number_string_cache()->get(hash * 2 + 1)), isolate());
}
return undefined_value();
}
-void Factory::SetNumberStringCache(Handle<Object> number,
- Handle<String> string) {
- int hash = NumberCacheHash(number_string_cache(), number);
- if (number_string_cache()->get(hash * 2) != *undefined_value()) {
- int full_size = isolate()->heap()->FullSizeNumberStringCacheLength();
- if (number_string_cache()->length() != full_size) {
- Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
- isolate()->heap()->set_number_string_cache(*new_cache);
- return;
- }
+Handle<String> Factory::NumberToString(Handle<Object> number,
+ bool check_cache) {
+ if (number->IsSmi()) return NumberToString(Smi::cast(*number), check_cache);
+
+ double double_value = Handle<HeapNumber>::cast(number)->value();
+ // Try to canonicalize doubles.
+ int smi_value;
+ if (DoubleToSmiInteger(double_value, &smi_value)) {
+ return NumberToString(Smi::FromInt(smi_value), check_cache);
}
- number_string_cache()->set(hash * 2, *number);
- number_string_cache()->set(hash * 2 + 1, *string);
-}
-Handle<String> Factory::NumberToString(Handle<Object> number,
- bool check_number_string_cache) {
- isolate()->counters()->number_to_string_runtime()->Increment();
- if (check_number_string_cache) {
- Handle<Object> cached = GetNumberStringCache(number);
+ int hash = 0;
+ if (check_cache) {
+ hash = NumberToStringCacheHash(number_string_cache(), double_value);
+ Handle<Object> cached = NumberToStringCacheGet(*number, hash);
if (!cached->IsUndefined(isolate())) return Handle<String>::cast(cached);
}
char arr[100];
Vector<char> buffer(arr, arraysize(arr));
- const char* str;
- if (number->IsSmi()) {
- int num = Handle<Smi>::cast(number)->value();
- str = IntToCString(num, buffer);
- } else {
- double num = Handle<HeapNumber>::cast(number)->value();
- str = DoubleToCString(num, buffer);
+ const char* string = DoubleToCString(double_value, buffer);
+
+ return NumberToStringCacheSet(number, hash, string, check_cache);
+}
+
+Handle<String> Factory::NumberToString(Smi* number, bool check_cache) {
+ int hash = 0;
+ if (check_cache) {
+ hash = NumberToStringCacheHash(number_string_cache(), number);
+ Handle<Object> cached = NumberToStringCacheGet(number, hash);
+ if (!cached->IsUndefined(isolate())) return Handle<String>::cast(cached);
}
- // We tenure the allocated string since it is referenced from the
- // number-string cache which lives in the old space.
- Handle<String> js_string = NewStringFromAsciiChecked(str, TENURED);
- SetNumberStringCache(number, js_string);
- return js_string;
+ char arr[100];
+ Vector<char> buffer(arr, arraysize(arr));
+ const char* string = IntToCString(number->value(), buffer);
+
+ return NumberToStringCacheSet(handle(number, isolate()), hash, string,
+ check_cache);
}
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
@@ -3614,8 +3634,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
debug_info->set_debugger_hints(0);
DCHECK_EQ(DebugInfo::kNoDebuggingId, debug_info->debugging_id());
DCHECK(!shared->HasDebugInfo());
- debug_info->set_function_identifier(
- shared->function_identifier_or_debug_info());
+ debug_info->set_script(shared->script_or_debug_info());
debug_info->set_original_bytecode_array(
ReadOnlyRoots(heap).undefined_value());
debug_info->set_break_points(ReadOnlyRoots(heap).empty_fixed_array());
@@ -3708,50 +3727,48 @@ Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
return result;
}
-Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> native_context,
+Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
int number_of_properties) {
- DCHECK(native_context->IsNativeContext());
- const int kMapCacheSize = 128;
+ if (number_of_properties == 0) {
+ // Reuse the initial map of the Object function if the literal has no
+ // predeclared properties.
+ return handle(context->object_function()->initial_map(), isolate());
+ }
+
// We do not cache maps for too many properties or when running builtin code.
if (isolate()->bootstrapper()->IsActive()) {
return Map::Create(isolate(), number_of_properties);
}
+
// Use initial slow object proto map for too many properties.
+ const int kMapCacheSize = 128;
if (number_of_properties > kMapCacheSize) {
- return handle(native_context->slow_object_with_object_prototype_map(),
- isolate());
- }
- if (number_of_properties == 0) {
- // Reuse the initial map of the Object function if the literal has no
- // predeclared properties.
- return handle(native_context->object_function()->initial_map(), isolate());
+ return handle(context->slow_object_with_object_prototype_map(), isolate());
}
int cache_index = number_of_properties - 1;
- Handle<Object> maybe_cache(native_context->map_cache(), isolate());
+ Handle<Object> maybe_cache(context->map_cache(), isolate());
if (maybe_cache->IsUndefined(isolate())) {
// Allocate the new map cache for the native context.
- maybe_cache = NewFixedArray(kMapCacheSize, TENURED);
- native_context->set_map_cache(*maybe_cache);
+ maybe_cache = NewWeakFixedArray(kMapCacheSize, TENURED);
+ context->set_map_cache(*maybe_cache);
} else {
// Check to see whether there is a matching element in the cache.
- Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
- Object* result = cache->get(cache_index);
- if (result->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(result);
- if (!cell->cleared()) {
- Map* map = Map::cast(cell->value());
- DCHECK(!map->is_dictionary_map());
- return handle(map, isolate());
- }
+ Handle<WeakFixedArray> cache = Handle<WeakFixedArray>::cast(maybe_cache);
+ MaybeObject* result = cache->Get(cache_index);
+ HeapObject* heap_object;
+ if (result->ToWeakHeapObject(&heap_object)) {
+ Map* map = Map::cast(heap_object);
+ DCHECK(!map->is_dictionary_map());
+ return handle(map, isolate());
}
}
+
// Create a new map and add it to the cache.
- Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
+ Handle<WeakFixedArray> cache = Handle<WeakFixedArray>::cast(maybe_cache);
Handle<Map> map = Map::Create(isolate(), number_of_properties);
DCHECK(!map->is_dictionary_map());
- Handle<WeakCell> cell = NewWeakCell(map);
- cache->set(cache_index, *cell);
+ cache->Set(cache_index, HeapObjectReference::Weak(*map));
return map;
}
@@ -3911,8 +3928,8 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
if (IsFunctionModeWithName(function_mode)) {
// Add name field.
Handle<Name> name = isolate()->factory()->name_string();
- Descriptor d = Descriptor::DataField(name, field_index++, roc_attribs,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
+ roc_attribs, Representation::Tagged());
map->AppendDescriptor(&d);
} else {
@@ -3987,8 +4004,8 @@ Handle<Map> Factory::CreateStrictFunctionMap(
if (IsFunctionModeWithName(function_mode)) {
// Add name field.
Handle<Name> name = isolate()->factory()->name_string();
- Descriptor d = Descriptor::DataField(name, field_index++, roc_attribs,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
+ roc_attribs, Representation::Tagged());
map->AppendDescriptor(&d);
} else {
@@ -4002,8 +4019,8 @@ Handle<Map> Factory::CreateStrictFunctionMap(
if (IsFunctionModeWithHomeObject(function_mode)) {
// Add home object field.
Handle<Name> name = isolate()->factory()->home_object_symbol();
- Descriptor d = Descriptor::DataField(name, field_index++, DONT_ENUM,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
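
[Editor's note] The NumberToString changes above split the old GetNumberStringCache/SetNumberStringCache pair into hash/get/set helpers and add a Smi overload; Uint32ToString in factory-inl.h now routes values that fit a non-negative Smi through that fast path. The cache itself is a flat array of key/value slot pairs indexed by a masked hash; on a collision with an occupied slot, the hunk above grows the cache once to Heap::MaxNumberToStringCacheSize() instead of inserting. A rough standalone model of the layout and hashing, with simplified types in place of V8's FixedArray and Smi; illustrative only:

#include <cstdint>
#include <cstring>
#include <optional>
#include <string>
#include <vector>

// The real cache is one FixedArray of 2*N slots: slot 2*h stores the number
// (key) and slot 2*h+1 its cached string. N is a power of two, so the hash is
// simply masked into range, mirroring NumberToStringCacheHash above.
class NumberToStringCacheModel {
 public:
  explicit NumberToStringCacheModel(int pairs)
      : keys_(pairs), values_(pairs), used_(pairs, false) {}

  int Hash(int32_t smi) const { return smi & Mask(); }

  int Hash(double number) const {
    int64_t bits;
    std::memcpy(&bits, &number, sizeof bits);  // bit_cast<int64_t>
    return (static_cast<int>(bits) ^ static_cast<int>(bits >> 32)) & Mask();
  }

  std::optional<std::string> Get(double number, int hash) const {
    if (used_[hash] && keys_[hash] == number) return values_[hash];
    return std::nullopt;  // miss: caller formats the number and calls Set()
  }

  void Set(double number, int hash, std::string str) {
    used_[hash] = true;
    keys_[hash] = number;
    values_[hash] = std::move(str);
  }

 private:
  int Mask() const { return static_cast<int>(keys_.size()) - 1; }

  std::vector<double> keys_;
  std::vector<std::string> values_;
  std::vector<bool> used_;
};
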
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index b73e8a922a..cd57b5bf87 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -5,14 +5,18 @@
#ifndef V8_HEAP_FACTORY_H_
#define V8_HEAP_FACTORY_H_
+// Clients of this interface shouldn't depend on lots of heap internals.
+// Do not include anything from src/heap here!
#include "src/builtins/builtins.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/heap/heap.h"
+#include "src/maybe-handles.h"
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/dictionary.h"
#include "src/objects/hash-table.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
#include "src/objects/ordered-hash-table.h"
@@ -36,14 +40,17 @@ class DebugInfo;
class EnumCache;
class FreshlyAllocatedBigInt;
class Isolate;
+class JSGeneratorObject;
class JSMap;
class JSMapIterator;
class JSModuleNamespace;
+class JSProxy;
class JSSet;
class JSSetIterator;
class JSWeakMap;
class LoadHandler;
class ModuleInfo;
+class NativeContext;
class NewFunctionArgs;
class PreParsedScopeData;
class PromiseResolveThenableJobTask;
@@ -355,17 +362,18 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Symbol> NewPrivateFieldSymbol();
// Create a global (but otherwise uninitialized) context.
- Handle<Context> NewNativeContext();
+ Handle<NativeContext> NewNativeContext();
// Create a script context.
- Handle<Context> NewScriptContext(Handle<Context> outer,
+ Handle<Context> NewScriptContext(Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info);
// Create an empty script context table.
Handle<ScriptContextTable> NewScriptContextTable();
// Create a module context.
- Handle<Context> NewModuleContext(Handle<Module> module, Handle<Context> outer,
+ Handle<Context> NewModuleContext(Handle<Module> module,
+ Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info);
// Create a function or eval context.
@@ -397,7 +405,8 @@ class V8_EXPORT_PRIVATE Factory {
// These are similar to function context but don't have a previous
// context or any scope info. These are used to store spec defined
// context values.
- Handle<Context> NewBuiltinContext(Handle<Context> native_context, int length);
+ Handle<Context> NewBuiltinContext(Handle<NativeContext> native_context,
+ int length);
Handle<Struct> NewStruct(InstanceType type,
PretenureFlag pretenure = NOT_TENURED);
@@ -454,9 +463,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<PropertyCell> NewPropertyCell(Handle<Name> name,
PretenureFlag pretenure = TENURED);
- Handle<WeakCell> NewWeakCell(Handle<HeapObject> value,
- PretenureFlag pretenure = TENURED);
-
Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
@@ -727,12 +733,14 @@ class V8_EXPORT_PRIVATE Factory {
Handle<PreParsedScopeData> NewPreParsedScopeData(int length);
Handle<UncompiledDataWithoutPreParsedScope>
- NewUncompiledDataWithoutPreParsedScope(int32_t start_position,
+ NewUncompiledDataWithoutPreParsedScope(Handle<String> inferred_name,
+ int32_t start_position,
int32_t end_position,
int32_t function_literal_id);
Handle<UncompiledDataWithPreParsedScope> NewUncompiledDataWithPreParsedScope(
- int32_t start_position, int32_t end_position, int32_t function_literal_id,
+ Handle<String> inferred_name, int32_t start_position,
+ int32_t end_position, int32_t function_literal_id,
Handle<PreParsedScopeData>);
// Create an External object for V8's external API.
@@ -817,10 +825,11 @@ class V8_EXPORT_PRIVATE Factory {
DECLARE_ERROR(WasmRuntimeError)
#undef DECLARE_ERROR
- Handle<String> NumberToString(Handle<Object> number,
- bool check_number_string_cache = true);
+ Handle<String> NumberToString(Handle<Object> number, bool check_cache = true);
+ Handle<String> NumberToString(Smi* number, bool check_cache = true);
- inline Handle<String> Uint32ToString(uint32_t value);
+ inline Handle<String> Uint32ToString(uint32_t value,
+ bool check_cache = false);
#define ROOT_ACCESSOR(type, name, camel_name) inline Handle<type> name();
ROOT_LIST(ROOT_ACCESSOR)
@@ -908,7 +917,7 @@ class V8_EXPORT_PRIVATE Factory {
// Return a map for given number of properties using the map cache in the
// native context.
- Handle<Map> ObjectLiteralMapFromCache(Handle<Context> native_context,
+ Handle<Map> ObjectLiteralMapFromCache(Handle<NativeContext> native_context,
int number_of_properties);
Handle<LoadHandler> NewLoadHandler(int data_count);
@@ -995,10 +1004,11 @@ class V8_EXPORT_PRIVATE Factory {
// Attempt to find the number in a small cache. If we finds it, return
// the string representation of the number. Otherwise return undefined.
- Handle<Object> GetNumberStringCache(Handle<Object> number);
+ Handle<Object> NumberToStringCacheGet(Object* number, int hash);
// Update the cache with a new number-string pair.
- void SetNumberStringCache(Handle<Object> number, Handle<String> string);
+ Handle<String> NumberToStringCacheSet(Handle<Object> number, int hash,
+ const char* string, bool check_cache);
// Create a JSArray with no elements and no length.
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 60a3b256c8..5ee7186c6a 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -665,7 +665,6 @@ void GCTracer::PrintNVP() const {
"clear.slots_buffer=%.1f "
"clear.store_buffer=%.1f "
"clear.string_table=%.1f "
- "clear.weak_cells=%.1f "
"clear.weak_collections=%.1f "
"clear.weak_lists=%.1f "
"clear.weak_references=%.1f "
@@ -762,7 +761,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_CLEAR_SLOTS_BUFFER],
current_.scopes[Scope::MC_CLEAR_STORE_BUFFER],
current_.scopes[Scope::MC_CLEAR_STRING_TABLE],
- current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
current_.scopes[Scope::MC_CLEAR_WEAK_REFERENCES],
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
index 41f1a6bb3a..485b22902a 100644
--- a/deps/v8/src/heap/heap-controller.cc
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -8,11 +8,6 @@
namespace v8 {
namespace internal {
-const double HeapController::kMinHeapGrowingFactor = 1.1;
-const double HeapController::kMaxHeapGrowingFactor = 4.0;
-const double HeapController::kConservativeHeapGrowingFactor = 1.3;
-const double HeapController::kTargetMutatorUtilization = 0.97;
-
// Given GC speed in bytes per ms, the allocation throughput in bytes per ms
// (mutator speed), this function returns the heap growing factor that will
// achieve the kTargetMutatorUtilisation if the GC speed and the mutator speed
@@ -52,74 +47,71 @@ const double HeapController::kTargetMutatorUtilization = 0.97;
// F * (1 - MU / (R * (1 - MU))) = 1
// F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
// F = R * (1 - MU) / (R * (1 - MU) - MU)
-double HeapController::HeapGrowingFactor(double gc_speed, double mutator_speed,
- double max_factor) {
- DCHECK_LE(kMinHeapGrowingFactor, max_factor);
- DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
+double MemoryController::GrowingFactor(double gc_speed, double mutator_speed,
+ double max_factor) {
+ DCHECK_LE(kMinGrowingFactor, max_factor);
+ DCHECK_GE(kMaxGrowingFactor, max_factor);
if (gc_speed == 0 || mutator_speed == 0) return max_factor;
const double speed_ratio = gc_speed / mutator_speed;
- const double mu = kTargetMutatorUtilization;
- const double a = speed_ratio * (1 - mu);
- const double b = speed_ratio * (1 - mu) - mu;
+ const double a = speed_ratio * (1 - kTargetMutatorUtilization);
+ const double b =
+ speed_ratio * (1 - kTargetMutatorUtilization) - kTargetMutatorUtilization;
// The factor is a / b, but we need to check for small b first.
double factor = (a < b * max_factor) ? a / b : max_factor;
factor = Min(factor, max_factor);
- factor = Max(factor, kMinHeapGrowingFactor);
+ factor = Max(factor, kMinGrowingFactor);
return factor;
}
-double HeapController::MaxHeapGrowingFactor(size_t max_old_generation_size) {
+double MemoryController::MaxGrowingFactor(size_t curr_max_size) {
const double min_small_factor = 1.3;
const double max_small_factor = 2.0;
const double high_factor = 4.0;
- size_t max_old_generation_size_in_mb = max_old_generation_size / MB;
- max_old_generation_size_in_mb =
- Max(max_old_generation_size_in_mb,
- static_cast<size_t>(kMinOldGenerationSize));
+ size_t max_size_in_mb = curr_max_size / MB;
+ max_size_in_mb = Max(max_size_in_mb, kMinSize);
// If we are on a device with lots of memory, we allow a high heap
// growing factor.
- if (max_old_generation_size_in_mb >= kMaxOldGenerationSize) {
+ if (max_size_in_mb >= kMaxSize) {
return high_factor;
}
- DCHECK_GE(max_old_generation_size_in_mb, kMinOldGenerationSize);
- DCHECK_LT(max_old_generation_size_in_mb, kMaxOldGenerationSize);
+ DCHECK_GE(max_size_in_mb, kMinSize);
+ DCHECK_LT(max_size_in_mb, kMaxSize);
// On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
- double factor = (max_old_generation_size_in_mb - kMinOldGenerationSize) *
+ double factor = (max_size_in_mb - kMinSize) *
(max_small_factor - min_small_factor) /
- (kMaxOldGenerationSize - kMinOldGenerationSize) +
+ (kMaxSize - kMinSize) +
min_small_factor;
return factor;
}
-size_t HeapController::CalculateOldGenerationAllocationLimit(
- size_t old_gen_size, size_t max_old_generation_size, double gc_speed,
- double mutator_speed, size_t new_space_capacity,
- Heap::HeapGrowingMode growing_mode) {
- double max_factor = MaxHeapGrowingFactor(max_old_generation_size);
- double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
+size_t MemoryController::CalculateAllocationLimit(
+ size_t curr_size, size_t max_size, double gc_speed, double mutator_speed,
+ size_t new_space_capacity, Heap::HeapGrowingMode growing_mode) {
+ double max_factor = MaxGrowingFactor(max_size);
+ double factor = GrowingFactor(gc_speed, mutator_speed, max_factor);
if (FLAG_trace_gc_verbose) {
heap_->isolate()->PrintWithTimestamp(
- "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
+ "%s factor %.1f based on mu=%.3f, speed_ratio=%.f "
"(gc=%.f, mutator=%.f)\n",
- factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
- mutator_speed);
+ ControllerName(), factor, kTargetMutatorUtilization,
+ gc_speed / mutator_speed, gc_speed, mutator_speed);
}
if (growing_mode == Heap::HeapGrowingMode::kConservative ||
growing_mode == Heap::HeapGrowingMode::kSlow) {
- factor = Min(factor, kConservativeHeapGrowingFactor);
+ factor = Min(factor, kConservativeGrowingFactor);
}
if (growing_mode == Heap::HeapGrowingMode::kMinimal) {
- factor = kMinHeapGrowingFactor;
+ factor = kMinGrowingFactor;
}
if (FLAG_heap_growing_percent > 0) {
@@ -127,26 +119,25 @@ size_t HeapController::CalculateOldGenerationAllocationLimit(
}
CHECK_LT(1.0, factor);
- CHECK_LT(0, old_gen_size);
- uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
- limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
+ CHECK_LT(0, curr_size);
+ uint64_t limit = static_cast<uint64_t>(curr_size * factor);
+ limit = Max(limit, static_cast<uint64_t>(curr_size) +
MinimumAllocationLimitGrowingStep(growing_mode));
limit += new_space_capacity;
uint64_t halfway_to_the_max =
- (static_cast<uint64_t>(old_gen_size) + max_old_generation_size) / 2;
+ (static_cast<uint64_t>(curr_size) + max_size) / 2;
size_t result = static_cast<size_t>(Min(limit, halfway_to_the_max));
if (FLAG_trace_gc_verbose) {
heap_->isolate()->PrintWithTimestamp(
- "Heap Controller Limit: old size: %" PRIuS " KB, new limit: %" PRIuS
- " KB (%.1f)\n",
- old_gen_size / KB, result / KB, factor);
+ "%s Limit: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
+ ControllerName(), curr_size / KB, result / KB, factor);
}
return result;
}
-size_t HeapController::MinimumAllocationLimitGrowingStep(
+size_t MemoryController::MinimumAllocationLimitGrowingStep(
Heap::HeapGrowingMode growing_mode) {
const size_t kRegularAllocationLimitGrowingStep = 8;
const size_t kLowMemoryAllocationLimitGrowingStep = 2;
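
[Editor's note] The derivation in the comment block above reduces to F = R(1 - MU) / (R(1 - MU) - MU), with R = gc_speed / mutator_speed and MU the target mutator utilization. A small self-contained sketch of that limit math, using the HeapController tuning values visible in this patch (1.1, 4.0, 0.97); the function name and defaults here are illustrative, not the V8 entry points:

#include <algorithm>
#include <cstdio>

double GrowingFactor(double gc_speed, double mutator_speed,
                     double min_factor = 1.1, double max_factor = 4.0,
                     double mu = 0.97) {
  if (gc_speed == 0 || mutator_speed == 0) return max_factor;
  const double speed_ratio = gc_speed / mutator_speed;
  const double a = speed_ratio * (1 - mu);       // numerator  R(1-MU)
  const double b = speed_ratio * (1 - mu) - mu;  // denominator R(1-MU)-MU
  // Guard against a tiny or negative denominator before dividing,
  // mirroring the "check for small b first" comment in the patch.
  double factor = (a < b * max_factor) ? a / b : max_factor;
  return std::max(min_factor, std::min(factor, max_factor));
}

int main() {
  // A GC that is 100x faster than allocation only needs ~1.5x headroom;
  // slower GCs push the factor up until it saturates at max_factor.
  std::printf("%.2f\n", GrowingFactor(100.0, 1.0));  // ~1.48
}
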
diff --git a/deps/v8/src/heap/heap-controller.h b/deps/v8/src/heap/heap-controller.h
index 717c97a5b8..8aae46c279 100644
--- a/deps/v8/src/heap/heap-controller.h
+++ b/deps/v8/src/heap/heap-controller.h
@@ -13,40 +13,65 @@
namespace v8 {
namespace internal {
-class HeapController {
+class V8_EXPORT_PRIVATE MemoryController {
public:
- explicit HeapController(Heap* heap) : heap_(heap) {}
+ MemoryController(Heap* heap, double min_growing_factor,
+ double max_growing_factor,
+ double conservative_growing_factor,
+ double target_mutator_utilization, size_t min_size,
+ size_t max_size)
+ : heap_(heap),
+ kMinGrowingFactor(min_growing_factor),
+ kMaxGrowingFactor(max_growing_factor),
+ kConservativeGrowingFactor(conservative_growing_factor),
+ kTargetMutatorUtilization(target_mutator_utilization),
+ kMinSize(min_size),
+ kMaxSize(max_size) {}
+ virtual ~MemoryController() {}
- // Computes the allocation limit to trigger the next full garbage collection.
- V8_EXPORT_PRIVATE size_t CalculateOldGenerationAllocationLimit(
- size_t old_gen_size, size_t max_old_generation_size, double gc_speed,
- double mutator_speed, size_t new_space_capacity,
- Heap::HeapGrowingMode growing_mode);
+ // Computes the allocation limit to trigger the next garbage collection.
+ size_t CalculateAllocationLimit(size_t curr_size, size_t max_size,
+ double gc_speed, double mutator_speed,
+ size_t new_space_capacity,
+ Heap::HeapGrowingMode growing_mode);
+ // Computes the growing step when the limit increases.
size_t MinimumAllocationLimitGrowingStep(Heap::HeapGrowingMode growing_mode);
- // The old space size has to be a multiple of Page::kPageSize.
+ protected:
+ double GrowingFactor(double gc_speed, double mutator_speed,
+ double max_factor);
+ double MaxGrowingFactor(size_t curr_max_size);
+ virtual const char* ControllerName() = 0;
+
+ Heap* const heap_;
+
+ const double kMinGrowingFactor;
+ const double kMaxGrowingFactor;
+ const double kConservativeGrowingFactor;
+ const double kTargetMutatorUtilization;
// Sizes are in MB.
- static const size_t kMinOldGenerationSize = 128 * Heap::kPointerMultiplier;
- static const size_t kMaxOldGenerationSize = 1024 * Heap::kPointerMultiplier;
+ const size_t kMinSize;
+ const size_t kMaxSize;
- private:
- FRIEND_TEST(HeapController, HeapGrowingFactor);
- FRIEND_TEST(HeapController, MaxHeapGrowingFactor);
+ FRIEND_TEST(HeapControllerTest, HeapGrowingFactor);
+ FRIEND_TEST(HeapControllerTest, MaxHeapGrowingFactor);
+ FRIEND_TEST(HeapControllerTest, MaxOldGenerationSize);
FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
+};
- V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
- V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
- V8_EXPORT_PRIVATE static const double kConservativeHeapGrowingFactor;
- V8_EXPORT_PRIVATE static double MaxHeapGrowingFactor(
- size_t max_old_generation_size);
- V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
- double mutator_speed,
- double max_factor);
+class HeapController : public MemoryController {
+ public:
+ explicit HeapController(Heap* heap)
+ : MemoryController(heap, 1.1, 4.0, 1.3, 0.97, kMinHeapSize,
+ kMaxHeapSize) {}
- static const double kTargetMutatorUtilization;
+ // Sizes are in MB.
+ static const size_t kMinHeapSize = 128 * Heap::kPointerMultiplier;
+ static const size_t kMaxHeapSize = 1024 * Heap::kPointerMultiplier;
- Heap* heap_;
+ protected:
+ const char* ControllerName() { return "HeapController"; }
};
} // namespace internal
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 5ad1a1bdd6..62f07ea322 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -8,17 +8,18 @@
#include <cmath>
// Clients of this interface shouldn't depend on lots of heap internals.
-// Do not include anything from src/heap other than src/heap/heap.h here!
+// Do not include anything from src/heap other than src/heap/heap.h and its
+// write barrier here!
+#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/base/platform/platform.h"
#include "src/counters-inl.h"
#include "src/feedback-vector.h"
-// TODO(mstarzinger): There are 3 more includes to remove in order to no longer
+
+// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
-#include "src/heap/incremental-marking-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap/store-buffer.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/msan.h"
@@ -32,6 +33,12 @@
#include "src/string-hasher.h"
#include "src/zone/zone-list-inl.h"
+// The following header includes the write barrier essentials that can also be
+// used stand-alone without including heap-inl.h.
+// TODO(mlippautz): Remove once users of object-macros.h include this file on
+// their own.
+#include "src/heap/heap-write-barrier-inl.h"
+
namespace v8 {
namespace internal {
@@ -279,12 +286,33 @@ void Heap::UpdateAllocationsHash(uint32_t value) {
void Heap::RegisterExternalString(String* string) {
+ DCHECK(string->IsExternalString());
+ DCHECK(!string->IsThinString());
external_string_table_.AddString(string);
}
+void Heap::UpdateExternalString(String* string, size_t old_payload,
+ size_t new_payload) {
+ DCHECK(string->IsExternalString());
+ Page* page = Page::FromHeapObject(string);
+
+ if (old_payload > new_payload)
+ page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, old_payload - new_payload);
+ else
+ page->IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, new_payload - old_payload);
+}
void Heap::FinalizeExternalString(String* string) {
DCHECK(string->IsExternalString());
+ Page* page = Page::FromHeapObject(string);
+ ExternalString* ext_string = ExternalString::cast(string);
+
+ page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString,
+ ext_string->ExternalPayloadSize());
+
v8::String::ExternalStringResourceBase** resource_addr =
reinterpret_cast<v8::String::ExternalStringResourceBase**>(
reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
@@ -399,40 +427,6 @@ bool Heap::ShouldBePromoted(Address old_address) {
(!page->ContainsLimit(age_mark) || old_address < age_mark);
}
-void Heap::RecordWrite(Object* object, Object** slot, Object* value) {
- DCHECK(!HasWeakHeapObjectTag(*slot));
- DCHECK(!HasWeakHeapObjectTag(value));
- DCHECK(object->IsHeapObject()); // Can't write to slots of a Smi.
- if (!InNewSpace(value) || InNewSpace(HeapObject::cast(object))) return;
- store_buffer()->InsertEntry(reinterpret_cast<Address>(slot));
-}
-
-void Heap::RecordWrite(Object* object, MaybeObject** slot, MaybeObject* value) {
- if (!InNewSpace(value) || !object->IsHeapObject() || InNewSpace(object)) {
- return;
- }
- store_buffer()->InsertEntry(reinterpret_cast<Address>(slot));
-}
-
-void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
- if (InNewSpace(value)) {
- RecordWriteIntoCodeSlow(host, rinfo, value);
- }
-}
-
-void Heap::RecordFixedArrayElements(FixedArray* array, int offset, int length) {
- if (InNewSpace(array)) return;
- for (int i = 0; i < length; i++) {
- if (!InNewSpace(array->get(offset + i))) continue;
- store_buffer()->InsertEntry(
- reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
- }
-}
-
-Address* Heap::store_buffer_top_address() {
- return store_buffer()->top_address();
-}
-
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
static_cast<size_t>(byte_size / kPointerSize));
@@ -530,6 +524,8 @@ Isolate* Heap::isolate() {
void Heap::ExternalStringTable::AddString(String* string) {
DCHECK(string->IsExternalString());
+ DCHECK(!Contains(string));
+
if (InNewSpace(string)) {
new_space_strings_.push_back(string);
} else {
@@ -573,6 +569,18 @@ int Heap::GetNextTemplateSerialNumber() {
return next_serial_number;
}
+int Heap::MaxNumberToStringCacheSize() const {
+ // Compute the size of the number string cache based on the max newspace size.
+ // The number string cache has a minimum size based on twice the initial cache
+ // size to ensure that it is bigger after being made 'full size'.
+ size_t number_string_cache_size = max_semi_space_size_ / 512;
+ number_string_cache_size =
+ Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
+ Min<size_t>(0x4000u, number_string_cache_size));
+ // There is a string and a number per entry so the length is twice the number
+ // of entries.
+ return static_cast<int>(number_string_cache_size * 2);
+}
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()) {
heap_->always_allocate_scope_count_++;
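Heap::MaxNumberToStringCacheSize above can be checked with a small stand-alone restatement (the initial cache size is passed in here because kInitialNumberStringCacheSize is defined elsewhere). For a 16 MB maximum semispace, 16 MB / 512 = 32768 entries, capped at 0x4000 = 16384, giving a backing length of 32768, assuming twice the initial cache size does not exceed that cap.

    // Stand-alone restatement of the sizing logic above, for illustration.
    int SketchMaxNumberToStringCacheSize(size_t max_semi_space_size,
                                         size_t initial_cache_size) {
      size_t entries = max_semi_space_size / 512;
      size_t capped = entries < 0x4000u ? entries : 0x4000u;
      size_t floor = initial_cache_size * 2;
      entries = capped > floor ? capped : floor;
      // A number and its string per entry, so the backing length is doubled.
      return static_cast<int>(entries * 2);
    }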
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
new file mode 100644
index 0000000000..1e4550679c
--- /dev/null
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -0,0 +1,157 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
+#define V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
+
+// Clients of this interface shouldn't depend on lots of heap internals.
+// Do not include anything from src/heap here!
+
+#include "src/heap/heap-write-barrier.h"
+
+#include "src/globals.h"
+#include "src/objects-inl.h"
+#include "src/objects/maybe-object-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Do not use these internal details anywhere outside of this file. These
+// internals are only intended to shortcut write barrier checks.
+namespace heap_internals {
+
+struct MemoryChunk {
+ static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
+ static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
+ static constexpr uintptr_t kFromSpaceBit = uintptr_t{1} << 3;
+ static constexpr uintptr_t kToSpaceBit = uintptr_t{1} << 4;
+
+ V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
+ HeapObject* object) {
+ return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(object) &
+ ~kPageAlignmentMask);
+ }
+
+ V8_INLINE bool IsMarking() const { return GetFlags() & kMarkingBit; }
+
+ V8_INLINE bool InNewSpace() const {
+ constexpr uintptr_t kNewSpaceMask = kFromSpaceBit | kToSpaceBit;
+ return GetFlags() & kNewSpaceMask;
+ }
+
+ V8_INLINE uintptr_t GetFlags() const {
+ return *reinterpret_cast<const uintptr_t*>(
+ reinterpret_cast<const uint8_t*>(this) + kFlagsOffset);
+ }
+};
+
+inline void GenerationalBarrierInternal(HeapObject* object, Address slot,
+ HeapObject* value) {
+ DCHECK(Heap::PageFlagsAreConsistent(object));
+ heap_internals::MemoryChunk* value_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(value);
+ heap_internals::MemoryChunk* object_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+
+ if (!value_chunk->InNewSpace() || object_chunk->InNewSpace()) return;
+
+ Heap::GenerationalBarrierSlow(object, slot, value);
+}
+
+inline void MarkingBarrierInternal(HeapObject* object, Address slot,
+ HeapObject* value) {
+ DCHECK(Heap::PageFlagsAreConsistent(object));
+ heap_internals::MemoryChunk* value_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(value);
+
+ if (!value_chunk->IsMarking()) return;
+
+ Heap::MarkingBarrierSlow(object, slot, value);
+}
+
+} // namespace heap_internals
+
+inline void WriteBarrierForCode(Code* host, RelocInfo* rinfo, Object* value) {
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!value->IsHeapObject()) return;
+ HeapObject* object = HeapObject::cast(value);
+ GenerationalBarrierForCode(host, rinfo, object);
+ MarkingBarrierForCode(host, rinfo, object);
+}
+
+inline void WriteBarrierForCode(Code* host) {
+ Heap::WriteBarrierForCodeSlow(host);
+}
+
+inline void GenerationalBarrier(HeapObject* object, Object** slot,
+ Object* value) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!value->IsHeapObject()) return;
+ heap_internals::GenerationalBarrierInternal(
+ object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+}
+
+inline void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
+ MaybeObject* value) {
+ HeapObject* value_heap_object;
+ if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
+ heap_internals::GenerationalBarrierInternal(
+ object, reinterpret_cast<Address>(slot), value_heap_object);
+}
+
+inline void GenerationalBarrierForElements(Heap* heap, FixedArray* array,
+ int offset, int length) {
+ heap_internals::MemoryChunk* array_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(array);
+ if (array_chunk->InNewSpace()) return;
+
+ Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
+}
+
+inline void GenerationalBarrierForCode(Code* host, RelocInfo* rinfo,
+ HeapObject* object) {
+ heap_internals::MemoryChunk* object_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (!object_chunk->InNewSpace()) return;
+ Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
+}
+
+inline void MarkingBarrier(HeapObject* object, Object** slot, Object* value) {
+ DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!value->IsHeapObject()) return;
+ heap_internals::MarkingBarrierInternal(
+ object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+}
+
+inline void MarkingBarrier(HeapObject* object, MaybeObject** slot,
+ MaybeObject* value) {
+ HeapObject* value_heap_object;
+ if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
+ heap_internals::MarkingBarrierInternal(
+ object, reinterpret_cast<Address>(slot), value_heap_object);
+}
+
+inline void MarkingBarrierForElements(Heap* heap, HeapObject* object) {
+ heap_internals::MemoryChunk* object_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (!object_chunk->IsMarking()) return;
+
+ Heap::MarkingBarrierForElementsSlow(heap, object);
+}
+
+inline void MarkingBarrierForCode(Code* host, RelocInfo* rinfo,
+ HeapObject* object) {
+ DCHECK(!HasWeakHeapObjectTag(object));
+ heap_internals::MemoryChunk* object_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (!object_chunk->IsMarking()) return;
+ Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
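The barriers defined in this header are normally reached through the macros in object-macros.h. As a rough illustration only (not the exact macro expansion), a field store followed by both barrier flavours looks roughly like:

    // Rough illustration of what a write-barrier macro would expand to.
    void SketchWriteField(HeapObject* host, Object** slot, Object* value) {
      *slot = value;                           // the raw store
      MarkingBarrier(host, slot, value);       // incremental-marking barrier
      GenerationalBarrier(host, slot, value);  // old-to-new remembered set
    }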
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
new file mode 100644
index 0000000000..4eaeaae8a4
--- /dev/null
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -0,0 +1,51 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_WRITE_BARRIER_H_
+#define V8_HEAP_HEAP_WRITE_BARRIER_H_
+
+namespace v8 {
+namespace internal {
+
+class Code;
+class FixedArray;
+class Heap;
+class HeapObject;
+class MaybeObject;
+class Object;
+class RelocInfo;
+
+// Note: In general it is preferred to use the macros defined in
+// object-macros.h.
+
+// Write barrier for FixedArray elements.
+#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
+ do { \
+ GenerationalBarrierForElements(heap, array, start, length); \
+ MarkingBarrierForElements(heap, array); \
+ } while (false)
+
+// Combined write barriers.
+void WriteBarrierForCode(Code* host, RelocInfo* rinfo, Object* value);
+void WriteBarrierForCode(Code* host);
+
+// Generational write barrier.
+void GenerationalBarrier(HeapObject* object, Object** slot, Object* value);
+void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
+ MaybeObject* value);
+void GenerationalBarrierForElements(Heap* heap, FixedArray* array, int offset,
+ int length);
+void GenerationalBarrierForCode(Code* host, RelocInfo* rinfo,
+ HeapObject* object);
+
+// Marking write barrier.
+void MarkingBarrier(HeapObject* object, Object** slot, Object* value);
+void MarkingBarrier(HeapObject* object, MaybeObject** slot, MaybeObject* value);
+void MarkingBarrierForElements(Heap* heap, HeapObject* object);
+void MarkingBarrierForCode(Code* host, RelocInfo* rinfo, HeapObject* object);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_HEAP_WRITE_BARRIER_H_
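FIXED_ARRAY_ELEMENTS_WRITE_BARRIER replaces the RecordFixedArrayElements / RecordWrites pair removed from heap.h. A hypothetical call site (the helper below is not part of the change, and assumes the usual FixedArray get/set accessors) issues one combined barrier after a bulk element copy:

    // Hypothetical helper; not part of this change.
    void SketchCopyElements(Heap* heap, FixedArray* dst, FixedArray* src,
                            int start, int length) {
      for (int i = 0; i < length; i++) {
        dst->set(start + i, src->get(i), SKIP_WRITE_BARRIER);
      }
      // One combined generational + marking barrier for the copied range.
      FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, dst, start, length);
    }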
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 6fd93f659f..2ec30635be 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -8,7 +8,7 @@
#include <unordered_set>
#include "src/accessors.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
@@ -31,6 +31,7 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
@@ -258,8 +259,8 @@ size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
old_space_physical_memory_factor *
kPointerMultiplier);
- return Max(Min(computed_size, HeapController::kMaxOldGenerationSize),
- HeapController::kMinOldGenerationSize);
+ return Max(Min(computed_size, HeapController::kMaxHeapSize),
+ HeapController::kMinHeapSize);
}
size_t Heap::Capacity() {
@@ -513,22 +514,25 @@ void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
if (!FLAG_track_retaining_path) {
PrintF("Retaining path tracking requires --track-retaining-path\n");
} else {
- int index = 0;
- Handle<FixedArrayOfWeakCells> array = FixedArrayOfWeakCells::Add(
- isolate(), handle(retaining_path_targets(), isolate()), object, &index);
+ Handle<WeakArrayList> array(retaining_path_targets(), isolate());
+ int index = array->length();
+ array = WeakArrayList::AddToEnd(isolate(), array,
+ MaybeObjectHandle::Weak(object));
set_retaining_path_targets(*array);
+ DCHECK_EQ(array->length(), index + 1);
retaining_path_target_option_[index] = option;
}
}
bool Heap::IsRetainingPathTarget(HeapObject* object,
RetainingPathOption* option) {
- if (!retaining_path_targets()->IsFixedArrayOfWeakCells()) return false;
- FixedArrayOfWeakCells* targets =
- FixedArrayOfWeakCells::cast(retaining_path_targets());
- int length = targets->Length();
+ WeakArrayList* targets = retaining_path_targets();
+ int length = targets->length();
+ MaybeObject* object_to_check = HeapObjectReference::Weak(object);
for (int i = 0; i < length; i++) {
- if (targets->Get(i) == object) {
+ MaybeObject* target = targets->Get(i);
+ DCHECK(target->IsWeakOrClearedHeapObject());
+ if (target == object_to_check) {
DCHECK(retaining_path_target_option_.count(i));
*option = retaining_path_target_option_[i];
return true;
@@ -1038,29 +1042,6 @@ void Heap::GarbageCollectionEpilogue() {
}
}
-
-void Heap::PreprocessStackTraces() {
- FixedArrayOfWeakCells::Iterator iterator(weak_stack_trace_list());
- FixedArray* elements;
- while ((elements = iterator.Next<FixedArray>()) != nullptr) {
- for (int j = 1; j < elements->length(); j += 4) {
- Object* maybe_code = elements->get(j + 2);
- // If GC happens while adding a stack trace to the weak fixed array,
- // which has been copied into a larger backing store, we may run into
- // a stack trace that has already been preprocessed. Guard against this.
- if (!maybe_code->IsAbstractCode()) break;
- AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
- int offset = Smi::ToInt(elements->get(j + 3));
- int pos = abstract_code->SourcePosition(offset);
- elements->set(j + 2, Smi::FromInt(pos));
- }
- }
- // We must not compact the weak fixed list here, as we may be in the middle
- // of writing to it, when the GC triggered. Instead, we reset the root value.
- set_weak_stack_trace_list(Smi::kZero);
-}
-
-
class GCCallbacksScope {
public:
explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
@@ -1175,8 +1156,7 @@ intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
return 0;
}
-void ReportDuplicates(Isolate* isolate, int size,
- std::vector<HeapObject*>& objects) {
+void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
if (objects.size() == 0) return;
sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
@@ -1274,7 +1254,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
++it) {
- ReportDuplicates(isolate(), it->first, it->second);
+ ReportDuplicates(it->first, it->second);
}
}
}
@@ -1805,7 +1785,7 @@ bool Heap::PerformGarbageCollection(
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
- size_t new_limit = heap_controller()->CalculateOldGenerationAllocationLimit(
+ size_t new_limit = heap_controller()->CalculateAllocationLimit(
old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
new_space()->Capacity(), CurrentHeapGrowingMode());
old_generation_allocation_limit_ = new_limit;
@@ -1814,7 +1794,7 @@ bool Heap::PerformGarbageCollection(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
- size_t new_limit = heap_controller()->CalculateOldGenerationAllocationLimit(
+ size_t new_limit = heap_controller()->CalculateAllocationLimit(
old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
new_space()->Capacity(), CurrentHeapGrowingMode());
if (new_limit < old_generation_allocation_limit_) {
@@ -1925,7 +1905,6 @@ void Heap::MarkCompactEpilogue() {
incremental_marking()->Epilogue();
- PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
}
@@ -2296,6 +2275,25 @@ void Heap::ProtectUnprotectedMemoryChunks() {
unprotected_memory_chunks_.clear();
}
+bool Heap::ExternalStringTable::Contains(HeapObject* obj) {
+ for (size_t i = 0; i < new_space_strings_.size(); ++i) {
+ if (new_space_strings_[i] == obj) return true;
+ }
+ for (size_t i = 0; i < old_space_strings_.size(); ++i) {
+ if (old_space_strings_[i] == obj) return true;
+ }
+ return false;
+}
+
+void Heap::ProcessMovedExternalString(Page* old_page, Page* new_page,
+ ExternalString* string) {
+ size_t size = string->ExternalPayloadSize();
+ new_page->IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, size);
+ old_page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, size);
+}
+
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
@@ -2313,24 +2311,70 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
}
// String is still reachable.
- String* string = String::cast(first_word.ToForwardingAddress());
- if (string->IsThinString()) string = ThinString::cast(string)->actual();
+ String* new_string = String::cast(first_word.ToForwardingAddress());
+ String* original_string = reinterpret_cast<String*>(*p);
+ // The length of the original string is used to disambiguate the scenario
+  // of a ThinString being forwarded to an ExternalString (which already exists
+ // in the OLD space), and an ExternalString being forwarded to its promoted
+ // copy. See Scavenger::EvacuateThinString.
+ if (new_string->IsThinString() || original_string->length() == 0) {
+ // Filtering Thin strings out of the external string table.
+ return nullptr;
+ } else if (new_string->IsExternalString()) {
+ heap->ProcessMovedExternalString(
+ Page::FromAddress(reinterpret_cast<Address>(*p)),
+ Page::FromHeapObject(new_string), ExternalString::cast(new_string));
+ return new_string;
+ }
+
// Internalization can replace external strings with non-external strings.
- return string->IsExternalString() ? string : nullptr;
+ return new_string->IsExternalString() ? new_string : nullptr;
}
-void Heap::ExternalStringTable::Verify() {
+void Heap::ExternalStringTable::VerifyNewSpace() {
#ifdef DEBUG
+ std::set<String*> visited_map;
+ std::map<MemoryChunk*, size_t> size_map;
+ ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- Object* obj = Object::cast(new_space_strings_[i]);
- DCHECK(InNewSpace(obj));
+ String* obj = String::cast(new_space_strings_[i]);
+ MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
+ DCHECK(mc->InNewSpace());
+ DCHECK(heap_->InNewSpace(obj));
DCHECK(!obj->IsTheHole(heap_->isolate()));
- }
+ DCHECK(obj->IsExternalString());
+ // Note: we can have repeated elements in the table.
+ DCHECK_EQ(0, visited_map.count(obj));
+ visited_map.insert(obj);
+ size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
+ }
+ for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
+ it != size_map.end(); it++)
+ DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
+#endif
+}
+
+void Heap::ExternalStringTable::Verify() {
+#ifdef DEBUG
+ std::set<String*> visited_map;
+ std::map<MemoryChunk*, size_t> size_map;
+ ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
+ VerifyNewSpace();
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- Object* obj = Object::cast(old_space_strings_[i]);
- DCHECK(!InNewSpace(obj));
+ String* obj = String::cast(old_space_strings_[i]);
+ MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
+ DCHECK(!mc->InNewSpace());
+ DCHECK(!heap_->InNewSpace(obj));
DCHECK(!obj->IsTheHole(heap_->isolate()));
- }
+ DCHECK(obj->IsExternalString());
+ // Note: we can have repeated elements in the table.
+ DCHECK_EQ(0, visited_map.count(obj));
+ visited_map.insert(obj);
+ size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
+ }
+ for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
+ it != size_map.end(); it++)
+ DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
#endif
}
@@ -2363,7 +2407,7 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
new_space_strings_.resize(static_cast<size_t>(last - start));
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- Verify();
+ VerifyNewSpace();
}
#endif
}
@@ -2661,7 +2705,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kRetainingPathTargetsRootIndex:
case kFeedbackVectorsForProfilingToolsRootIndex:
case kNoScriptSharedFunctionInfosRootIndex:
- case kWeakStackTraceListRootIndex:
case kSerializedObjectsRootIndex:
case kSerializedGlobalProxySizesRootIndex:
case kPublicSymbolTableRootIndex:
@@ -2691,19 +2734,6 @@ bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
return can_be;
}
-int Heap::FullSizeNumberStringCacheLength() {
- // Compute the size of the number string cache based on the max newspace size.
- // The number string cache has a minimum size based on twice the initial cache
- // size to ensure that it is bigger after being made 'full size'.
- size_t number_string_cache_size = max_semi_space_size_ / 512;
- number_string_cache_size =
- Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
- Min<size_t>(0x4000u, number_string_cache_size));
- // There is a string and a number per entry so the length is twice the number
- // of entries.
- return static_cast<int>(number_string_cache_size * 2);
-}
-
void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
@@ -2717,8 +2747,8 @@ namespace {
Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
return Heap::kFixed##Type##ArrayMapRootIndex;
TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
@@ -2729,8 +2759,8 @@ Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
switch (elements_kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return Heap::kFixed##Type##ArrayMapRootIndex;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
@@ -2742,8 +2772,8 @@ Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
Heap::RootListIndex RootIndexForEmptyFixedTypedArray(
ElementsKind elements_kind) {
switch (elements_kind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return Heap::kEmptyFixed##Type##ArrayRootIndex;
TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
@@ -2782,7 +2812,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
SKIP_WRITE_BARRIER);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
- Memory::Address_at(addr + kPointerSize) =
+ Memory<Address>(addr + kPointerSize) =
static_cast<Address>(kClearedFreeMemoryValue);
}
} else {
@@ -2850,8 +2880,23 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
} // namespace
#endif // ENABLE_SLOW_DCHECKS
+namespace {
+bool MayContainRecordedSlots(HeapObject* object) {
+ // New space object do not have recorded slots.
+ if (MemoryChunk::FromHeapObject(object)->InNewSpace()) return false;
+ // Whitelist objects that definitely do not have pointers.
+ if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
+ // Conservatively return true for other objects.
+ return true;
+}
+} // namespace
+
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
+ if (elements_to_trim == 0) {
+ // This simplifies reasoning in the rest of the function.
+ return object;
+ }
CHECK_NOT_NULL(object);
DCHECK(CanMoveObjectStart(object));
// Add custom visitor to concurrent marker if new left-trimmable type
@@ -2886,7 +2931,8 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
+ HeapObject* filler =
+ CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
@@ -2903,6 +2949,23 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
ClearRecordedSlot(new_object, HeapObject::RawField(
new_object, FixedArrayBase::kLengthOffset));
+ // Handle invalidated old-to-old slots.
+ if (incremental_marking()->IsCompacting() &&
+ MayContainRecordedSlots(new_object)) {
+ // If the array was right-trimmed before, then it is registered in
+ // the invalidated_slots.
+ MemoryChunk::FromHeapObject(new_object)
+ ->MoveObjectWithInvalidatedSlots(filler, new_object);
+ // We have to clear slots in the free space to avoid stale old-to-old slots.
+    // Note that we cannot use the ClearFreedMemoryMode of CreateFillerObjectAt
+    // because we need pointer-granularity writes to avoid racing with the
+    // concurrent marker.
+ if (filler->Size() > FreeSpace::kSize) {
+ MemsetPointer(HeapObject::RawField(filler, FreeSpace::kSize),
+ ReadOnlyRoots(this).undefined_value(),
+ (filler->Size() - FreeSpace::kSize) / kPointerSize);
+ }
+ }
// Notify the heap profiler of change in object layout.
OnMoveEvent(new_object, object, new_object->Size());
@@ -2967,9 +3030,23 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
}
// Calculate location of new array end.
- Address old_end = object->address() + object->Size();
+ int old_size = object->Size();
+ Address old_end = object->address() + old_size;
Address new_end = old_end - bytes_to_trim;
+ // Register the array as an object with invalidated old-to-old slots. We
+ // cannot use NotifyObjectLayoutChange as it would mark the array black,
+ // which is not safe for left-trimming because left-trimming re-pushes
+ // only grey arrays onto the marking worklist.
+ if (incremental_marking()->IsCompacting() &&
+ MayContainRecordedSlots(object)) {
+ // Ensure that the object survives because the InvalidatedSlotsFilter will
+ // compute its size from its map during pointers updating phase.
+ incremental_marking()->WhiteToGreyAndPush(object);
+ MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
+ object, old_size);
+ }
+
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
@@ -3258,15 +3335,12 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
const DisallowHeapAllocation&) {
- DCHECK(InOldSpace(object) || InNewSpace(object) ||
- (lo_space()->Contains(object) && object->IsString()));
- if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
+ if (incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndPush(object);
- if (InOldSpace(object) && incremental_marking()->IsCompacting()) {
- // The concurrent marker might have recorded slots for the object.
- // Register this object as invalidated to filter out the slots.
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- chunk->RegisterObjectWithInvalidatedSlots(object, size);
+ if (incremental_marking()->IsCompacting() &&
+ MayContainRecordedSlots(object)) {
+ MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
+ object, size);
}
}
#ifdef VERIFY_HEAP
@@ -4800,7 +4874,7 @@ void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
}
void Heap::TracePossibleWrapper(JSObject* js_object) {
- DCHECK(js_object->WasConstructedFromApiFunction());
+ DCHECK(js_object->IsApiWrapper());
if (js_object->GetEmbedderFieldCount() >= 2 &&
js_object->GetEmbedderField(0) &&
js_object->GetEmbedderField(0) != ReadOnlyRoots(this).undefined_value() &&
@@ -5005,15 +5079,37 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
}
namespace {
-void CompactFixedArrayOfWeakCells(Isolate* isolate, Object* object) {
- if (object->IsFixedArrayOfWeakCells()) {
- FixedArrayOfWeakCells* array = FixedArrayOfWeakCells::cast(object);
- array->Compact<FixedArrayOfWeakCells::NullCallback>(isolate);
- }
+Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
+ Handle<WeakArrayList> array,
+ PretenureFlag pretenure) {
+ if (array->length() == 0) {
+ return array;
+ }
+ int new_length = array->CountLiveWeakReferences();
+ if (new_length == array->length()) {
+ return array;
+ }
+
+ Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
+ heap->isolate(),
+ handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
+ new_length, pretenure);
+ // Allocation might have caused GC and turned some of the elements into
+ // cleared weak heap objects. Count the number of live references again and
+ // fill in the new array.
+ int copy_to = 0;
+ for (int i = 0; i < array->length(); i++) {
+ MaybeObject* element = array->Get(i);
+ if (element->IsClearedWeakHeapObject()) continue;
+ new_array->Set(copy_to++, element);
+ }
+ new_array->set_length(copy_to);
+ return new_array;
}
+
} // anonymous namespace
-void Heap::CompactFixedArraysOfWeakCells() {
+void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
// Find known PrototypeUsers and compact them.
std::vector<Handle<PrototypeInfo>> prototype_infos;
{
@@ -5030,15 +5126,25 @@ void Heap::CompactFixedArraysOfWeakCells() {
for (auto& prototype_info : prototype_infos) {
Handle<WeakArrayList> array(
WeakArrayList::cast(prototype_info->prototype_users()), isolate());
+ DCHECK_IMPLIES(pretenure == TENURED,
+ InOldSpace(*array) ||
+ *array == ReadOnlyRoots(this).empty_weak_array_list());
WeakArrayList* new_array = PrototypeUsers::Compact(
- array, this, JSObject::PrototypeRegistryCompactionCallback);
+ array, this, JSObject::PrototypeRegistryCompactionCallback, pretenure);
prototype_info->set_prototype_users(new_array);
}
- // Find known FixedArrayOfWeakCells and compact them.
- CompactFixedArrayOfWeakCells(isolate(), noscript_shared_function_infos());
- CompactFixedArrayOfWeakCells(isolate(), script_list());
- CompactFixedArrayOfWeakCells(isolate(), weak_stack_trace_list());
+ // Find known WeakArrayLists and compact them.
+ Handle<WeakArrayList> scripts(script_list(), isolate());
+ DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*scripts));
+ scripts = CompactWeakArrayList(this, scripts, pretenure);
+ set_script_list(*scripts);
+
+ Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
+ isolate());
+ DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*no_script_list));
+ no_script_list = CompactWeakArrayList(this, no_script_list, pretenure);
+ set_noscript_shared_function_infos(*no_script_list);
}
void Heap::AddRetainedMap(Handle<Map> map) {
@@ -5139,6 +5245,20 @@ void Heap::CheckHandleCount() {
isolate_->handle_scope_implementer()->Iterate(&v);
}
+Address* Heap::store_buffer_top_address() {
+ return store_buffer()->top_address();
+}
+
+// static
+intptr_t Heap::store_buffer_mask_constant() {
+ return StoreBuffer::kStoreBufferMask;
+}
+
+// static
+Address Heap::store_buffer_overflow_function_address() {
+ return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
+}
+
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
Address slot_addr = reinterpret_cast<Address>(slot);
Page* page = Page::FromAddress(slot_addr);
@@ -5168,34 +5288,6 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
}
}
-void Heap::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
- Object* value) {
- DCHECK(InNewSpace(value));
- Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
- RelocInfo::Mode rmode = rinfo->rmode();
- Address addr = rinfo->pc();
- SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
- if (rinfo->IsInConstantPool()) {
- addr = rinfo->constant_pool_entry_address();
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- slot_type = CODE_ENTRY_SLOT;
- } else {
- DCHECK(RelocInfo::IsEmbeddedObject(rmode));
- slot_type = OBJECT_SLOT;
- }
- }
- RememberedSet<OLD_TO_NEW>::InsertTyped(
- source_page, reinterpret_cast<Address>(host), slot_type, addr);
-}
-
-void Heap::RecordWritesIntoCode(Code* code) {
- for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
- !it.done(); it.next()) {
- RecordWriteIntoCode(code, it.rinfo(), it.rinfo()->target_object());
- }
-}
-
-
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
case RO_SPACE:
@@ -5450,19 +5542,15 @@ void Heap::ExternalStringTable::CleanUpAll() {
void Heap::ExternalStringTable::TearDown() {
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
Object* o = new_space_strings_[i];
- if (o->IsThinString()) {
- o = ThinString::cast(o)->actual();
- if (!o->IsExternalString()) continue;
- }
+    // Don't finalize thin strings.
+ if (o->IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
}
new_space_strings_.clear();
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
Object* o = old_space_strings_[i];
- if (o->IsThinString()) {
- o = ThinString::cast(o)->actual();
- if (!o->IsExternalString()) continue;
- }
+    // Don't finalize thin strings.
+ if (o->IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
}
old_space_strings_.clear();
@@ -5773,5 +5861,105 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
}
}
+void Heap::WriteBarrierForCodeSlow(Code* code) {
+ for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
+ !it.done(); it.next()) {
+ GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
+ MarkingBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
+ }
+}
+
+void Heap::GenerationalBarrierSlow(HeapObject* object, Address slot,
+ HeapObject* value) {
+ Heap* heap = Heap::FromWritableHeapObject(object);
+ heap->store_buffer()->InsertEntry(slot);
+}
+
+void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray* array,
+ int offset, int length) {
+ for (int i = 0; i < length; i++) {
+ if (!InNewSpace(array->get(offset + i))) continue;
+ heap->store_buffer()->InsertEntry(
+ reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
+ }
+}
+
+void Heap::GenerationalBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
+ HeapObject* object) {
+ DCHECK(InNewSpace(object));
+ Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
+ RelocInfo::Mode rmode = rinfo->rmode();
+ Address addr = rinfo->pc();
+ SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
+ if (rinfo->IsInConstantPool()) {
+ addr = rinfo->constant_pool_entry_address();
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
+ slot_type = CODE_ENTRY_SLOT;
+ } else {
+ DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+ slot_type = OBJECT_SLOT;
+ }
+ }
+ RememberedSet<OLD_TO_NEW>::InsertTyped(
+ source_page, reinterpret_cast<Address>(host), slot_type, addr);
+}
+
+void Heap::MarkingBarrierSlow(HeapObject* object, Address slot,
+ HeapObject* value) {
+ Heap* heap = Heap::FromWritableHeapObject(object);
+ heap->incremental_marking()->RecordWriteSlow(
+ object, reinterpret_cast<HeapObjectReference**>(slot), value);
+}
+
+void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject* object) {
+ if (FLAG_concurrent_marking ||
+ heap->incremental_marking()->marking_state()->IsBlack(object)) {
+ heap->incremental_marking()->RevisitObject(object);
+ }
+}
+
+void Heap::MarkingBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
+ HeapObject* object) {
+ Heap* heap = Heap::FromWritableHeapObject(host);
+ DCHECK(heap->incremental_marking()->IsMarking());
+ heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
+}
+
+bool Heap::PageFlagsAreConsistent(HeapObject* object) {
+ Heap* heap = Heap::FromWritableHeapObject(object);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ heap_internals::MemoryChunk* slim_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+
+ const bool generation_consistency =
+ chunk->owner()->identity() != NEW_SPACE ||
+ (chunk->InNewSpace() && slim_chunk->InNewSpace());
+ const bool marking_consistency =
+ !heap->incremental_marking()->IsMarking() ||
+ (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
+ slim_chunk->IsMarking());
+
+ return generation_consistency && marking_consistency;
+}
+
+static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
+ heap_internals::MemoryChunk::kMarkingBit,
+ "Incremental marking flag inconsistent");
+static_assert(MemoryChunk::Flag::IN_FROM_SPACE ==
+ heap_internals::MemoryChunk::kFromSpaceBit,
+ "From space flag inconsistent");
+static_assert(MemoryChunk::Flag::IN_TO_SPACE ==
+ heap_internals::MemoryChunk::kToSpaceBit,
+ "To space flag inconsistent");
+static_assert(MemoryChunk::kFlagsOffset ==
+ heap_internals::MemoryChunk::kFlagsOffset,
+ "Flag offset inconsistent");
+
+void Heap::SetEmbedderStackStateForNextFinalizaton(
+ EmbedderHeapTracer::EmbedderStackState stack_state) {
+ local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
+ stack_state);
+}
+
} // namespace internal
} // namespace v8
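The static_asserts above pin the slim heap_internals::MemoryChunk mirror to the real MemoryChunk layout, because the inline barriers probe page flags without including heap internals. A stand-alone restatement of that probe (bit values copied from the mirror struct, not independently verified):

    // Restates the fast-path flag probe guarded by the static_asserts above.
    bool SketchObjectIsInNewSpace(Address object_address) {
      Address chunk = object_address & ~kPageAlignmentMask;  // page start
      uintptr_t flags =
          *reinterpret_cast<const uintptr_t*>(chunk + sizeof(size_t));
      constexpr uintptr_t kFromSpaceBit = uintptr_t{1} << 3;
      constexpr uintptr_t kToSpaceBit = uintptr_t{1} << 4;
      return (flags & (kFromSpaceBit | kToSpaceBit)) != 0;
    }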
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 0f3c9ea389..2e750d56fa 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -47,7 +47,7 @@ class DeoptimizationData;
class HandlerTable;
class IncrementalMarking;
class JSArrayBuffer;
-
+class ExternalString;
using v8::MemoryPressureLevel;
// Heap roots that are known to be immortal immovable, for which we can safely
@@ -88,7 +88,6 @@ using v8::MemoryPressureLevel;
V(EmptyScript) \
V(EmptySloppyArgumentsElements) \
V(EmptySlowElementDictionary) \
- V(EmptyWeakCell) \
V(EvalContextMap) \
V(Exception) \
V(FalseValue) \
@@ -159,19 +158,12 @@ using v8::MemoryPressureLevel;
V(UninitializedValue) \
V(UncompiledDataWithoutPreParsedScopeMap) \
V(UncompiledDataWithPreParsedScopeMap) \
- V(WeakCellMap) \
V(WeakFixedArrayMap) \
V(WeakArrayListMap) \
V(WithContextMap) \
V(empty_string) \
PRIVATE_SYMBOL_LIST(V)
-#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
- do { \
- heap->RecordFixedArrayElements(array, start, length); \
- heap->incremental_marking()->RecordWrites(array); \
- } while (false)
-
class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferTracker;
@@ -492,6 +484,24 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
+ V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code* host);
+ V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject* object,
+ Address slot,
+ HeapObject* value);
+ V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
+ Heap* heap, FixedArray* array, int offset, int length);
+ V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
+ Code* host, RelocInfo* rinfo, HeapObject* value);
+ V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject* object,
+ Address slot,
+ HeapObject* value);
+ V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(
+ Heap* heap, HeapObject* object);
+ V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code* host,
+ RelocInfo* rinfo,
+ HeapObject* value);
+ V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject* object);
+
// Notifies the heap that is ok to start marking or other activities that
// should not happen during deserialization.
void NotifyDeserializationComplete();
@@ -678,7 +688,10 @@ class Heap {
external_memory_concurrently_freed_ = 0;
}
- void CompactFixedArraysOfWeakCells();
+ void ProcessMovedExternalString(Page* old_page, Page* new_page,
+ ExternalString* string);
+
+ void CompactWeakArrayLists(PretenureFlag pretenure);
void AddRetainedMap(Handle<Map> map);
@@ -842,6 +855,10 @@ class Heap {
return kRootsBuiltinsOffset;
}
+ static constexpr int root_register_addressable_end_offset() {
+ return kRootRegisterAddressableEndOffset;
+ }
+
Address root_register_addressable_end() {
return reinterpret_cast<Address>(roots_array_start()) +
kRootRegisterAddressableEndOffset;
@@ -971,16 +988,6 @@ class Heap {
// Store buffer API. =========================================================
// ===========================================================================
- // Write barrier support for object[offset] = o;
- inline void RecordWrite(Object* object, MaybeObject** slot,
- MaybeObject* value);
- inline void RecordWrite(Object* object, Object** slot, Object* value);
- inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
- void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
- void RecordWritesIntoCode(Code* code);
- inline void RecordFixedArrayElements(FixedArray* array, int offset,
- int length);
-
// Used for query incremental marking status in generated code.
Address* IsMarkingFlagAddress() {
return reinterpret_cast<Address*>(&is_marking_flag_);
@@ -988,7 +995,9 @@ class Heap {
void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
- inline Address* store_buffer_top_address();
+ Address* store_buffer_top_address();
+ static intptr_t store_buffer_mask_constant();
+ static Address store_buffer_overflow_function_address();
void ClearRecordedSlot(HeapObject* object, Object** slot);
void ClearRecordedSlotRange(Address start, Address end);
@@ -1081,6 +1090,8 @@ class Heap {
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
void TracePossibleWrapper(JSObject* js_object);
void RegisterExternallyReferencedObject(Object** object);
+ void SetEmbedderStackStateForNextFinalizaton(
+ EmbedderHeapTracer::EmbedderStackState stack_state);
// ===========================================================================
// External string table API. ================================================
@@ -1089,6 +1100,11 @@ class Heap {
// Registers an external string.
inline void RegisterExternalString(String* string);
+  // Called when a string's external resource changes. The old and new payload
+  // sizes are passed as arguments.
+ inline void UpdateExternalString(String* string, size_t old_payload,
+ size_t new_payload);
+
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
inline void FinalizeExternalString(String* string);
@@ -1466,6 +1482,9 @@ class Heap {
static const char* GarbageCollectionReasonToString(
GarbageCollectionReason gc_reason);
+  // Calculates the number of entries for the full-sized number-to-string cache.
+ inline int MaxNumberToStringCacheSize() const;
+
private:
class SkipStoreBufferScope;
@@ -1481,6 +1500,7 @@ class Heap {
// Registers an external string.
inline void AddString(String* string);
+ bool Contains(HeapObject* obj);
void IterateAll(RootVisitor* v);
void IterateNewSpaceStrings(RootVisitor* v);
@@ -1501,6 +1521,7 @@ class Heap {
private:
void Verify();
+ void VerifyNewSpace();
Heap* const heap_;
@@ -1615,8 +1636,6 @@ class Heap {
int NumberOfScavengeTasks();
- void PreprocessStackTraces();
-
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
@@ -1675,8 +1694,6 @@ class Heap {
// Record statistics after garbage collection.
void ReportStatisticsAfterGC();
- // Creates and installs the full-sized number string cache.
- int FullSizeNumberStringCacheLength();
// Flush the number to string cache.
void FlushNumberStringCache();
@@ -2249,7 +2266,7 @@ class Heap {
friend class EphemeronHashTableMarkingTask;
friend class GCCallbacksScope;
friend class GCTracer;
- friend class HeapController;
+ friend class MemoryController;
friend class HeapIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
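Heap::UpdateExternalString, added in heap-inl.h, keeps the per-page kExternalString byte counter in sync when a string's resource is swapped. A hypothetical caller (the resource swap itself is elided; only the accounting call is API from this change):

    // Hypothetical caller sketch.
    void SketchReplaceExternalResource(Heap* heap, String* string,
                                       size_t old_payload, size_t new_payload) {
      // ... install the new v8::String::ExternalStringResource on `string` ...
      heap->UpdateExternalString(string, old_payload, new_payload);
    }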
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index d2736c6715..19d6b22e4d 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -7,6 +7,7 @@
#include "src/heap/incremental-marking.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/objects/maybe-object.h"
namespace v8 {
@@ -35,21 +36,6 @@ void IncrementalMarking::RecordMaybeWeakWrite(HeapObject* obj,
}
}
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
- if (IsMarking()) {
- if (FLAG_concurrent_marking || marking_state()->IsBlack(obj)) {
- RevisitObject(obj);
- }
- }
-}
-
-void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
- Object* value) {
- if (IsMarking() && value->IsHeapObject()) {
- RecordWriteIntoCodeSlow(host, rinfo, value);
- }
-}
-
void IncrementalMarking::RestartIfNotMarking() {
if (state_ == COMPLETE) {
state_ = MARKING;
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 2b84a45999..a58d25fff4 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -11,6 +11,7 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
@@ -113,8 +114,9 @@ int IncrementalMarking::RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
return 0;
}
-void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
- Object* value) {
+void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
+ HeapObject* value) {
+ DCHECK(IsMarking());
if (BaseRecordWrite(host, value)) {
// Object is not going to be rescanned. We need to record the slot.
heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
@@ -130,6 +132,9 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
}
void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
+  // Marking a left-trimmable fixed array black is unsafe because left-trimming
+ // re-pushes only grey arrays onto the marking worklist.
+ DCHECK(!obj->IsFixedArrayBase());
// Color the object black and push it into the bailout deque.
marking_state()->WhiteToGrey(obj);
if (marking_state()->GreyToBlack(obj)) {
@@ -197,7 +202,10 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
DCHECK(success);
USE(success);
}
- marking_worklist()->Push(to);
+ // Subsequent left-trimming will re-push only grey arrays.
+ // Ensure that this array is grey.
+ DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
+ marking_worklist()->PushBailout(to);
RestartIfNotMarking();
}
}
@@ -928,10 +936,7 @@ double IncrementalMarking::AdvanceIncrementalMarking(
heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
if (!heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
- heap_->local_embedder_heap_tracer()->Trace(
- wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::
- DO_NOT_FORCE_COMPLETION));
+ heap_->local_embedder_heap_tracer()->Trace(wrapper_deadline);
}
} else {
Step(step_size_in_bytes, completion_action, step_origin);
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 1a916693ba..0fb5e11651 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -209,13 +209,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
V8_INLINE void RecordWrite(HeapObject* obj, Object** slot, Object* value);
V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObject** slot,
MaybeObject* value);
- V8_INLINE void RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
- Object* value);
- V8_INLINE void RecordWrites(HeapObject* obj);
+ void RevisitObject(HeapObject* obj);
void RecordWriteSlow(HeapObject* obj, HeapObjectReference** slot,
Object* value);
- void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
+ void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, HeapObject* value);
// Returns true if the function succeeds in transitioning the object
// from white to grey.
@@ -301,8 +299,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// Visits the object and returns its size.
V8_INLINE int VisitObject(Map* map, HeapObject* obj);
- void RevisitObject(HeapObject* obj);
-
void IncrementIdleMarkingDelayCounter();
void AdvanceIncrementalMarkingOnAllocation();
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 577c4a5576..5e4610257e 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -47,6 +47,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
// Ask the object if the slot is valid.
if (invalidated_object_ == nullptr) {
invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
+ DCHECK(!invalidated_object_->IsFiller());
invalidated_object_size_ =
invalidated_object_->SizeFromMap(invalidated_object_->map());
}
@@ -56,10 +57,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
static_cast<int>(invalidated_end_ - invalidated_start_));
if (offset >= invalidated_object_size_) {
- // A new object could have been allocated during evacuation in the free
- // space outside the object. Since objects are not invalidated in GC pause
- // we can return true here.
- return true;
+ return slots_in_free_space_are_valid_;
}
return invalidated_object_->IsValidSlot(invalidated_object_->map(), offset);
}
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index 85430e58bc..42042c63ef 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -9,8 +9,15 @@ namespace v8 {
namespace internal {
InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
+ // Adjust slots_in_free_space_are_valid_ if more spaces are added.
DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
- chunk->owner()->identity() == OLD_SPACE);
+ chunk->InOldSpace() || chunk->InLargeObjectSpace());
+ // The sweeper removes invalid slots and makes free space available for
+ // allocation. Slots for new objects can be recorded in the free space.
+ // Note that we cannot simply check for SweepingDone because pages in large
+ // object space are not swept but have SweepingDone() == true.
+ slots_in_free_space_are_valid_ = chunk->SweepingDone() && chunk->InOldSpace();
+
InvalidatedSlots* invalidated_slots =
chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
iterator_ = invalidated_slots->begin();
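The new slots_in_free_space_are_valid_ flag changes what IsValid returns for slots that fall past the end of an invalidated object. A simplified sketch of the intended use (not the real RememberedSet iteration; slots must be visited in ascending address order because the filter walks its ranges once):

    #include <vector>

    // Simplified sketch; the real caller iterates the OLD_TO_OLD remembered set.
    void SketchFilterOldToOldSlots(MemoryChunk* chunk,
                                   const std::vector<Address>& sorted_slots) {
      InvalidatedSlotsFilter filter(chunk);
      for (Address slot : sorted_slots) {
        if (!filter.IsValid(slot)) continue;  // stale slot, skip it
        // ... update the pointer stored at `slot` ...
      }
    }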
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index e9410575a3..641e8feb91 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -42,6 +42,7 @@ class InvalidatedSlotsFilter {
Address invalidated_end_;
HeapObject* invalidated_object_;
int invalidated_object_size_;
+ bool slots_in_free_space_are_valid_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/local-allocator-inl.h
new file mode 100644
index 0000000000..7263387465
--- /dev/null
+++ b/deps/v8/src/heap/local-allocator-inl.h
@@ -0,0 +1,109 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_LOCAL_ALLOCATOR_INL_H_
+#define V8_HEAP_LOCAL_ALLOCATOR_INL_H_
+
+#include "src/heap/local-allocator.h"
+
+#include "src/heap/spaces-inl.h"
+
+namespace v8 {
+namespace internal {
+
+AllocationResult LocalAllocator::Allocate(AllocationSpace space,
+ int object_size,
+ AllocationAlignment alignment) {
+ switch (space) {
+ case NEW_SPACE:
+ return AllocateInNewSpace(object_size, alignment);
+ case OLD_SPACE:
+ return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
+ alignment);
+ case CODE_SPACE:
+ return compaction_spaces_.Get(CODE_SPACE)
+ ->AllocateRaw(object_size, alignment);
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+void LocalAllocator::FreeLast(AllocationSpace space, HeapObject* object,
+ int object_size) {
+ switch (space) {
+ case NEW_SPACE:
+ FreeLastInNewSpace(object, object_size);
+ return;
+ case OLD_SPACE:
+ FreeLastInOldSpace(object, object_size);
+ return;
+ default:
+ // Only new and old space supported.
+ UNREACHABLE();
+ break;
+ }
+}
+
+void LocalAllocator::FreeLastInNewSpace(HeapObject* object, int object_size) {
+ if (!new_space_lab_.TryFreeLast(object, object_size)) {
+ // We couldn't free the last object so we have to write a proper filler.
+ heap_->CreateFillerObjectAt(object->address(), object_size,
+ ClearRecordedSlots::kNo);
+ }
+}
+
+void LocalAllocator::FreeLastInOldSpace(HeapObject* object, int object_size) {
+ if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
+ // We couldn't free the last object so we have to write a proper filler.
+ heap_->CreateFillerObjectAt(object->address(), object_size,
+ ClearRecordedSlots::kNo);
+ }
+}
+
+AllocationResult LocalAllocator::AllocateInLAB(int object_size,
+ AllocationAlignment alignment) {
+ AllocationResult allocation;
+ if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
+ return AllocationResult::Retry(OLD_SPACE);
+ }
+ allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
+ if (allocation.IsRetry()) {
+ if (!NewLocalAllocationBuffer()) {
+ return AllocationResult::Retry(OLD_SPACE);
+ } else {
+ allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
+ CHECK(!allocation.IsRetry());
+ }
+ }
+ return allocation;
+}
+
+bool LocalAllocator::NewLocalAllocationBuffer() {
+ if (lab_allocation_will_fail_) return false;
+ LocalAllocationBuffer saved_lab_ = new_space_lab_;
+ AllocationResult result =
+ new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
+ new_space_lab_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+ if (new_space_lab_.IsValid()) {
+ new_space_lab_.TryMerge(&saved_lab_);
+ return true;
+ }
+ new_space_lab_ = saved_lab_;
+ lab_allocation_will_fail_ = true;
+ return false;
+}
+
+AllocationResult LocalAllocator::AllocateInNewSpace(
+ int object_size, AllocationAlignment alignment) {
+ if (object_size > kMaxLabObjectSize) {
+ return new_space_->AllocateRawSynchronized(object_size, alignment);
+ }
+ return AllocateInLAB(object_size, alignment);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_LOCAL_ALLOCATOR_INL_H_
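A hypothetical evacuation step using the out-of-lined allocator above; the copy and forwarding-pointer logic is elided, and the race outcome is a placeholder:

    // Sketch only: shows the Allocate / FreeLast pairing, nothing more.
    bool SketchTryEvacuate(LocalAllocator* allocator, int size,
                           AllocationSpace target) {
      AllocationResult result = allocator->Allocate(target, size, kWordAligned);
      HeapObject* copy = nullptr;
      if (!result.To(&copy)) return false;  // caller retries or bails out
      // ... copy the object body and try to install a forwarding pointer ...
      bool won_migration_race = true;  // placeholder for the real outcome
      if (!won_migration_race) {
        // Another task migrated the object first; hand the space back.
        allocator->FreeLast(target, copy, size);
      }
      return won_migration_race;
    }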
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
index 4646a3783d..e84c7188c2 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/local-allocator.h
@@ -41,95 +41,19 @@ class LocalAllocator {
}
}
- AllocationResult Allocate(AllocationSpace space, int object_size,
- AllocationAlignment alignment) {
- switch (space) {
- case NEW_SPACE:
- return AllocateInNewSpace(object_size, alignment);
- case OLD_SPACE:
- return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
- alignment);
- case CODE_SPACE:
- return compaction_spaces_.Get(CODE_SPACE)
- ->AllocateRaw(object_size, alignment);
- default:
- UNREACHABLE();
- break;
- }
- }
-
- void FreeLast(AllocationSpace space, HeapObject* object, int object_size) {
- switch (space) {
- case NEW_SPACE:
- FreeLastInNewSpace(object, object_size);
- return;
- case OLD_SPACE:
- FreeLastInOldSpace(object, object_size);
- return;
- default:
- // Only new and old space supported.
- UNREACHABLE();
- break;
- }
- }
+ inline AllocationResult Allocate(AllocationSpace space, int object_size,
+ AllocationAlignment alignment);
+ inline void FreeLast(AllocationSpace space, HeapObject* object,
+ int object_size);
private:
- AllocationResult AllocateInNewSpace(int object_size,
- AllocationAlignment alignment) {
- if (object_size > kMaxLabObjectSize) {
- return new_space_->AllocateRawSynchronized(object_size, alignment);
- }
- return AllocateInLAB(object_size, alignment);
- }
-
- inline bool NewLocalAllocationBuffer() {
- if (lab_allocation_will_fail_) return false;
- LocalAllocationBuffer saved_lab_ = new_space_lab_;
- AllocationResult result =
- new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
- new_space_lab_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
- if (new_space_lab_.IsValid()) {
- new_space_lab_.TryMerge(&saved_lab_);
- return true;
- }
- new_space_lab_ = saved_lab_;
- lab_allocation_will_fail_ = true;
- return false;
- }
-
- AllocationResult AllocateInLAB(int object_size,
- AllocationAlignment alignment) {
- AllocationResult allocation;
- if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
- return AllocationResult::Retry(OLD_SPACE);
- }
- allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
- if (allocation.IsRetry()) {
- if (!NewLocalAllocationBuffer()) {
- return AllocationResult::Retry(OLD_SPACE);
- } else {
- allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
- CHECK(!allocation.IsRetry());
- }
- }
- return allocation;
- }
-
- void FreeLastInNewSpace(HeapObject* object, int object_size) {
- if (!new_space_lab_.TryFreeLast(object, object_size)) {
- // We couldn't free the last object so we have to write a proper filler.
- heap_->CreateFillerObjectAt(object->address(), object_size,
- ClearRecordedSlots::kNo);
- }
- }
-
- void FreeLastInOldSpace(HeapObject* object, int object_size) {
- if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
- // We couldn't free the last object so we have to write a proper filler.
- heap_->CreateFillerObjectAt(object->address(), object_size,
- ClearRecordedSlots::kNo);
- }
- }
+ inline AllocationResult AllocateInNewSpace(int object_size,
+ AllocationAlignment alignment);
+ inline bool NewLocalAllocationBuffer();
+ inline AllocationResult AllocateInLAB(int object_size,
+ AllocationAlignment alignment);
+ inline void FreeLastInNewSpace(HeapObject* object, int object_size);
+ inline void FreeLastInOldSpace(HeapObject* object, int object_size);
Heap* const heap_;
NewSpace* const new_space_;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index d200671955..466a89080b 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -161,30 +161,6 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitWeakCell(Map* map, WeakCell* weak_cell) {
- // Enqueue weak cell in linked list of encountered weak collections.
- // We can ignore weak cells with cleared values because they will always
- // contain smi zero.
- if (!weak_cell->cleared()) {
- HeapObject* value = HeapObject::cast(weak_cell->value());
- if (marking_state()->IsBlackOrGrey(value)) {
- // Weak cells with live values are directly processed here to reduce
- // the processing time of weak cells during the main GC pause.
- Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- collector_->RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
- } else {
- // If we do not know about liveness of values of weak cells, we have to
- // process them when we know the liveness of the whole transitive
- // closure.
- collector_->AddWeakCell(weak_cell);
- }
- }
- return WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
-}
-
-template <FixedArrayVisitationMode fixed_array_mode,
- TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitPointer(HeapObject* host, Object** p) {
if (!(*p)->IsHeapObject()) return;
@@ -578,6 +554,8 @@ typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
return iterator(chunk_, bitmap_, end_);
}
+Isolate* MarkCompactCollectorBase::isolate() { return heap()->isolate(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index af56c72418..dea105943a 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -20,7 +20,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
-#include "src/heap/local-allocator.h"
+#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
@@ -1099,7 +1099,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
DCHECK_EQ(host, rinfo->host());
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
- collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
+ GenerationalBarrierForCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
}
@@ -1249,8 +1249,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
bool AbortCompactionForTesting(HeapObject* object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
- Page::kPageAlignmentMask & ~kPointerAlignmentMask;
- if ((object->address() & Page::kPageAlignmentMask) == mask) {
+ kPageAlignmentMask & ~kPointerAlignmentMask;
+ if ((object->address() & kPageAlignmentMask) == mask) {
Page* page = Page::FromAddress(object->address());
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
@@ -1635,8 +1635,7 @@ void MarkCompactCollector::PerformWrapperTracing() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
heap_->local_embedder_heap_tracer()->Trace(
- 0, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+ std::numeric_limits<double>::infinity());
}
}
@@ -1784,6 +1783,18 @@ void MarkCompactCollector::MarkLiveObjects() {
DCHECK(marking_worklist()->IsEmpty());
+ // Mark objects reachable through the embedder heap. This phase is
+ // opportunistic as it may not discover graphs that are only reachable
+ // through ephemerons.
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPERS);
+ while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone()) {
+ PerformWrapperTracing();
+ ProcessMarkingWorklist();
+ }
+ DCHECK(marking_worklist()->IsEmpty());
+ }
+
// The objects reachable from the roots are marked, yet unreachable objects
// are unmarked. Mark objects reachable due to embedder heap tracing or
// harmony weak maps.
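The loop added to MarkLiveObjects above alternates wrapper (embedder) tracing with draining the marking worklist until the remote tracer reports completion. A rough standalone sketch of that fixed-point pattern follows; RemoteTracer and the int-valued worklist are stand-ins invented for the sketch, not V8 types.

#include <deque>
#include <iostream>

// Invented stand-in for the embedder-side tracer: it holds some amount of
// remote work and may push newly found objects onto the V8-side worklist.
struct RemoteTracer {
  int pending = 3;
  bool IsRemoteTracingDone() const { return pending == 0; }
  void Trace(std::deque<int>* marking_worklist) {
    if (pending > 0) {
      marking_worklist->push_back(pending);  // newly discovered wrapper object
      --pending;
    }
  }
};

int main() {
  RemoteTracer tracer;
  std::deque<int> marking_worklist;
  // Alternate tracing and worklist draining until the remote side is done, so
  // objects discovered late by the embedder still get marked.
  while (!tracer.IsRemoteTracingDone()) {
    tracer.Trace(&marking_worklist);
    while (!marking_worklist.empty()) {
      std::cout << "mark object " << marking_worklist.front() << "\n";
      marking_worklist.pop_front();
    }
  }
}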
@@ -1874,13 +1885,11 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// cleared.
ClearFullMapTransitions();
}
- ClearWeakCells();
ClearWeakReferences();
MarkDependentCodeForDeoptimization();
ClearWeakCollections();
- DCHECK(weak_objects_.weak_cells.IsEmpty());
DCHECK(weak_objects_.transition_arrays.IsEmpty());
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
@@ -1939,7 +1948,7 @@ void MarkCompactCollector::ClearFullMapTransitions() {
// The array might contain "undefined" elements because it's not yet
// filled. Allow it.
if (array->GetTargetIfExists(0, isolate(), &map)) {
- DCHECK_NOT_NULL(map); // WeakCells aren't cleared yet.
+ DCHECK_NOT_NULL(map); // Weak pointers aren't cleared yet.
Map* parent = Map::cast(map->constructor_or_backpointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
@@ -2077,46 +2086,6 @@ void MarkCompactCollector::ClearWeakCollections() {
}
}
-void MarkCompactCollector::ClearWeakCells() {
- Heap* heap = this->heap();
- TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
- WeakCell* weak_cell;
- while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
- // We do not insert cleared weak cells into the list, so the value
- // cannot be a Smi here.
- HeapObject* value = HeapObject::cast(weak_cell->value());
- if (!non_atomic_marking_state()->IsBlackOrGrey(value)) {
- // Cells for new-space objects embedded in optimized code are wrapped in
- // WeakCell and put into Heap::weak_object_to_code_table.
- // Such cells do not have any strong references but we want to keep them
- // alive as long as the cell value is alive.
- // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
- if (value->IsCell()) {
- Object* cell_value = Cell::cast(value)->value();
- if (cell_value->IsHeapObject() &&
- non_atomic_marking_state()->IsBlackOrGrey(
- HeapObject::cast(cell_value))) {
- // Resurrect the cell.
- non_atomic_marking_state()->WhiteToBlack(value);
- Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
- RecordSlot(value, slot, HeapObject::cast(*slot));
- slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
- } else {
- weak_cell->clear();
- }
- } else {
- // All other objects.
- weak_cell->clear();
- }
- } else {
- // The value of the weak cell is alive.
- Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
- }
- }
-}
-
void MarkCompactCollector::ClearWeakReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
std::pair<HeapObject*, HeapObjectReference**> slot;
@@ -2140,7 +2109,6 @@ void MarkCompactCollector::ClearWeakReferences() {
}
void MarkCompactCollector::AbortWeakObjects() {
- weak_objects_.weak_cells.Clear();
weak_objects_.transition_arrays.Clear();
weak_objects_.ephemeron_hash_tables.Clear();
weak_objects_.current_ephemerons.Clear();
@@ -2195,6 +2163,8 @@ static inline SlotCallbackResult UpdateSlot(
}
DCHECK(!Heap::InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
+ } else {
+ DCHECK(heap_obj->map()->IsMap());
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
@@ -2301,7 +2271,14 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
MapWord map_word = HeapObject::cast(*p)->map_word();
if (map_word.IsForwardingAddress()) {
- return String::cast(map_word.ToForwardingAddress());
+ String* new_string = String::cast(map_word.ToForwardingAddress());
+
+ if (new_string->IsExternalString()) {
+ heap->ProcessMovedExternalString(
+ Page::FromAddress(reinterpret_cast<Address>(*p)),
+ Page::FromHeapObject(new_string), ExternalString::cast(new_string));
+ }
+ return new_string;
}
return String::cast(*p);
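The updated external-string-table path above follows the forwarding address that evacuation left in the old object and, when the moved string is external, tells the heap that it changed pages. A tiny standalone sketch of the forwarding-pointer follow step; the Object layout here is hypothetical and not V8's MapWord encoding.

#include <iostream>

// Hypothetical moved-object header: evacuation leaves a forwarding pointer in
// the old copy so that stale references can be redirected to the new copy.
struct Object {
  Object* forwarding;  // non-null once the object has been evacuated
  int payload;
};

// Follow the forwarding pointer if the object has moved, otherwise keep it.
Object* Follow(Object* obj) {
  return obj->forwarding != nullptr ? obj->forwarding : obj;
}

int main() {
  Object new_copy{nullptr, 42};
  Object old_copy{&new_copy, 0};  // evacuated: header points at the new copy
  Object* slot = &old_copy;       // a stale reference, e.g. a table entry
  slot = Follow(slot);            // update the entry to the new location
  std::cout << slot->payload << "\n";  // 42
}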
@@ -3026,13 +3003,14 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
nullptr) {
CHECK_NE(chunk_->owner(), heap_->map_space());
+ const auto check_and_update_old_to_new_slot_fn =
+ [this](MaybeObject** slot) {
+ return CheckAndUpdateOldToNewSlot(reinterpret_cast<Address>(slot));
+ };
RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_, [this](SlotType slot_type, Address host_addr, Address slot) {
+ chunk_, [=](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_, slot_type, slot, [this](MaybeObject** slot) {
- return CheckAndUpdateOldToNewSlot(
- reinterpret_cast<Address>(slot));
- });
+ heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
});
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
@@ -3896,7 +3874,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(free_end));
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(reinterpret_cast<void*>(free_start), 0xCC, size);
+ ZapCode(free_start, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
@@ -3913,7 +3891,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(p->area_end()));
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(reinterpret_cast<void*>(free_start), 0xCC, size);
+ ZapCode(free_start, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 169f2ae671..d62c964336 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -257,7 +257,7 @@ class MarkCompactCollectorBase {
virtual void CollectGarbage() = 0;
inline Heap* heap() const { return heap_; }
- inline Isolate* isolate() { return heap()->isolate(); }
+ inline Isolate* isolate();
protected:
static const int kMainThread = 0;
@@ -420,7 +420,6 @@ typedef Worklist<Ephemeron, 64> EphemeronWorklist;
// Weak objects encountered during marking.
struct WeakObjects {
- Worklist<WeakCell*, 64> weak_cells;
Worklist<TransitionArray*, 64> transition_arrays;
// Keep track of all EphemeronHashTables in the heap to process
@@ -647,10 +646,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
WeakObjects* weak_objects() { return &weak_objects_; }
- void AddWeakCell(WeakCell* weak_cell) {
- weak_objects_.weak_cells.Push(kMainThread, weak_cell);
- }
-
void AddTransitionArray(TransitionArray* array) {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
@@ -810,11 +805,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// The linked list of all encountered weak maps is destroyed.
void ClearWeakCollections();
- // Goes through the list of encountered weak cells and clears those with
+ // Goes through the list of encountered weak references and clears those with
// dead values. If the value is a dead map and the parent map transitions to
// the dead map via weak cell, then this function also clears the map
// transition.
- void ClearWeakCells();
void ClearWeakReferences();
void AbortWeakObjects();
@@ -926,7 +920,6 @@ class MarkingVisitor final
V8_INLINE int VisitMap(Map* map, Map* object);
V8_INLINE int VisitNativeContext(Map* map, Context* object);
V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
- V8_INLINE int VisitWeakCell(Map* map, WeakCell* object);
// ObjectVisitor implementation.
V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 6c7a26b672..ac7bcb8087 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -341,6 +341,9 @@ class ObjectStatsCollectorImpl {
ObjectStats::VirtualInstanceType type,
size_t size, size_t over_allocated,
CowMode check_cow_array = kCheckCow);
+ void RecordExternalResourceStats(Address resource,
+ ObjectStats::VirtualInstanceType type,
+ size_t size);
  // Gets size from |obj| and assumes no over allocating.
bool RecordSimpleVirtualObjectStats(HeapObject* parent, HeapObject* obj,
ObjectStats::VirtualInstanceType type);
@@ -379,6 +382,7 @@ class ObjectStatsCollectorImpl {
void RecordVirtualJSObjectDetails(JSObject* object);
void RecordVirtualMapDetails(Map* map);
void RecordVirtualScriptDetails(Script* script);
+ void RecordVirtualExternalStringDetails(ExternalString* script);
void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo* info);
void RecordVirtualJSFunctionDetails(JSFunction* function);
@@ -388,6 +392,7 @@ class ObjectStatsCollectorImpl {
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
std::unordered_set<HeapObject*> virtual_objects_;
+ std::unordered_set<Address> external_resources_;
FieldStatsCollector field_stats_collector_;
};
@@ -431,8 +436,9 @@ bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
bool ObjectStatsCollectorImpl::RecordVirtualObjectStats(
HeapObject* parent, HeapObject* obj, ObjectStats::VirtualInstanceType type,
size_t size, size_t over_allocated, CowMode check_cow_array) {
- if (!SameLiveness(parent, obj) || !ShouldRecordObject(obj, check_cow_array))
+ if (!SameLiveness(parent, obj) || !ShouldRecordObject(obj, check_cow_array)) {
return false;
+ }
if (virtual_objects_.find(obj) == virtual_objects_.end()) {
virtual_objects_.insert(obj);
@@ -442,6 +448,14 @@ bool ObjectStatsCollectorImpl::RecordVirtualObjectStats(
return false;
}
+void ObjectStatsCollectorImpl::RecordExternalResourceStats(
+ Address resource, ObjectStats::VirtualInstanceType type, size_t size) {
+ if (external_resources_.find(resource) == external_resources_.end()) {
+ external_resources_.insert(resource);
+ stats_->RecordVirtualObjectStats(type, size, 0);
+ }
+}
+
void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
AllocationSite* site) {
if (!site->PointsToLiteral()) return;
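RecordExternalResourceStats above counts each off-heap resource only once by remembering its address, so several strings backed by the same resource are not double counted. A standalone sketch of the same deduplication pattern, with names invented for the sketch:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <unordered_set>

using Address = std::uintptr_t;

struct ExternalResourceStats {
  std::unordered_set<Address> seen;  // resource addresses already recorded
  std::size_t total_bytes = 0;

  // Record the payload only the first time a resource address is seen.
  void Record(Address resource, std::size_t size) {
    if (seen.insert(resource).second) total_bytes += size;
  }
};

int main() {
  ExternalResourceStats stats;
  stats.Record(0x1000, 128);
  stats.Record(0x1000, 128);  // duplicate resource, ignored
  stats.Record(0x2000, 64);
  std::cout << stats.total_bytes << "\n";  // 192
}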
@@ -663,6 +677,8 @@ void ObjectStatsCollectorImpl::CollectStatistics(
RecordVirtualContext(Context::cast(obj));
} else if (obj->IsScript()) {
RecordVirtualScriptDetails(Script::cast(obj));
+ } else if (obj->IsExternalString()) {
+ RecordVirtualExternalStringDetails(ExternalString::cast(obj));
} else if (obj->IsArrayBoilerplateDescription()) {
RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription::cast(obj));
@@ -704,14 +720,13 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
RecordSimpleVirtualObjectStats(nullptr, heap_->retained_maps(),
ObjectStats::RETAINED_MAPS_TYPE);
- // FixedArrayOfWeakCells.
+ // WeakArrayList.
RecordSimpleVirtualObjectStats(
- nullptr,
- FixedArrayOfWeakCells::cast(heap_->noscript_shared_function_infos()),
+ nullptr, WeakArrayList::cast(heap_->noscript_shared_function_infos()),
ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
- RecordSimpleVirtualObjectStats(
- nullptr, FixedArrayOfWeakCells::cast(heap_->script_list()),
- ObjectStats::SCRIPT_LIST_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr,
+ WeakArrayList::cast(heap_->script_list()),
+ ObjectStats::SCRIPT_LIST_TYPE);
// HashTable.
RecordHashTableVirtualObjectStats(nullptr, heap_->code_stubs(),
@@ -776,24 +791,44 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
// Log the size of external source code.
- Object* source = script->source();
- if (source->IsExternalString()) {
+ Object* raw_source = script->source();
+ if (raw_source->IsExternalString()) {
// The contents of external strings aren't on the heap, so we have to record
- // them manually.
- ExternalString* external_source_string = ExternalString::cast(source);
- size_t off_heap_size = external_source_string->ExternalPayloadSize();
- size_t on_heap_size = external_source_string->Size();
- RecordVirtualObjectStats(script, external_source_string,
- ObjectStats::SCRIPT_SOURCE_EXTERNAL_TYPE,
- on_heap_size + off_heap_size,
- ObjectStats::kNoOverAllocation);
- } else if (source->IsHeapObject()) {
+    // them manually. The on-heap String object is recorded independently in
+ // the normal pass.
+ ExternalString* string = ExternalString::cast(raw_source);
+ Address resource = string->resource_as_address();
+ size_t off_heap_size = string->ExternalPayloadSize();
+ RecordExternalResourceStats(
+ resource,
+ string->IsOneByteRepresentation()
+ ? ObjectStats::SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE
+ : ObjectStats::SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE,
+ off_heap_size);
+ } else if (raw_source->IsString()) {
+ String* source = String::cast(raw_source);
RecordSimpleVirtualObjectStats(
- script, HeapObject::cast(source),
- ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_TYPE);
+ script, HeapObject::cast(raw_source),
+ source->IsOneByteRepresentation()
+ ? ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_ONE_BYTE_TYPE
+ : ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_TWO_BYTE_TYPE);
}
}
+void ObjectStatsCollectorImpl::RecordVirtualExternalStringDetails(
+ ExternalString* string) {
+ // Track the external string resource size in a separate category.
+
+ Address resource = string->resource_as_address();
+ size_t off_heap_size = string->ExternalPayloadSize();
+ RecordExternalResourceStats(
+ resource,
+ string->IsOneByteRepresentation()
+ ? ObjectStats::STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE
+ : ObjectStats::STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE,
+ off_heap_size);
+}
+
void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
SharedFunctionInfo* info) {
// Uncompiled SharedFunctionInfo gets its own category.
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index a21b7f749f..7914f09881 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -58,11 +58,15 @@
V(RETAINED_MAPS_TYPE) \
V(SCRIPT_LIST_TYPE) \
V(SCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
- V(SCRIPT_SOURCE_EXTERNAL_TYPE) \
- V(SCRIPT_SOURCE_NON_EXTERNAL_TYPE) \
+ V(SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE) \
+ V(SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE) \
+ V(SCRIPT_SOURCE_NON_EXTERNAL_ONE_BYTE_TYPE) \
+ V(SCRIPT_SOURCE_NON_EXTERNAL_TWO_BYTE_TYPE) \
V(SERIALIZED_OBJECTS_TYPE) \
V(SINGLE_CHARACTER_STRING_CACHE_TYPE) \
V(STRING_SPLIT_CACHE_TYPE) \
+ V(STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE) \
+ V(STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE) \
V(SOURCE_POSITION_TABLE_TYPE) \
V(UNCOMPILED_JS_FUNCTION_TYPE) \
V(UNCOMPILED_SHARED_FUNCTION_INFO_TYPE) \
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 54229416b7..f32bbc1914 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -12,6 +12,7 @@
#include "src/heap/mark-compact.h"
#include "src/macro-assembler.h"
#include "src/objects-body-descriptors-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index cdb7c917b0..63ef8fb353 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -23,47 +23,48 @@ class DataHandler;
class JSArrayBuffer;
class JSRegExp;
class JSWeakCollection;
+class UncompiledDataWithoutPreParsedScope;
class UncompiledDataWithPreParsedScope;
-#define TYPED_VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(ConsString) \
- V(DataHandler) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
- V(JSArrayBuffer) \
- V(JSFunction) \
- V(JSObject) \
- V(JSWeakCollection) \
- V(Map) \
- V(Oddball) \
- V(PreParsedScopeData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(SlicedString) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(Symbol) \
- V(ThinString) \
- V(TransitionArray) \
- V(UncompiledDataWithPreParsedScope) \
- V(WasmInstanceObject) \
- V(WeakCell)
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(ConsString) \
+ V(DataHandler) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(FixedFloat64Array) \
+ V(FixedTypedArrayBase) \
+ V(JSArrayBuffer) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(Oddball) \
+ V(PreParsedScopeData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(Symbol) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledDataWithoutPreParsedScope) \
+ V(UncompiledDataWithPreParsedScope) \
+ V(WasmInstanceObject)
// The base class for visitors that need to dispatch on object type. The default
// behavior of all visit functions is to iterate body of the given object using
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 95b7b5b9d5..e59457b10d 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -5,10 +5,10 @@
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
-#include "src/assembler.h"
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
+#include "src/reloc-info.h"
#include "src/v8memory.h"
namespace v8 {
@@ -284,8 +284,7 @@ class UpdateTypedSlotHelper {
callback(reinterpret_cast<MaybeObject**>(&code));
DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
- Memory::Address_at(entry_address) =
- reinterpret_cast<Code*>(code)->entry();
+ Memory<Address>(entry_address) = reinterpret_cast<Code*>(code)->entry();
}
return result;
}
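The change from Memory::Address_at(entry_address) to Memory<Address>(entry_address) reflects a templated raw-memory accessor. A minimal sketch of that accessor pattern, written here for illustration rather than copied from V8:

#include <cstdint>
#include <iostream>

using Address = std::uintptr_t;

// Sketch of a typed view onto raw memory: Memory<T>(addr) reads or writes a T
// stored at addr. The name mirrors the call site above but is redefined here.
template <class T>
T& Memory(Address addr) {
  return *reinterpret_cast<T*>(addr);
}

int main() {
  std::uint32_t value = 0;
  Address addr = reinterpret_cast<Address>(&value);
  Memory<std::uint32_t>(addr) = 42;  // typed store through the raw address
  std::cout << value << "\n";        // 42
}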
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index e581ebe571..649292085a 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -6,6 +6,8 @@
#define V8_HEAP_SCAVENGER_INL_H_
#include "src/heap/scavenger.h"
+
+#include "src/heap/local-allocator-inl.h"
#include "src/objects-inl.h"
#include "src/objects/map.h"
@@ -146,7 +148,8 @@ void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
ThinString* object, int object_size) {
if (!is_incremental_marking_) {
    // Loading actual is fine in a parallel setting as there is no write.
- HeapObject* actual = object->actual();
+ String* actual = object->actual();
+ object->set_length(0);
*slot = actual;
// ThinStrings always refer to internalized strings, which are
// always in old space.
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index d9f920ef7e..f8c6d496ce 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -202,5 +202,11 @@ void RootScavengeVisitor::ScavengePointer(Object** p) {
reinterpret_cast<HeapObject*>(object));
}
+RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
+ : scavenger_(scavenger) {}
+
+ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
+ : scavenger_(scavenger) {}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 847a5b07fc..4e6753f6ce 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -114,7 +114,7 @@ class Scavenger {
// filtering out non-HeapObjects and objects which do not reside in new space.
class RootScavengeVisitor final : public RootVisitor {
public:
- explicit RootScavengeVisitor(Scavenger* scavenger) : scavenger_(scavenger) {}
+ explicit RootScavengeVisitor(Scavenger* scavenger);
void VisitRootPointer(Root root, const char* description, Object** p) final;
void VisitRootPointers(Root root, const char* description, Object** start,
@@ -128,7 +128,7 @@ class RootScavengeVisitor final : public RootVisitor {
class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
public:
- explicit ScavengeVisitor(Scavenger* scavenger) : scavenger_(scavenger) {}
+ explicit ScavengeVisitor(Scavenger* scavenger);
V8_INLINE void VisitPointers(HeapObject* host, Object** start,
Object** end) final;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index fdb142ab56..2742cd9c9d 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -21,6 +21,7 @@
#include "src/objects/debug-objects.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/objects/map.h"
#include "src/objects/microtask.h"
#include "src/objects/module.h"
@@ -108,15 +109,6 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
Map::cast(result), instance_type, instance_size, elements_kind,
inobject_properties);
- if (!is_js_object) {
- // Eagerly initialize the WeakCell cache for the map as it will not be
- // writable in RO_SPACE.
- HandleScope handle_scope(isolate());
- Handle<WeakCell> weak_cell = isolate()->factory()->NewWeakCell(
- Handle<Map>(map, isolate()), TENURED_READ_ONLY);
- map->set_weak_cell_cache(*weak_cell);
- }
-
return map;
}
@@ -148,14 +140,13 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
Map::OwnsDescriptorsBit::encode(true) |
Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
- map->set_weak_cell_cache(Smi::kZero);
map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
return map;
}
void Heap::FinalizePartialMap(Map* map) {
ReadOnlyRoots roots(this);
- map->set_dependent_code(DependentCode::cast(roots.empty_fixed_array()));
+ map->set_dependent_code(DependentCode::cast(roots.empty_weak_fixed_array()));
map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
map->set_instance_descriptors(roots.empty_descriptor_array());
if (FLAG_unbox_double_fields) {
@@ -163,13 +154,6 @@ void Heap::FinalizePartialMap(Map* map) {
}
map->set_prototype(roots.null_value());
map->set_constructor_or_backpointer(roots.null_value());
-
- // Eagerly initialize the WeakCell cache for the map as it will not be
- // writable in RO_SPACE.
- HandleScope handle_scope(isolate());
- Handle<WeakCell> weak_cell = isolate()->factory()->NewWeakCell(
- Handle<Map>(map, isolate()), TENURED_READ_ONLY);
- map->set_weak_cell_cache(*weak_cell);
}
AllocationResult Heap::Allocate(Map* map, AllocationSpace space) {
@@ -242,7 +226,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
- ALLOCATE_PARTIAL_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell);
#undef ALLOCATE_PARTIAL_MAP
}
@@ -335,7 +318,6 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
FinalizePartialMap(roots.meta_map());
- FinalizePartialMap(roots.weak_cell_map());
FinalizePartialMap(roots.fixed_array_map());
FinalizePartialMap(roots.weak_fixed_array_map());
FinalizePartialMap(roots.weak_array_list_map());
@@ -426,8 +408,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
-#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
- ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype) \
+ ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
@@ -542,6 +524,20 @@ bool Heap::CreateInitialMaps() {
ObjectBoilerplateDescription::cast(obj));
{
+ // Empty array boilerplate description
+ AllocationResult alloc =
+ Allocate(roots.array_boilerplate_description_map(), RO_SPACE);
+ if (!alloc.To(&obj)) return false;
+
+ ArrayBoilerplateDescription::cast(obj)->set_constant_elements(
+ roots.empty_fixed_array());
+ ArrayBoilerplateDescription::cast(obj)->set_elements_kind(
+ ElementsKind::PACKED_SMI_ELEMENTS);
+ }
+ set_empty_array_boilerplate_description(
+ ArrayBoilerplateDescription::cast(obj));
+
+ {
AllocationResult allocation = Allocate(roots.boolean_map(), RO_SPACE);
if (!allocation.To(&obj)) return false;
}
@@ -573,7 +569,7 @@ bool Heap::CreateInitialMaps() {
set_empty_property_array(PropertyArray::cast(obj));
}
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
{ \
FixedTypedArrayBase* obj; \
if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
@@ -770,20 +766,13 @@ void Heap::CreateInitialObjects() {
set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
}
- {
- Handle<WeakCell> cell =
- factory->NewWeakCell(factory->undefined_value(), TENURED_READ_ONLY);
- set_empty_weak_cell(*cell);
- cell->clear();
- }
-
- set_detached_contexts(roots.empty_fixed_array());
+ set_detached_contexts(roots.empty_weak_array_list());
set_retained_maps(roots.empty_weak_array_list());
- set_retaining_path_targets(roots.undefined_value());
+ set_retaining_path_targets(roots.empty_weak_array_list());
set_feedback_vectors_for_profiling_tools(roots.undefined_value());
- set_script_list(Smi::kZero);
+ set_script_list(roots.empty_weak_array_list());
Handle<NumberDictionary> slow_element_dictionary = NumberDictionary::New(
isolate(), 1, TENURED_READ_ONLY, USE_CUSTOM_MINIMUM_CAPACITY);
@@ -883,9 +872,7 @@ void Heap::CreateInitialObjects() {
set_serialized_objects(roots.empty_fixed_array());
set_serialized_global_proxy_sizes(roots.empty_fixed_array());
- set_weak_stack_trace_list(Smi::kZero);
-
- set_noscript_shared_function_infos(Smi::kZero);
+ set_noscript_shared_function_infos(roots.empty_weak_array_list());
STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
set_deserialize_lazy_handler(Smi::kZero);
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 2ddcf6cf36..9e86905d00 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -468,13 +468,6 @@ V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
return AllocateRaw(size_in_bytes, alignment);
}
-
-LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
- return LocalAllocationBuffer(
- nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
-}
-
-
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 59ce145474..ff28ab56b2 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -17,10 +17,13 @@
#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"
@@ -799,6 +802,14 @@ bool MemoryChunk::IsPagedSpace() const {
return owner()->identity() != LO_SPACE;
}
+bool MemoryChunk::InOldSpace() const {
+ return owner()->identity() == OLD_SPACE;
+}
+
+bool MemoryChunk::InLargeObjectSpace() const {
+ return owner()->identity() == LO_SPACE;
+}
+
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
@@ -939,9 +950,11 @@ void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
if (is_marking) {
SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::INCREMENTAL_MARKING);
} else {
ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
}
}
@@ -949,8 +962,10 @@ void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
if (is_marking) {
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::INCREMENTAL_MARKING);
} else {
ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
}
}
@@ -1234,7 +1249,7 @@ void MemoryAllocator::ZapBlock(Address start, size_t size,
DCHECK_EQ(start % kPointerSize, 0);
DCHECK_EQ(size % kPointerSize, 0);
for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory::Address_at(start + s) = static_cast<Address>(zap_value);
+ Memory<Address>(start + s) = static_cast<Address>(zap_value);
}
}
@@ -1426,6 +1441,22 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
}
}
+void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject* old_start,
+ HeapObject* new_start) {
+ DCHECK_LT(old_start, new_start);
+ DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
+ MemoryChunk::FromHeapObject(new_start));
+ if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
+ auto it = invalidated_slots()->find(old_start);
+ if (it != invalidated_slots()->end()) {
+ int old_size = it->second;
+ int delta = static_cast<int>(new_start->address() - old_start->address());
+ invalidated_slots()->erase(it);
+ (*invalidated_slots())[new_start] = old_size - delta;
+ }
+ }
+}
+
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
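MoveObjectWithInvalidatedSlots above re-keys the invalidated-slots entry after array left-trimming: the object now starts later, so the recorded size shrinks by exactly the trimmed prefix. A small standalone sketch of that bookkeeping using a std::map keyed by object start address, a simplification of the real InvalidatedSlots type:

#include <cstdint>
#include <iostream>
#include <map>

using Address = std::uintptr_t;

// Simplified stand-in for invalidated_slots(): object start -> size of the
// area whose recorded slots are no longer valid.
using InvalidatedSlots = std::map<Address, int>;

// After left-trimming, the object starts at new_start; re-key the entry and
// shrink the recorded size by exactly the trimmed prefix.
void MoveObjectWithInvalidatedSlots(InvalidatedSlots* slots, Address old_start,
                                    Address new_start) {
  auto it = slots->find(old_start);
  if (it == slots->end()) return;
  int old_size = it->second;
  int delta = static_cast<int>(new_start - old_start);
  slots->erase(it);
  (*slots)[new_start] = old_size - delta;
}

int main() {
  InvalidatedSlots slots;
  slots[0x1000] = 256;
  MoveObjectWithInvalidatedSlots(&slots, 0x1000, 0x1040);  // trim 64 bytes
  std::cout << slots.begin()->second << "\n";  // 192
}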
@@ -1989,7 +2020,11 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
- if (object->IsJSArrayBuffer()) {
+ if (object->IsExternalString()) {
+ ExternalString* external_string = ExternalString::cast(object);
+ size_t size = external_string->ExternalPayloadSize();
+ external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
+ } else if (object->IsJSArrayBuffer()) {
JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size = NumberToSize(array_buffer->byte_length());
@@ -2473,7 +2508,11 @@ void NewSpace::Verify(Isolate* isolate) {
int size = object->Size();
object->IterateBody(map, size, &visitor);
- if (object->IsJSArrayBuffer()) {
+ if (object->IsExternalString()) {
+ ExternalString* external_string = ExternalString::cast(object);
+ size_t size = external_string->ExternalPayloadSize();
+ external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
+ } else if (object->IsJSArrayBuffer()) {
JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size = NumberToSize(array_buffer->byte_length());
@@ -2574,7 +2613,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const size_t delta = new_capacity - current_capacity_;
@@ -2613,7 +2652,7 @@ void SemiSpace::RewindPages(int num_pages) {
}
bool SemiSpace::ShrinkTo(size_t new_capacity) {
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
DCHECK_GE(new_capacity, minimum_capacity_);
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index dbd0d82008..47272501f3 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -110,7 +110,7 @@ class Space;
// Some assertion macros used in the debugging mode.
#define DCHECK_PAGE_ALIGNED(address) \
- DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
+ DCHECK((OffsetFrom(address) & kPageAlignmentMask) == 0)
#define DCHECK_OBJECT_ALIGNED(address) \
DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
@@ -312,7 +312,11 @@ class MemoryChunk {
// |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
// to iterate the page.
- SWEEP_TO_ITERATE = 1u << 17
+ SWEEP_TO_ITERATE = 1u << 17,
+
+ // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
+ // enabled.
+ INCREMENTAL_MARKING = 1u << 18
};
using Flags = uintptr_t;
@@ -403,7 +407,6 @@ class MemoryChunk {
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
@@ -512,6 +515,9 @@ class MemoryChunk {
InvalidatedSlots* AllocateInvalidatedSlots();
void ReleaseInvalidatedSlots();
void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
+ // Updates invalidated_slots after array left-trimming.
+ void MoveObjectWithInvalidatedSlots(HeapObject* old_start,
+ HeapObject* new_start);
InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
void ReleaseLocalTracker();
@@ -623,6 +629,10 @@ class MemoryChunk {
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
+ bool InOldSpace() const;
+
+ bool InLargeObjectSpace() const;
+
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
@@ -758,7 +768,8 @@ class Page : public MemoryChunk {
// Page flags copied from from-space to to-space when flipping semispaces.
static const intptr_t kCopyOnFlipFlagsMask =
static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+ static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
@@ -766,6 +777,10 @@ class Page : public MemoryChunk {
static Page* FromAddress(Address addr) {
return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
}
+ static Page* FromHeapObject(const HeapObject* o) {
+ return reinterpret_cast<Page*>(reinterpret_cast<Address>(o) &
+ ~kAlignmentMask);
+ }
// Returns the page containing the address provided. The address can
  // potentially point right after the page. To also be safe for tagged values
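Several hunks in this patch replace Page::kPageAlignmentMask with a global kPageAlignmentMask; FromAddress and FromHeapObject above rely on the same trick: pages are power-of-two aligned, so masking off the low bits of any interior address yields the page start. A standalone sketch with an illustrative page size, not V8's actual constants:

#include <cstdint>
#include <iostream>

using Address = std::uintptr_t;

constexpr int kPageSizeBits = 19;  // 512 KB pages, chosen only for the sketch
constexpr Address kPageAlignmentMask = (Address{1} << kPageSizeBits) - 1;

// Clearing the low bits of any interior address yields the page start.
Address PageFromAddress(Address addr) { return addr & ~kPageAlignmentMask; }

int main() {
  Address page = Address{7} << kPageSizeBits;  // start of some page
  Address interior = page + 0x1234;            // an object inside that page
  std::cout << std::boolalpha << (PageFromAddress(interior) == page) << "\n";
}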
@@ -1196,7 +1211,7 @@ class SkipList {
}
static inline int RegionNumber(Address addr) {
- return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+ return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
}
static void Update(Address addr, int size) {
@@ -1990,7 +2005,10 @@ class LocalAllocationBuffer {
// Indicates that a buffer cannot be used for allocations anymore. Can result
// from either reassigning a buffer, or trying to construct it from an
// invalid {AllocationResult}.
- static inline LocalAllocationBuffer InvalidBuffer();
+ static LocalAllocationBuffer InvalidBuffer() {
+ return LocalAllocationBuffer(
+ nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
+ }
// Creates a new LAB from a given {AllocationResult}. Results in
// InvalidBuffer if the result indicates a retry.
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
new file mode 100644
index 0000000000..4609c83ca0
--- /dev/null
+++ b/deps/v8/src/heap/store-buffer-inl.h
@@ -0,0 +1,36 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_STORE_BUFFER_INL_H_
+#define V8_HEAP_STORE_BUFFER_INL_H_
+
+#include "src/heap/store-buffer.h"
+
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void StoreBuffer::InsertDeletionIntoStoreBuffer(Address start, Address end) {
+ if (top_ + sizeof(Address) * 2 > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = MarkDeletionAddress(start);
+ top_++;
+ *top_ = end;
+ top_++;
+}
+
+void StoreBuffer::InsertIntoStoreBuffer(Address slot) {
+ if (top_ + sizeof(Address) > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = slot;
+ top_++;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_STORE_BUFFER_INL_H_
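A standalone sketch of the insertion paths defined in store-buffer-inl.h above: slot addresses are appended with a bump pointer after an overflow check, and a deletion occupies two consecutive entries with the first one tagged. The array size, the tag value, and the trivial overflow handler are assumptions for the sketch; the real overflow path flips store buffers and hands the full one off for processing.

#include <cstddef>
#include <cstdint>
#include <iostream>

using Address = std::uintptr_t;

// Sketch of a store buffer: a deletion takes two consecutive entries, the
// first tagged in its low bit (assuming slots are at least 2-byte aligned).
struct StoreBuffer {
  static constexpr std::size_t kEntries = 1024;
  static constexpr Address kDeletionTag = 1;

  Address entries[kEntries];
  Address* top = entries;
  Address* limit = entries + kEntries;

  // The real overflow path switches to the second buffer and schedules the
  // full one for processing; this sketch simply restarts its single buffer.
  void Overflow() { top = entries; }

  void InsertIntoStoreBuffer(Address slot) {
    if (top + 1 > limit) Overflow();
    *top++ = slot;
  }

  void InsertDeletionIntoStoreBuffer(Address start, Address end) {
    if (top + 2 > limit) Overflow();
    *top++ = start | kDeletionTag;
    *top++ = end;
  }
};

int main() {
  StoreBuffer buffer;
  buffer.InsertIntoStoreBuffer(0x1000);
  buffer.InsertDeletionIntoStoreBuffer(0x2000, 0x2040);
  std::cout << (buffer.top - buffer.entries) << " entries\n";  // 3 entries
}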
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index d73e3235c1..b428a82046 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -10,6 +10,7 @@
#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/store-buffer-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/v8.h"
@@ -30,22 +31,28 @@ StoreBuffer::StoreBuffer(Heap* heap)
}
void StoreBuffer::SetUp() {
- // Allocate 3x the buffer size, so that we can start the new store buffer
- // aligned to 2x the size. This lets us use a bit test to detect the end of
- // the area.
+ const size_t requested_size = kStoreBufferSize * kStoreBuffers;
+ // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
+ // use a bit test to detect the ends of the buffers.
+ const size_t alignment =
+ std::max<size_t>(kStoreBufferSize, AllocatePageSize());
+ void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
VirtualMemory reservation;
- if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
- &reservation)) {
+ if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
+ &reservation)) {
heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
+
Address start = reservation.address();
- start_[0] = reinterpret_cast<Address*>(::RoundUp(start, kStoreBufferSize));
+ const size_t allocated_size = reservation.size();
+
+ start_[0] = reinterpret_cast<Address*>(start);
limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
start_[1] = limit_[0];
limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
- Address* vm_limit = reinterpret_cast<Address*>(start + reservation.size());
-
+ // Sanity check the buffers.
+ Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
USE(vm_limit);
for (int i = 0; i < kStoreBuffers; i++) {
DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
@@ -55,8 +62,9 @@ void StoreBuffer::SetUp() {
DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
}
- if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
- kStoreBufferSize * kStoreBuffers,
+ // Set RW permissions only on the pages we use.
+ const size_t used_size = RoundUp(requested_size, CommitPageSize());
+ if (!reservation.SetPermissions(start, used_size,
PageAllocator::kReadWrite)) {
heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
@@ -65,7 +73,6 @@ void StoreBuffer::SetUp() {
virtual_memory_.TakeControl(&reservation);
}
-
void StoreBuffer::TearDown() {
if (virtual_memory_.IsReserved()) virtual_memory_.Free();
top_ = nullptr;
@@ -76,6 +83,48 @@ void StoreBuffer::TearDown() {
}
}
+void StoreBuffer::DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
+ Address end) {
+ DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+ store_buffer->InsertDeletionIntoStoreBuffer(start, end);
+}
+
+void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
+ DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+ store_buffer->InsertIntoStoreBuffer(slot);
+}
+
+void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address start, Address end) {
+  // In GC the store buffer has to be empty at all times.
+ DCHECK(store_buffer->Empty());
+ DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+ Page* page = Page::FromAddress(start);
+ if (end) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Remove(page, start);
+ }
+}
+
+void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address slot) {
+ DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+}
+
+void StoreBuffer::SetMode(StoreBufferMode mode) {
+ mode_ = mode;
+ if (mode == NOT_IN_GC) {
+ insertion_callback = &InsertDuringRuntime;
+ deletion_callback = &DeleteDuringRuntime;
+ } else {
+ insertion_callback = &InsertDuringGarbageCollection;
+ deletion_callback = &DeleteDuringGarbageCollection;
+ }
+}
+
int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->FlipStoreBuffers();
isolate->counters()->store_buffer_overflows()->Increment();
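SetMode, now moved out of the header, switches the insertion and deletion behavior by swapping function pointers once per mode change instead of branching on the mode at every write; InsertEntry then simply dispatches through the stored callback. A rough sketch of that dispatch pattern with invented names:

#include <cstdint>
#include <iostream>

using Address = std::uintptr_t;

struct Dispatcher {
  enum Mode { NOT_IN_GC, IN_GC };

  static void InsertDuringRuntime(Dispatcher*, Address slot) {
    std::cout << "buffered for later: " << slot << "\n";
  }
  static void InsertDuringGC(Dispatcher*, Address slot) {
    std::cout << "applied directly: " << slot << "\n";
  }

  using InsertFn = void (*)(Dispatcher*, Address);
  InsertFn insertion_callback = &InsertDuringRuntime;

  // Swap the callback once per mode change instead of branching on the mode
  // at every single insertion.
  void SetMode(Mode mode) {
    insertion_callback =
        (mode == NOT_IN_GC) ? &InsertDuringRuntime : &InsertDuringGC;
  }

  void InsertEntry(Address slot) { insertion_callback(this, slot); }
};

int main() {
  Dispatcher d;
  d.InsertEntry(0x10);           // runtime path
  d.SetMode(Dispatcher::IN_GC);
  d.InsertEntry(0x20);           // GC path
}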
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index e447b2f74e..d2c0f9b75f 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -34,6 +34,15 @@ class StoreBuffer {
V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
+ static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address start, Address end);
+ static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address slot);
+
+ static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
+ Address end);
+ static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot);
+
explicit StoreBuffer(Heap* heap);
void SetUp();
void TearDown();
@@ -61,6 +70,16 @@ class StoreBuffer {
return address & ~kDeletionTag;
}
+ inline void InsertDeletionIntoStoreBuffer(Address start, Address end);
+ inline void InsertIntoStoreBuffer(Address slot);
+
+ void InsertEntry(Address slot) {
+ // Insertions coming from the GC are directly inserted into the remembered
+ // set. Insertions coming from the runtime are added to the store buffer to
+ // allow concurrent processing.
+ insertion_callback(this, slot);
+ }
+
// If we only want to delete a single slot, end should be set to null which
// will be written into the second field. When processing the store buffer
// the more efficient Remove method will be called in this case.
@@ -71,72 +90,7 @@ class StoreBuffer {
deletion_callback(this, start, end);
}
- static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
- Address start, Address end) {
- // In GC the store buffer has to be empty at any time.
- DCHECK(store_buffer->Empty());
- DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
- Page* page = Page::FromAddress(start);
- if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- } else {
- RememberedSet<OLD_TO_NEW>::Remove(page, start);
- }
- }
-
- static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
- Address end) {
- DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
- store_buffer->InsertDeletionIntoStoreBuffer(start, end);
- }
-
- void InsertDeletionIntoStoreBuffer(Address start, Address end) {
- if (top_ + sizeof(Address) * 2 > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = MarkDeletionAddress(start);
- top_++;
- *top_ = end;
- top_++;
- }
-
- static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
- Address slot) {
- DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
- }
-
- static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
- DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
- store_buffer->InsertIntoStoreBuffer(slot);
- }
-
- void InsertIntoStoreBuffer(Address slot) {
- if (top_ + sizeof(Address) > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = slot;
- top_++;
- }
-
- void InsertEntry(Address slot) {
- // Insertions coming from the GC are directly inserted into the remembered
- // set. Insertions coming from the runtime are added to the store buffer to
- // allow concurrent processing.
- insertion_callback(this, slot);
- }
-
- void SetMode(StoreBufferMode mode) {
- mode_ = mode;
- if (mode == NOT_IN_GC) {
- insertion_callback = &InsertDuringRuntime;
- deletion_callback = &DeleteDuringRuntime;
- } else {
- insertion_callback = &InsertDuringGarbageCollection;
- deletion_callback = &DeleteDuringGarbageCollection;
- }
- }
+ void SetMode(StoreBufferMode mode);
// Used by the concurrent processing thread to transfer entries from the
// store buffer to the remembered set.
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 8b62213cb6..9e622c3385 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -279,7 +279,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(reinterpret_cast<void*>(free_start), 0xCC, size);
+ ZapCode(free_start, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
@@ -319,7 +319,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(reinterpret_cast<void*>(free_start), 0xCC, size);
+ ZapCode(free_start, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(