path: root/deps/v8/src/snapshot/deserializer.cc
author    Michaël Zasso <targos@protonmail.com>  2018-01-24 20:16:06 +0100
committer Myles Borins <mylesborins@google.com>  2018-01-24 15:02:20 -0800
commit    4c4af643e5042d615a60c6bbc05aee9d81b903e5 (patch)
tree      3fb0a97988fe4439ae3ae06f26915d1dcf8cab92 /deps/v8/src/snapshot/deserializer.cc
parent    fa9f31a4fda5a3782c652e56e394465805ebb50f (diff)
deps: update V8 to 6.4.388.40
PR-URL: https://github.com/nodejs/node/pull/17489
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/snapshot/deserializer.cc')
-rw-r--r--  deps/v8/src/snapshot/deserializer.cc | 430
1 file changed, 148 insertions(+), 282 deletions(-)
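
The core of the diff below is a structural refactoring: Deserializer's hand-rolled reservation, chunk, and high-water-mark bookkeeping (DecodeReservation, ReserveSpace, Allocate, current_chunk_, high_water_) is removed, and the class becomes a template, Deserializer<AllocatorT>, that delegates all placement decisions to an allocator object (DefaultDeserializerAllocator or BuiltinDeserializerAllocator, explicitly instantiated at the end of the file). The following is only a minimal sketch of that pattern, using simplified stand-in types rather than the real V8 classes:

// Sketch of the allocator-policy split introduced by this patch.
// BumpAllocator and the members below are illustrative stand-ins,
// not V8 code.
#include <cstddef>
#include <cstdint>
#include <vector>

using Address = std::uint8_t*;

// Stand-in for DefaultDeserializerAllocator: bump-allocates from a
// pre-reserved buffer, the way high_water_ used to advance per space.
class BumpAllocator {
 public:
  explicit BumpAllocator(std::size_t capacity)
      : backing_(capacity), high_water_(backing_.data()) {}

  Address Allocate(std::size_t size) {
    Address result = high_water_;
    high_water_ += size;
    return result;
  }

  // Debug-style check analogous to ReservationsAreFullyUsed().
  bool ReservationsAreFullyUsed() const {
    return high_water_ == backing_.data() + backing_.size();
  }

 private:
  std::vector<std::uint8_t> backing_;
  Address high_water_;
};

// Stand-in for Deserializer<AllocatorT>: object placement is always
// requested through allocator(), never computed inline.
template <class AllocatorT>
class Deserializer {
 public:
  explicit Deserializer(AllocatorT* allocator) : allocator_(allocator) {}

  Address ReadObject(std::size_t size) {
    return allocator()->Allocate(size);  // delegate, as in the real patch
  }

 private:
  AllocatorT* allocator() { return allocator_; }
  AllocatorT* allocator_;
};

// Explicit instantiation keeps the template's member definitions in one
// translation unit, mirroring the two instantiations at the end of the diff.
template class Deserializer<BumpAllocator>;

Presumably the point of the split is that the startup and builtin deserializers can share the byte-stream decoding loop while differing only in how space is reserved and handed out.
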
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 1eb15d6c38..5d7d551c98 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -4,134 +4,18 @@
#include "src/snapshot/deserializer.h"
-#include "src/api.h"
#include "src/assembler-inl.h"
-#include "src/bootstrapper.h"
-#include "src/deoptimizer.h"
-#include "src/external-reference-table.h"
-#include "src/heap/heap-inl.h"
#include "src/isolate.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/snapshot/builtin-deserializer.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/string.h"
+#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/snapshot/natives.h"
-#include "src/snapshot/startup-deserializer.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
-void Deserializer::DecodeReservation(
- Vector<const SerializedData::Reservation> res) {
- DCHECK_EQ(0, reservations_[NEW_SPACE].size());
- STATIC_ASSERT(NEW_SPACE == 0);
- int current_space = NEW_SPACE;
- for (auto& r : res) {
- reservations_[current_space].push_back({r.chunk_size(), NULL, NULL});
- if (r.is_last()) current_space++;
- }
- DCHECK_EQ(kNumberOfSpaces, current_space);
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
-}
-
-void Deserializer::RegisterDeserializedObjectsForBlackAllocation() {
- isolate_->heap()->RegisterDeserializedObjectsForBlackAllocation(
- reservations_, deserialized_large_objects_, allocated_maps_);
-}
-
-bool Deserializer::ReserveSpace() {
-#ifdef DEBUG
- for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
- DCHECK(reservations_[i].size() > 0);
- }
-#endif // DEBUG
- DCHECK(allocated_maps_.empty());
- if (!isolate_->heap()->ReserveSpace(reservations_, &allocated_maps_))
- return false;
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- high_water_[i] = reservations_[i][0].start;
- }
- return true;
-}
-
-// static
-bool Deserializer::ReserveSpace(StartupDeserializer* startup_deserializer,
- BuiltinDeserializer* builtin_deserializer) {
- const int first_space = NEW_SPACE;
- const int last_space = SerializerDeserializer::kNumberOfSpaces;
- Isolate* isolate = startup_deserializer->isolate();
-
- // Create a set of merged reservations to reserve space in one go.
- // The BuiltinDeserializer's reservations are ignored, since our actual
- // requirements vary based on whether lazy deserialization is enabled.
- // Instead, we manually determine the required code-space.
-
- DCHECK(builtin_deserializer->ReservesOnlyCodeSpace());
- Heap::Reservation merged_reservations[kNumberOfSpaces];
- for (int i = first_space; i < last_space; i++) {
- merged_reservations[i] = startup_deserializer->reservations_[i];
- }
-
- Heap::Reservation builtin_reservations =
- builtin_deserializer->CreateReservationsForEagerBuiltins();
- DCHECK(!builtin_reservations.empty());
-
- for (const auto& c : builtin_reservations) {
- merged_reservations[CODE_SPACE].push_back(c);
- }
-
- if (!isolate->heap()->ReserveSpace(merged_reservations,
- &startup_deserializer->allocated_maps_)) {
- return false;
- }
-
- DisallowHeapAllocation no_allocation;
-
- // Distribute the successful allocations between both deserializers.
- // There's nothing to be done here except for code space.
-
- {
- const int num_builtin_reservations =
- static_cast<int>(builtin_reservations.size());
- for (int i = num_builtin_reservations - 1; i >= 0; i--) {
- const auto& c = merged_reservations[CODE_SPACE].back();
- DCHECK_EQ(c.size, builtin_reservations[i].size);
- DCHECK_EQ(c.size, c.end - c.start);
- builtin_reservations[i].start = c.start;
- builtin_reservations[i].end = c.end;
- merged_reservations[CODE_SPACE].pop_back();
- }
-
- builtin_deserializer->InitializeBuiltinsTable(builtin_reservations);
- }
-
- // Write back startup reservations.
-
- for (int i = first_space; i < last_space; i++) {
- startup_deserializer->reservations_[i].swap(merged_reservations[i]);
- }
-
- for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
- startup_deserializer->high_water_[i] =
- startup_deserializer->reservations_[i][0].start;
- builtin_deserializer->high_water_[i] = nullptr;
- }
-
- return true;
-}
-
-bool Deserializer::ReservesOnlyCodeSpace() const {
- for (int space = NEW_SPACE; space < kNumberOfSpaces; space++) {
- if (space == CODE_SPACE) continue;
- const auto& r = reservations_[space];
- for (const Heap::Chunk& c : r)
- if (c.size != 0) return false;
- }
- return true;
-}
-
-void Deserializer::Initialize(Isolate* isolate) {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::Initialize(Isolate* isolate) {
DCHECK_NULL(isolate_);
DCHECK_NOT_NULL(isolate);
isolate_ = isolate;
@@ -150,60 +34,65 @@ void Deserializer::Initialize(Isolate* isolate) {
SerializedData::ComputeMagicNumber(external_reference_table_));
}
-void Deserializer::SortMapDescriptors() {
- for (const auto& address : allocated_maps_) {
- Map* map = Map::cast(HeapObject::FromAddress(address));
- if (map->instance_descriptors()->number_of_descriptors() > 1) {
- map->instance_descriptors()->Sort();
- }
- }
+template <class AllocatorT>
+bool Deserializer<AllocatorT>::IsLazyDeserializationEnabled() const {
+ return FLAG_lazy_deserialization && !isolate()->serializer_enabled();
}
-bool Deserializer::IsLazyDeserializationEnabled() const {
- return FLAG_lazy_deserialization && !isolate()->serializer_enabled();
+template <class AllocatorT>
+void Deserializer<AllocatorT>::Rehash() {
+ DCHECK(can_rehash() || deserializing_user_code());
+ for (const auto& item : to_rehash_) item->RehashBasedOnMap();
}
-Deserializer::~Deserializer() {
+template <class AllocatorT>
+Deserializer<AllocatorT>::~Deserializer() {
#ifdef DEBUG
// Do not perform checks if we aborted deserialization.
if (source_.position() == 0) return;
// Check that we only have padding bytes remaining.
while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
- int chunk_index = current_chunk_[space];
- DCHECK_EQ(reservations_[space].size(), chunk_index + 1);
- DCHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
- }
- DCHECK_EQ(allocated_maps_.size(), next_map_index_);
+ // Check that we've fully used all reserved space.
+ DCHECK(allocator()->ReservationsAreFullyUsed());
#endif // DEBUG
}
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
-void Deserializer::VisitRootPointers(Root root, Object** start, Object** end) {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ // Builtins and bytecode handlers are deserialized in a separate pass by the
+ // BuiltinDeserializer.
+ if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
+
// The space must be new space. Any other space would cause ReadChunk to try
- // to update the remembered using NULL as the address.
- ReadData(start, end, NEW_SPACE, NULL);
+ // to update the remembered using nullptr as the address.
+ ReadData(start, end, NEW_SPACE, nullptr);
}
-void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::Synchronize(
+ VisitorSynchronization::SyncTag tag) {
static const byte expected = kSynchronize;
CHECK_EQ(expected, source_.Get());
- deserializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
}
-void Deserializer::DeserializeDeferredObjects() {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::DeserializeDeferredObjects() {
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
switch (code) {
case kAlignmentPrefix:
case kAlignmentPrefix + 1:
- case kAlignmentPrefix + 2:
- SetAlignment(code);
+ case kAlignmentPrefix + 2: {
+ int alignment = code - (SerializerDeserializer::kAlignmentPrefix - 1);
+ allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
break;
+ }
default: {
int space = code & kSpaceMask;
- DCHECK(space <= kNumberOfSpaces);
- DCHECK(code - space == kNewObject);
+ DCHECK_LE(space, kNumberOfSpaces);
+ DCHECK_EQ(code - space, kNewObject);
HeapObject* object = GetBackReferencedObject(space);
int size = source_.GetInt() << kPointerSizeLog2;
Address obj_address = object->address();
@@ -241,24 +130,33 @@ uint32_t StringTableInsertionKey::ComputeHashField(String* string) {
return string->hash_field();
}
-HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
- if (deserializing_user_code()) {
+template <class AllocatorT>
+HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
+ int space) {
+ if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
if (obj->IsString()) {
+ // Uninitialize hash field as we need to recompute the hash.
String* string = String::cast(obj);
- // Uninitialize hash field as the hash seed may have changed.
string->set_hash_field(String::kEmptyHashField);
+ } else if (obj->NeedsRehashing()) {
+ to_rehash_.push_back(obj);
+ }
+ }
+
+ if (deserializing_user_code()) {
+ if (obj->IsString()) {
+ String* string = String::cast(obj);
if (string->IsInternalizedString()) {
// Canonicalize the internalized string. If it already exists in the
// string table, set it to forward to the existing one.
StringTableInsertionKey key(string);
- String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
- if (canonical == NULL) {
- new_internalized_strings_.push_back(handle(string));
- return string;
- } else {
- string->SetForwardedInternalizedString(canonical);
- return canonical;
- }
+ String* canonical =
+ StringTable::ForwardStringIfExists(isolate_, &key, string);
+
+ if (canonical != nullptr) return canonical;
+
+ new_internalized_strings_.push_back(handle(string));
+ return string;
}
} else if (obj->IsScript()) {
new_scripts_.push_back(handle(Script::cast(obj)));
@@ -290,6 +188,10 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
if (isolate_->external_reference_redirector()) {
accessor_infos_.push_back(AccessorInfo::cast(obj));
}
+ } else if (obj->IsCallHandlerInfo()) {
+ if (isolate_->external_reference_redirector()) {
+ call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
+ }
} else if (obj->IsExternalOneByteString()) {
DCHECK(obj->map() == isolate_->heap()->native_source_string_map());
ExternalOneByteString* string = ExternalOneByteString::cast(obj);
@@ -298,6 +200,21 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
NativesExternalStringResource::DecodeForDeserialization(
string->resource()));
isolate_->heap()->RegisterExternalString(string);
+ } else if (obj->IsJSTypedArray()) {
+ JSTypedArray* typed_array = JSTypedArray::cast(obj);
+ CHECK(typed_array->byte_offset()->IsSmi());
+ int32_t byte_offset = NumberToInt32(typed_array->byte_offset());
+ if (byte_offset > 0) {
+ FixedTypedArrayBase* elements =
+ FixedTypedArrayBase::cast(typed_array->elements());
+ // Must be off-heap layout.
+ DCHECK_NULL(elements->base_pointer());
+
+ void* pointer_with_offset = reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(elements->external_pointer()) +
+ byte_offset);
+ elements->set_external_pointer(pointer_with_offset);
+ }
} else if (obj->IsJSArrayBuffer()) {
JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
// Only fixup for the off-heap case.
@@ -315,61 +232,46 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
if (fta->base_pointer() == nullptr) {
Smi* store_index = reinterpret_cast<Smi*>(fta->external_pointer());
void* backing_store = off_heap_backing_stores_[store_index->value()];
-
fta->set_external_pointer(backing_store);
}
}
- if (FLAG_rehash_snapshot && can_rehash_ && !deserializing_user_code()) {
- if (obj->IsString()) {
- // Uninitialize hash field as we are going to reinitialize the hash seed.
- String* string = String::cast(obj);
- string->set_hash_field(String::kEmptyHashField);
- } else if (obj->IsTransitionArray() &&
- TransitionArray::cast(obj)->number_of_entries() > 1) {
- transition_arrays_.push_back(TransitionArray::cast(obj));
- }
- }
// Check alignment.
DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
return obj;
}
-int Deserializer::MaybeReplaceWithDeserializeLazy(int builtin_id) {
+template <class AllocatorT>
+int Deserializer<AllocatorT>::MaybeReplaceWithDeserializeLazy(int builtin_id) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
- return (IsLazyDeserializationEnabled() && Builtins::IsLazy(builtin_id) &&
- !deserializing_builtins_)
+ return IsLazyDeserializationEnabled() && Builtins::IsLazy(builtin_id)
? Builtins::kDeserializeLazy
: builtin_id;
}
-HeapObject* Deserializer::GetBackReferencedObject(int space) {
+template <class AllocatorT>
+HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
HeapObject* obj;
SerializerReference back_reference =
SerializerReference::FromBitfield(source_.GetInt());
- if (space == LO_SPACE) {
- uint32_t index = back_reference.large_object_index();
- obj = deserialized_large_objects_[index];
- } else if (space == MAP_SPACE) {
- int index = back_reference.map_index();
- DCHECK(index < next_map_index_);
- obj = HeapObject::FromAddress(allocated_maps_[index]);
- } else {
- DCHECK(space < kNumberOfPreallocatedSpaces);
- uint32_t chunk_index = back_reference.chunk_index();
- DCHECK_LE(chunk_index, current_chunk_[space]);
- uint32_t chunk_offset = back_reference.chunk_offset();
- Address address = reservations_[space][chunk_index].start + chunk_offset;
- if (next_alignment_ != kWordAligned) {
- int padding = Heap::GetFillToAlign(address, next_alignment_);
- next_alignment_ = kWordAligned;
- DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
- address += padding;
- }
- obj = HeapObject::FromAddress(address);
+
+ switch (space) {
+ case LO_SPACE:
+ obj = allocator()->GetLargeObject(back_reference.large_object_index());
+ break;
+ case MAP_SPACE:
+ obj = allocator()->GetMap(back_reference.map_index());
+ break;
+ default:
+ obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
+ back_reference.chunk_index(),
+ back_reference.chunk_offset());
+ break;
}
- if (deserializing_user_code() && obj->IsInternalizedString()) {
- obj = String::cast(obj)->GetForwardedInternalizedString();
+
+ if (deserializing_user_code() && obj->IsThinString()) {
+ obj = ThinString::cast(obj)->actual();
}
+
hot_objects_.Add(obj);
return obj;
}
@@ -379,29 +281,14 @@ HeapObject* Deserializer::GetBackReferencedObject(int space) {
// The reason for this strange interface is that otherwise the object is
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
-void Deserializer::ReadObject(int space_number, Object** write_back) {
- Address address;
- HeapObject* obj;
- int size = source_.GetInt() << kObjectAlignmentBits;
-
- if (next_alignment_ != kWordAligned) {
- int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
- address = Allocate(space_number, reserved);
- obj = HeapObject::FromAddress(address);
- // If one of the following assertions fails, then we are deserializing an
- // aligned object when the filler maps have not been deserialized yet.
- // We require filler maps as padding to align the object.
- Heap* heap = isolate_->heap();
- DCHECK(heap->free_space_map()->IsMap());
- DCHECK(heap->one_pointer_filler_map()->IsMap());
- DCHECK(heap->two_pointer_filler_map()->IsMap());
- obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
- address = obj->address();
- next_alignment_ = kWordAligned;
- } else {
- address = Allocate(space_number, size);
- obj = HeapObject::FromAddress(address);
- }
+template <class AllocatorT>
+void Deserializer<AllocatorT>::ReadObject(int space_number,
+ Object** write_back) {
+ const int size = source_.GetInt() << kObjectAlignmentBits;
+
+ Address address =
+ allocator()->Allocate(static_cast<AllocationSpace>(space_number), size);
+ HeapObject* obj = HeapObject::FromAddress(address);
isolate_->heap()->OnAllocationEvent(obj, size);
Object** current = reinterpret_cast<Object**>(address);
@@ -423,46 +310,8 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
#endif // DEBUG
}
-// We know the space requirements before deserialization and can
-// pre-allocate that reserved space. During deserialization, all we need
-// to do is to bump up the pointer for each space in the reserved
-// space. This is also used for fixing back references.
-// We may have to split up the pre-allocation into several chunks
-// because it would not fit onto a single page. We do not have to keep
-// track of when to move to the next chunk. An opcode will signal this.
-// Since multiple large objects cannot be folded into one large object
-// space allocation, we have to do an actual allocation when deserializing
-// each large object. Instead of tracking offset for back references, we
-// reference large objects by index.
-Address Deserializer::Allocate(int space_index, int size) {
- if (space_index == LO_SPACE) {
- AlwaysAllocateScope scope(isolate_);
- LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
- Executability exec = static_cast<Executability>(source_.Get());
- AllocationResult result = lo_space->AllocateRaw(size, exec);
- HeapObject* obj = result.ToObjectChecked();
- deserialized_large_objects_.push_back(obj);
- return obj->address();
- } else if (space_index == MAP_SPACE) {
- DCHECK_EQ(Map::kSize, size);
- return allocated_maps_[next_map_index_++];
- } else {
- DCHECK(space_index < kNumberOfPreallocatedSpaces);
- Address address = high_water_[space_index];
- DCHECK_NOT_NULL(address);
- high_water_[space_index] += size;
-#ifdef DEBUG
- // Assert that the current reserved chunk is still big enough.
- const Heap::Reservation& reservation = reservations_[space_index];
- int chunk_index = current_chunk_[space_index];
- DCHECK_LE(high_water_[space_index], reservation[chunk_index].end);
-#endif
- if (space_index == CODE_SPACE) SkipList::Update(address, size);
- return address;
- }
-}
-
-Object* Deserializer::ReadDataSingle() {
+template <class AllocatorT>
+Object* Deserializer<AllocatorT>::ReadDataSingle() {
Object* o;
Object** start = &o;
Object** end = start + 1;
@@ -474,14 +323,24 @@ Object* Deserializer::ReadDataSingle() {
return o;
}
-bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
- Address current_object_address) {
+static void NoExternalReferencesCallback() {
+ // The following check will trigger if a function or object template
+ // with references to native functions have been deserialized from
+ // snapshot, but no actual external references were provided when the
+ // isolate was created.
+ CHECK_WITH_MSG(false, "No external references provided via API");
+}
+
+template <class AllocatorT>
+bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
+ int source_space,
+ Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
bool write_barrier_needed =
- (current_object_address != NULL && source_space != NEW_SPACE &&
+ (current_object_address != nullptr && source_space != NEW_SPACE &&
source_space != CODE_SPACE);
while (current < limit) {
byte data = source_.Get();
@@ -618,15 +477,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
case kNextChunk: {
int space = source_.Get();
- DCHECK(space < kNumberOfPreallocatedSpaces);
- int chunk_index = current_chunk_[space];
- const Heap::Reservation& reservation = reservations_[space];
- // Make sure the current chunk is indeed exhausted.
- CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
- // Move to next reserved chunk.
- chunk_index = ++current_chunk_[space];
- CHECK_LT(chunk_index, reservation.size());
- high_water_[space] = reservation[chunk_index].start;
+ allocator()->MoveToNextChunk(static_cast<AllocationSpace>(space));
break;
}
@@ -691,10 +542,16 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
current = reinterpret_cast<Object**>(
reinterpret_cast<Address>(current) + skip);
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
- DCHECK_WITH_MSG(reference_id < num_api_references_,
- "too few external references provided through the API");
- Address address = reinterpret_cast<Address>(
- isolate->api_external_references()[reference_id]);
+ Address address;
+ if (isolate->api_external_references()) {
+ DCHECK_WITH_MSG(
+ reference_id < num_api_references_,
+ "too few external references provided through the API");
+ address = reinterpret_cast<Address>(
+ isolate->api_external_references()[reference_id]);
+ } else {
+ address = reinterpret_cast<Address>(NoExternalReferencesCallback);
+ }
memcpy(current, &address, kPointerSize);
current++;
break;
@@ -702,9 +559,11 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
case kAlignmentPrefix:
case kAlignmentPrefix + 1:
- case kAlignmentPrefix + 2:
- SetAlignment(data);
+ case kAlignmentPrefix + 2: {
+ int alignment = data - (SerializerDeserializer::kAlignmentPrefix - 1);
+ allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
break;
+ }
STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
@@ -783,10 +642,13 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
return true;
}
+template <class AllocatorT>
template <int where, int how, int within, int space_number_if_any>
-Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
- Address current_object_address, byte data,
- bool write_barrier_needed) {
+Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
+ Object** current,
+ Address current_object_address,
+ byte data,
+ bool write_barrier_needed) {
bool emit_write_barrier = false;
bool current_was_incremented = false;
int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
@@ -795,7 +657,7 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
ReadObject(space_number, current);
emit_write_barrier = (space_number == NEW_SPACE);
} else {
- Object* new_object = NULL; /* May not be a real Object pointer. */
+ Object* new_object = nullptr; /* May not be a real Object pointer. */
if (where == kNewObject) {
ReadObject(space_number, &new_object);
} else if (where == kBackref) {
@@ -829,13 +691,13 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
new_object = *attached_objects_[index];
emit_write_barrier = isolate->heap()->InNewSpace(new_object);
} else {
- DCHECK(where == kBuiltin);
+ DCHECK_EQ(where, kBuiltin);
int builtin_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
new_object = isolate->builtins()->builtin(builtin_id);
emit_write_barrier = false;
}
if (within == kInnerPointer) {
- DCHECK(how == kFromCode);
+ DCHECK_EQ(how, kFromCode);
if (where == kBuiltin) {
// At this point, new_object may still be uninitialized, thus the
// unchecked Code cast.
@@ -877,5 +739,9 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
return current;
}
+// Explicit instantiation.
+template class Deserializer<BuiltinDeserializerAllocator>;
+template class Deserializer<DefaultDeserializerAllocator>;
+
} // namespace internal
} // namespace v8
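
A second change worth noting in the hunks above: snapshot rehashing is now deferred. PostProcessNewObject only clears string hash fields and records any object for which NeedsRehashing() is true into to_rehash_, and Rehash() later walks that list calling RehashBasedOnMap(). A rough, hypothetical sketch of that record-then-fixup shape (stand-in types, not V8's):

#include <vector>

// Hypothetical stand-in for a heap object whose identity hash depends on
// the hash seed (e.g. a hash table).
struct Rehashable {
  virtual void RehashBasedOnMap() = 0;
  virtual ~Rehashable() = default;
};

class DeserializerSketch {
 public:
  // Called per materialized object, cf. PostProcessNewObject(): objects
  // that need a new hash are only recorded, not fixed up immediately.
  void PostProcess(Rehashable* obj, bool needs_rehashing) {
    if (needs_rehashing) to_rehash_.push_back(obj);
  }

  // Called once after the whole snapshot is read, cf. Rehash(): by then
  // every referenced map and backing store exists, so rehashing is safe.
  void Rehash() {
    for (Rehashable* obj : to_rehash_) obj->RehashBasedOnMap();
  }

 private:
  std::vector<Rehashable*> to_rehash_;
};
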