Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h             16
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc                 75
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc              106
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h                11
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                 219
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc              16
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc            175
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                      134
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc         215
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h           15
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                  46
-rw-r--r--  deps/v8/src/x64/lithium-x64.h                   32
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc          88
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h           25
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc    5
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc              681
16 files changed, 1321 insertions, 538 deletions
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 10f0b886da..f7b87ec041 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -238,12 +238,12 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (IsCodeTarget(rmode_)) {
Assembler::set_target_address_at(pc_, target);
Object* target_code = Code::GetCodeFromTargetAddress(target);
- if (host() != NULL) {
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
@@ -282,11 +282,13 @@ Address* RelocInfo::target_reference_address() {
}
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
- if (host() != NULL && target->IsHeapObject()) {
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
@@ -310,12 +312,14 @@ JSGlobalPropertyCell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+ WriteBarrierMode mode) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
- if (host() != NULL) {
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
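
The hunks above thread a WriteBarrierMode argument through RelocInfo's setters so callers that already know the write barrier is unnecessary can skip notifying the incremental marker. A minimal, self-contained C++ sketch of that guard pattern, using simplified stand-in types rather than the real V8 classes:

    #include <cstdio>

    enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

    struct HostCode {
      void RecordWriteIntoCode() { std::puts("incremental marker notified"); }
    };

    // Mirrors the new guard: the barrier fires only when requested and a host exists.
    void SetTargetObject(HostCode* host,
                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
      // ... the new target pointer would be stored and the icache flushed here ...
      if (mode == UPDATE_WRITE_BARRIER && host != nullptr) {
        host->RecordWriteIntoCode();
      }
    }

    int main() {
      HostCode code;
      SetTargetObject(&code);                      // barrier recorded
      SetTargetObject(&code, SKIP_WRITE_BARRIER);  // barrier deliberately skipped
      return 0;
    }
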
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 79ddb1393e..8baa2f32ff 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -670,7 +670,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ testq(rax, rax);
__ j(not_zero, &done);
__ pop(rbx);
- __ Push(FACTORY->undefined_value());
+ __ Push(masm->isolate()->factory()->undefined_value());
__ push(rbx);
__ incq(rax);
__ bind(&done);
@@ -993,10 +993,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
// Allocate an empty JSArray. The allocated array is put into the result
// register. If the parameter initial_capacity is larger than zero an elements
// backing store is allocated with this size and filled with the hole values.
@@ -1007,9 +1003,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- int initial_capacity,
Label* gc_required) {
- ASSERT(initial_capacity >= 0);
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ STATIC_ASSERT(initial_capacity >= 0);
// Load the initial map from the array function.
__ movq(scratch1, FieldOperand(array_function,
@@ -1033,9 +1029,10 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// result: JSObject
// scratch1: initial map
// scratch2: start of next object
+ Factory* factory = masm->isolate()->factory();
__ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
__ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- FACTORY->empty_fixed_array());
+ factory->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
@@ -1043,7 +1040,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// fixed array.
if (initial_capacity == 0) {
__ Move(FieldOperand(result, JSArray::kElementsOffset),
- FACTORY->empty_fixed_array());
+ factory->empty_fixed_array());
return;
}
@@ -1060,15 +1057,14 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch1: elements array
// scratch2: start of next object
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- FACTORY->fixed_array_map());
+ factory->fixed_array_map());
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
Smi::FromInt(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
- ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- __ Move(scratch3, FACTORY->the_hole_value());
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
@@ -1101,38 +1097,25 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// register elements_array is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
- Register array_size, // As a smi.
+ Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array,
Register elements_array_end,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
- Label not_empty, allocated;
-
// Load the initial map from the array function.
__ movq(elements_array,
FieldOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
- // Check whether an empty sized array is requested.
- __ testq(array_size, array_size);
- __ j(not_zero, &not_empty);
-
- // If an empty array is requested allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
- __ jmp(&allocated);
+ if (FLAG_debug_code) { // Assert that array size is not zero.
+ __ testq(array_size, array_size);
+ __ Assert(not_zero, "array size is unexpectedly 0");
+ }
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
- __ bind(&not_empty);
SmiIndex index =
masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
@@ -1150,9 +1133,9 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array: initial map
// elements_array_end: start of next object
// array_size: size of array (smi)
- __ bind(&allocated);
+ Factory* factory = masm->isolate()->factory();
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, FACTORY->empty_fixed_array());
+ __ Move(elements_array, factory->empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
@@ -1171,16 +1154,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array_end: start of next object
// array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- FACTORY->fixed_array_map());
- Label not_empty_2, fill_array;
- __ SmiTest(array_size);
- __ j(not_zero, &not_empty_2);
- // Length of the FixedArray is the number of pre-allocated elements even
- // though the actual JSArray has length 0.
- __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
- Smi::FromInt(kPreallocatedArrayElements));
- __ jmp(&fill_array);
- __ bind(&not_empty_2);
+ factory->fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
@@ -1189,10 +1163,9 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
- __ bind(&fill_array);
if (fill_with_hole) {
Label loop, entry;
- __ Move(scratch, FACTORY->the_hole_value());
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ lea(elements_array, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ jmp(&entry);
@@ -1222,12 +1195,13 @@ static void AllocateJSArray(MacroAssembler* masm,
// a construct call and a normal call.
static void ArrayNativeCode(MacroAssembler* masm,
Label *call_generic_code) {
- Label argc_one_or_more, argc_two_or_more;
+ Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array;
// Check for array construction with zero arguments.
__ testq(rax, rax);
__ j(not_zero, &argc_one_or_more);
+ __ bind(&empty_array);
// Handle construction of an empty array.
AllocateEmptyJSArray(masm,
rdi,
@@ -1235,7 +1209,6 @@ static void ArrayNativeCode(MacroAssembler* masm,
rcx,
rdx,
r8,
- kPreallocatedArrayElements,
call_generic_code);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->array_function_native(), 1);
@@ -1248,6 +1221,16 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ cmpq(rax, Immediate(1));
__ j(not_equal, &argc_two_or_more);
__ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
+
+ __ SmiTest(rdx);
+ __ j(not_zero, &not_empty_array);
+ __ pop(r8); // Adjust stack.
+ __ Drop(1);
+ __ push(r8);
+ __ movq(rax, Immediate(0)); // Treat this as a call with argc of zero.
+ __ jmp(&empty_array);
+
+ __ bind(&not_empty_array);
__ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
// Handle construction of an empty array of a certain size. Bail out if size
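
In the builtins above, AllocateJSArray no longer special-cases a requested size of zero; ArrayNativeCode now detects new Array(0) up front, reroutes it to the shared AllocateEmptyJSArray path, and a debug-mode assert guards the non-zero invariant. A rough C++ sketch of the resulting control flow (the real code is hand-written x64 assembly; the helpers here are simplified stand-ins):

    #include <cassert>
    #include <cstdio>

    void AllocateEmptyJSArray() {
      std::puts("empty array: preallocated backing store, length 0");
    }

    void AllocateJSArray(int array_size) {
      assert(array_size != 0 && "array size is unexpectedly 0");  // debug-only check
      std::printf("array with %d elements\n", array_size);
    }

    void ArrayNativeCode(int argc, const int* args) {
      if (argc == 0) {                 // new Array()
        AllocateEmptyJSArray();
      } else if (argc == 1) {          // new Array(n)
        if (args[0] == 0) {
          AllocateEmptyJSArray();      // rerouted to the shared empty-array path
        } else {
          AllocateJSArray(args[0]);
        }
      } else {                         // new Array(a, b, ...)
        AllocateJSArray(argc);
      }
    }

    int main() {
      int zero = 0, seven = 7;
      ArrayNativeCode(0, nullptr);
      ArrayNativeCode(1, &zero);
      ArrayNativeCode(1, &seven);
      return 0;
    }
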
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 7d41ffe539..3dfebeec41 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -227,7 +227,12 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// [rsp + (3 * kPointerSize)]: literals array.
// All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int elements_size = 0;
+ if (length_ > 0) {
+ elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length_)
+ : FixedArray::SizeFor(length_);
+ }
int size = JSArray::kSize + elements_size;
// Load boilerplate object into rcx and check if we need to create a
@@ -247,6 +252,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
if (mode_ == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
+ message = "Expected (writable) fixed double array";
+ expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
@@ -280,9 +288,24 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
// Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
+ if (mode_ == CLONE_ELEMENTS) {
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ } else {
+ ASSERT(mode_ == CLONE_DOUBLE_ELEMENTS);
+ int i;
+ for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ while (i < elements_size) {
+ __ movsd(xmm0, FieldOperand(rcx, i));
+ __ movsd(FieldOperand(rdx, i), xmm0);
+ i += kDoubleSize;
+ }
+ ASSERT(i == elements_size);
}
}
@@ -3879,7 +3902,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
}
- __ TryGetFunctionPrototype(rdx, rbx, &slow);
+ __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(rbx, &slow);
@@ -5438,7 +5461,68 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<String> name,
+ Register r0) {
+ // If names of slots in range from 1 to kProbes - 1 for the hash value are
+ // not equal to the name and kProbes-th slot is not used (its name is the
+ // undefined value), it guarantees the hash table doesn't contain the
+ // property. It's true even if some slots represent deleted properties
+ // (their names are the null value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // r0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = r0;
+ // Capacity is smi 2^n.
+ __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
+ __ decl(index);
+ __ and_(index,
+ Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+
+ Register entity_name = r0;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ movq(entity_name, Operand(properties,
+ index,
+ times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
+ __ j(equal, done);
+
+ // Stop if found the property.
+ __ Cmp(entity_name, Handle<String>(name));
+ __ j(equal, miss);
+
+ // Check if the entry name is not a symbol.
+ __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, miss);
+ }
+
+ StringDictionaryLookupStub stub(properties,
+ r0,
+ r0,
+ StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+ __ Push(Handle<Object>(name));
+ __ push(Immediate(name->Hash()));
+ __ CallStub(&stub);
+ __ testq(r0, r0);
+ __ j(not_zero, miss);
+ __ jmp(done);
+}
+
+
+// TODO(kmillikin): Eliminate this function when the stub cache is fully
+// handlified.
+MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
MacroAssembler* masm,
Label* miss,
Label* done,
@@ -5665,6 +5749,15 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ rbx, rdx, rcx, EMIT_REMEMBERED_SET},
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ rdi, rdx, rcx, EMIT_REMEMBERED_SET},
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { rdx, rbx, rdi, EMIT_REMEMBERED_SET},
+ // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { rdx, r11, r15, EMIT_REMEMBERED_SET},
+ // ElementsTransitionGenerator::GenerateDoubleToObject
+ { r11, rax, r15, EMIT_REMEMBERED_SET},
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
};
@@ -5912,7 +6005,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
-
#undef __
} } // namespace v8::internal
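
FastCloneShallowArrayStub above gains a CLONE_DOUBLE_ELEMENTS mode: the FixedDoubleArray header (map and length) is copied word by word, and the payload is copied as raw 64-bit doubles via movsd. A small host-side C++ analogue of that two-phase copy (sizes and layout are simplified assumptions, not the real object layout):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Copy `header_size` bytes of tagged header words, then the remaining
    // payload as 8-byte doubles, mirroring the stub's two loops.
    void CopyDoubleElements(const std::uint8_t* src, std::uint8_t* dst,
                            std::size_t header_size, std::size_t total_size) {
      std::size_t i = 0;
      for (; i < header_size; i += sizeof(void*)) {   // map + length words
        std::memcpy(dst + i, src + i, sizeof(void*));
      }
      for (; i < total_size; i += sizeof(double)) {   // unboxed double payload
        std::memcpy(dst + i, src + i, sizeof(double));
      }
    }
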
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 698ba403cd..ffa3f4d202 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -423,7 +423,16 @@ class StringDictionaryLookupStub: public CodeStub {
void Generate(MacroAssembler* masm);
- MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<String> name,
+ Register r0);
+
+ // TODO(kmillikin): Eliminate this function when the stub cache is fully
+ // handlified.
+ MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
MacroAssembler* masm,
Label* miss,
Label* done,
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index f6102c7c73..4c216e8f2c 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "codegen.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -143,6 +144,224 @@ ModuloFunction CreateModuloFunction() {
#endif
+#undef __
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rbx : target map
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rbx : target map
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // The fail label is only reached below when a new backing store is allocated (COW case).
+ Label allocated, cow_array;
+
+ // Check backing store for COW-ness. In the negative case, we do not have to
+ // allocate a new array, since FixedArray and FixedDoubleArray do not differ
+ // in size.
+ __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+ __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &cow_array);
+ __ movq(r14, r8); // Destination array equals source array.
+
+ __ bind(&allocated);
+ // r8 : source FixedArray
+ // r9 : elements array length
+ // r14: destination FixedDoubleArray
+ // Set backing store's map
+ __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Convert smis to doubles and holes to hole NaNs. The Array's length
+ // remains unchanged.
+ STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
+ STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
+
+ Label loop, entry, convert_hole;
+ __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ // r15: the-hole NaN
+ __ jmp(&entry);
+
+ // Allocate new array if the source array is a COW array.
+ __ bind(&cow_array);
+ __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
+ // Set receiver's backing store.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+ __ movq(r11, r14);
+ __ RecordWriteField(rdx,
+ JSObject::kElementsOffset,
+ r11,
+ r15,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Set backing store's length.
+ __ Integer32ToSmi(r11, r9);
+ __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+ __ jmp(&allocated);
+
+ // Conversion loop.
+ __ bind(&loop);
+ __ decq(r9);
+ __ movq(rbx,
+ FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
+ // r9 : current element's index
+ // rbx: current element (smi-tagged)
+ __ JumpIfNotSmi(rbx, &convert_hole);
+ __ SmiToInteger32(rbx, rbx);
+ __ cvtlsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
+ xmm0);
+ __ jmp(&entry);
+ __ bind(&convert_hole);
+ __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
+ __ bind(&entry);
+ __ testq(r9, r9);
+ __ j(not_zero, &loop);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rbx : target map
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required;
+ __ push(rax);
+
+ __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+ // r8 : source FixedDoubleArray
+ // r9 : number of elements
+ __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
+ // r11: destination FixedArray
+ __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+ __ Integer32ToSmi(r14, r9);
+ __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+
+ // Prepare for conversion loop.
+ __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
+ // rsi: the-hole NaN
+ // rdi: pointer to the-hole
+ __ jmp(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ pop(rax);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ jmp(fail);
+
+ // Box doubles into heap numbers.
+ __ bind(&loop);
+ __ decq(r9);
+ __ movq(r14, FieldOperand(r8,
+ r9,
+ times_pointer_size,
+ FixedDoubleArray::kHeaderSize));
+ // r9 : current element's index
+ // r14: current element
+ __ cmpq(r14, rsi);
+ __ j(equal, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(rax, r15, &gc_required);
+ // rax: new heap number
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ movq(FieldOperand(r11,
+ r9,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ rax);
+ __ movq(r15, r9);
+ __ RecordWriteArray(r11,
+ rax,
+ r15,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&entry, Label::kNear);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ movq(FieldOperand(r11,
+ r9,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ rdi);
+
+ __ bind(&entry);
+ __ testq(r9, r9);
+ __ j(not_zero, &loop);
+
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+ __ RecordWriteField(rdx,
+ JSObject::kElementsOffset,
+ r11,
+ r15,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(rax);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
#undef __
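
GenerateSmiOnlyToDouble above walks the backing store backwards, converting each smi to a double and writing the canonical hole NaN for holes, while GenerateDoubleToObject boxes each double into a fresh HeapNumber. A conceptual C++ rendering of the smi-to-double loop (the hole bit pattern and element encoding here are illustrative, not V8's actual values):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <optional>
    #include <vector>

    // Illustrative stand-in for V8's kHoleNanInt64 (the real constant differs).
    const std::uint64_t kHoleNanBits = 0x7FF7FFFFFFFFFFFFull;

    // src[i] == nullopt models a hole in the smi-only backing store.
    std::vector<std::uint64_t> SmiOnlyToDouble(
        const std::vector<std::optional<std::int32_t>>& src) {
      std::vector<std::uint64_t> dst(src.size());
      for (std::size_t i = src.size(); i-- > 0;) {     // backwards, like the stub
        if (src[i].has_value()) {
          double d = static_cast<double>(*src[i]);     // cvtlsi2sd in the stub
          std::memcpy(&dst[i], &d, sizeof d);
        } else {
          dst[i] = kHoleNanBits;                       // the-hole NaN
        }
      }
      return dst;
    }
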
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index b7e334ee75..d0a052b42d 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -258,16 +258,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
- RelocInfo rinfo(call_target_address,
- RelocInfo::CODE_TARGET,
- 0,
- unoptimized_code);
- unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- unoptimized_code, &rinfo, replacement_code);
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
}
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@@ -282,8 +279,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
- check_code->GetHeap()->incremental_marking()->
- RecordCodeTargetPatch(call_target_address, check_code);
+
+ check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, check_code);
}
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index b5c5fc5e71..bf640dbcb1 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -254,7 +254,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
+ VariableProxy* proxy = scope()->function();
+ ASSERT(proxy->var()->mode() == CONST ||
+ proxy->var()->mode() == CONST_HARMONY);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -684,6 +687,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
Variable* variable = proxy->var();
+ bool binding_needs_init =
+ mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
++(*global_count);
@@ -695,7 +700,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ movq(StackOperand(variable), result_register());
- } else if (mode == CONST || mode == LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(StackOperand(variable), kScratchRegister);
@@ -728,7 +733,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == CONST || mode == LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
@@ -741,9 +746,13 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
__ push(rsi);
__ Push(variable->name());
- // Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == VAR || mode == CONST || mode == LET);
- PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
+ PropertyAttributes attr =
+ (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
__ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -751,7 +760,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
// must not destroy the current value.
if (function != NULL) {
VisitForStackValue(function);
- } else if (mode == CONST || mode == LET) {
+ } else if (binding_needs_init) {
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
@@ -890,11 +899,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert);
__ push(rax);
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
+ __ j(below_equal, &call_runtime);
+
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next, call_runtime;
+ Label next;
Register empty_fixed_array_value = r8;
__ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Register empty_descriptor_array_value = r9;
@@ -970,9 +985,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&loop);
// We got a fixed array in register rax. Iterate through that.
+ Label non_proxy;
__ bind(&fixed_array);
- __ Push(Smi::FromInt(0)); // Map (0) - force slow check.
- __ push(rax);
+ __ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
+ __ j(above, &non_proxy);
+ __ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ push(rbx); // Smi
+ __ push(rax); // Array
__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
@@ -991,17 +1014,22 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
index.scale,
FixedArray::kHeaderSize));
- // Get the expected map from the stack or a zero map in the
+ // Get the expected map from the stack or a smi in the
// permanent slow case into register rdx.
__ movq(rdx, Operand(rsp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
+ // If not, we may have to filter the key.
Label update_each;
__ movq(rcx, Operand(rsp, 4 * kPointerSize));
__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ __ Cmp(rdx, Smi::FromInt(0));
+ __ j(equal, &update_each, Label::kNear);
+
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
@@ -1055,7 +1083,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(info->strict_mode_flag());
__ Push(info);
__ CallStub(&stub);
} else {
@@ -1085,7 +1113,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1099,7 +1127,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1145,7 +1173,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1182,12 +1210,14 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST || local->mode() == LET) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
if (local->mode() == CONST) {
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET
+ } else { // LET || CONST_HARMONY
__ Push(var->name());
__ CallRuntime(Runtime::kThrowReferenceError, 1);
}
@@ -1221,7 +1251,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::LOCAL:
case Variable::CONTEXT: {
Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
- if (var->mode() != LET && var->mode() != CONST) {
+ if (!var->binding_needs_init()) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
@@ -1229,10 +1259,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(rax, var);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET) {
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
__ Push(var->name());
__ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else { // CONST
+ } else {
+ // Uninitialized const bindings outside of harmony mode read as undefined.
+ ASSERT(var->mode() == CONST);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
__ bind(&done);
@@ -1417,12 +1451,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_elements());
- if (expr->constant_elements()->map() ==
+ __ Push(constant_elements);
+ if (constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1433,8 +1473,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ constant_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
@@ -1459,22 +1505,59 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Store the subexpression value in the array's elements.
__ movq(r8, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rdi, FieldOperand(r8, JSObject::kMapOffset));
__ movq(rbx, FieldOperand(r8, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(FieldOperand(rbx, offset), result_register());
- Label no_map_change;
- __ JumpIfSmi(result_register(), &no_map_change);
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+ __ CheckFastElements(rdi, &double_elements);
+
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(result_register(), &smi_element);
+ __ CheckFastSmiOnlyElements(rdi, &fast_elements);
+
+ // Storing into the array literal requires an elements transition. Call into
+ // the runtime.
+ __ bind(&slow_elements);
+ __ push(r8); // Copy of array literal.
+ __ Push(Smi::FromInt(i));
+ __ push(result_register());
+ __ Push(Smi::FromInt(NONE)); // PropertyAttributes
+ __ Push(Smi::FromInt(strict_mode_flag())); // Strict mode.
+ __ CallRuntime(Runtime::kSetProperty, 5);
+ __ jmp(&element_done);
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+ __ movq(rcx, Immediate(i));
+ __ StoreNumberToDoubleElements(result_register(),
+ rbx,
+ rcx,
+ xmm0,
+ &slow_elements);
+ __ jmp(&element_done);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ movq(FieldOperand(rbx, offset), result_register());
// Update the write barrier for the array store.
__ RecordWriteField(rbx, offset, result_register(), rcx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
- __ CheckFastSmiOnlyElements(rdi, &no_map_change, Label::kNear);
- __ push(r8);
- __ CallRuntime(Runtime::kNonSmiElementStored, 1);
- __ bind(&no_map_change);
+ __ jmp(&element_done);
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ movq(FieldOperand(rbx, offset), result_register());
+ // Fall through
+
+ __ bind(&element_done);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1805,8 +1888,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
}
- } else if (var->mode() != CONST) {
- // Assignment to var or initializing assignment to let.
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, rcx);
if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -2657,9 +2741,12 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
__ PrepareCallCFunction(1);
#ifdef _WIN64
- __ LoadAddress(rcx, ExternalReference::isolate_address());
+ __ movq(rcx, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+
#else
- __ LoadAddress(rdi, ExternalReference::isolate_address());
+ __ movq(rdi, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
#endif
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
@@ -3997,33 +4084,25 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
case Token::EQ:
cc = equal;
- __ pop(rdx);
break;
case Token::LT:
cc = less;
- __ pop(rdx);
break;
case Token::GT:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = less;
- __ movq(rdx, result_register());
- __ pop(rax);
+ cc = greater;
break;
case Token::LTE:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = greater_equal;
- __ movq(rdx, result_register());
- __ pop(rax);
+ cc = less_equal;
break;
case Token::GTE:
cc = greater_equal;
- __ pop(rdx);
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
+ __ pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
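
The full-codegen changes above extend hole-initialization to harmony const: LET, CONST and CONST_HARMONY bindings are pre-initialized with the hole, and reading the hole either yields undefined (legacy const) or throws a reference error (let / harmony const). A compact sketch of those two rules (types and values are stand-ins, not V8's):

    #include <optional>
    #include <stdexcept>

    enum class VariableMode { VAR, CONST, CONST_HARMONY, LET };

    // Mirrors binding_needs_init in EmitDeclaration.
    bool BindingNeedsInit(VariableMode mode) {
      return mode == VariableMode::CONST ||
             mode == VariableMode::CONST_HARMONY ||
             mode == VariableMode::LET;
    }

    // Mirrors the read barrier in EmitVariableLoad; nullopt plays "undefined".
    std::optional<int> LoadBinding(VariableMode mode, bool holds_the_hole,
                                   int stored_value) {
      if (!holds_the_hole) return stored_value;
      if (mode == VariableMode::LET || mode == VariableMode::CONST_HARMONY) {
        throw std::runtime_error("ReferenceError: uninitialized binding");
      }
      return std::nullopt;  // legacy const reads as undefined
    }
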
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 27a96674cf..e8ab06cdab 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -712,12 +712,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Writing a non-smi, check whether array allows non-smi elements.
// r9: receiver's map
__ CheckFastObjectElements(r9, &slow, Label::kNear);
- __ lea(rcx,
- FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- __ movq(rdx, rax);
- __ RecordWrite(
- rbx, rcx, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ movq(rdx, rax); // Preserve the value which is returned.
+ __ RecordWriteArray(
+ rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
__ bind(&fast_double_with_map_check);
@@ -736,10 +735,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// rcx : function name
// rdx : receiver
@@ -749,7 +748,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
- extra_ic_state,
+ extra_state,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
@@ -822,7 +821,7 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
// The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -849,10 +848,10 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -910,7 +909,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
@@ -942,39 +941,6 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
@@ -1102,27 +1068,12 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ JumpIfSmi(rcx, &miss);
Condition cond = masm->IsObjectStringType(rcx, rax, rax);
__ j(NegateCondition(cond), &miss);
- GenerateCallNormal(masm, argc);
+ CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -1602,6 +1553,51 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
}
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rbx : target map
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // Must return the modified receiver in rax.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ __ movq(rax, rdx);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ pop(rbx);
+ __ push(rdx);
+ __ push(rbx); // return address
+ __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rbx : target map
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // Must return the modified receiver in rax.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ __ movq(rax, rdx);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ pop(rbx);
+ __ push(rdx);
+ __ push(rbx); // return address
+ __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
#undef __
@@ -1613,11 +1609,9 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
case Token::LT:
return less;
case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return less;
+ return greater;
case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return greater_equal;
+ return less_equal;
case Token::GTE:
return greater_equal;
default:
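
Both the full-codegen and IC hunks above stop swapping the operands for Token::GT and Token::LTE: the comparison stubs now receive left and right in their original order and use the direct conditions greater / less_equal. The resulting mapping, as a small sketch (enum values are stand-ins for the assembler's condition codes):

    enum Condition { equal, less, less_equal, greater, greater_equal };
    enum class Token { EQ, EQ_STRICT, LT, GT, LTE, GTE };

    Condition ComputeCondition(Token op) {
      switch (op) {
        case Token::EQ:
        case Token::EQ_STRICT: return equal;
        case Token::LT:        return less;
        case Token::GT:        return greater;     // was: swap operands, use less
        case Token::LTE:       return less_equal;  // was: swap operands, use greater_equal
        case Token::GTE:       return greater_equal;
      }
      return equal;  // unreachable
    }
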
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 45aaad7549..38a8c18be3 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -374,6 +374,12 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ return value->Number();
+}
+
+
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
Handle<Object> literal = chunk_->LookupLiteral(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
@@ -1526,39 +1532,51 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- if (left->IsRegister()) {
- __ cmpl(ToRegister(left), Immediate(value));
- } else {
- __ cmpl(ToOperand(left), Immediate(value));
- }
- } else if (right->IsRegister()) {
- __ cmpl(ToRegister(left), ToRegister(right));
- } else {
- __ cmpl(ToRegister(left), ToOperand(right));
- }
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block =
+ EvalComparison(instr->op(), left_val, right_val) ? true_block
+ : false_block;
+ EmitGoto(next_block);
} else {
- EmitCmpI(left, right);
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the false block.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ int32_t value;
+ if (right->IsConstantOperand()) {
+ value = ToInteger32(LConstantOperand::cast(right));
+ __ cmpl(ToRegister(left), Immediate(value));
+ } else if (left->IsConstantOperand()) {
+ value = ToInteger32(LConstantOperand::cast(left));
+ if (right->IsRegister()) {
+ __ cmpl(ToRegister(right), Immediate(value));
+ } else {
+ __ cmpl(ToOperand(right), Immediate(value));
+ }
+ // We transposed the operands. Reverse the condition.
+ cc = ReverseCondition(cc);
+ } else {
+ if (right->IsRegister()) {
+ __ cmpl(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmpl(ToRegister(left), ToOperand(right));
+ }
+ }
+ }
+ EmitBranch(true_block, false_block, cc);
}
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- EmitBranch(true_block, false_block, cc);
}
@@ -1979,9 +1997,6 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
Label true_value, done;
__ testq(rax, rax);
__ j(condition, &true_value, Label::kNear);
@@ -2055,19 +2070,24 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ movq(Operand(address, 0), value);
- Label smi_store;
- __ JumpIfSmi(value, &smi_store, Label::kNear);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ Label smi_store;
+ HType type = instr->hydrogen()->value()->type();
+ if (!type.IsHeapNumber() && !type.IsString() && !type.IsNonPrimitive()) {
+ __ JumpIfSmi(value, &smi_store, Label::kNear);
+ }
- int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag;
- __ lea(object, Operand(address, -offset));
- // Cells are always in the remembered set.
- __ RecordWrite(object,
- address,
- value,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ bind(&smi_store);
+ int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag;
+ __ lea(object, Operand(address, -offset));
+ // Cells are always in the remembered set.
+ __ RecordWrite(object,
+ address,
+ value,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ bind(&smi_store);
+ }
}
@@ -2094,10 +2114,19 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
__ movq(ContextOperand(context, instr->slot_index()), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
Register scratch = ToRegister(instr->TempAt(0));
- __ RecordWriteContextSlot(context, offset, value, scratch, kSaveFPRegs);
+ __ RecordWriteContextSlot(context,
+ offset,
+ value,
+ scratch,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -2118,7 +2147,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name) {
- LookupResult lookup;
+ LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
ASSERT(lookup.IsProperty() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@@ -2561,7 +2590,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -3061,21 +3090,36 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->is_in_object()) {
__ movq(FieldOperand(object, offset), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object, offset, value, temp, kSaveFPRegs);
+ __ RecordWriteField(object,
+ offset,
+ value,
+ temp,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
__ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(FieldOperand(temp, offset), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWriteField(temp, offset, value, object, kSaveFPRegs);
+ __ RecordWriteField(temp,
+ offset,
+ value,
+ object,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
}
@@ -3182,12 +3226,20 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key, FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value, kSaveFPRegs);
+ __ RecordWrite(elements,
+ key,
+ value,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -3223,6 +3275,47 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register new_map_reg = ToRegister(instr->new_map_reg());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
+
+ Label not_applicable;
+ __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+ __ j(not_equal, &not_applicable);
+ __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ // Write barrier.
+ ASSERT_NE(instr->temp_reg(), NULL);
+ __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+ ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(rdx));
+ ASSERT(new_map_reg.is(rbx));
+ __ movq(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+ RelocInfo::CODE_TARGET, instr);
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(rdx));
+ ASSERT(new_map_reg.is(rbx));
+ __ movq(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+ RelocInfo::CODE_TARGET, instr);
+ } else {
+ UNREACHABLE();
+ }
+ __ bind(&not_applicable);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
@@ -3825,6 +3918,11 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+ Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
// Setup the parameters to the stub/runtime call.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
@@ -3845,7 +3943,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ constant_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3934,8 +4034,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(
- shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(shared_info->strict_mode_flag());
__ Push(shared_info);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -3975,12 +4074,11 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
+ Condition final_branch_condition =
+ EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+ if (final_branch_condition != no_condition) {
+ EmitBranch(true_block, false_block, final_branch_condition);
+ }
}
@@ -4048,7 +4146,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = zero;
} else {
- final_branch_condition = never;
__ jmp(false_label);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 106d7bb2e5..f3cb667973 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -77,6 +77,7 @@ class LCodeGen BASE_EMBEDDED {
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
@@ -125,8 +126,8 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- int strict_mode_flag() const {
- return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+ StrictModeFlag strict_mode_flag() const {
+ return info()->strict_mode_flag();
}
LChunk* chunk() const { return chunk_; }
@@ -190,9 +191,8 @@ class LCodeGen BASE_EMBEDDED {
int argc,
LInstruction* instr);
-
// Generate a direct call to a known function. Expects the function
- // to be in edi.
+ // to be in rdi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
@@ -251,7 +251,6 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
XMMRegister result,
bool deoptimize_on_undefined,
@@ -260,8 +259,10 @@ class LCodeGen BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name);
+ Condition EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index a67a59320e..0af2ce4fc4 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -447,6 +447,12 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LInstructionGap* gap = new LInstructionGap(block);
int index = -1;
@@ -1396,12 +1402,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
- LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1413,15 +1417,22 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left;
+ LOperand* right;
+ if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+ left = UseRegisterOrConstantAtStart(instr->left());
+ right = UseRegisterOrConstantAtStart(instr->right());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
return new LCmpIDAndBranch(left, right);
}
}
@@ -1956,6 +1967,27 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LOperand* temp_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, temp_reg);
+ return DefineSameAsFirst(result);
+ } else {
+ LOperand* object = UseFixed(instr->object(), rax);
+ LOperand* fixed_object_reg = FixedTemp(rdx);
+ LOperand* new_map_reg = FixedTemp(rbx);
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index d43a86a9a5..20a69373c1 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -162,6 +162,7 @@ class LCodeGen;
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
+ V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -1260,7 +1261,6 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
- int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -1277,7 +1277,9 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
@@ -1551,7 +1553,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
@@ -1571,7 +1572,8 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ bool strict_mode() { return strict_mode_flag() == kStrictMode; }
};
@@ -1660,6 +1662,30 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
};
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* new_map_temp,
+ LOperand* temp_reg) {
+ inputs_[0] = object;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp_reg;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_reg() { return temps_[0]; }
+ LOperand* temp_reg() { return temps_[1]; }
+ Handle<Map> original_map() { return hydrogen()->original_map(); }
+ Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
class LStringAdd: public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 7fe6d5821e..e3d463400f 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -55,7 +55,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate->heap()->roots_address());
+ reinterpret_cast<Address>(isolate->heap()->roots_array_start());
intptr_t delta = other.address() - roots_register_value;
return delta;
}
@@ -326,6 +326,40 @@ void MacroAssembler::RecordWriteField(
}
+void MacroAssembler::RecordWriteArray(Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Array access: calculate the destination address. Index is not a smi.
+ Register dst = index;
+ lea(dst, Operand(object, index, times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+
+ RecordWrite(
+ object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+ bind(&done);
+
+ // Zap the clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
+}
+
+
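A hypothetical call site, to show the contract the new helper expects: the element has already been stored, index is an untagged integer, and both value and index are dead afterwards (they are zapped under --debug-code). Illustrative only, not code from this patch:

    __ movq(FieldOperand(elements, index, times_pointer_size,
                         FixedArray::kHeaderSize),
            value);
    __ RecordWriteArray(elements, value, index, kSaveFPRegs,
                        EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
    // elements still holds the array; value and index must not be reused.
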
void MacroAssembler::RecordWrite(Register object,
Register address,
Register value,
@@ -2317,6 +2351,13 @@ void MacroAssembler::Test(const Operand& src, Smi* source) {
}
+void MacroAssembler::TestBit(const Operand& src, int bits) {
+ int byte_offset = bits / kBitsPerByte;
+ int bit_in_byte = bits & (kBitsPerByte - 1);
+ testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+}
+
+
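TestBit reaches a single flag with a one-byte testb instead of loading the whole word; the bit index is split into a byte offset and a bit position. Worked example:

    // bits = 35, kBitsPerByte = 8:
    //   byte_offset = 35 / 8       = 4
    //   bit_in_byte = 35 & (8 - 1) = 3
    //   => testb(Operand(src, 4), Immediate(1 << 3))
    // This is what the new bound-function check later in this diff relies on
    // when it tests SharedFunctionInfo::kBoundFunction in the compiler hints.
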
void MacroAssembler::Jump(ExternalReference ext) {
LoadAddress(kScratchRegister, ext);
jmp(kScratchRegister);
@@ -2683,7 +2724,7 @@ void MacroAssembler::CheckFastSmiOnlyElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(
Register maybe_number,
Register elements,
- Register key,
+ Register index,
XMMRegister xmm_scratch,
Label* fail) {
Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
@@ -2704,7 +2745,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&not_nan);
movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
xmm_scratch);
jmp(&done);
@@ -2727,7 +2768,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
xmm_scratch);
bind(&done);
}
@@ -2866,7 +2907,8 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
- Label* miss) {
+ Label* miss,
+ bool miss_on_bound_function) {
// Check that the receiver isn't a smi.
testl(function, Immediate(kSmiTagMask));
j(zero, miss);
@@ -2875,6 +2917,17 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
CmpObjectType(function, JS_FUNCTION_TYPE, result);
j(not_equal, miss);
+ if (miss_on_bound_function) {
+ movq(kScratchRegister,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
+ // field).
+ TestBit(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kCompilerHintsOffset),
+ SharedFunctionInfo::kBoundFunction);
+ j(not_zero, miss);
+ }
+
// Make sure that the function has an instance prototype.
Label non_instance;
testb(FieldOperand(result, Map::kBitFieldOffset),
@@ -3067,29 +3120,16 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
- ASSERT(function->is_compiled());
// Get the function and setup the context.
Move(rdi, Handle<JSFunction>(function));
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- if (V8::UseCrankshaft()) {
- // Since Crankshaft can recompile a function, we need to load
- // the Code object every time we call the function.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
- } else {
- // Invoke the cached code.
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(code,
- expected,
- actual,
- RelocInfo::CODE_TARGET,
- flag,
- call_wrapper,
- call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 7e0ba00546..f5f81b1316 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -256,8 +256,8 @@ class MacroAssembler: public Assembler {
// Notify the garbage collector that we wrote a pointer into a fixed array.
// |array| is the array being stored into, |value| is the
- // object being stored. |index| is the array index represented as a
- // Smi. All registers are clobbered by the operation RecordWriteArray
+ // object being stored. |index| is the array index represented as a non-smi.
+ // All registers are clobbered by the operation. RecordWriteArray
// filters out smis so it does not update the write barrier if the
// value is a smi.
void RecordWriteArray(
@@ -319,9 +319,9 @@ class MacroAssembler: public Assembler {
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void InitializeRootRegister() {
- ExternalReference roots_address =
- ExternalReference::roots_address(isolate());
- movq(kRootRegister, roots_address);
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ movq(kRootRegister, roots_array_start);
addq(kRootRegister, Immediate(kRootRegisterBias));
}
@@ -726,6 +726,7 @@ class MacroAssembler: public Assembler {
void Push(Smi* smi);
void Test(const Operand& dst, Smi* source);
+
// ---------------------------------------------------------------------------
// String macros.
@@ -771,6 +772,9 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
+ // Bit-field support.
+ void TestBit(const Operand& dst, int bit_index);
+
// Handle support
void Move(Register dst, Handle<Object> source);
void Move(const Operand& dst, Handle<Object> source);
@@ -860,12 +864,12 @@ class MacroAssembler: public Assembler {
Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
- // Note that key must not be smi-tagged.
+ // FastDoubleElements. If it can, store it at the index specified by |index| in
+ // the FastDoubleElements array elements; otherwise jump to fail. Note that
+ // |index| must not be smi-tagged.
void StoreNumberToDoubleElements(Register maybe_number,
Register elements,
- Register key,
+ Register index,
XMMRegister xmm_scratch,
Label* fail);
@@ -1074,7 +1078,8 @@ class MacroAssembler: public Assembler {
// clobbered.
void TryGetFunctionPrototype(Register function,
Register result,
- Label* miss);
+ Label* miss,
+ bool miss_on_bound_function = false);
// Generates code for reporting that an illegal operation has
// occurred.
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 55fabc0036..1e0cd6a38c 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1248,6 +1248,11 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+ // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;
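
The new branch covers the case where the characters did not move but the subject String object itself was replaced. A concrete illustration of what "short-circuiting" means here (assumed GC behaviour for a cons string whose right-hand side is empty; not code from the patch):

    // before GC:  subject --> ConsString(first = "abcdef", second = "")
    // after GC:   subject --> "abcdef"   (the first child directly)
    // start_address of the flat characters is unchanged, so the earlier
    // "string moved" test does not fire, but the String* cached in the
    // regexp frame at kInputString is stale and must be refreshed.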
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index c4b2672f60..8af1bf2c4c 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -82,7 +82,55 @@ static void ProbeTable(Isolate* isolate,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<String> name,
+ Register r0,
+ Register r1) {
+ ASSERT(name->IsSymbol());
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1);
+
+ __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ __ testb(FieldOperand(r0, Map::kBitFieldOffset),
+ Immediate(kInterceptorOrAccessCheckNeededMask));
+ __ j(not_zero, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, miss_label);
+
+ // Load properties array.
+ Register properties = r0;
+ __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Check that the properties array is a dictionary.
+ __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(not_equal, miss_label);
+
+ Label done;
+ StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ r1);
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1);
+}
+
+
+// TODO(kmillikin): Eliminate this function when the stub cache is fully
+// handlified.
+MUST_USE_RESULT static MaybeObject* TryGenerateDictionaryNegativeLookup(
MacroAssembler* masm,
Label* miss_label,
Register receiver,
@@ -118,7 +166,7 @@ MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
__ j(not_equal, miss_label);
Label done;
- MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+ MaybeObject* result = StringDictionaryLookupStub::TryGenerateNegativeLookup(
masm,
miss_label,
&done,
@@ -312,8 +360,10 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
+ Register dst,
+ Register src,
+ Handle<JSObject> holder,
+ int index) {
// Adjust for the number of properties stored in the holder.
index -= holder->map()->inobject_properties();
if (index < 0) {
@@ -700,15 +750,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code = (kind == Code::LOAD_IC)
+ ? masm->isolate()->builtins()->LoadIC_Miss()
+ : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -723,9 +768,9 @@ void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
int index,
- Map* transition,
+ Handle<Map> transition,
Register receiver_reg,
Register name_reg,
Register scratch,
@@ -748,12 +793,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ pop(scratch); // Return address.
__ push(receiver_reg);
- __ Push(Handle<Map>(transition));
+ __ Push(transition);
__ push(rax);
__ push(scratch);
__ TailCallExternalReference(
@@ -764,11 +809,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (transition != NULL) {
+ if (!transition.is_null()) {
// Update the map of the object; no write barrier updating is
// needed because the map is never in new space.
- __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(transition));
+ __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset), transition);
}
// Adjust for the number of properties stored in the object. Even in the
@@ -808,7 +852,24 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ Move(scratch, cell);
+ __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, miss);
+}
+
+
+// TODO(kmillikin): Eliminate this function when the stub cache is fully
+// handlified.
+MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCell(
MacroAssembler* masm,
GlobalObject* global,
String* name,
@@ -828,10 +889,172 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
}
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
+ if (current->IsGlobalObject()) {
+ GenerateCheckPropertyCell(masm,
+ Handle<GlobalObject>::cast(current),
+ name,
+ scratch,
+ miss);
+ }
+ current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
+ }
+}
+
+
+// TODO(kmillikin): Eliminate this function when the stub cache is fully
+// handlified.
+MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCells(
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ Register scratch,
+ Label* miss) {
+ JSObject* current = object;
+ while (current != holder) {
+ if (current->IsGlobalObject()) {
+ // Returns a cell or a failure.
+ MaybeObject* result = TryGenerateCheckPropertyCell(
+ masm,
+ GlobalObject::cast(current),
+ name,
+ scratch,
+ miss);
+ if (result->IsFailure()) return result;
+ }
+ ASSERT(current->IsJSObject());
+ current = JSObject::cast(current->GetPrototype());
+ }
+ return NULL;
+}
+
+
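This file is mid-way through handlification, so several helpers now exist twice: a handle-based version that cannot fail, and a Try* twin that still returns MaybeObject* for callers that have not been converted. The naming pattern, summarised from the pairs in this diff (not an exhaustive list):

    // GenerateDictionaryNegativeLookup(masm, miss, receiver, Handle<String> name, r0, r1)
    //   -> void; allocation goes through handles, so no failure propagation.
    // TryGenerateDictionaryNegativeLookup(...)
    //   -> MUST_USE_RESULT MaybeObject*; callers must check IsFailure() and
    //      typically call set_failure(Failure::cast(result)), as the raw
    //      CheckPrototypes below still does.
    // The same split exists for GenerateCheckPropertyCell(s) and GenerateMissBranch.
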
#undef __
#define __ ACCESS_MASM((masm()))
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Handle<String> name,
+ int save_at_depth,
+ Label* miss) {
+ // Make sure there's no overlap between holder and object registers.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg. On the first
+ // iteration, reg is an alias for object_reg, on later iterations,
+ // it is an alias for holder_reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ __ movq(Operand(rsp, kPointerSize), object_reg);
+ }
+
+ // Check the maps in the prototype chain.
+ // Traverse the prototype chain from the object and do map checks.
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+ Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
+ if (!current->HasFastProperties() &&
+ !current->IsJSGlobalObject() &&
+ !current->IsJSGlobalProxy()) {
+ if (!name->IsSymbol()) {
+ name = factory()->LookupSymbol(name);
+ }
+ ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ StringDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
+
+ __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ bool in_new_space = heap()->InNewSpace(*prototype);
+ Handle<Map> current_map(current->map());
+ if (in_new_space) {
+ // Save the map in scratch1 for later.
+ __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ Cmp(scratch1, current_map);
+ } else {
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), current_map);
+ }
+ // Branch on the result of the map check.
+ __ j(not_equal, miss);
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ }
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (in_new_space) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ Move(reg, prototype);
+ }
+ }
+
+ if (save_at_depth == depth) {
+ __ movq(Operand(rsp, kPointerSize), reg);
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ }
+ ASSERT(current.is_identical_to(holder));
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ // Check the holder map.
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
+ __ j(not_equal, miss);
+
+ // Perform security check for access to the global object.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+
+ // If we've skipped any global objects, it's not enough to verify that
+ // their maps haven't changed. We also need to check that the property
+ // cell for the property is still empty.
+ GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
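For context, the handlified CheckPrototypes is consumed exactly like the raw-pointer version that follows: a load stub checks the receiver, walks the chain, and then loads from whichever register ends up holding the holder. Minimal sketch, modelled on the GenerateLoadField hunk further down:

    Label miss;
    __ JumpIfSmi(receiver, &miss);        // receiver must be a heap object
    Register reg = CheckPrototypes(object, receiver, holder,
                                   scratch1, scratch2, scratch3, name, &miss);
    GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
    __ ret(0);
    __ bind(&miss);
    GenerateLoadMiss(masm(), Code::LOAD_IC);
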
Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
@@ -882,12 +1105,13 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
ASSERT(current->property_dictionary()->FindEntry(name) ==
StringDictionary::kNotFound);
- MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
+ MaybeObject* negative_lookup =
+ TryGenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
if (negative_lookup->IsFailure()) {
set_failure(Failure::cast(negative_lookup));
return reg;
@@ -960,43 +1184,34 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
- current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(current),
- name,
- scratch1,
- miss);
- if (cell->IsFailure()) {
- set_failure(Failure::cast(cell));
- return reg;
- }
- }
- current = JSObject::cast(current->GetPrototype());
- }
+ MaybeObject* result = TryGenerateCheckPropertyCells(masm(),
+ object,
+ holder,
+ name,
+ scratch1,
+ miss);
+ if (result->IsFailure()) set_failure(Failure::cast(result));
// Return the register containing the holder.
return reg;
}
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
int index,
- String* name,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check the prototype chain.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ Register reg = CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
@@ -1081,24 +1296,24 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
}
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- Object* value,
- String* name,
+ Handle<Object> value,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ Move(rax, Handle<Object>(value));
+ __ Move(rax, value);
__ ret(0);
}
@@ -1198,7 +1413,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), rax, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
+ Handle<JSObject>(lookup->holder()),
+ lookup->GetFieldIndex());
__ ret(0);
} else {
// We found CALLBACKS property in prototype chain of interceptor's
@@ -1244,9 +1460,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
- __ Cmp(rcx, Handle<String>(name));
+ __ Cmp(rcx, name);
__ j(not_equal, miss);
}
}
@@ -1305,11 +1521,22 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
}
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_ic_state_);
+ extra_state_);
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+// TODO(kmillikin): Eliminate this function when the stub cache is fully
+// handlified.
+MaybeObject* CallStubCompiler::TryGenerateMissBranch() {
+ MaybeObject* maybe_obj =
+ isolate()->stub_cache()->TryComputeCallMiss(arguments().immediate(),
+ kind_,
+ extra_state_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1317,10 +1544,10 @@ MaybeObject* CallStubCompiler::GenerateMissBranch() {
}
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1360,7 +1587,7 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -1368,8 +1595,7 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
@@ -1394,7 +1620,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
Label miss;
- GenerateNameCheck(name, &miss);
+ GenerateNameCheck(Handle<String>(name), &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -1488,8 +1714,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(rdi, &no_fast_elements_check);
- __ movq(rsi, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(rsi, &call_builtin, Label::kFar);
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
@@ -1553,11 +1779,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return GetCode(function);
+ return TryGetCode(function);
}
@@ -1579,7 +1805,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
Label miss, return_undefined, call_builtin;
- GenerateNameCheck(name, &miss);
+ GenerateNameCheck(Handle<String>(name), &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -1636,11 +1862,11 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return GetCode(function);
+ return TryGetCode(function);
}
@@ -1669,12 +1895,12 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
+ GenerateNameCheck(Handle<String>(name), &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1720,11 +1946,11 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// Restore function name in rcx.
__ Move(rcx, Handle<String>(name));
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return GetCode(function);
+ return TryGetCode(function);
}
@@ -1753,12 +1979,12 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
+ GenerateNameCheck(Handle<String>(name), &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1806,11 +2032,11 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// Restore function name in rcx.
__ Move(rcx, Handle<String>(name));
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return GetCode(function);
+ return TryGetCode(function);
}
@@ -1835,7 +2061,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
- GenerateNameCheck(name, &miss);
+ GenerateNameCheck(Handle<String>(name), &miss);
if (cell == NULL) {
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
@@ -1871,7 +2097,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1879,11 +2105,11 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// rcx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
}
@@ -1917,7 +2143,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
- GenerateNameCheck(name, &miss);
+ GenerateNameCheck(Handle<String>(name), &miss);
if (cell == NULL) {
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
@@ -1988,7 +2214,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1996,11 +2222,11 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// rcx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
}
@@ -2023,7 +2249,7 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
+ GenerateNameCheck(Handle<String>(name), &miss_before_stack_reserved);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2055,11 +2281,11 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
__ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return GetCode(function);
+ return TryGetCode(function);
}
@@ -2089,7 +2315,7 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
Label miss;
- GenerateNameCheck(name, &miss);
+ GenerateNameCheck(Handle<String>(name), &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2186,7 +2412,7 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2194,11 +2420,11 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return GetCode(function);
+ return TryGetCode(function);
}
@@ -2216,18 +2442,18 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// -----------------------------------
Label miss;
- GenerateNameCheck(name, &miss);
+ GenerateNameCheck(Handle<String>(name), &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
+ CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
MaybeObject* result = compiler.Compile(masm(),
object,
holder,
@@ -2257,7 +2483,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Invoke the function.
__ movq(rdi, rax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -2265,11 +2491,11 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return TryGetCode(INTERCEPTOR, name);
}
@@ -2299,7 +2525,7 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
Label miss;
- GenerateNameCheck(name, &miss);
+ GenerateNameCheck(Handle<String>(name), &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -2320,39 +2546,32 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Jump to the cached code (tail call).
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1);
- ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- } else {
- Handle<Code> code(function->code());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
+
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1);
- MaybeObject* maybe_result = GenerateMissBranch();
+ MaybeObject* maybe_result = TryGenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
- return GetCode(NORMAL, name);
+ return TryGetCode(NORMAL, name);
}
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2362,12 +2581,7 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
Label miss;
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- rdx, rcx, rbx,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2375,13 +2589,14 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<AccessorInfo> callback,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2409,7 +2624,7 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
__ pop(rbx); // remove the return address
__ push(rdx); // receiver
- __ Push(Handle<AccessorInfo>(callback)); // callback info
+ __ Push(callback); // callback info
__ push(rcx); // name
__ push(rax); // value
__ push(rbx); // restore return address
@@ -2429,8 +2644,9 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
}
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> receiver,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2478,9 +2694,10 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+ Handle<GlobalObject> object,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2495,7 +2712,7 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ j(not_equal, &miss);
// Compute the cell operand to use.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ __ Move(rbx, cell);
Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
// Check that the value in the cell is not the hole. If it is, this
@@ -2539,10 +2756,10 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -2555,16 +2772,11 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
- __ Cmp(rcx, Handle<String>(name));
+ __ Cmp(rcx, name);
__ j(not_equal, &miss);
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- rdx, rcx, rbx,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2573,40 +2785,38 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Code* stub;
+
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- MaybeObject* maybe_stub =
- KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(rdx,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub =
+ KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapList* receiver_maps,
- CodeList* handler_stubs,
- MapList* transitioned_maps) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -2620,17 +2830,14 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
int receiver_count = receiver_maps->length();
for (int i = 0; i < receiver_count; ++i) {
// Check map and tail call if there's a match
- Handle<Map> map(receiver_maps->at(i));
- __ Cmp(rdi, map);
- if (transitioned_maps->at(i) == NULL) {
- __ j(equal, Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET);
+ __ Cmp(rdi, receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
} else {
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
- __ movq(rbx,
- Handle<Map>(transitioned_maps->at(i)),
- RelocInfo::EMBEDDED_OBJECT);
- __ jmp(Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET);
+ __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT);
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
}
@@ -2640,13 +2847,13 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
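
The generated dispatch for the polymorphic keyed store now works directly on handles. For N receiver maps it has the following shape (illustrative outline; the receiver map is assumed to be in rdi, as the Cmp in the hunk implies):

    // cmp rdi, map_0        ; je  handler_0              (no transition)
    // cmp rdi, map_1        ; jne next_1
    //   mov rbx, new_map_1  ; jmp handler_1              (handler performs the transition)
    // next_1: ...
    // jmp KeyedStoreIC_Miss                              (no map matched)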
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> last) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2665,15 +2872,8 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- rdx,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
+ GenerateCheckPropertyCell(
+ masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss);
}
// Return undefined if maps of the full prototype chain are still the
@@ -2685,14 +2885,14 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, heap()->empty_string());
+ return GetCode(NONEXISTENT, factory()->empty_string());
}
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2731,14 +2931,14 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return TryGetCode(CALLBACKS, name);
}
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Object> value,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2765,7 +2965,7 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// -----------------------------------
Label miss;
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// TODO(368): Compile in the whole chain: all the interceptors in
@@ -2785,15 +2985,16 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return TryGetCode(INTERCEPTOR, name);
}
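Inside CompileLoadInterceptor the one substantive change besides the return type is LookupResult lookup(isolate()): the lookup result now has to be constructed with the isolate, which, as far as I can tell from this migration, lets it register itself with the isolate for the duration of the lookup so that live results can be tracked. A small RAII model of that registration pattern, using made-up types rather than V8's LookupResult/Isolate:

#include <cassert>

struct Isolate {
  struct Lookup* top_lookup = nullptr;
};

// Illustrative only: an object that links itself into a per-isolate chain on
// construction and unlinks itself on destruction.
struct Lookup {
  explicit Lookup(Isolate* isolate)
      : isolate_(isolate), next_(isolate->top_lookup) {
    isolate_->top_lookup = this;              // register on construction
  }
  ~Lookup() { isolate_->top_lookup = next_; } // unregister on destruction
  Isolate* isolate_;
  Lookup* next_;
};

int main() {
  Isolate isolate;
  {
    Lookup lookup(&isolate);                  // like: LookupResult lookup(isolate());
    assert(isolate.top_lookup == &lookup);
  }
  assert(isolate.top_lookup == nullptr);
  return 0;
}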
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name,
+ bool is_dont_delete) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2804,7 +3005,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(rax, &miss);
}
@@ -2812,7 +3013,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
// Get the value from the cell.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ __ Move(rbx, cell);
__ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -2838,9 +3039,9 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
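With handle parameters, CompileLoadGlobal's receiver/holder test can no longer be a raw pointer comparison: two distinct handles may refer to the same object, so object != holder becomes !object.is_identical_to(holder), which compares what the handles point at. A small illustrative model of the difference (again not V8's Handle, just its shape):

#include <cassert>

template <typename T>
class Handle {
 public:
  explicit Handle(T** location) : location_(location) {}
  // Compares the referenced objects, not the handle slots themselves.
  bool is_identical_to(const Handle<T>& other) const {
    return *location_ == *other.location_;
  }
  T** location() const { return location_; }
 private:
  T** location_;
};

struct JSObject { int dummy; };

int main() {
  JSObject global{0};
  // Two independent handle slots that both refer to the same object.
  JSObject* slot_a = &global;
  JSObject* slot_b = &global;
  Handle<JSObject> object(&slot_a);
  Handle<JSObject> holder(&slot_b);

  assert(object.location() != holder.location());  // naive slot comparison differs
  assert(object.is_identical_to(holder));          // but they are the same object
  return 0;
}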
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
int index) {
// ----------- S t a t e -------------
// -- rax : key
@@ -2853,7 +3054,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
@@ -2899,14 +3100,15 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return TryGetCode(CALLBACKS, name);
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2949,7 +3151,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(receiver,
holder,
@@ -2966,11 +3168,12 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return TryGetCode(INTERCEPTOR, name);
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2982,7 +3185,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
__ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
@@ -2995,7 +3198,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -3007,7 +3211,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
@@ -3020,7 +3224,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -3032,7 +3237,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
__ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
@@ -3045,32 +3250,29 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
- MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(rdx,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
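CompileLoadElement shows the remaining two conventions of the migration: the element stub's code is obtained as a Handle<Code> straight from GetCode() instead of unpacking a MaybeObject*, and anonymous stubs pass factory()->empty_string() where the old signature accepted a NULL name, since a Handle<String> parameter has no NULL. A tiny sketch of the empty-string-as-placeholder convention; Factory and DescribeStub here are made up for illustration:

#include <cassert>
#include <string>

// With raw pointers a caller could pass NULL to mean "stub has no name";
// with a handle-like parameter the convention in the diff is to pass the
// canonical empty string instead.
struct Factory {
  const std::string* empty_string() const {
    static const std::string kEmpty;
    return &kEmpty;
  }
};

std::string DescribeStub(const std::string& name) {
  return name.empty() ? "<anonymous stub>" : "stub for " + name;
}

int main() {
  Factory factory;
  assert(DescribeStub(*factory.empty_string()) == "<anonymous stub>");
  assert(DescribeStub("keyed_load_field") == "stub for keyed_load_field");
  return 0;
}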
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_ics) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -3084,18 +3286,15 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
int receiver_count = receiver_maps->length();
for (int current = 0; current < receiver_count; ++current) {
// Check map and tail call if there's a match
- Handle<Map> map(receiver_maps->at(current));
- __ Cmp(map_reg, map);
- __ j(equal,
- Handle<Code>(handler_ics->at(current)),
- RelocInfo::CODE_TARGET);
+ __ Cmp(map_reg, receiver_maps->at(current));
+ __ j(equal, handler_ics->at(current), RelocInfo::CODE_TARGET);
}
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}