Diffstat (limited to 'deps/v8/src/ia32')
 deps/v8/src/ia32/assembler-ia32.cc       |  44
 deps/v8/src/ia32/assembler-ia32.h        |  10
 deps/v8/src/ia32/builtins-ia32.cc        |  20
 deps/v8/src/ia32/codegen-ia32.cc         | 452
 deps/v8/src/ia32/codegen-ia32.h          |   2
 deps/v8/src/ia32/disasm-ia32.cc          |  23
 deps/v8/src/ia32/full-codegen-ia32.cc    |  57
 deps/v8/src/ia32/ic-ia32.cc              | 213
 deps/v8/src/ia32/macro-assembler-ia32.cc |  19
 deps/v8/src/ia32/macro-assembler-ia32.h  |  12
 deps/v8/src/ia32/stub-cache-ia32.cc      | 102
 deps/v8/src/ia32/virtual-frame-ia32.cc   |  18
 deps/v8/src/ia32/virtual-frame-ia32.h    |   3
 13 files changed, 731 insertions(+), 244 deletions(-)
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 4690c67289..a436827e1c 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -1328,6 +1328,15 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
}
+void Assembler::test_b(const Operand& op, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF6);
+ emit_operand(eax, op);
+ EMIT(imm8);
+}
+
+
void Assembler::xor_(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2221,6 +2230,40 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
}
+void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x38);
+ EMIT(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movntdq(const Operand& dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xE7);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::prefetch(const Operand& src, int level) {
+ ASSERT(is_uint2(level));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0x18);
+ XMMRegister code = { level }; // Emit hint number in Reg position of ModR/M.
+ emit_sse_operand(code, src);
+}
+
+
void Assembler::movdbl(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2300,7 +2343,6 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
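
A note on the new emitters above: test_b emits F6 /0, so the eax passed to emit_operand is really the opcode extension 0, not a register operand, and prefetch carries its hint number in the same ModR/M reg slot, which is why it builds an XMMRegister whose code() is the level (0 = prefetchnta, 1 through 3 = prefetcht0 through prefetcht2). A minimal sketch of that byte layout, for orientation only:

#include <cstdint>

// ModR/M layout: mod selects the addressing form, rm names the memory or
// register operand, and reg holds either a register number or, for
// instructions like F6 /0 (test r/m8, imm8) and 0F 18 /hint (prefetch),
// an opcode extension or hint value.
uint8_t ModRM(uint8_t mod, uint8_t reg, uint8_t rm) {
  return static_cast<uint8_t>(((mod & 3) << 6) | ((reg & 7) << 3) | (rm & 7));
}
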
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 9ece74432b..76dfe7cfbb 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -637,6 +637,7 @@ class Assembler : public Malloced {
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
+ void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
void xor_(Register dst, const Operand& src);
@@ -790,6 +791,15 @@ class Assembler : public Malloced {
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
+ // Non-temporal XMM move operations.
+ void movntdqa(XMMRegister dst, const Operand& src);
+ void movntdq(const Operand& dst, XMMRegister src);
+ // Prefetch src position into cache level.
+ // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
+ // non-temporal prefetch (prefetchnta), which minimizes cache pollution.
+ void prefetch(const Operand& src, int level);
+ // TODO(lrn): Need SFENCE for movnt?
+
// Debugging
void Print();
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 2275b7c3b9..3adb014b14 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -331,10 +331,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(greater_equal, &exit, not_taken);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &exit, not_taken);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -469,11 +467,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ cmp(ebx, Factory::undefined_value());
__ j(equal, &use_global_receiver);
+ // We don't use IsObjectJSObjectType here because we jump on success.
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(below, &convert_to_object);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
__ j(below_equal, &shift_arguments);
__ bind(&convert_to_object);
@@ -617,12 +615,12 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// If given receiver is already a JavaScript object then there's no
// reason for converting it.
+ // We don't use IsObjectJSObjectType here because we jump on success.
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &call_to_object);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(less_equal, &push_receiver);
+ __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
+ __ j(below_equal, &push_receiver);
// Convert the receiver to an object.
__ bind(&call_to_object);
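
The rewrites above fold the two-sided instance-type range check (two cmp/j pairs) into one subtraction plus a single unsigned compare; MacroAssembler::IsInstanceJSObjectType, added later in this diff, is the same pattern. A sketch of why the folding is sound, relying only on unsigned wraparound:

#include <cstdint>

// FIRST <= t && t <= LAST collapses into one unsigned comparison: after
// subtracting FIRST, any t below FIRST wraps around to a large unsigned
// value, so the single below_equal test covers both bounds at once.
bool InTypeRange(uint8_t t, uint8_t first, uint8_t last) {
  return static_cast<uint8_t>(t - first) <= static_cast<uint8_t>(last - first);
}
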
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 95aeba6817..603f9135ff 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -2624,9 +2624,8 @@ void CodeGenerator::Comparison(AstNode* node,
ASSERT(temp.is_valid());
__ mov(temp.reg(),
FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(),
- FieldOperand(temp.reg(), Map::kBitFieldOffset));
- __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
temp.Unuse();
operand.Unuse();
dest->Split(not_zero);
@@ -2720,11 +2719,9 @@ void CodeGenerator::Comparison(AstNode* node,
// left_side is a sequential ASCII string.
left_side = Result(left_reg);
right_side = Result(right_val);
- Result temp2 = allocator_->Allocate();
- ASSERT(temp2.is_valid());
// Test string equality and comparison.
+ Label comparison_done;
if (cc == equal) {
- Label comparison_done;
__ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ j(not_equal, &comparison_done);
@@ -2732,34 +2729,25 @@ void CodeGenerator::Comparison(AstNode* node,
static_cast<uint8_t>(String::cast(*right_val)->Get(0));
__ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
char_value);
- __ bind(&comparison_done);
} else {
- __ mov(temp2.reg(),
- FieldOperand(left_side.reg(), String::kLengthOffset));
- __ SmiUntag(temp2.reg());
- __ sub(Operand(temp2.reg()), Immediate(1));
- Label comparison;
- // If the length is 0 then the subtraction gave -1 which compares less
- // than any character.
- __ j(negative, &comparison);
- // Otherwise load the first character.
- __ movzx_b(temp2.reg(),
- FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
- __ bind(&comparison);
+ __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ // If the length is 0 then the jump is taken and the flags
+ // correctly represent being less than the one-character string.
+ __ j(below, &comparison_done);
// Compare the first character of the string with the
// constant 1-character string.
uint8_t char_value =
static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmp(Operand(temp2.reg()), Immediate(char_value));
- Label characters_were_different;
- __ j(not_equal, &characters_were_different);
+ __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
+ char_value);
+ __ j(not_equal, &comparison_done);
// If the first character is the same then the long string sorts after
// the short one.
__ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
Immediate(Smi::FromInt(1)));
- __ bind(&characters_were_different);
}
- temp2.Unuse();
+ __ bind(&comparison_done);
left_side.Unuse();
right_side.Unuse();
dest->Split(cc);
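
The simplification above drops the temp register by letting the Smi length compare double as the empty-string test. A C++ sketch of the resulting logic (illustrative only; the names are made up):

#include <cstdint>

// Comparing a sequential ASCII string against a constant one-character
// string 'c'. An empty string (length 0, i.e. below 1) already sorts below
// any one-character string, so the length compare leaves the right flags;
// when the first characters are equal, the longer string sorts after.
int CompareWithOneCharString(const uint8_t* chars, int length, uint8_t c) {
  if (length < 1) return -1;                        // j(below, &comparison_done)
  if (chars[0] != c) return chars[0] < c ? -1 : 1;  // cmpb first character
  return (length == 1) ? 0 : 1;                     // cmp length, Smi::FromInt(1)
}
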
@@ -4148,9 +4136,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// eax: value to be iterated over
__ test(eax, Immediate(kSmiTagMask));
primitive.Branch(zero);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
jsobject.Branch(above_equal);
primitive.Bind();
@@ -5762,26 +5748,66 @@ void CodeGenerator::VisitCall(Call* node) {
// Allocate a frame slot for the receiver.
frame_->Push(Factory::undefined_value());
+
+ // Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
- // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ // Result to hold the result of the function resolution and the
+ // final result of the eval call.
+ Result result;
+
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ JumpTarget done;
+ if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ ASSERT(var->slot()->type() == Slot::LOOKUP);
+ JumpTarget slow;
+ // Prepare the stack for the call to
+ // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
+ // function, the first argument to the eval call and the
+ // receiver.
+ Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ frame_->Push(&fun);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+ frame_->PushParameterAt(-1);
+
+ // Resolve the call.
+ result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+
+ done.Jump(&result);
+ slow.Bind();
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval by
+ // pushing the loaded function, the first argument to the eval
+ // call and the receiver.
frame_->PushElementAt(arg_count + 1);
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
frame_->Push(Factory::undefined_value());
}
-
- // Push the receiver.
frame_->PushParameterAt(-1);
// Resolve the call.
- Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+ // If we generated fast-case code, bind the jump-target where the fast
+ // and slow cases merge.
+ if (done.is_linked()) done.Bind(&result);
// The runtime call returns a pair of values in eax (function) and
// edx (receiver). Touch up the stack with the right values.
@@ -5949,18 +5975,31 @@ void CodeGenerator::VisitCall(Call* node) {
ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
+ // Call the function.
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
} else {
+ // Push the receiver onto the frame.
Load(property->obj());
- frame()->Dup();
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
+
+ // Load the name of the function.
Load(property->key());
- Result function = EmitKeyedLoad();
- Result receiver = frame_->Pop();
- frame_->Push(&function);
- frame_->Push(&receiver);
- }
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result =
+ frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ frame_->Push(&result);
+ }
}
} else {
@@ -6317,14 +6356,15 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
ASSERT(map.is_valid());
__ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
- __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
destination()->false_target()->Branch(not_zero);
- __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Do a range test for JSObject type. We can't use
+ // MacroAssembler::IsInstanceJSObjectType, because we are using a
+ // ControlDestination, so we copy its implementation here.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(below);
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+ __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
obj.Unuse();
map.Unuse();
destination()->Split(below_equal);
@@ -6360,9 +6400,8 @@ void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(temp.is_valid());
__ mov(temp.reg(),
FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(),
- FieldOperand(temp.reg(), Map::kBitFieldOffset));
- __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
obj.Unuse();
temp.Unuse();
destination()->Split(not_zero);
@@ -6436,20 +6475,16 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- { Result tmp = allocator()->Allocate();
- __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
- __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
- null.Branch(below);
+ __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
+ null.Branch(below);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(tmp.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
- }
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
+ function.Branch(equal);
// Check if the constructor in the map is a function.
{ Result tmp = allocator()->Allocate();
@@ -7030,8 +7065,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// has no indexed interceptor.
__ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
deferred->Branch(below);
- __ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
- __ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
+ KeyedLoadIC::kSlowCaseBitFieldMask);
deferred->Branch(not_zero);
// Check the object's elements are in fast case.
@@ -8285,10 +8320,10 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
- __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
destination()->false_target()->Branch(not_zero);
- __ CmpObjectType(answer.reg(), FIRST_NONSTRING_TYPE, temp.reg());
+ __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
temp.Unuse();
answer.Unuse();
destination()->Split(below);
@@ -8310,9 +8345,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
// It can be an undetectable object.
frame_->Spill(answer.reg());
__ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ movzx_b(answer.reg(),
- FieldOperand(answer.reg(), Map::kBitFieldOffset));
- __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
answer.Unuse();
destination()->Split(not_zero);
@@ -8339,14 +8373,15 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->false_target()->Branch(equal);
// It can be an undetectable object.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
- __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
destination()->false_target()->Branch(not_zero);
- __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ // Do a range test for JSObject type. We can't use
+ // MacroAssembler::IsInstanceJSObjectType, because we are using a
+ // ControlDestination, so we copy its implementation here.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(below);
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+ __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
answer.Unuse();
map.Unuse();
destination()->Split(below_equal);
@@ -8766,6 +8801,9 @@ Result CodeGenerator::EmitKeyedLoad() {
key.ToRegister();
receiver.ToRegister();
+ // If key and receiver are shared registers on the frame, their values will
+ // be automatically saved and restored when going to deferred code.
+ // The result is in elements, which is guaranteed non-shared.
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
@@ -9270,20 +9308,19 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
// Undetectable => false.
- __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
- __ and_(ebx, 1 << Map::kIsUndetectable);
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
__ j(not_zero, &false_result);
// JavaScript object => true.
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
__ j(above_equal, &true_result);
// String value => false iff empty.
- __ cmp(ecx, FIRST_NONSTRING_TYPE);
+ __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string);
- __ mov(edx, FieldOperand(eax, String::kLengthOffset));
ASSERT(kSmiTag == 0);
- __ test(edx, Operand(edx));
+ __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result);
__ jmp(&true_result);
@@ -11739,13 +11776,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// There is no test for undetectability in strict equality.
// Get the type of the first operand.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-
// If the first object is a JS object, we have done pointer comparison.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
Label first_non_object;
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &first_non_object);
// Return non-zero (eax is not zero)
@@ -11756,17 +11790,14 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
- __ cmp(ecx, ODDBALL_TYPE);
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
- __ cmp(ecx, ODDBALL_TYPE);
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
// Fall through to the general case.
@@ -12408,12 +12439,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(zero, &slow, not_taken);
// Check that the left hand is a JS object.
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
- __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow, not_taken);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow, not_taken);
+ __ IsObjectJSObjectType(eax, eax, edx, &slow);
// Get the prototype of the function.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
@@ -12438,12 +12464,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Check that the function prototype is a JS object.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow, not_taken);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow, not_taken);
+ __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
// Register mapping:
// eax is object map.
@@ -12638,7 +12659,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
- __ push(result_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -12652,9 +12672,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
// have a chance to overwrite it.
__ mov(scratch_, eax);
}
- __ pop(result_);
__ pop(index_);
__ pop(object_);
+ // Reload the instance type.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
ASSERT(kSmiTag == 0);
@@ -12877,14 +12899,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// ebx: length of resulting flat string as a smi
// edx: second string
Label non_ascii_string_add_flat_result;
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ test(ecx, Immediate(kAsciiStringTag));
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(zero, &non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ test(ecx, Immediate(kAsciiStringTag));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(zero, &string_add_runtime);
__ bind(&make_flat_ascii_string);
@@ -12925,8 +12945,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// edx: second string
__ bind(&non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kAsciiStringTag);
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(not_zero, &string_add_runtime);
// Both strings are two byte strings. As they are short they are both
// flat.
@@ -13492,6 +13511,211 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
#undef __
+#define __ masm.
+
+MemCopyFunction CreateMemCopyFunction() {
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler masm(buffer, static_cast<int>(actual_size));
+
+ // Generated code is put into a fixed, unmovable buffer, and not into
+ // the V8 heap. We can't, and don't, refer to any relocatable addresses
+ // (e.g. the JavaScript nan-object).
+
+ // The 32-bit cdecl calling convention passes arguments on the stack.
+
+ // Stack layout:
+ // esp[12]: Third argument, size.
+ // esp[8]: Second argument, source pointer.
+ // esp[4]: First argument, destination pointer.
+ // esp[0]: return address
+
+ const int kDestinationOffset = 1 * kPointerSize;
+ const int kSourceOffset = 2 * kPointerSize;
+ const int kSizeOffset = 3 * kPointerSize;
+
+ int stack_offset = 0; // Update if we change the stack height.
+
+ if (FLAG_debug_code) {
+ __ cmp(Operand(esp, kSizeOffset + stack_offset),
+ Immediate(kMinComplexMemCopy));
+ Label ok;
+ __ j(greater_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope enable(SSE2);
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ neg(edx);
+ __ add(Operand(edx), Immediate(16));
+ __ add(dst, Operand(edx));
+ __ add(src, Operand(edx));
+ __ sub(Operand(count), edx);
+
+ // edi is now aligned. Check if esi is also aligned.
+ Label unaligned_source;
+ __ test(Operand(src), Immediate(0x0F));
+ __ j(not_zero, &unaligned_source);
+ {
+ __ IncrementCounter(&Counters::memcopy_aligned, 1);
+ // Copy loop for aligned source and destination.
+ __ mov(edx, count);
+ Register loop_count = ecx;
+ Register count = edx;
+ __ shr(loop_count, 5);
+ {
+ // Main copy loop.
+ Label loop;
+ __ bind(&loop);
+ __ prefetch(Operand(src, 0x20), 1);
+ __ movdqa(xmm0, Operand(src, 0x00));
+ __ movdqa(xmm1, Operand(src, 0x10));
+ __ add(Operand(src), Immediate(0x20));
+
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ add(Operand(dst), Immediate(0x20));
+
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ }
+
+ // At most 31 bytes to copy.
+ Label move_less_16;
+ __ test(Operand(count), Immediate(0x10));
+ __ j(zero, &move_less_16);
+ __ movdqa(xmm0, Operand(src, 0));
+ __ add(Operand(src), Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ __ add(Operand(dst), Immediate(0x10));
+ __ bind(&move_less_16);
+
+ // At most 15 bytes to copy. Copy 16 bytes at the end of the range.
+ __ and_(count, 0xF);
+ __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+ }
+ __ Align(16);
+ {
+ // Copy loop for unaligned source and aligned destination.
+ // If source is not aligned, we can't read it as efficiently.
+ __ bind(&unaligned_source);
+ __ IncrementCounter(&Counters::memcopy_unaligned, 1);
+ __ mov(edx, ecx);
+ Register loop_count = ecx;
+ Register count = edx;
+ __ shr(loop_count, 5);
+ {
+ // Main copy loop
+ Label loop;
+ __ bind(&loop);
+ __ prefetch(Operand(src, 0x20), 1);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ add(Operand(src), Immediate(0x20));
+
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ add(Operand(dst), Immediate(0x20));
+
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ }
+
+ // At most 31 bytes to copy.
+ Label move_less_16;
+ __ test(Operand(count), Immediate(0x10));
+ __ j(zero, &move_less_16);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ add(Operand(src), Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ __ add(Operand(dst), Immediate(0x10));
+ __ bind(&move_less_16);
+
+ // At most 15 bytes to copy. Copy 16 bytes at the end of the range.
+ __ and_(count, 0x0F);
+ __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+ }
+
+ } else {
+ __ IncrementCounter(&Counters::memcopy_noxmm, 1);
+ // SSE2 not supported. Unlikely to happen in practice.
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ __ cld();
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ // Copy the first word.
+ __ mov(eax, Operand(src, 0));
+ __ mov(Operand(dst, 0), eax);
+
+ // Increment src and dst so that dst is aligned.
+ __ mov(edx, dst);
+ __ and_(edx, 0x03);
+ __ neg(edx);
+ __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
+ __ add(dst, Operand(edx));
+ __ add(src, Operand(edx));
+ __ sub(Operand(count), edx);
+ // edi is now aligned, ecx holds the number of remaining bytes to copy.
+
+ __ mov(edx, count);
+ count = edx;
+ __ shr(ecx, 2); // Make word count instead of byte count.
+ __ rep_movs();
+
+ // At most 3 bytes left to copy. Copy 4 bytes at the end of the range.
+ __ and_(count, 3);
+ __ mov(eax, Operand(src, count, times_1, -4));
+ __ mov(Operand(dst, count, times_1, -4), eax);
+
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ // Call the function from C++.
+ return FUNCTION_CAST<MemCopyFunction>(buffer);
+}
+
+#undef __
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
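
For readers who do not want to trace the assembly, here is a plain C++ sketch of the copy strategy CreateMemCopyFunction emits on the SSE2 path. The function name is illustrative, and it assumes what the stub's debug check enforces: size is at least kMinComplexMemCopy and the buffers do not overlap.

#include <cstdint>
#include <cstring>

void MemCopySketch(uint8_t* dst, const uint8_t* src, size_t size) {
  memcpy(dst, src, 16);                 // unaligned 16-byte head (movdqu)
  size_t adjust = 16 - (reinterpret_cast<uintptr_t>(dst) & 0xF);
  dst += adjust; src += adjust; size -= adjust;  // dst now 16-byte aligned
  while (size >= 32) {                  // main loop: 32 bytes per iteration
    memcpy(dst, src, 32);               // movdqa/movdqu pair plus prefetch
    dst += 32; src += 32; size -= 32;
  }
  if (size & 0x10) {                    // one remaining 16-byte chunk
    memcpy(dst, src, 16);
    dst += 16; src += 16;
  }
  size &= 0xF;                          // at most 15 bytes left: copy the
  memcpy(dst + size - 16, src + size - 16, 16);  // last 16 bytes, overlapping
}
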
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index ea182ab9cc..b3ddad9d9a 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -594,6 +594,8 @@ class CodeGenerator: public AstVisitor {
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+ static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
+
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 58c22afcd3..44afdd6ddb 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -817,6 +817,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
// Returns NULL if the instruction is not handled here.
static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
+ case 0x18: return "prefetch";
case 0xA2: return "cpuid";
case 0x31: return "rdtsc";
case 0xBE: return "movsx_b";
@@ -942,7 +943,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x0F:
{ byte f0byte = *(data+1);
const char* f0mnem = F0Mnem(f0byte);
- if (f0byte == 0xA2 || f0byte == 0x31) {
+ if (f0byte == 0x18) {
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* suffix[] = {"nta", "1", "2", "3"};
+ AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
+ data += PrintRightOperand(data);
+ } else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
} else if ((f0byte & 0xF0) == 0x80) {
@@ -1070,6 +1077,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x2A) {
+ // movntdqa
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
} else {
UnimplementedInstruction();
}
@@ -1122,6 +1136,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0xE7) {
+ AppendToBuffer("movntdq ");
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0xEF) {
data++;
int mod, regop, rm;
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index ae64d023c0..28b766a572 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -1726,6 +1726,29 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
}
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key,
+ RelocInfo::Mode mode) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ VisitForValue(key, kAccumulator);
+ __ mov(ecx, eax);
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(
+ arg_count, in_loop);
+ __ call(ic, mode);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ Apply(context_, eax);
+}
+
+
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
@@ -1815,37 +1838,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForValue(prop->obj(), kStack);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
- // Call to a keyed property, use keyed load IC followed by function
- // call.
+ // Call to a keyed property.
+ // For a synthetic property use keyed load IC followed by function call,
+ // for a regular property use keyed CallIC.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
if (prop->is_synthetic()) {
+ VisitForValue(prop->key(), kAccumulator);
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
__ pop(edx); // We do not need to keep the receiver.
- } else {
- __ mov(edx, Operand(esp, 0)); // Keep receiver, to call function on.
- }
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test eax,..."
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
- if (prop->is_synthetic()) {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax,..."
+ // instruction after the call as it is treated specially
+ // by the LoadIC code.
+ __ nop();
// Push result (function).
__ push(eax);
// Push Global receiver.
__ mov(ecx, CodeGenerator::GlobalObject());
__ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+ EmitCallWithStub(expr);
} else {
- // Pop receiver.
- __ pop(ebx);
- // Push result (function).
- __ push(eax);
- __ push(ebx);
+ EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
- EmitCallWithStub(expr);
}
} else {
// Call to some other expression. If the expression is an anonymous
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 2ba64dcae0..dc6bfc90de 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -305,8 +305,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string;
- Label check_pixel_array, probe_dictionary;
- Label check_number_dictionary;
+ Label check_pixel_array, probe_dictionary, check_number_dictionary;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
@@ -316,8 +315,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check bit field.
- __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(kSlowCaseBitFieldMask));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), kSlowCaseBitFieldMask);
__ j(not_zero, &slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -329,8 +327,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken);
- // Get the elements array of the object.
__ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
@@ -405,13 +404,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
__ mov(ebx, FieldOperand(eax, String::kHashFieldOffset));
- __ test(ebx, Immediate(String::kIsArrayIndexMask));
- __ j(not_zero, &index_string, not_taken);
+ __ test(ebx, Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(zero, &index_string, not_taken);
// Is the string a symbol?
- __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ // ecx: key map.
ASSERT(kSymbolTag != 0);
- __ test(ebx, Immediate(kIsSymbolMask));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kIsSymbolMask);
__ j(zero, &slow, not_taken);
// If the receiver is a fast-case object, check the keyed lookup
@@ -453,14 +452,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ cmp(edi, Operand(ecx));
+ __ sub(edi, Operand(ecx));
__ j(above_equal, &slow);
// Load in-object property.
- __ sub(edi, Operand(ecx));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
+ __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
@@ -487,10 +486,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&index_string);
// We want the smi-tagged index in eax. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
+ // eax: key (string).
+ // ebx: hash field.
+ // edx: receiver.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(ebx, String::kArrayIndexValueMask);
__ shr(ebx, String::kHashShift - kSmiTagSize);
+ // Here we actually clobber the key (eax), which will be used if we call
+ // into the runtime later. However, as the new key is the numeric value of
+ // the string key, either key works.
__ mov(eax, ebx);
+ // Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
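
The index_string path above converts a string key with a cached array index into a smi key in one mask-and-shift, because the shift amount is reduced by kSmiTagSize, which leaves the result already smi-tagged. A sketch, assuming the hash-field layout the comments describe:

#include <cstdint>

// The array index sits in the hash field, kHashShift bits up. Shifting
// right by (kHashShift - kSmiTagSize) instead of kHashShift leaves the
// value shifted left by kSmiTagSize with zero tag bits: a valid smi.
uint32_t SmiTaggedIndex(uint32_t hash_field, uint32_t value_mask,
                        int hash_shift, int smi_tag_size) {
  return (hash_field & value_mask) >> (hash_shift - smi_tag_size);
}
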
@@ -556,8 +562,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
- __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
__ CmpInstanceType(ecx, JS_OBJECT_TYPE);
@@ -681,7 +687,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ fincstp();
// Fall through to slow case.
- // Slow case: Load key and receiver from stack and jump to runtime.
+ // Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
@@ -746,8 +752,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow, not_taken);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
@@ -864,8 +870,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
@@ -1038,22 +1044,21 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Label* miss) {
// ----------- S t a t e -------------
// -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
+ // -- edx : receiver
// -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ Label number, non_number, non_string, boolean, probe;
// Probe the stub cache.
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
// If the stub cache probing failed, the receiver might be a value.
@@ -1073,7 +1078,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Check for string.
__ bind(&non_number);
- __ cmp(ebx, FIRST_NONSTRING_TYPE);
+ __ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &non_string, taken);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::STRING_FUNCTION_INDEX, edx);
@@ -1084,7 +1089,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ cmp(edx, Factory::true_value());
__ j(equal, &boolean, not_taken);
__ cmp(edx, Factory::false_value());
- __ j(not_equal, &miss, taken);
+ __ j(not_equal, miss, taken);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
@@ -1092,10 +1097,6 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache for the value object.
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm, argc);
}
@@ -1135,8 +1136,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
}
-
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+// The generated code never falls through.
+static void GenerateCallNormal(MacroAssembler* masm, int argc, Label* miss) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1144,20 +1145,20 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Label miss, global_object, non_global_object;
+ Label global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, miss, not_taken);
// Check that the receiver is a valid JS object.
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(eax, FIRST_JS_OBJECT_TYPE);
- __ j(below, &miss, not_taken);
+ __ j(below, miss, not_taken);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@@ -1171,10 +1172,10 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Accessing global object: Load and invoke.
__ bind(&global_object);
// Check that the global object does not require access checks.
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_equal, &miss, not_taken);
- GenerateNormalHelper(masm, argc, true, &miss);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_equal, miss, not_taken);
+ GenerateNormalHelper(masm, argc, true, miss);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
@@ -1183,24 +1184,20 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ j(equal, &global_proxy, not_taken);
// Check that the non-global, non-global-proxy object does not
// require access checks.
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_equal, &miss, not_taken);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_equal, miss, not_taken);
__ bind(&invoke);
- GenerateNormalHelper(masm, argc, false, &miss);
+ GenerateNormalHelper(masm, argc, false, miss);
// Global object proxy access: Check access rights.
__ bind(&global_proxy);
- __ CheckAccessGlobalProxy(edx, eax, &miss);
+ __ CheckAccessGlobalProxy(edx, eax, miss);
__ jmp(&invoke);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm, argc);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1222,7 +1219,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
CEntryStub stub(1);
__ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(kCallIC_Miss))));
+ __ mov(ebx, Immediate(ExternalReference(IC_Utility(id))));
__ CallStub(&stub);
// Move result to edi and exit the internal frame.
@@ -1253,6 +1250,106 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ Label miss;
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, &miss);
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ Label miss;
+ GenerateCallNormal(masm, argc, &miss);
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+}
+
+
+void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ Label miss, skip_probe;
+
+ // Do not probe the monomorphic cache if the key is a smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(equal, &skip_probe, taken);
+
+ GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC, &skip_probe);
+
+ __ bind(&skip_probe);
+
+ __ mov(eax, ecx);
+ __ EnterInternalFrame();
+ __ push(ecx);
+ __ call(Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_Generic)),
+ RelocInfo::CODE_TARGET);
+ __ pop(ecx);
+ __ LeaveInternalFrame();
+ __ mov(edi, eax);
+
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+
+ // Check that the receiver is a valid JS object.
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, eax);
+ __ j(below, &miss, not_taken);
+
+ // Check that the value is a JavaScript function.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
+ __ j(not_equal, &miss, not_taken);
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
+}
+
+
+void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ Label miss;
+ GenerateCallNormal(masm, argc, &miss);
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
+}
+
+
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+}
+
+
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
@@ -1300,8 +1397,8 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ j(equal, &global, not_taken);
// Check for non-global object that requires access check.
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &miss, not_taken);
// Search the dictionary placing the result in eax.
@@ -1322,7 +1419,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ CheckAccessGlobalProxy(eax, edx, &miss);
__ jmp(&probe);
- // Cache miss: Restore receiver from stack and jump to runtime.
+ // Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
}
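
The new KeyedCallIC::GenerateMegamorphic above is the core of the keyed-call fast path. A self-contained C++ model of its control flow (all names here are stand-ins, not V8 API): smi keys skip the monomorphic stub-cache probe, a generic keyed load produces the callee, and anything that is not a function falls through to the miss handler.

#include <functional>
#include <map>
#include <string>

using SketchFunction = std::function<int(int)>;
struct SketchObject { std::map<std::string, SketchFunction> properties; };

// Model of the megamorphic keyed-call path. 'cache' stands in for the
// monomorphic stub cache probed by GenerateMonomorphicCacheProbe, the
// property lookup stands in for the KeyedLoadIC_Generic call, and
// returning -1 stands in for GenerateMiss.
int KeyedCallSketch(SketchObject& receiver, const std::string& key, int arg,
                    bool key_is_smi,
                    const std::map<std::string, SketchFunction>& cache) {
  if (!key_is_smi) {                          // test(ecx, kSmiTagMask)
    auto hit = cache.find(key);
    if (hit != cache.end()) return hit->second(arg);
  }
  auto prop = receiver.properties.find(key);  // generic keyed load
  if (prop == receiver.properties.end()) return -1;  // not a function: miss
  return prop->second(arg);                   // InvokeFunction(edi, actual, ...)
}
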
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 5caa4c4186..b83f9bc75b 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -296,6 +296,25 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
}
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
+ sub(Operand(scratch), Immediate(FIRST_JS_OBJECT_TYPE));
+ cmp(scratch, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
+ j(above, fail);
+}
+
+
void MacroAssembler::FCmp() {
if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 387426ff94..2018721d6b 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -188,6 +188,18 @@ class MacroAssembler: public Assembler {
Register map,
Register instance_type);
+ // Check if a heap object's type is in the JSObject range, not including
+ // JSFunction. The object's map will be loaded in the map register.
+ // Any or all of the three registers may be the same.
+ // The contents of the scratch register will always be overwritten.
+ void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ // The contents of the scratch register will be overwritten.
+ void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
+
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 5bb5be617f..48d9e67454 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -172,6 +172,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
}
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype) {
+ // Get the global function with the given index.
+ JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ // Load its initial map. The global functions all have initial maps.
+ __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
Register scratch,
@@ -1029,6 +1040,20 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+ if (kind_ == Code::KEYED_CALL_IC) {
+ __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+ __ j(not_equal, miss, not_taken);
+ }
+}
+
+
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
@@ -1042,6 +1067,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1073,8 +1100,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
@@ -1102,6 +1128,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1219,8 +1247,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1248,6 +1275,8 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
Label miss, return_undefined, call_builtin;
+ GenerateNameCheck(name, &miss);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1301,8 +1330,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1326,16 +1354,17 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
Label miss;
Label index_out_of_range;
+ GenerateNameCheck(name, &miss);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax);
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
Register receiver = ebx;
- Register index = ecx;
+ Register index = edi;
Register scratch = edx;
Register result = eax;
__ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
@@ -1364,11 +1393,8 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
__ ret((argc + 1) * kPointerSize);
__ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(Handle<String>(name)));
- Handle<Code> ic = ComputeCallMiss(argc);
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1393,15 +1419,17 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
Label miss;
Label index_out_of_range;
+ GenerateNameCheck(name, &miss);
+
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax);
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
Register receiver = eax;
- Register index = ecx;
+ Register index = edi;
Register scratch1 = ebx;
Register scratch2 = edx;
Register result = eax;
@@ -1433,10 +1461,8 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
__ bind(&miss);
// Restore function name in ecx.
- __ Set(ecx, Immediate(Handle<String>(name)));
- Handle<Code> ic = ComputeCallMiss(argc);
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1469,6 +1495,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
Label miss_in_smi_check;
+ GenerateNameCheck(name, &miss_in_smi_check);
+
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1520,14 +1548,11 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ jmp(&miss);
} else {
// Check that the object is a string or a symbol.
- __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- __ cmp(eax, FIRST_NONSTRING_TYPE);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
__ j(above_equal, &miss, not_taken);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
}
@@ -1546,9 +1571,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- eax);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
}
@@ -1568,9 +1592,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- eax);
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, name, &miss);
}
@@ -1593,8 +1616,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
FreeSpaceForFastApiCall(masm(), eax);
}
__ bind(&miss_in_smi_check);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -1613,6 +1635,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -1655,8 +1679,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(argc);
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@@ -1677,6 +1700,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
Label miss;
+ GenerateNameCheck(name, &miss);
+
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -1739,8 +1764,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
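
GenerateNameCheck above exists because keyed call stubs, unlike plain call stubs, are entered with the property name in ecx at run time, so a stub compiled for one name can be reached with another. A small model of the guard (hypothetical types, not V8 API):

#include <string>

enum class SketchStubKind { kCallIC, kKeyedCallIC };  // stand-in for Code::Kind

// A CALL_IC stub is selected by the (static) property name, so no check is
// needed; a KEYED_CALL_IC stub must compare the run-time name against the
// name it was compiled for and branch to miss on a mismatch.
bool StubNameMatches(SketchStubKind kind, const std::string& compiled_for,
                     const std::string& runtime_name) {
  if (kind != SketchStubKind::kKeyedCallIC) return true;
  return runtime_name == compiled_for;  // not_equal -> GenerateMissBranch()
}
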
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index e22df6ec28..36774da0dc 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -1119,6 +1119,24 @@ Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
}
+Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
+ int arg_count,
+ int loop_nesting) {
+ // Function name, arguments, and receiver are on top of the frame.
+ // The IC expects the name in ecx and the rest on the stack and
+ // drops them all.
+ InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = cgen()->ComputeKeyedCallInitialize(arg_count, in_loop);
+ // Spill args, receiver, and function. The call will drop args and
+ // receiver.
+ Result name = Pop();
+ PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
+ name.ToRegister(ecx);
+ name.Unuse();
+ return RawCallCodeObject(ic, mode);
+}
+
+
Result VirtualFrame::CallConstructor(int arg_count) {
// Arguments, receiver, and function are on top of the frame. The
// IC expects arg count in eax, function in edi, and the arguments
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index 48d0fa2471..e00626b7d4 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -360,6 +360,9 @@ class VirtualFrame: public ZoneObject {
// include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+ // Call keyed call IC. Same calling convention as CallCallIC.
+ Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+
// Allocate and call JS function as constructor. Arguments,
// receiver (global object), and function are found on top of the
// frame. Function is not dropped. The argument count does not