Diffstat (limited to 'deps/v8/src/ia32')
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc              |  45
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h               |   4
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc               |   8
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc             | 196
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h              |  11
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc                | 351
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc                  |   2
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc            |  23
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc                 |  28
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc           | 373
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc                     |  15
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc        | 512
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h         |  34
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc                | 678
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h                 | 859
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc        |  98
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h         |  23
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc |   6
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc             |   1
19 files changed, 1855 insertions(+), 1412 deletions(-)
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index c173a3dc5e..552d7b5eea 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2465,6 +2465,17 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) {
}
+void Assembler::por(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xEB);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2489,6 +2500,40 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) {
}
+void Assembler::psllq(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xF3);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psrlq(XMMRegister reg, int8_t shift) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x73);
+ emit_sse_operand(edx, reg); // edx == 2
+ EMIT(shift);
+}
+
+
+void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xD3);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 11acb56110..20446b0085 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -919,9 +919,13 @@ class Assembler : public Malloced {
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
+ void por(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
void psllq(XMMRegister reg, int8_t shift);
+ void psllq(XMMRegister dst, XMMRegister src);
+ void psrlq(XMMRegister reg, int8_t shift);
+ void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 918f346d89..0a3e093056 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -399,7 +399,7 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Clear the context before we push it when entering the JS frame.
- __ xor_(esi, Operand(esi)); // clear esi
+ __ Set(esi, Immediate(0));
// Enter an internal frame.
__ EnterInternalFrame();
@@ -421,7 +421,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
Label loop, entry;
- __ xor_(ecx, Operand(ecx)); // clear ecx
+ __ Set(ecx, Immediate(0));
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
@@ -644,7 +644,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&non_function);
__ mov(Operand(esp, eax, times_4, 0), edi);
// Clear edi to indicate a non-function being called.
- __ xor_(edi, Operand(edi));
+ __ Set(edi, Immediate(0));
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -665,7 +665,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label function;
__ test(edi, Operand(edi));
__ j(not_zero, &function, taken);
- __ xor_(ebx, Operand(ebx));
+ __ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index a371c96393..72213dc817 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -104,7 +104,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
- __ xor_(ebx, Operand(ebx)); // Set to NULL.
+ __ Set(ebx, Immediate(0)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
__ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
__ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
@@ -1772,7 +1772,6 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
}
-
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
Label call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::STRING);
@@ -2016,8 +2015,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER ||
- operands_type_ == TRBinaryOpIC::INT32);
+ ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
// Floating point case.
switch (op_) {
@@ -4303,7 +4301,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// that contains the exponent and high bit of the mantissa.
STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
__ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(eax, Operand(eax));
+ __ Set(eax, Immediate(0));
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost
// bits.
__ add(edx, Operand(edx));
@@ -4433,7 +4431,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ j(below, &below_label, not_taken);
__ j(above, &above_label, not_taken);
- __ xor_(eax, Operand(eax));
+ __ Set(eax, Immediate(0));
__ ret(0);
__ bind(&below_label);
@@ -4646,7 +4644,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of
// a JS entry frame.
- __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
+ __ Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
NearLabel skip;
__ cmp(ebp, 0);
__ j(equal, &skip, not_taken);
@@ -4799,7 +4797,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
}
// Clear the context pointer.
- __ xor_(esi, Operand(esi));
+ __ Set(esi, Immediate(0));
// Restore fp from handler and discard handler state.
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
@@ -4973,7 +4971,26 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
+// Generate stub code for instanceof.
+// This code can patch a call site inlined cache of the instance of check,
+// which looks like this.
+//
+// 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
+// 75 0a jne <some near label>
+// b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
+//
+// If call site patching is requested the stack will have the delta from the
+// return address to the cmp instruction just below the return address. This
+// also means that call site patching can only take place with arguments in
+// registers. TOS looks like this when call site patching is requested
+//
+// esp[0] : return address
+// esp[4] : delta from return address to cmp instruction
+//
void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Call site inlining and patching implies arguments in registers.
+ ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+
// Fixed register usage throughout the stub.
Register object = eax; // Object (lhs).
Register map = ebx; // Map of the object.
@@ -4981,9 +4998,22 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Register prototype = edi; // Prototype of the function.
Register scratch = ecx;
+ // Constants describing the call site code to patch.
+ static const int kDeltaToCmpImmediate = 2;
+ static const int kDeltaToMov = 8;
+ static const int kDeltaToMovImmediate = 9;
+ static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
+ static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
+ static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+
+ ExternalReference roots_address = ExternalReference::roots_address();
+
+ ASSERT_EQ(object.code(), InstanceofStub::left().code());
+ ASSERT_EQ(function.code(), InstanceofStub::right().code());
+
// Get the object and function - they are always both needed.
Label slow, not_js_object;
- if (!args_in_registers()) {
+ if (!HasArgsInRegisters()) {
__ mov(object, Operand(esp, 2 * kPointerSize));
__ mov(function, Operand(esp, 1 * kPointerSize));
}
@@ -4993,22 +5023,26 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(zero, &not_js_object, not_taken);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
- // Look up the function and the map in the instanceof cache.
- NearLabel miss;
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(function,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(map, Operand::StaticArray(scratch, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(scratch, times_pointer_size, roots_address));
- __ IncrementCounter(&Counters::instance_of_cache, 1);
- __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ // Look up the function and the map in the instanceof cache.
+ NearLabel miss;
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ cmp(function,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ cmp(map, Operand::StaticArray(
+ scratch, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(eax, Operand::StaticArray(
+ scratch, times_pointer_size, roots_address));
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+ __ bind(&miss);
+ }
- __ bind(&miss);
// Get the prototype of the function.
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
@@ -5017,13 +5051,29 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(zero, &slow, not_taken);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
- // Update the golbal instanceof cache with the current map and function. The
- // cached answer will be set when it is known.
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
function);
+ } else {
+ // The constants for the code patching are based on no push instructions
+ // at the call site.
+ ASSERT(HasArgsInRegisters());
+ // Get return address and delta to inlined map check.
+ __ mov(scratch, Operand(esp, 0 * kPointerSize));
+ __ sub(scratch, Operand(esp, 1 * kPointerSize));
+ if (FLAG_debug_code) {
+ __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
+ __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
+ }
+ __ mov(Operand(scratch, kDeltaToCmpImmediate), map);
+ }
// Loop through the prototype chain of the object looking for the function
// prototype.
@@ -5039,18 +5089,48 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ jmp(&loop);
__ bind(&is_instance);
- __ IncrementCounter(&Counters::instance_of_stub_true, 1);
- __ Set(eax, Immediate(0));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
- __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+ if (!HasCallSiteInlineCheck()) {
+ __ Set(eax, Immediate(0));
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(scratch,
+ times_pointer_size, roots_address), eax);
+ } else {
+ // Get return address and delta to inlined map check.
+ __ mov(eax, Factory::true_value());
+ __ mov(scratch, Operand(esp, 0 * kPointerSize));
+ __ sub(scratch, Operand(esp, 1 * kPointerSize));
+ if (FLAG_debug_code) {
+ __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ }
+ __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
+ if (!ReturnTrueFalseObject()) {
+ __ Set(eax, Immediate(0));
+ }
+ }
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&is_not_instance);
- __ IncrementCounter(&Counters::instance_of_stub_false, 1);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
- __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+ if (!HasCallSiteInlineCheck()) {
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(
+ scratch, times_pointer_size, roots_address), eax);
+ } else {
+ // Get return address and delta to inlined map check.
+ __ mov(eax, Factory::false_value());
+ __ mov(scratch, Operand(esp, 0 * kPointerSize));
+ __ sub(scratch, Operand(esp, 1 * kPointerSize));
+ if (FLAG_debug_code) {
+ __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ }
+ __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
+ if (!ReturnTrueFalseObject()) {
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ }
+ }
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
Label object_not_null, object_not_null_or_smi;
__ bind(&not_js_object);
@@ -5064,39 +5144,61 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Null is not instance of anything.
__ cmp(object, Factory::null_value());
__ j(not_equal, &object_not_null);
- __ IncrementCounter(&Counters::instance_of_stub_false_null, 1);
__ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null);
// Smi values is not instance of anything.
__ test(object, Immediate(kSmiTagMask));
__ j(not_zero, &object_not_null_or_smi, not_taken);
__ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null_or_smi);
// String values is not instance of anything.
Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
__ j(NegateCondition(is_string), &slow);
- __ IncrementCounter(&Counters::instance_of_stub_false_string, 1);
__ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
- if (args_in_registers()) {
- // Push arguments below return address.
- __ pop(scratch);
+ if (!ReturnTrueFalseObject()) {
+ // Tail call the builtin which returns 0 or 1.
+ if (HasArgsInRegisters()) {
+ // Push arguments below return address.
+ __ pop(scratch);
+ __ push(object);
+ __ push(function);
+ __ push(scratch);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+ } else {
+ // Call the builtin and convert 0/1 to true/false.
+ __ EnterInternalFrame();
__ push(object);
__ push(function);
- __ push(scratch);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ NearLabel true_value, done;
+ __ test(eax, Operand(eax));
+ __ j(zero, &true_value);
+ __ mov(eax, Factory::false_value());
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ mov(eax, Factory::true_value());
+ __ bind(&done);
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
}
- __ IncrementCounter(&Counters::instance_of_slow, 1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
+Register InstanceofStub::left() { return eax; }
+
+
+Register InstanceofStub::right() { return edx; }
+
+
int CompareStub::MinorKey() {
// Encode the three parameters in a unique 16 bit value. To avoid duplicate
// stubs the never NaN NaN condition is only taken into account if the
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index f66a8c7e45..4a56d0d143 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -250,13 +250,6 @@ class TypeRecordingBinaryOpStub: public CodeStub {
result_type_(result_type),
name_(NULL) { }
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
private:
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
@@ -321,10 +314,6 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
-
virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 2f14e82e14..1ecfd39ca1 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -745,10 +745,10 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
Comment cmnt(masm_, "[ store arguments object");
if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the hole value
+ // When using lazy arguments allocation, we store the arguments marker value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
- frame_->Push(Factory::the_hole_value());
+ frame_->Push(Factory::arguments_marker());
} else {
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
frame_->PushFunction();
@@ -773,9 +773,9 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
if (probe.is_constant()) {
// We have to skip updating the arguments object if it has
// been assigned a proper value.
- skip_arguments = !probe.handle()->IsTheHole();
+ skip_arguments = !probe.handle()->IsArgumentsMarker();
} else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
probe.Unuse();
done.Branch(not_equal);
}
@@ -3294,9 +3294,9 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
Label slow, done;
bool try_lazy = true;
if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
+ try_lazy = probe.handle()->IsArgumentsMarker();
} else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
probe.Unuse();
__ j(not_equal, &slow);
}
@@ -5068,7 +5068,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// object has been lazily loaded yet.
Result result = frame()->Pop();
if (result.is_constant()) {
- if (result.handle()->IsTheHole()) {
+ if (result.handle()->IsArgumentsMarker()) {
result = StoreArgumentsObject(false);
}
frame()->Push(&result);
@@ -5079,7 +5079,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// indicates that we haven't loaded the arguments object yet, we
// need to do it now.
JumpTarget exit;
- __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(result.reg()), Immediate(Factory::arguments_marker()));
frame()->Push(&result);
exit.Branch(not_equal);
@@ -6649,38 +6649,41 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop, loop_condition,
+ loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+
ASSERT(args->length() == 2);
+ // We will leave the separator on the stack until the end of the function.
Load(args->at(1));
+ // Load this to eax (= array)
Load(args->at(0));
Result array_result = frame_->Pop();
array_result.ToRegister(eax);
frame_->SpillAll();
- Label bailout;
- Label done;
// All aliases of the same register have disjoint lifetimes.
Register array = eax;
- Register result_pos = no_reg;
+ Register elements = no_reg; // Will be eax.
- Register index = edi;
+ Register index = edx;
- Register current_string_length = ecx; // Will be ecx when live.
+ Register string_length = ecx;
- Register current_string = edx;
+ Register string = esi;
Register scratch = ebx;
- Register scratch_2 = esi;
- Register new_padding_chars = scratch_2;
-
- Operand separator = Operand(esp, 4 * kPointerSize); // Already pushed.
- Operand elements = Operand(esp, 3 * kPointerSize);
- Operand result = Operand(esp, 2 * kPointerSize);
- Operand padding_chars = Operand(esp, 1 * kPointerSize);
- Operand array_length = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(4 * kPointerSize));
+ Register array_length = edi;
+ Register result_pos = no_reg; // Will be edi.
- // Check that eax is a JSArray
+ // Separator operand is already pushed.
+ Operand separator_operand = Operand(esp, 2 * kPointerSize);
+ Operand result_operand = Operand(esp, 1 * kPointerSize);
+ Operand array_length_operand = Operand(esp, 0);
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ cld();
+ // Check that the array is a JSArray
__ test(array, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
@@ -6691,140 +6694,226 @@ void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
1 << Map::kHasFastElements);
__ j(zero, &bailout);
- // If the array is empty, return the empty string.
- __ mov(scratch, FieldOperand(array, JSArray::kLengthOffset));
- __ sar(scratch, 1);
- Label non_trivial;
- __ j(not_zero, &non_trivial);
- __ mov(result, Factory::empty_string());
+ // If the array has length zero, return the empty string.
+ __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ sar(array_length, 1);
+ __ j(not_zero, &non_trivial_array);
+ __ mov(result_operand, Factory::empty_string());
__ jmp(&done);
- __ bind(&non_trivial);
- __ mov(array_length, scratch);
-
- __ mov(scratch, FieldOperand(array, JSArray::kElementsOffset));
- __ mov(elements, scratch);
+ // Save the array length.
+ __ bind(&non_trivial_array);
+ __ mov(array_length_operand, array_length);
+ // Save the FixedArray containing array's elements.
// End of array's live range.
- result_pos = array;
+ elements = array;
+ __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
- // Check that the separator is a flat ascii string.
- __ mov(current_string, separator);
- __ test(current_string, Immediate(kSmiTagMask));
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ Set(index, Immediate(0));
+ __ Set(string_length, Immediate(0));
+ // Loop condition: while (index < length).
+ // Live loop registers: index, array_length, string,
+ // scratch, string_length, elements.
+ __ jmp(&loop_condition);
+ __ bind(&loop);
+ __ cmp(index, Operand(array_length));
+ __ j(greater_equal, &done);
+
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
- // If the separator is the empty string, replace it with NULL.
- // The test for NULL is quicker than the empty string test, in a loop.
- __ cmp(FieldOperand(current_string, SeqAsciiString::kLengthOffset),
- Immediate(0));
- Label separator_checked;
- __ j(not_zero, &separator_checked);
- __ mov(separator, Immediate(0));
- __ bind(&separator_checked);
-
- // Check that elements[0] is a flat ascii string, and copy it in new space.
- __ mov(scratch, elements);
- __ mov(current_string, FieldOperand(scratch, FixedArray::kHeaderSize));
- __ test(current_string, Immediate(kSmiTagMask));
+ __ add(string_length,
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
+ __ j(overflow, &bailout);
+ __ add(Operand(index), Immediate(1));
+ __ bind(&loop_condition);
+ __ cmp(index, Operand(array_length));
+ __ j(less, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, 1);
+ __ j(not_equal, &not_size_one_array);
+ __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ mov(result_operand, scratch);
+ __ jmp(&done);
+
+ __ bind(&not_size_one_array);
+
+ // End of array_length live range.
+ result_pos = array_length;
+ array_length = no_reg;
+
+ // Live registers:
+ // string_length: Sum of string lengths, as a smi.
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ mov(string, separator_operand);
+ __ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
- // Allocate space to copy it. Round up the size to the alignment granularity.
- __ mov(current_string_length,
- FieldOperand(current_string, String::kLengthOffset));
- __ shr(current_string_length, 1);
-
+ // Add (separator length times array_length) - separator length
+ // to string_length.
+ __ mov(scratch, separator_operand);
+ __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
+ __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
+ __ imul(scratch, array_length_operand);
+ __ j(overflow, &bailout);
+ __ add(string_length, Operand(scratch));
+ __ j(overflow, &bailout);
+
+ __ shr(string_length, 1);
// Live registers and stack values:
- // current_string_length: length of elements[0].
-
- // New string result in new space = elements[0]
- __ AllocateAsciiString(result_pos, current_string_length, scratch_2,
- index, no_reg, &bailout);
- __ mov(result, result_pos);
-
- // Adjust current_string_length to include padding bytes at end of string.
- // Keep track of the number of padding bytes.
- __ mov(new_padding_chars, current_string_length);
- __ add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
- __ and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
- __ sub(new_padding_chars, Operand(current_string_length));
- __ neg(new_padding_chars);
- __ mov(padding_chars, new_padding_chars);
-
- Label copy_loop_1_done;
- Label copy_loop_1;
- __ test(current_string_length, Operand(current_string_length));
- __ j(zero, &copy_loop_1_done);
- __ bind(&copy_loop_1);
- __ sub(Operand(current_string_length), Immediate(kPointerSize));
- __ mov(scratch, FieldOperand(current_string, current_string_length,
- times_1, SeqAsciiString::kHeaderSize));
- __ mov(FieldOperand(result_pos, current_string_length,
- times_1, SeqAsciiString::kHeaderSize),
- scratch);
- __ j(not_zero, &copy_loop_1);
- __ bind(&copy_loop_1_done);
-
- __ mov(index, Immediate(1));
+ // string_length
+ // elements
+ __ AllocateAsciiString(result_pos, string_length, scratch,
+ index, string, &bailout);
+ __ mov(result_operand, result_pos);
+ __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+
+
+ __ mov(string, separator_operand);
+ __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ j(equal, &one_char_separator);
+ __ j(greater, &long_separator);
+
+
+ // Empty separator case
+ __ mov(index, Immediate(0));
+ __ jmp(&loop_1_condition);
// Loop condition: while (index < length).
- Label loop;
- __ bind(&loop);
- __ cmp(index, array_length);
- __ j(greater_equal, &done);
+ __ bind(&loop_1);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // elements: the FixedArray of strings we are joining.
+
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(Operand(index), Immediate(1));
+ __ bind(&loop_1_condition);
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_1); // End while (index < length).
+ __ jmp(&done);
- // If the separator is the empty string, signalled by NULL, skip it.
- Label separator_done;
- __ mov(current_string, separator);
- __ test(current_string, Operand(current_string));
- __ j(zero, &separator_done);
-
- // Append separator to result. It is known to be a flat ascii string.
- __ AppendStringToTopOfNewSpace(current_string, current_string_length,
- result_pos, scratch, scratch_2, result,
- padding_chars, &bailout);
- __ bind(&separator_done);
-
- // Add next element of array to the end of the result.
- // Get current_string = array[index].
- __ mov(scratch, elements);
- __ mov(current_string, FieldOperand(scratch, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- // If current != flat ascii string drop result, return undefined.
- __ test(current_string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- // Append current to the result.
- __ AppendStringToTopOfNewSpace(current_string, current_string_length,
- result_pos, scratch, scratch_2, result,
- padding_chars, &bailout);
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its ascii character value.
+ __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ mov_b(separator_operand, scratch);
+
+ __ Set(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ jmp(&loop_2_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_2);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator character to the result.
+ __ mov_b(scratch, separator_operand);
+ __ mov_b(Operand(result_pos, 0), scratch);
+ __ inc(result_pos);
+
+ __ bind(&loop_2_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
- __ jmp(&loop); // End while (index < length).
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_2); // End while (index < length).
+ __ jmp(&done);
+
+
+ // Long separator case (separator is more than one character).
+ __ bind(&long_separator);
+
+ __ Set(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ jmp(&loop_3_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_3);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator to the result.
+ __ mov(string, separator_operand);
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+
+ __ bind(&loop_3_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(Operand(index), Immediate(1));
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_3); // End while (index < length).
+ __ jmp(&done);
+
__ bind(&bailout);
- __ mov(result, Factory::undefined_value());
+ __ mov(result_operand, Factory::undefined_value());
__ bind(&done);
- __ mov(eax, result);
+ __ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(4 * kPointerSize));
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
frame_->Drop(1);
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index ee9456564c..678cc93115 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -125,7 +125,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ Set(eax, Immediate(0)); // no arguments
+ __ Set(eax, Immediate(0)); // No arguments.
__ mov(ebx, Immediate(ExternalReference::debug_break()));
CEntryStub ceb(1);
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index d95df3e7ea..3050c5674f 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -56,8 +58,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
- int deoptimization_index = table.GetDeoptimizationIndex(i);
- int gap_code_size = table.GetGapCodeSize(i);
+ SafepointEntry safepoint_entry = table.GetEntry(i);
+ int deoptimization_index = safepoint_entry.deoptimization_index();
+ int gap_code_size = safepoint_entry.gap_code_size();
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
unsigned instructions = pc_offset - last_pc_offset;
@@ -105,23 +108,25 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
Code* replacement_code) {
- // The stack check code matches the pattern (on ia32, for example):
+ // The stack check code matches the pattern:
//
// cmp esp, <limit>
// jae ok
// call <stack guard>
+ // test eax, <loop nesting depth>
// ok: ...
//
- // We will patch the code to:
+ // We will patch away the branch so the code is:
//
// cmp esp, <limit> ;; Not changed
// nop
// nop
// call <on-stack replacment>
+ // test eax, <loop nesting depth>
// ok:
Address call_target_address = rinfo->pc();
ASSERT(*(call_target_address - 3) == 0x73 && // jae
- *(call_target_address - 2) == 0x05 && // offset
+ *(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x90; // nop
*(call_target_address - 2) = 0x90; // nop
@@ -130,12 +135,14 @@ void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+ // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
+ // restore the conditional branch.
Address call_target_address = rinfo->pc();
ASSERT(*(call_target_address - 3) == 0x90 && // nop
*(call_target_address - 2) == 0x90 && // nop
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
- *(call_target_address - 2) = 0x05; // offset
+ *(call_target_address - 2) = 0x07; // offset
rinfo->set_target_address(check_code->entry());
}
@@ -613,3 +620,5 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index dfbcbb76d5..4028a93421 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1182,15 +1182,33 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0xF3) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("psllq %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x73) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("psllq %s,%d",
+ ASSERT(regop == esi || regop == edx);
+ AppendToBuffer("%s %s,%d",
+ (regop == esi) ? "psllq" : "psrlq",
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0xD3) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("psrlq %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x7F) {
AppendToBuffer("movdqa ");
data++;
@@ -1228,6 +1246,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0xEB) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("por %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 13a11777ab..2622b5e5b4 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -264,16 +264,24 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
__ j(above_equal, &ok, taken);
StackCheckStub stub;
__ CallStub(&stub);
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
- // Loop stack checks can be patched to perform on-stack
- // replacement. In order to decide whether or not to perform OSR we
- // embed the loop depth in a test instruction after the call so we
- // can extract it from the OSR builtin.
+
+ // Loop stack checks can be patched to perform on-stack replacement. In
+ // order to decide whether or not to perform OSR we embed the loop depth
+ // in a test instruction after the call so we can extract it from the OSR
+ // builtin.
ASSERT(loop_depth() > 0);
__ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
+
+ __ bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
@@ -379,7 +387,7 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ mov(result_register(), lit);
+ __ Set(result_register(), Immediate(lit));
}
@@ -1497,7 +1505,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
if (property->is_arguments_access()) {
VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+ MemOperand slot_operand =
+ EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
+ __ push(slot_operand);
__ mov(eax, Immediate(property->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(property->obj());
@@ -1508,7 +1518,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
} else {
if (property->is_arguments_access()) {
VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+ MemOperand slot_operand =
+ EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
+ __ push(slot_operand);
__ push(Immediate(property->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(property->obj());
@@ -3339,39 +3351,37 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- Label bailout;
- Label done;
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop, loop_condition,
+ loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
ASSERT(args->length() == 2);
// We will leave the separator on the stack until the end of the function.
VisitForStackValue(args->at(1));
// Load this to eax (= array)
VisitForAccumulatorValue(args->at(0));
-
// All aliases of the same register have disjoint lifetimes.
Register array = eax;
- Register result_pos = no_reg;
+ Register elements = no_reg; // Will be eax.
- Register index = edi;
+ Register index = edx;
- Register current_string_length = ecx; // Will be ecx when live.
+ Register string_length = ecx;
- Register current_string = edx;
+ Register string = esi;
Register scratch = ebx;
- Register scratch_2 = esi;
- Register new_padding_chars = scratch_2;
-
- Operand separator = Operand(esp, 4 * kPointerSize); // Already pushed.
- Operand elements = Operand(esp, 3 * kPointerSize);
- Operand result = Operand(esp, 2 * kPointerSize);
- Operand padding_chars = Operand(esp, 1 * kPointerSize);
- Operand array_length = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(4 * kPointerSize));
+ Register array_length = edi;
+ Register result_pos = no_reg; // Will be edi.
-
- // Check that eax is a JSArray
+ // Separator operand is already pushed.
+ Operand separator_operand = Operand(esp, 2 * kPointerSize);
+ Operand result_operand = Operand(esp, 1 * kPointerSize);
+ Operand array_length_operand = Operand(esp, 0);
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ cld();
+ // Check that the array is a JSArray
__ test(array, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
@@ -3382,140 +3392,226 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
1 << Map::kHasFastElements);
__ j(zero, &bailout);
- // If the array is empty, return the empty string.
- __ mov(scratch, FieldOperand(array, JSArray::kLengthOffset));
- __ sar(scratch, 1);
- Label non_trivial;
- __ j(not_zero, &non_trivial);
- __ mov(result, Factory::empty_string());
+ // If the array has length zero, return the empty string.
+ __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ sar(array_length, 1);
+ __ j(not_zero, &non_trivial_array);
+ __ mov(result_operand, Factory::empty_string());
__ jmp(&done);
- __ bind(&non_trivial);
- __ mov(array_length, scratch);
-
- __ mov(scratch, FieldOperand(array, JSArray::kElementsOffset));
- __ mov(elements, scratch);
+ // Save the array length.
+ __ bind(&non_trivial_array);
+ __ mov(array_length_operand, array_length);
+ // Save the FixedArray containing array's elements.
// End of array's live range.
- result_pos = array;
+ elements = array;
+ __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
- // Check that the separator is a flat ascii string.
- __ mov(current_string, separator);
- __ test(current_string, Immediate(kSmiTagMask));
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ Set(index, Immediate(0));
+ __ Set(string_length, Immediate(0));
+ // Loop condition: while (index < length).
+ // Live loop registers: index, array_length, string,
+ // scratch, string_length, elements.
+ __ jmp(&loop_condition);
+ __ bind(&loop);
+ __ cmp(index, Operand(array_length));
+ __ j(greater_equal, &done);
+
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
- // If the separator is the empty string, replace it with NULL.
- // The test for NULL is quicker than the empty string test, in a loop.
- __ cmp(FieldOperand(current_string, SeqAsciiString::kLengthOffset),
- Immediate(0));
- Label separator_checked;
- __ j(not_zero, &separator_checked);
- __ mov(separator, Immediate(0));
- __ bind(&separator_checked);
-
- // Check that elements[0] is a flat ascii string, and copy it in new space.
- __ mov(scratch, elements);
- __ mov(current_string, FieldOperand(scratch, FixedArray::kHeaderSize));
- __ test(current_string, Immediate(kSmiTagMask));
+ __ add(string_length,
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
+ __ j(overflow, &bailout);
+ __ add(Operand(index), Immediate(1));
+ __ bind(&loop_condition);
+ __ cmp(index, Operand(array_length));
+ __ j(less, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, 1);
+ __ j(not_equal, &not_size_one_array);
+ __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ mov(result_operand, scratch);
+ __ jmp(&done);
+
+ __ bind(&not_size_one_array);
+
+ // End of array_length live range.
+ result_pos = array_length;
+ array_length = no_reg;
+
+ // Live registers:
+ // string_length: Sum of string lengths, as a smi.
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ mov(string, separator_operand);
+ __ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
- // Allocate space to copy it. Round up the size to the alignment granularity.
- __ mov(current_string_length,
- FieldOperand(current_string, String::kLengthOffset));
- __ shr(current_string_length, 1);
-
+ // Add (separator length times array_length) - separator length
+ // to string_length.
+ __ mov(scratch, separator_operand);
+ __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
+ __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
+ __ imul(scratch, array_length_operand);
+ __ j(overflow, &bailout);
+ __ add(string_length, Operand(scratch));
+ __ j(overflow, &bailout);
+
+ __ shr(string_length, 1);
// Live registers and stack values:
- // current_string_length: length of elements[0].
-
- // New string result in new space = elements[0]
- __ AllocateAsciiString(result_pos, current_string_length, scratch_2,
- index, no_reg, &bailout);
- __ mov(result, result_pos);
-
- // Adjust current_string_length to include padding bytes at end of string.
- // Keep track of the number of padding bytes.
- __ mov(new_padding_chars, current_string_length);
- __ add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
- __ and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
- __ sub(new_padding_chars, Operand(current_string_length));
- __ neg(new_padding_chars);
- __ mov(padding_chars, new_padding_chars);
-
- Label copy_loop_1_done;
- Label copy_loop_1;
- __ test(current_string_length, Operand(current_string_length));
- __ j(zero, &copy_loop_1_done);
- __ bind(&copy_loop_1);
- __ sub(Operand(current_string_length), Immediate(kPointerSize));
- __ mov(scratch, FieldOperand(current_string, current_string_length,
- times_1, SeqAsciiString::kHeaderSize));
- __ mov(FieldOperand(result_pos, current_string_length,
- times_1, SeqAsciiString::kHeaderSize),
- scratch);
- __ j(not_zero, &copy_loop_1);
- __ bind(&copy_loop_1_done);
-
- __ mov(index, Immediate(1));
+ // string_length
+ // elements
+ __ AllocateAsciiString(result_pos, string_length, scratch,
+ index, string, &bailout);
+ __ mov(result_operand, result_pos);
+ __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+
+
+ __ mov(string, separator_operand);
+ __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ j(equal, &one_char_separator);
+ __ j(greater, &long_separator);
+
+
+ // Empty separator case
+ __ mov(index, Immediate(0));
+ __ jmp(&loop_1_condition);
// Loop condition: while (index < length).
- Label loop;
- __ bind(&loop);
- __ cmp(index, array_length);
- __ j(greater_equal, &done);
+ __ bind(&loop_1);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // elements: the FixedArray of strings we are joining.
+
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(Operand(index), Immediate(1));
+ __ bind(&loop_1_condition);
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_1); // End while (index < length).
+ __ jmp(&done);
+
- // If the separator is the empty string, signalled by NULL, skip it.
- Label separator_done;
- __ mov(current_string, separator);
- __ test(current_string, Operand(current_string));
- __ j(zero, &separator_done);
-
- // Append separator to result. It is known to be a flat ascii string.
- __ AppendStringToTopOfNewSpace(current_string, current_string_length,
- result_pos, scratch, scratch_2, result,
- padding_chars, &bailout);
- __ bind(&separator_done);
-
- // Add next element of array to the end of the result.
- // Get current_string = array[index].
- __ mov(scratch, elements);
- __ mov(current_string, FieldOperand(scratch, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- // If current != flat ascii string drop result, return undefined.
- __ test(current_string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- // Append current to the result.
- __ AppendStringToTopOfNewSpace(current_string, current_string_length,
- result_pos, scratch, scratch_2, result,
- padding_chars, &bailout);
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its ascii character value.
+ __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ mov_b(separator_operand, scratch);
+
+ __ Set(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ jmp(&loop_2_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_2);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator character to the result.
+ __ mov_b(scratch, separator_operand);
+ __ mov_b(Operand(result_pos, 0), scratch);
+ __ inc(result_pos);
+
+ __ bind(&loop_2_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(Operand(index), Immediate(1));
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_2); // End while (index < length).
+ __ jmp(&done);
+
+
+ // Long separator case (separator is more than one character).
+ __ bind(&long_separator);
+
+ __ Set(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator.
+ __ jmp(&loop_3_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_3);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator to the result.
+ __ mov(string, separator_operand);
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+
+ __ bind(&loop_3_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
- __ jmp(&loop); // End while (index < length).
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_3); // End while (index < length).
+ __ jmp(&done);
+
__ bind(&bailout);
- __ mov(result, Factory::undefined_value());
+ __ mov(result_operand, Factory::undefined_value());
__ bind(&done);
- __ mov(eax, result);
+ __ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(5 * kPointerSize));
+ __ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->Plug(eax);
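
For reference, the three copy loops above (empty separator, one-character separator, longer separator) implement the same dispatch as the following C++ sketch. The function and all names in it are illustrative only, not V8 code; the generated code additionally checks that every element is a flat ASCII string and allocates the result buffer up front.

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Illustrative sketch of the join strategy: one specialized loop per
// separator case, mirroring loop_1, loop_2 and loop_3 above.
std::string JoinAscii(const std::vector<std::string>& parts,
                      const std::string& separator) {
  std::size_t total = 0;
  for (const std::string& s : parts) total += s.size();
  if (!parts.empty()) total += separator.size() * (parts.size() - 1);

  std::string result;
  result.reserve(total);  // Mirrors the single up-front allocation.

  if (separator.empty()) {              // loop_1: no separator at all.
    for (const std::string& s : parts) result += s;
  } else if (separator.size() == 1) {   // loop_2: copy one byte per gap.
    for (std::size_t i = 0; i < parts.size(); ++i) {
      if (i > 0) result += separator[0];
      result += parts[i];
    }
  } else {                              // loop_3: copy the whole separator.
    for (std::size_t i = 0; i < parts.size(); ++i) {
      if (i > 0) result += separator;
      result += parts[i];
    }
  }
  return result;
}
```

Splitting the cases keeps the separator-length test out of the per-element loop, which is the point of generating three loops.
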
@@ -3739,7 +3835,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
if (prop->is_arguments_access()) {
VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+ MemOperand slot_operand =
+ EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
+ __ push(slot_operand);
__ mov(eax, Immediate(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
@@ -4042,7 +4140,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- __ IncrementCounter(&Counters::instance_of_full, 1);
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 9c9304d5a4..90bfd4b664 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -1199,7 +1199,7 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
- __ xor_(ecx, Operand(ecx));
+ __ Set(ecx, Immediate(0));
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
@@ -1219,9 +1219,6 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
@@ -1567,9 +1564,6 @@ void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -1795,10 +1789,6 @@ bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
}
-// Defined in ic.cc.
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -1982,9 +1972,6 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
}
-// Defined in ic.cc.
-Object* KeyedStoreIC_Miss(Arguments args);
-
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index d64f528e71..24ee1fefdb 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "stub-cache.h"
@@ -54,6 +58,157 @@ class SafepointGenerator : public PostCallGenerator {
};
+class LGapNode: public ZoneObject {
+ public:
+ explicit LGapNode(LOperand* operand)
+ : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+ LOperand* operand() const { return operand_; }
+ bool IsResolved() const { return !IsAssigned() || resolved_; }
+ void MarkResolved() {
+ ASSERT(!IsResolved());
+ resolved_ = true;
+ }
+ int visited_id() const { return visited_id_; }
+ void set_visited_id(int id) {
+ ASSERT(id > visited_id_);
+ visited_id_ = id;
+ }
+
+ bool IsAssigned() const { return assigned_from_.is_set(); }
+ LGapNode* assigned_from() const { return assigned_from_.get(); }
+ void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+ LOperand* operand_;
+ SetOncePointer<LGapNode> assigned_from_;
+ bool resolved_;
+ int visited_id_;
+};
+
+
+LGapResolver::LGapResolver()
+ : nodes_(32),
+ identified_cycles_(4),
+ result_(16),
+ next_visited_id_(0) {
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::Resolve(
+ const ZoneList<LMoveOperands>* moves,
+ LOperand* marker_operand) {
+ nodes_.Rewind(0);
+ identified_cycles_.Rewind(0);
+ result_.Rewind(0);
+ next_visited_id_ = 0;
+
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) RegisterMove(move);
+ }
+
+ for (int i = 0; i < identified_cycles_.length(); ++i) {
+ ResolveCycle(identified_cycles_[i], marker_operand);
+ }
+
+ int unresolved_nodes;
+ do {
+ unresolved_nodes = 0;
+ for (int j = 0; j < nodes_.length(); j++) {
+ LGapNode* node = nodes_[j];
+ if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+ AddResultMove(node->assigned_from(), node);
+ node->MarkResolved();
+ }
+ if (!node->IsResolved()) ++unresolved_nodes;
+ }
+ } while (unresolved_nodes > 0);
+ return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+ AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+ result_.Add(LMoveOperands(from, to));
+}
+
+
+void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
+ ZoneList<LOperand*> cycle_operands(8);
+ cycle_operands.Add(marker_operand);
+ LGapNode* cur = start;
+ do {
+ cur->MarkResolved();
+ cycle_operands.Add(cur->operand());
+ cur = cur->assigned_from();
+ } while (cur != start);
+ cycle_operands.Add(marker_operand);
+
+ for (int i = cycle_operands.length() - 1; i > 0; --i) {
+ LOperand* from = cycle_operands[i];
+ LOperand* to = cycle_operands[i - 1];
+ AddResultMove(from, to);
+ }
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+ ASSERT(a != b);
+ LGapNode* cur = a;
+ while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+ cur->set_visited_id(visited_id);
+ cur = cur->assigned_from();
+ }
+
+ return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+ ASSERT(a != b);
+ return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+ if (move.from()->IsConstantOperand()) {
+ // Constant moves should be last in the machine code. Therefore add them
+ // first to the result set.
+ AddResultMove(move.from(), move.to());
+ } else {
+ LGapNode* from = LookupNode(move.from());
+ LGapNode* to = LookupNode(move.to());
+ if (to->IsAssigned() && to->assigned_from() == from) {
+ move.Eliminate();
+ return;
+ }
+ ASSERT(!to->IsAssigned());
+ if (CanReach(from, to)) {
+ // This introduces a cycle. Save.
+ identified_cycles_.Add(from);
+ }
+ to->set_assigned_from(from);
+ }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+ for (int i = 0; i < nodes_.length(); ++i) {
+ if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+ }
+
+ // No node found => create a new one.
+ LGapNode* result = new LGapNode(operand);
+ nodes_.Add(result);
+ return result;
+}
+
+
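
As a rough illustration of what Resolve produces, the sketch below serializes a parallel move over integer locations and breaks cycles through a scratch slot, which is the role the marker operand plays above. It is deliberately simplified (no LOperands, no reverse emission order) and none of these names exist in V8; redundant moves (source equal to destination) are assumed to have been filtered out already, as Resolve does before registering moves.

```cpp
#include <map>
#include <utility>
#include <vector>

// Illustrative only: locations are plain ints, 'scratch' stands for the
// marker operand, and each destination is written by at most one move.
std::vector<std::pair<int, int>> Sequentialize(std::map<int, int> src_of,
                                               int scratch) {
  std::vector<std::pair<int, int>> out;  // (from, to), in emission order.
  while (!src_of.empty()) {
    bool progress = false;
    for (auto it = src_of.begin(); it != src_of.end();) {
      int dst = it->first;
      // A move is safe to emit once no pending move still reads from dst.
      bool dst_still_read = false;
      for (const auto& m : src_of) {
        if (m.second == dst) { dst_still_read = true; break; }
      }
      if (!dst_still_read) {
        out.emplace_back(it->second, dst);
        it = src_of.erase(it);
        progress = true;
      } else {
        ++it;
      }
    }
    if (!progress) {
      // Only cycles are left: break one by saving a source in the scratch
      // slot and redirecting its readers to the saved copy.
      int moved = src_of.begin()->second;
      out.emplace_back(moved, scratch);
      for (auto& m : src_of) {
        if (m.second == moved) m.second = scratch;
      }
    }
  }
  return out;
}
```

A swap such as {eax := ebx, ebx := eax} comes out as ebx -> scratch, eax -> ebx, scratch -> eax.
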
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -135,6 +290,17 @@ bool LCodeGen::GeneratePrologue() {
__ j(not_zero, &loop);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ // On windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the allocated area randomly
+ // accessible, we write to each page in turn (the value is irrelevant).
+ const int kPageSize = 4 * KB;
+ for (int offset = slots * kPointerSize - kPageSize;
+ offset > 0;
+ offset -= kPageSize) {
+ __ mov(Operand(esp, offset), eax);
+ }
+#endif
}
}
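
A minimal sketch of the page-touching idea behind the #ifdef _MSC_VER loop above, written as ordinary C++ over an already reserved region (the function and parameter names are made up):

```cpp
// Touch one byte in each page of a reserved area so that Windows commits the
// guard pages in order; the written value is irrelevant, only the access is.
void TouchReservedPages(volatile char* area, int size_in_bytes) {
  const int kPageSize = 4 * 1024;
  for (int offset = size_in_bytes - kPageSize; offset > 0; offset -= kPageSize) {
    area[offset] = 0;
  }
}
```
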
@@ -261,6 +427,45 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
}
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->values()->length();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->BeginFrame(environment->ast_id(), closure_id, height);
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ // spilled_registers_ and spilled_double_registers_ are either
+ // both NULL or both set.
+ if (environment->spilled_registers() != NULL && value != NULL) {
+ if (value->IsRegister() &&
+ environment->spilled_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(translation,
+ environment->spilled_registers()[value->index()],
+ environment->HasTaggedValueAt(i));
+ } else if (
+ value->IsDoubleRegister() &&
+ environment->spilled_double_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(
+ translation,
+ environment->spilled_double_registers()[value->index()],
+ false);
+ }
+ }
+
+ AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ }
+}
+
+
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged) {
@@ -385,7 +590,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
++frame_count;
}
Translation translation(&translations_, frame_count);
- environment->WriteTranslation(this, &translation);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
environment->Register(deoptimization_index, translation.index());
deoptimizations_.Add(environment);
@@ -564,8 +769,8 @@ void LCodeGen::DoParallelMove(LParallelMove* move) {
Register cpu_scratch = esi;
bool destroys_cpu_scratch = false;
- LGapResolver resolver(move->move_operands(), &marker_operand);
- const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
+ const ZoneList<LMoveOperands>* moves =
+ resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.from();
@@ -940,7 +1145,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
ASSERT(instr->result()->IsRegister());
- __ mov(ToRegister(instr->result()), instr->value());
+ __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
@@ -973,27 +1178,21 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
ASSERT(instr->result()->IsRegister());
- __ mov(ToRegister(instr->result()), Immediate(instr->value()));
+ __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
-void LCodeGen::DoArrayLength(LArrayLength* instr) {
+void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->input());
+ __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
+}
- if (instr->hydrogen()->value()->IsLoadElements()) {
- // We load the length directly from the elements array.
- Register elements = ToRegister(instr->input());
- __ mov(result, FieldOperand(elements, FixedArray::kLengthOffset));
- } else {
- // Check that the receiver really is an array.
- Register array = ToRegister(instr->input());
- Register temporary = ToRegister(instr->temporary());
- __ CmpObjectType(array, JS_ARRAY_TYPE, temporary);
- DeoptimizeIf(not_equal, instr->environment());
- // Load length directly from the array.
- __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
- }
+void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->input());
+ __ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
}
@@ -1700,7 +1899,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- // Object and function are in fixed registers eax and edx.
+ // Object and function are in fixed registers defined by the stub.
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -1726,6 +1925,107 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
}
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+ }
+
+ Label* map_check() { return &map_check_; }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label done, false_result;
+ Register object = ToRegister(instr->input());
+ Register temp = ToRegister(instr->temp());
+
+ // A Smi is not an instance of anything.
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(zero, &false_result, not_taken);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ NearLabel cache_miss;
+ Register map = ToRegister(instr->temp());
+ __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ __ cmp(map, Factory::the_hole_value()); // Patched to cached map.
+ __ j(not_equal, &cache_miss, not_taken);
+ __ mov(eax, Factory::the_hole_value()); // Patched to either true or false.
+ __ jmp(&done);
+
+ // The inlined call site cache did not match. Check null and string before
+ // calling the deferred code.
+ __ bind(&cache_miss);
+ // Null is not an instance of anything.
+ __ cmp(object, Factory::null_value());
+ __ j(equal, &false_result);
+
+ // String values are not instances of anything.
+ Condition is_string = masm_->IsObjectStringType(object, temp, temp);
+ __ j(is_string, &false_result);
+
+ // Go to the deferred code.
+ __ jmp(deferred->entry());
+
+ __ bind(&false_result);
+ __ mov(ToRegister(instr->result()), Factory::false_value());
+
+ // Here result has either true or false. The deferred code also produces a
+ // true or false object.
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ __ PushSafepointRegisters();
+
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ InstanceofStub stub(flags);
+
+ // Get the temp register reserved by the instruction. This needs to be edi
+ // because its slot in the pushed safepoint register block is used to
+ // communicate the offset to the location of the map check.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(temp.is(edi));
+ __ mov(InstanceofStub::right(), Immediate(instr->function()));
+ static const int kAdditionalDelta = 13;
+ int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ __ mov(temp, Immediate(delta));
+ __ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ASSERT_EQ(kAdditionalDelta,
+ masm_->SizeOfCodeGeneratedSince(&before_push_delta));
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Put the result value into the eax slot and restore all registers.
+ __ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax);
+
+ __ PopSafepointRegisters();
+}
+
+
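
The fast path above is a one-entry inline cache keyed on the receiver's map; the deferred stub call recomputes the answer and patches the cached map/result pair at the call site. A hedged C++ sketch of the caching idea follows (all names invented, not the V8 API):

```cpp
// One-entry instanceof cache per call site. In the generated code the two
// hole values stand in for cached_map and cached_result and are patched by
// the stub; here they are ordinary fields.
struct InstanceOfSite {
  const void* cached_map = nullptr;
  bool cached_result = false;
};

bool InstanceOfCached(InstanceOfSite* site,
                      const void* receiver_map,
                      bool (*slow_path)(const void* receiver_map)) {
  if (receiver_map == site->cached_map) {
    return site->cached_result;           // Cache hit: no stub call.
  }
  bool result = slow_path(receiver_map);  // InstanceofStub in the real code.
  site->cached_map = receiver_map;        // "Patch" the call site.
  site->cached_result = result;
  return result;
}
```

In the code above, the Smi check runs before the cache is consulted, while the null and string checks only run on a cache miss, before the deferred stub call.
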
static Condition ComputeCompareCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
@@ -1815,6 +2115,14 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
}
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ // TODO(antonm): load a context with a separate instruction.
+ Register result = ToRegister(instr->result());
+ __ LoadContext(result, instr->context_chain_length());
+ __ mov(result, ContextOperand(result, instr->slot_index()));
+}
+
+
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->input());
Register result = ToRegister(instr->result());
@@ -1837,6 +2145,48 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
}
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register function = ToRegister(instr->function());
+ Register temp = ToRegister(instr->temporary());
+ Register result = ToRegister(instr->result());
+
+ // Check that the function really is a function.
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ // Check whether the function has an instance prototype.
+ NearLabel non_instance;
+ __ test_b(FieldOperand(result, Map::kBitFieldOffset),
+ 1 << Map::kHasNonInstancePrototype);
+ __ j(not_zero, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ __ mov(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ cmp(Operand(result), Immediate(Factory::the_hole_value()));
+ DeoptimizeIf(equal, instr->environment());
+
+ // If the function does not have an initial map, we're done.
+ NearLabel done;
+ __ CmpObjectType(result, MAP_TYPE, temp);
+ __ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in the function's map.
+ __ bind(&non_instance);
+ __ mov(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ bind(&done);
+}
+
+
void LCodeGen::DoLoadElements(LLoadElements* instr) {
ASSERT(instr->result()->Equals(instr->input()));
Register reg = ToRegister(instr->input());
@@ -1863,6 +2213,8 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
__ sub(length, index);
DeoptimizeIf(below_equal, instr->environment());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; add one more.
__ mov(result, Operand(arguments, length, times_4, kPointerSize));
}
@@ -1870,32 +2222,15 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register elements = ToRegister(instr->elements());
Register key = ToRegister(instr->key());
- Register result;
- if (instr->load_result() != NULL) {
- result = ToRegister(instr->load_result());
- } else {
- result = ToRegister(instr->result());
- ASSERT(result.is(elements));
- }
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(elements));
// Load the result.
__ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
- Representation r = instr->hydrogen()->representation();
- if (r.IsInteger32()) {
- // Untag and check for smi.
- __ SmiUntag(result);
- DeoptimizeIf(carry, instr->environment());
- } else if (r.IsDouble()) {
- EmitNumberUntagD(result,
- ToDoubleRegister(instr->result()),
- instr->environment());
- } else {
- // Check for the hole value.
- ASSERT(r.IsTagged());
- __ cmp(result, Factory::the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
+ // Check for the hole value.
+ __ cmp(result, Factory::the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
}
@@ -1912,7 +2247,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
// Check for arguments adapter frame.
- Label done, adapted;
+ NearLabel done, adapted;
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
__ cmp(Operand(result),
@@ -1927,7 +2262,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ bind(&adapted);
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- // Done. Pointer to topmost argument is in result.
+ // Result is the frame pointer for the frame if not adapted, and for the real
+ // frame below the adaptor frame if adapted.
__ bind(&done);
}
@@ -1936,9 +2272,9 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Operand elem = ToOperand(instr->input());
Register result = ToRegister(instr->result());
- Label done;
+ NearLabel done;
- // No arguments adaptor frame. Number of arguments is fixed.
+ // If there is no arguments adaptor frame, the number of arguments is fixed.
__ cmp(ebp, elem);
__ mov(result, Immediate(scope()->num_parameters()));
__ j(equal, &done);
@@ -1949,7 +2285,7 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(result);
- // Done. Argument length is in result register.
+ // Argument length is in result register.
__ bind(&done);
}
@@ -2498,7 +2834,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
value);
}
- // Update the write barrier unless we're certain that we're storing a smi.
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register.
__ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
@@ -2849,9 +3184,60 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
} else {
- // This will bail out if the input was not in the int32 range (or,
- // unfortunately, if the input was 0x80000000).
- DeoptimizeIf(equal, instr->environment());
+ NearLabel done;
+ Register temp_reg = ToRegister(instr->temporary());
+ XMMRegister xmm_scratch = xmm0;
+
+ // If cvttsd2si succeeded, we're done. Otherwise, we attempt
+ // manual conversion.
+ __ j(not_equal, &done);
+
+ // Get high 32 bits of the input in result_reg and temp_reg.
+ __ pshufd(xmm_scratch, input_reg, 1);
+ __ movd(Operand(temp_reg), xmm_scratch);
+ __ mov(result_reg, temp_reg);
+
+ // Prepare negation mask in temp_reg.
+ __ sar(temp_reg, kBitsPerInt - 1);
+
+ // Extract the exponent from result_reg and subtract the adjusted
+ // bias from it. The adjustment is chosen so that when the
+ // difference is zero, the answer is in the low 32 bits of the
+ // input; otherwise a shift has to be performed.
+ __ shr(result_reg, HeapNumber::kExponentShift);
+ __ and_(result_reg,
+ HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
+ __ sub(Operand(result_reg),
+ Immediate(HeapNumber::kExponentBias +
+ HeapNumber::kExponentBits +
+ HeapNumber::kMantissaBits));
+ // Don't handle exponents bigger than kMantissaBits + kExponentBits (== 63)
+ // or special exponents (NaN and Infinity).
+ DeoptimizeIf(greater, instr->environment());
+
+ // Zero out the sign and the exponent in the input (by shifting
+ // it to the left) and restore the implicit mantissa bit,
+ // i.e. convert the input to unsigned int64 shifted left by
+ // kExponentBits.
+ ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
+ // Minus zero has the most significant bit set and the other
+ // bits cleared.
+ __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
+ __ psllq(input_reg, HeapNumber::kExponentBits);
+ __ por(input_reg, xmm_scratch);
+
+ // Get the amount to shift the input right in xmm_scratch.
+ __ neg(result_reg);
+ __ movd(xmm_scratch, Operand(result_reg));
+
+ // Shift the input right and extract low 32 bits.
+ __ psrlq(input_reg, xmm_scratch);
+ __ movd(Operand(result_reg), input_reg);
+
+ // Use the prepared mask in temp_reg to negate the result if necessary.
+ __ xor_(result_reg, Operand(temp_reg));
+ __ sub(result_reg, Operand(temp_reg));
+ __ bind(&done);
}
} else {
NearLabel done;
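
For reference, the bit manipulation in the slow path above can be written out in portable C++ as below. This is a sketch with invented names, not V8 code; where the real code deoptimizes on oversized or special exponents, the sketch simply returns 0.

```cpp
#include <cstdint>
#include <cstring>

// Truncate a double to its low 32 bits using only integer operations,
// mirroring the steps of the generated fallback code.
int32_t TruncateDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));

  // Sign mask: 0 for positive input, -1 for negative input.
  int32_t sign_mask = (bits >> 63) ? -1 : 0;

  // Right-shift amount: (bias + exponent bits + mantissa bits) minus the
  // biased exponent; negative means the value is too big or NaN/Infinity.
  int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);
  int shift = (1023 + 11 + 52) - biased_exponent;
  if (shift < 0) return 0;  // The real code deoptimizes here.

  // Drop sign and exponent, restore the implicit mantissa bit: the value is
  // now the significand shifted left by kExponentBits (11).
  uint64_t left_aligned = (bits << 11) | (1ULL << 63);

  // psrlq produces zero for shift counts >= 64; mimic that explicitly, since
  // the C++ shift operator is undefined for such counts.
  uint32_t low = (shift >= 64)
                     ? 0u
                     : static_cast<uint32_t>(left_aligned >> shift);

  // Negate via xor/sub with the prepared mask, as in the generated code.
  return (static_cast<int32_t>(low) ^ sign_mask) - sign_mask;
}
```

Values outside the int32 range come out as their low 32 bits, which is the truncation semantics the generated code relies on.
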
@@ -2891,9 +3277,6 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
InstanceType first = instr->hydrogen()->first();
InstanceType last = instr->hydrogen()->last();
- __ test(input, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
-
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(first));
@@ -2931,13 +3314,13 @@ void LCodeGen::DoCheckMap(LCheckMap* instr) {
}
-void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
- if (Heap::InNewSpace(*prototype)) {
+void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
+ if (Heap::InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(prototype);
+ Factory::NewJSGlobalPropertyCell(object);
__ mov(result, Operand::Cell(cell));
} else {
- __ mov(result, prototype);
+ __ mov(result, object);
}
}
@@ -2946,11 +3329,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->temp());
Handle<JSObject> holder = instr->holder();
- Handle<Map> receiver_map = instr->receiver_map();
- Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
+ Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadPrototype(reg, current_prototype);
+ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@@ -2960,7 +3342,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadPrototype(reg, current_prototype);
+ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
@@ -3006,7 +3388,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
__ push(Immediate(instr->hydrogen()->constant_properties()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
- // Pick the right runtime function or stub to call.
+ // Pick the right runtime function to call.
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else {
@@ -3274,3 +3656,5 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 6d8173a1cf..ef8fb5c493 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,8 +40,30 @@ namespace internal {
// Forward declarations.
class LDeferredCode;
+class LGapNode;
class SafepointGenerator;
+class LGapResolver BASE_EMBEDDED {
+ public:
+ LGapResolver();
+ const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
+ LOperand* marker_operand);
+
+ private:
+ LGapNode* LookupNode(LOperand* operand);
+ bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+ bool CanReach(LGapNode* a, LGapNode* b);
+ void RegisterMove(LMoveOperands move);
+ void AddResultMove(LOperand* from, LOperand* to);
+ void AddResultMove(LGapNode* from, LGapNode* to);
+ void ResolveCycle(LGapNode* start, LOperand* marker_operand);
+
+ ZoneList<LGapNode*> nodes_;
+ ZoneList<LGapNode*> identified_cycles_;
+ ZoneList<LMoveOperands> result_;
+ int next_visited_id_;
+};
+
class LCodeGen BASE_EMBEDDED {
public:
@@ -77,10 +99,15 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -148,7 +175,7 @@ class LCodeGen BASE_EMBEDDED {
int arity,
LInstruction* instr);
- void LoadPrototype(Register result, Handle<JSObject> prototype);
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
@@ -228,6 +255,9 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
+ // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 3b272d0b02..254a47af78 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "ia32/lithium-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
@@ -64,12 +68,12 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
}
-void LInstruction::PrintTo(StringStream* stream) const {
+void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
if (HasResult()) {
- result()->PrintTo(stream);
- stream->Add(" ");
+ PrintOutputOperandTo(stream);
}
+
PrintDataTo(stream);
if (HasEnvironment()) {
@@ -84,37 +88,29 @@ void LInstruction::PrintTo(StringStream* stream) const {
}
-void LLabel::PrintDataTo(StringStream* stream) const {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < I; i++) {
+ stream->Add(i == 0 ? "= " : " ");
+ inputs_.at(i)->PrintTo(stream);
}
}
-bool LParallelMove::IsRedundant() const {
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsRedundant()) return false;
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+ if (this->HasResult()) {
+ this->result()->PrintTo(stream);
+ stream->Add(" ");
}
- return true;
}
-void LParallelMove::PrintDataTo(StringStream* stream) const {
- for (int i = move_operands_.length() - 1; i >= 0; --i) {
- if (!move_operands_[i].IsEliminated()) {
- LOperand* from = move_operands_[i].from();
- LOperand* to = move_operands_[i].to();
- if (from->Equals(to)) {
- to->PrintTo(stream);
- } else {
- to->PrintTo(stream);
- stream->Add(" = ");
- from->PrintTo(stream);
- }
- stream->Add("; ");
- }
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
}
}
@@ -130,7 +126,7 @@ bool LGap::IsRedundant() const {
}
-void LGap::PrintDataTo(StringStream* stream) const {
+void LGap::PrintDataTo(StringStream* stream) {
for (int i = 0; i < 4; i++) {
stream->Add("(");
if (parallel_moves_[i] != NULL) {
@@ -169,27 +165,18 @@ const char* LArithmeticT::Mnemonic() const {
}
-
-void LBinaryOperation::PrintDataTo(StringStream* stream) const {
- stream->Add("= ");
- left()->PrintTo(stream);
- stream->Add(" ");
- right()->PrintTo(stream);
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) const {
+void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
-void LBranch::PrintDataTo(StringStream* stream) const {
+void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
input()->PrintTo(stream);
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@@ -198,7 +185,7 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
input()->PrintTo(stream);
stream->Add(is_strict() ? " === null" : " == null");
@@ -206,35 +193,35 @@ void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
input()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
input()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
input()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
input()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
input()->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
@@ -244,13 +231,13 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
}
-void LTypeofIs::PrintDataTo(StringStream* stream) const {
+void LTypeofIs::PrintDataTo(StringStream* stream) {
input()->PrintTo(stream);
stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
}
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
input()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
@@ -259,59 +246,59 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
input()->PrintTo(stream);
}
-void LCallKeyed::PrintDataTo(StringStream* stream) const {
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[ecx] #%d / ", arity());
}
-void LCallNamed::PrintDataTo(StringStream* stream) const {
+void LCallNamed::PrintDataTo(StringStream* stream) {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
-void LCallGlobal::PrintDataTo(StringStream* stream) const {
+void LCallGlobal::PrintDataTo(StringStream* stream) {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
-void LCallNew::PrintDataTo(StringStream* stream) const {
- LUnaryOperation::PrintDataTo(stream);
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ input()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
-void LClassOfTest::PrintDataTo(StringStream* stream) const {
+void LClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("= class_of_test(");
input()->PrintTo(stream);
stream->Add(", \"%o\")", *hydrogen()->class_name());
}
-void LUnaryOperation::PrintDataTo(StringStream* stream) const {
- stream->Add("= ");
- input()->PrintTo(stream);
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
@@ -322,20 +309,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
}
-LChunk::LChunk(HGraph* graph)
- : spill_slot_count_(0),
- graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) {
-}
-
-
-void LChunk::Verify() const {
- // TODO(twuerthinger): Implement verification for chunk.
-}
-
-
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot if for a double-width slot.
if (is_double) spill_slot_count_++;
@@ -390,7 +363,7 @@ void LChunk::MarkEmptyBlocks() {
}
-void LStoreNamed::PrintDataTo(StringStream* stream) const {
+void LStoreNamed::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@@ -399,7 +372,7 @@ void LStoreNamed::PrintDataTo(StringStream* stream) const {
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) const {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -472,151 +445,6 @@ void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
}
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand)
- : nodes_(4),
- identified_cycles_(4),
- result_(4),
- marker_operand_(marker_operand),
- next_visited_id_(0) {
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
- }
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i]);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
- }
- if (!node->IsResolved()) ++unresolved_nodes;
- }
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start) {
- ZoneList<LOperand*> circle_operands(8);
- circle_operands.Add(marker_operand_);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- circle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- circle_operands.Add(marker_operand_);
-
- for (int i = circle_operands.length() - 1; i > 0; --i) {
- LOperand* from = circle_operands[i];
- LOperand* to = circle_operands[i - 1];
- AddResultMove(from, to);
- }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
- }
-
- return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.from()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.from(), move.to());
- } else {
- LGapNode* from = LookupNode(move.from());
- LGapNode* to = LookupNode(move.to());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a circle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
- }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
-}
-
-
Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
}
@@ -752,38 +580,54 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-LInstruction* LChunkBuilder::Define(LInstruction* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result) {
+ allocator_->RecordDefinition(current_instruction_, result);
+ instr->set_result(result);
+ return instr;
}
-LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
- return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::NONE));
}
-LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
- return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-LInstruction* LChunkBuilder::DefineSameAsAny(LInstruction* instr) {
- return Define(instr, new LUnallocated(LUnallocated::SAME_AS_ANY_INPUT));
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateInstruction<1, I, T>* instr,
+ int index) {
+ return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg) {
return Define(instr, ToUnallocated(reg));
}
-LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
- XMMRegister reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateInstruction<1, I, T>* instr,
+ XMMRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -838,27 +682,19 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
}
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+ allocator_->MarkAsSaveDoubles();
return instr;
}
-LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
- instr->set_result(result);
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new LPointerMap(position_));
return instr;
}
-LOperand* LChunkBuilder::Temp() {
- LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
allocator_->RecordTemporary(operand);
@@ -934,10 +770,10 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
can_deopt = !can_truncate;
}
- LInstruction* result =
- DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
- if (can_deopt) AssignEnvironment(result);
- return result;
+ LShiftI* result = new LShiftI(op, left, right, can_deopt);
+ return can_deopt
+ ? AssignEnvironment(DefineSameAsFirst(result))
+ : DefineSameAsFirst(result);
}
@@ -966,7 +802,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, edx);
LOperand* right_operand = UseFixed(right, eax);
- LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+ LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1016,9 +852,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HInstruction* current = block->first();
int start = chunk_->instructions()->length();
while (current != NULL && !is_aborted()) {
- if (FLAG_trace_environment) {
- PrintF("Process instruction %d\n", current->id());
- }
// Code for constants in registers is generated lazily.
if (!current->EmitAtUses()) {
VisitInstruction(current);
@@ -1066,66 +899,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-void LEnvironment::WriteTranslation(LCodeGen* cgen,
- Translation* translation) const {
- if (this == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - parameter_count();
-
- outer()->WriteTranslation(cgen, translation);
- int closure_id = cgen->DefineDeoptimizationLiteral(closure());
- translation->BeginFrame(ast_id(), closure_id, height);
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (spilled_registers_ != NULL && value != NULL) {
- if (value->IsRegister() &&
- spilled_registers_[value->index()] != NULL) {
- translation->MarkDuplicate();
- cgen->AddToTranslation(translation,
- spilled_registers_[value->index()],
- HasTaggedValueAt(i));
- } else if (value->IsDoubleRegister() &&
- spilled_double_registers_[value->index()] != NULL) {
- translation->MarkDuplicate();
- cgen->AddToTranslation(translation,
- spilled_double_registers_[value->index()],
- false);
- }
- }
-
- cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
- }
-}
-
-
-void LEnvironment::PrintTo(StringStream* stream) const {
- stream->Add("[id=%d|", ast_id());
- stream->Add("[parameters=%d|", parameter_count());
- stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
- for (int i = 0; i < values_.length(); ++i) {
- if (i != 0) stream->Add(";");
- if (values_[i] == NULL) {
- stream->Add("[hole]");
- } else {
- values_[i]->PrintTo(stream);
- }
- }
- stream->Add("]");
-}
-
-
LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
- int value_count = hydrogen_env->values()->length();
+ int value_count = hydrogen_env->length();
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
ast_id,
hydrogen_env->parameter_count(),
@@ -1155,10 +935,11 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- if (instr->include_stack_check()) result = AssignPointerMap(result);
- return result;
+ LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
+ instr->include_stack_check());
+ return (instr->include_stack_check())
+ ? AssignPointerMap(result)
+ : result;
}
@@ -1185,32 +966,33 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
Token::Value op = compare->token();
HValue* left = compare->left();
HValue* right = compare->right();
- if (left->representation().IsInteger32()) {
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(op,
- UseRegisterAtStart(left),
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseOrConstantAtStart(right),
first_id,
- second_id,
- false);
- } else if (left->representation().IsDouble()) {
+ second_id);
+ } else if (r.IsDouble()) {
+ ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(op,
- UseRegisterAtStart(left),
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseRegisterAtStart(right),
first_id,
- second_id,
- true);
+ second_id);
} else {
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
bool reversed = op == Token::GT || op == Token::LTE;
LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
- LInstruction* result = new LCmpTAndBranch(left_operand,
- right_operand,
- first_id,
- second_id);
+ LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
+ right_operand,
+ first_id,
+ second_id);
return MarkAsCall(result, instr);
}
} else if (v->IsIsSmi()) {
@@ -1241,7 +1023,6 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
// We only need a temp register for non-strict compare.
LOperand* temp = compare->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- compare->is_strict(),
temp,
first_id,
second_id);
@@ -1264,11 +1045,12 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
second_id);
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstruction* result =
- new LInstanceOfAndBranch(UseFixed(instance_of->left(), eax),
- UseFixed(instance_of->right(), edx),
- first_id,
- second_id);
+ LInstanceOfAndBranch* result =
+ new LInstanceOfAndBranch(
+ UseFixed(instance_of->left(), InstanceofStub::left()),
+ UseFixed(instance_of->right(), InstanceofStub::right()),
+ first_id,
+ second_id);
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
@@ -1295,12 +1077,7 @@ LInstruction* LChunkBuilder::DoCompareMapAndBranch(
HCompareMapAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- HBasicBlock* first = instr->FirstSuccessor();
- HBasicBlock* second = instr->SecondSuccessor();
- return new LCmpMapAndBranch(value,
- instr->map(),
- first->block_id(),
- second->block_id());
+ return new LCmpMapAndBranch(value);
}
@@ -1315,22 +1092,33 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LInstruction* result =
- new LInstanceOf(UseFixed(instr->left(), eax),
- UseFixed(instr->right(), edx));
+ LInstanceOf* result =
+ new LInstanceOf(UseFixed(instr->left(), InstanceofStub::left()),
+ UseFixed(instr->right(), InstanceofStub::right()));
return MarkAsCall(DefineFixed(result, eax), instr);
}
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result =
+ new LInstanceOfKnownGlobal(
+ UseFixed(instr->value(), InstanceofStub::left()),
+ FixedTemp(edi));
+ MarkAsSaveDoubles(result);
+ return AssignEnvironment(AssignPointerMap(DefineFixed(result, eax)));
+}
+
+
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), edi);
LOperand* receiver = UseFixed(instr->receiver(), eax);
LOperand* length = UseRegisterAtStart(instr->length());
LOperand* elements = UseRegisterAtStart(instr->elements());
- LInstruction* result = new LApplyArguments(function,
- receiver,
- length,
- elements);
+ LApplyArguments* result = new LApplyArguments(function,
+ receiver,
+ length,
+ elements);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1363,11 +1151,11 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
if (op == kMathLog || op == kMathSin || op == kMathCos) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LInstruction* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
- LInstruction* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1416,7 +1204,7 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* constructor = UseFixed(instr->constructor(), edi);
argument_count_ -= instr->argument_count();
- LInstruction* result = new LCallNew(constructor);
+ LCallNew* result = new LCallNew(constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1456,7 +1244,9 @@ LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
- return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LBitNotI* result = new LBitNotI(input);
+ return DefineSameAsFirst(result);
}
@@ -1496,12 +1286,12 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
FixedTemp(edx);
LOperand* value = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
- LInstruction* result = DefineFixed(new LModI(value, divisor), edx);
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
- result = AssignEnvironment(result);
- }
- return result;
+ LModI* mod = new LModI(value, divisor);
+ LInstruction* result = DefineFixed(mod, edx);
+ return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanBeDivByZero))
+ ? AssignEnvironment(result)
+ : result;
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
@@ -1598,21 +1388,26 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
Token::Value op = instr->token();
- if (instr->left()->representation().IsInteger32()) {
+ Representation r = instr->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(op, left, right, false));
- } else if (instr->left()->representation().IsDouble()) {
+ return DefineAsRegister(new LCmpID(left, right));
+ } else if (r.IsDouble()) {
+ ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(op, left, right, true));
+ return DefineAsRegister(new LCmpID(left, right));
} else {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
- LInstruction* result = new LCmpT(left, right);
+ LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
}
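
Aside: DoCompare above now dispatches on GetInputRepresentation() and merely asserts that both operands match it, rather than inspecting only the left operand. A standalone sketch of representation-based selection, with made-up names and no V8 types:

    // Sketch (invented names): choose a comparison strategy from a single
    // representation value and assert that both operands agree with it.
    #include <cassert>
    #include <cstdio>

    enum class Rep { kInteger32, kDouble, kTagged };
    enum class CmpKind { kIntegerCmp, kDoubleCmp, kGenericCmp };

    CmpKind SelectCompare(Rep inputs, Rep left, Rep right) {
      assert(left == inputs && right == inputs);  // both inputs must match
      switch (inputs) {
        case Rep::kInteger32: return CmpKind::kIntegerCmp;  // integer path
        case Rep::kDouble:    return CmpKind::kDoubleCmp;   // double path
        default:              return CmpKind::kGenericCmp;  // tagged: call out
      }
    }

    int main() {
      CmpKind k = SelectCompare(Rep::kDouble, Rep::kDouble, Rep::kDouble);
      std::printf("double path selected: %d\n", k == CmpKind::kDoubleCmp);
      return 0;
    }
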
@@ -1622,7 +1417,7 @@ LInstruction* LChunkBuilder::DoCompareJSObjectEq(
HCompareJSObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LInstruction* result = new LCmpJSObjectEq(left, right);
+ LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
return DefineAsRegister(result);
}
@@ -1631,8 +1426,7 @@ LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LIsNull(value,
- instr->is_strict()));
+ return DefineAsRegister(new LIsNull(value));
}
@@ -1677,25 +1471,21 @@ LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
}
-LInstruction* LChunkBuilder::DoArrayLength(HArrayLength* instr) {
- LOperand* array = NULL;
- LOperand* temporary = NULL;
+LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LJSArrayLength(array));
+}
- if (instr->value()->IsLoadElements()) {
- array = UseRegisterAtStart(instr->value());
- } else {
- array = UseRegister(instr->value());
- temporary = TempRegister();
- }
- LInstruction* result = new LArrayLength(array, temporary);
- return AssignEnvironment(DefineAsRegister(result));
+LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LFixedArrayLength(array));
}
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
- LInstruction* result = new LValueOf(object, TempRegister());
+ LValueOf* result = new LValueOf(object, TempRegister());
return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1718,7 +1508,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LInstruction* res = new LNumberUntagD(value);
+ LNumberUntagD* res = new LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
@@ -1729,7 +1519,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
(instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
- LInstruction* res = new LTaggedToI(value, xmm_temp);
+ LTaggedToI* res = new LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
return DefineSameAsFirst(new LSmiUntag(value, needs_check));
@@ -1742,12 +1532,16 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
- LInstruction* result = new LNumberTagD(value, temp);
+ LNumberTagD* result = new LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
+ bool needs_temp = instr->CanTruncateToInt32() &&
+ !CpuFeatures::IsSupported(SSE3);
+ LOperand* value = needs_temp ?
+ UseTempRegister(instr->value()) : UseRegister(instr->value());
+ LOperand* temp = needs_temp ? TempRegister() : NULL;
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
@@ -1756,7 +1550,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new LSmiTag(value));
} else {
- LInstruction* result = new LNumberTagI(value);
+ LNumberTagI* result = new LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
@@ -1778,17 +1572,14 @@ LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- LInstruction* result = new LCheckInstanceType(value, temp);
+ LCheckInstanceType* result = new LCheckInstanceType(value, temp);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp = TempRegister();
- LInstruction* result =
- new LCheckPrototypeMaps(temp,
- instr->holder(),
- instr->receiver_map());
+ LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
return AssignEnvironment(result);
}
@@ -1807,7 +1598,7 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new LCheckMap(value);
+ LCheckMap* result = new LCheckMap(value);
return AssignEnvironment(result);
}
@@ -1828,14 +1619,14 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT(instr->handle()));
} else {
- Abort("unsupported constant of type double");
+ UNREACHABLE();
return NULL;
}
}
LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LInstruction* result = new LLoadGlobal;
+ LLoadGlobal* result = new LLoadGlobal;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
@@ -1847,16 +1638,30 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
}
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ return DefineAsRegister(new LLoadContextSlot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new LLoadNamedField(UseRegisterAtStart(instr->object())));
+ ASSERT(instr->representation().IsTagged());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new LLoadNamedField(obj));
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), eax);
- LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), eax);
- return MarkAsCall(result, instr);
+ LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ return AssignEnvironment(DefineAsRegister(
+ new LLoadFunctionPrototype(UseRegister(instr->function()),
+ TempRegister())));
}
@@ -1868,23 +1673,12 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
- Representation r = instr->representation();
- LOperand* obj = UseRegisterAtStart(instr->object());
+ ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* load_result = NULL;
- // Double needs an extra temp, because the result is converted from heap
- // number to a double register.
- if (r.IsDouble()) load_result = TempRegister();
- LInstruction* result = new LLoadKeyedFastElement(obj,
- key,
- load_result);
- if (r.IsDouble()) {
- result = DefineAsRegister(result);
- } else {
- result = DefineSameAsFirst(result);
- }
- return AssignEnvironment(result);
+ LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1892,9 +1686,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), edx);
LOperand* key = UseFixed(instr->key(), eax);
- LInstruction* result =
- DefineFixed(new LLoadKeyedGeneric(object, key), eax);
- return MarkAsCall(result, instr);
+ LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
+ return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1931,7 +1724,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = !instr->value()->type().IsSmi();
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* obj = needs_write_barrier
? UseTempRegister(instr->object())
@@ -1946,14 +1739,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
? TempRegister() : NULL;
- return new LStoreNamedField(obj,
- instr->name(),
- val,
- instr->is_in_object(),
- instr->offset(),
- temp,
- needs_write_barrier,
- instr->transition());
+ return new LStoreNamedField(obj, val, temp);
}
@@ -1961,7 +1747,7 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), edx);
LOperand* val = UseFixed(instr->value(), eax);
- LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+ LStoreNamedGeneric* result = new LStoreNamedGeneric(obj, val);
return MarkAsCall(result, instr);
}
@@ -1987,8 +1773,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LInstruction* result = new LDeleteProperty(Use(instr->object()),
- UseOrConstant(instr->key()));
+ LDeleteProperty* result = new LDeleteProperty(Use(instr->object()),
+ UseOrConstant(instr->key()));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2029,13 +1815,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
- return DefineAsRegister(AssignEnvironment(result));
+ LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LInstruction* result = new LTypeof(Use(instr->value()));
+ LTypeof* result = new LTypeof(UseAtStart(instr->value()));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2059,20 +1845,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->Push(value);
}
}
-
- if (FLAG_trace_environment) {
- PrintF("Reconstructed environment ast_id=%d, instr_id=%d\n",
- instr->ast_id(),
- instr->id());
- env->PrintToStd();
- }
- ASSERT(env->values()->length() == instr->environment_height());
+ ASSERT(env->length() == instr->environment_length());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new LLazyBailout;
- result = AssignEnvironment(result);
+ LLazyBailout* lazy_bailout = new LLazyBailout;
+ LInstruction* result = AssignEnvironment(lazy_bailout);
instructions_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
ClearInstructionPendingDeoptimizationEnvironment();
@@ -2108,21 +1887,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-void LPointerMap::RecordPointer(LOperand* op) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- pointer_operands_.Add(op);
-}
-
-
-void LPointerMap::PrintTo(StringStream* stream) const {
- stream->Add("{");
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (i != 0) stream->Add(";");
- pointer_operands_[i]->PrintTo(stream);
- }
- stream->Add("} @%d", position());
-}
-
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 3f48e50e22..07f0a8d90b 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "hydrogen.h"
#include "lithium-allocator.h"
+#include "lithium.h"
#include "safepoint-table.h"
namespace v8 {
@@ -37,9 +38,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-class LEnvironment;
-class Translation;
-class LGapNode;
// Type hierarchy:
@@ -63,6 +61,7 @@ class LGapNode;
// LDivI
// LInstanceOf
// LInstanceOfAndBranch
+// LInstanceOfKnownGlobal
// LLoadKeyedFastElement
// LLoadKeyedGeneric
// LModI
@@ -78,16 +77,20 @@ class LGapNode;
// LCallNamed
// LCallRuntime
// LCallStub
+// LCheckPrototypeMaps
// LConstant
// LConstantD
// LConstantI
// LConstantT
// LDeoptimize
// LFunctionLiteral
+// LGap
+// LLabel
// LGlobalObject
// LGlobalReceiver
-// LLabel
-// LLayzBailout
+// LGoto
+// LLazyBailout
+// LLoadContextSlot
// LLoadGlobal
// LMaterializedLiteral
// LArrayLiteral
@@ -104,19 +107,18 @@ class LGapNode;
// LStoreNamedField
// LStoreNamedGeneric
// LUnaryOperation
-// LArrayLength
// LBitNotI
// LBranch
// LCallNew
// LCheckFunction
// LCheckInstanceType
// LCheckMap
-// LCheckPrototypeMaps
// LCheckSmi
// LClassOfTest
// LClassOfTestAndBranch
// LDeleteProperty
// LDoubleToI
+// LFixedArrayLength
// LHasCachedArrayIndex
// LHasCachedArrayIndexAndBranch
// LHasInstanceType
@@ -128,8 +130,10 @@ class LGapNode;
// LIsObjectAndBranch
// LIsSmi
// LIsSmiAndBranch
+// LJSArrayLength
// LLoadNamedField
// LLoadNamedGeneric
+// LLoadFunctionPrototype
// LNumberTagD
// LNumberTagI
// LPushArgument
@@ -164,7 +168,6 @@ class LGapNode;
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
- V(ArrayLength) \
V(ArrayLiteral) \
V(BitI) \
V(BitNotI) \
@@ -203,8 +206,10 @@ class LGapNode;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
+ V(FixedArrayLength) \
V(InstanceOf) \
V(InstanceOfAndBranch) \
+ V(InstanceOfKnownGlobal) \
V(Integer32ToDouble) \
V(IsNull) \
V(IsNullAndBranch) \
@@ -212,6 +217,7 @@ class LGapNode;
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
+ V(JSArrayLength) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(HasCachedArrayIndex) \
@@ -220,12 +226,14 @@ class LGapNode;
V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
+ V(LoadFunctionPrototype) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -286,8 +294,9 @@ class LInstruction: public ZoneObject {
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream) const;
- virtual void PrintDataTo(StringStream* stream) const { }
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) = 0;
+ virtual void PrintOutputOperandTo(StringStream* stream) = 0;
// Declare virtual type testers.
#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
@@ -303,9 +312,7 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- void set_result(LOperand* operand) { result_.set(operand); }
- LOperand* result() const { return result_.get(); }
- bool HasResult() const { return result_.is_set(); }
+ virtual bool HasResult() const = 0;
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -323,57 +330,66 @@ class LInstruction: public ZoneObject {
private:
SetOncePointer<LEnvironment> environment_;
SetOncePointer<LPointerMap> pointer_map_;
- SetOncePointer<LOperand> result_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
};
-class LGapResolver BASE_EMBEDDED {
+template<typename T, int N>
+class OperandContainer {
public:
- LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
- const ZoneList<LMoveOperands>* ResolveInReverseOrder();
-
+ OperandContainer() {
+ for (int i = 0; i < N; i++) elems_[i] = NULL;
+ }
+ int length() const { return N; }
+ T at(int i) const { return elems_[i]; }
+ void set_at(int i, T value) { elems_[i] = value; }
private:
- LGapNode* LookupNode(LOperand* operand);
- bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
- bool CanReach(LGapNode* a, LGapNode* b);
- void RegisterMove(LMoveOperands move);
- void AddResultMove(LOperand* from, LOperand* to);
- void AddResultMove(LGapNode* from, LGapNode* to);
- void ResolveCycle(LGapNode* start);
-
- ZoneList<LGapNode*> nodes_;
- ZoneList<LGapNode*> identified_cycles_;
- ZoneList<LMoveOperands> result_;
- LOperand* marker_operand_;
- int next_visited_id_;
- int bailout_after_ast_id_;
+ T elems_[N];
};
-class LParallelMove : public ZoneObject {
+template<typename T>
+class OperandContainer<T, 0> {
public:
- LParallelMove() : move_operands_(4) { }
-
- void AddMove(LOperand* from, LOperand* to) {
- move_operands_.Add(LMoveOperands(from, to));
+ int length() const { return 0; }
+ T at(int i) const {
+ UNREACHABLE();
+ return NULL;
}
+ void set_at(int i, T value) {
+ UNREACHABLE();
+ }
+};
- bool IsRedundant() const;
- const ZoneList<LMoveOperands>* move_operands() const {
- return &move_operands_;
- }
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const { return R != 0; }
+ void set_result(LOperand* operand) { outputs_.set_at(0, operand); }
+ LOperand* result() const { return outputs_.at(0); }
+
+ int InputCount() const { return inputs_.length(); }
+ LOperand* InputAt(int i) const { return inputs_.at(i); }
+ void SetInputAt(int i, LOperand* operand) { inputs_.set_at(i, operand); }
- void PrintDataTo(StringStream* stream) const;
+ int TempCount() const { return temps_.length(); }
+ LOperand* TempAt(int i) const { return temps_.at(i); }
+
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
private:
- ZoneList<LMoveOperands> move_operands_;
+ OperandContainer<LOperand*, R> outputs_;
+ OperandContainer<LOperand*, I> inputs_;
+ OperandContainer<LOperand*, T> temps_;
};
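
Aside: the OperandContainer/LTemplateInstruction pair above moves the operand counts into template parameters, so every concrete instruction gets fixed-size operand storage and a compile-time cap of one result. The following is a self-contained sketch of the same idea; the names are invented, it uses no V8 types, and it is illustrative only.

    // Encoding operand counts in the type, as LTemplateInstruction<R, I, T>
    // does above: R results (0 or 1), I inputs, T temps.
    #include <cassert>
    #include <cstdio>

    template <typename T, int N>
    class OperandContainer {
     public:
      OperandContainer() { for (int i = 0; i < N; i++) elems_[i] = T(); }
      int length() const { return N; }
      T at(int i) const { assert(0 <= i && i < N); return elems_[i]; }
      void set_at(int i, T value) { assert(0 <= i && i < N); elems_[i] = value; }
     private:
      T elems_[N];  // fixed-size storage, no heap allocation
    };

    // Zero-operand case: no storage at all.
    template <typename T>
    class OperandContainer<T, 0> {
     public:
      int length() const { return 0; }
    };

    template <int R, int I, int T>
    class TemplateInstruction {
     public:
      static_assert(R == 0 || R == 1, "at most one result");
      bool HasResult() const { return R != 0; }
      int InputCount() const { return inputs_.length(); }
      int InputAt(int i) const { return inputs_.at(i); }
      void SetInputAt(int i, int operand) { inputs_.set_at(i, operand); }
     private:
      OperandContainer<int, R> outputs_;
      OperandContainer<int, I> inputs_;
      OperandContainer<int, T> temps_;
    };

    // A two-input, one-result "add" in this scheme.
    class AddInstruction : public TemplateInstruction<1, 2, 0> {
     public:
      AddInstruction(int left, int right) {
        SetInputAt(0, left);
        SetInputAt(1, right);
      }
    };

    int main() {
      AddInstruction add(40, 2);
      std::printf("inputs=%d has_result=%d sum=%d\n", add.InputCount(),
                  add.HasResult(), add.InputAt(0) + add.InputAt(1));
      return 0;
    }

Because the shape is part of the type, an instruction with no result simply has no result slot, which is what lets the patch replace the old SetOncePointer-based result field with a pure virtual HasResult().
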
-class LGap: public LInstruction {
+class LGap: public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -384,7 +400,7 @@ class LGap: public LInstruction {
}
DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
bool IsRedundant() const;
@@ -414,13 +430,13 @@ class LGap: public LInstruction {
};
-class LGoto: public LInstruction {
+class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
LGoto(int block_id, bool include_stack_check = false)
: block_id_(block_id), include_stack_check_(include_stack_check) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
@@ -432,7 +448,7 @@ class LGoto: public LInstruction {
};
-class LLazyBailout: public LInstruction {
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -448,7 +464,7 @@ class LLazyBailout: public LInstruction {
};
-class LDeoptimize: public LInstruction {
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
};
@@ -461,7 +477,7 @@ class LLabel: public LGap {
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -476,13 +492,13 @@ class LLabel: public LGap {
};
-class LParameter: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LInstruction {
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -493,96 +509,89 @@ class LCallStub: public LInstruction {
};
-class LUnknownOSRValue: public LInstruction {
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
-class LUnaryOperation: public LInstruction {
+template<int R>
+class LUnaryOperation: public LTemplateInstruction<R, 1, 0> {
public:
- explicit LUnaryOperation(LOperand* input) : input_(input) { }
-
- DECLARE_INSTRUCTION(UnaryOperation)
-
- LOperand* input() const { return input_; }
+ explicit LUnaryOperation<R>(LOperand* input) {
+ this->SetInputAt(0, input);
+ }
- virtual void PrintDataTo(StringStream* stream) const;
+ LOperand* input() const { return this->InputAt(0); }
- private:
- LOperand* input_;
+ DECLARE_INSTRUCTION(UnaryOperation)
};
-class LBinaryOperation: public LInstruction {
+template<int R>
+class LBinaryOperation: public LTemplateInstruction<R, 2, 0> {
public:
- LBinaryOperation(LOperand* left, LOperand* right)
- : left_(left), right_(right) { }
+ LBinaryOperation(LOperand* left, LOperand* right) {
+ this->SetInputAt(0, left);
+ this->SetInputAt(1, right);
+ }
DECLARE_INSTRUCTION(BinaryOperation)
- LOperand* left() const { return left_; }
- LOperand* right() const { return right_; }
- virtual void PrintDataTo(StringStream* stream) const;
-
- private:
- LOperand* left_;
- LOperand* right_;
+ LOperand* left() const { return this->InputAt(0); }
+ LOperand* right() const { return this->InputAt(1); }
};
-class LApplyArguments: public LBinaryOperation {
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
- LOperand* elements)
- : LBinaryOperation(function, receiver),
- length_(length),
- elements_(elements) { }
+ LOperand* elements) {
+ this->SetInputAt(0, function);
+ this->SetInputAt(1, receiver);
+ this->SetInputAt(2, length);
+ this->SetInputAt(3, elements);
+ }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- LOperand* function() const { return left(); }
- LOperand* receiver() const { return right(); }
- LOperand* length() const { return length_; }
- LOperand* elements() const { return elements_; }
-
- private:
- LOperand* length_;
- LOperand* elements_;
+ LOperand* function() const { return InputAt(0); }
+ LOperand* receiver() const { return InputAt(1); }
+ LOperand* length() const { return InputAt(2); }
+ LOperand* elements() const { return InputAt(3); }
};
-class LAccessArgumentsAt: public LInstruction {
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
- : arguments_(arguments), length_(length), index_(index) { }
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ this->SetInputAt(0, arguments);
+ this->SetInputAt(1, length);
+ this->SetInputAt(2, index);
+ }
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- LOperand* arguments() const { return arguments_; }
- LOperand* length() const { return length_; }
- LOperand* index() const { return index_; }
+ LOperand* arguments() const { return this->InputAt(0); }
+ LOperand* length() const { return this->InputAt(1); }
+ LOperand* index() const { return this->InputAt(2); }
- virtual void PrintDataTo(StringStream* stream) const;
-
- private:
- LOperand* arguments_;
- LOperand* length_;
- LOperand* index_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LArgumentsLength: public LUnaryOperation {
+class LArgumentsLength: public LUnaryOperation<1> {
public:
- explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+ explicit LArgumentsLength(LOperand* elements)
+ : LUnaryOperation<1>(elements) {}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
-class LArgumentsElements: public LInstruction {
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
LArgumentsElements() { }
@@ -590,29 +599,29 @@ class LArgumentsElements: public LInstruction {
};
-class LModI: public LBinaryOperation {
+class LModI: public LBinaryOperation<1> {
public:
- LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+ LModI(LOperand* left, LOperand* right) : LBinaryOperation<1>(left, right) { }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
-class LDivI: public LBinaryOperation {
+class LDivI: public LBinaryOperation<1> {
public:
LDivI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ : LBinaryOperation<1>(left, right) { }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
-class LMulI: public LBinaryOperation {
+class LMulI: public LBinaryOperation<1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp)
- : LBinaryOperation(left, right), temp_(temp) { }
+ : LBinaryOperation<1>(left, right), temp_(temp) { }
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
@@ -624,36 +633,33 @@ class LMulI: public LBinaryOperation {
};
-class LCmpID: public LBinaryOperation {
+class LCmpID: public LBinaryOperation<1> {
public:
- LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
- : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
+ LCmpID(LOperand* left, LOperand* right)
+ : LBinaryOperation<1>(left, right) { }
- Token::Value op() const { return op_; }
- bool is_double() const { return is_double_; }
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
-
- private:
- Token::Value op_;
- bool is_double_;
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
};
class LCmpIDAndBranch: public LCmpID {
public:
- LCmpIDAndBranch(Token::Value op,
- LOperand* left,
+ LCmpIDAndBranch(LOperand* left,
LOperand* right,
int true_block_id,
- int false_block_id,
- bool is_double)
- : LCmpID(op, left, right, is_double),
+ int false_block_id)
+ : LCmpID(left, right),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -665,23 +671,23 @@ class LCmpIDAndBranch: public LCmpID {
};
-class LUnaryMathOperation: public LUnaryOperation {
+class LUnaryMathOperation: public LUnaryOperation<1> {
public:
explicit LUnaryMathOperation(LOperand* value)
- : LUnaryOperation(value) { }
+ : LUnaryOperation<1>(value) { }
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
BuiltinFunctionId op() const { return hydrogen()->op(); }
};
-class LCmpJSObjectEq: public LBinaryOperation {
+class LCmpJSObjectEq: public LBinaryOperation<1> {
public:
LCmpJSObjectEq(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) {}
+ : LBinaryOperation<1>(left, right) {}
DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
};
@@ -709,34 +715,30 @@ class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
};
-class LIsNull: public LUnaryOperation {
+class LIsNull: public LUnaryOperation<1> {
public:
- LIsNull(LOperand* value, bool is_strict)
- : LUnaryOperation(value), is_strict_(is_strict) {}
+ explicit LIsNull(LOperand* value) : LUnaryOperation<1>(value) { }
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+ DECLARE_HYDROGEN_ACCESSOR(IsNull)
- bool is_strict() const { return is_strict_; }
-
- private:
- bool is_strict_;
+ bool is_strict() const { return hydrogen()->is_strict(); }
};
class LIsNullAndBranch: public LIsNull {
public:
LIsNullAndBranch(LOperand* value,
- bool is_strict,
LOperand* temp,
int true_block_id,
int false_block_id)
- : LIsNull(value, is_strict),
+ : LIsNull(value),
temp_(temp),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -751,10 +753,10 @@ class LIsNullAndBranch: public LIsNull {
};
-class LIsObject: public LUnaryOperation {
+class LIsObject: public LUnaryOperation<1> {
public:
LIsObject(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) {}
+ : LUnaryOperation<1>(value), temp_(temp) {}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
@@ -778,7 +780,7 @@ class LIsObjectAndBranch: public LIsObject {
false_block_id_(false_block_id) { }
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -793,9 +795,9 @@ class LIsObjectAndBranch: public LIsObject {
};
-class LIsSmi: public LUnaryOperation {
+class LIsSmi: public LUnaryOperation<1> {
public:
- explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+ explicit LIsSmi(LOperand* value) : LUnaryOperation<1>(value) {}
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
DECLARE_HYDROGEN_ACCESSOR(IsSmi)
@@ -812,7 +814,7 @@ class LIsSmiAndBranch: public LIsSmi {
false_block_id_(false_block_id) { }
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -824,10 +826,10 @@ class LIsSmiAndBranch: public LIsSmi {
};
-class LHasInstanceType: public LUnaryOperation {
+class LHasInstanceType: public LUnaryOperation<1> {
public:
explicit LHasInstanceType(LOperand* value)
- : LUnaryOperation(value) { }
+ : LUnaryOperation<1>(value) { }
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
@@ -850,7 +852,7 @@ class LHasInstanceTypeAndBranch: public LHasInstanceType {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -865,9 +867,9 @@ class LHasInstanceTypeAndBranch: public LHasInstanceType {
};
-class LHasCachedArrayIndex: public LUnaryOperation {
+class LHasCachedArrayIndex: public LUnaryOperation<1> {
public:
- explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+ explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation<1>(value) {}
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
@@ -885,7 +887,7 @@ class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -897,20 +899,20 @@ class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
};
-class LClassOfTest: public LUnaryOperation {
+class LClassOfTest: public LUnaryOperation<1> {
public:
LClassOfTest(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temporary_(temp) {}
+ : LUnaryOperation<1>(value), temporary_(temp) {}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
LOperand* temporary() { return temporary_; }
private:
- LOperand *temporary_;
+ LOperand* temporary_;
};
@@ -928,7 +930,7 @@ class LClassOfTestAndBranch: public LClassOfTest {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -942,9 +944,9 @@ class LClassOfTestAndBranch: public LClassOfTest {
};
-class LCmpT: public LBinaryOperation {
+class LCmpT: public LBinaryOperation<1> {
public:
- LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+ LCmpT(LOperand* left, LOperand* right) : LBinaryOperation<1>(left, right) {}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(Compare)
@@ -974,10 +976,10 @@ class LCmpTAndBranch: public LCmpT {
};
-class LInstanceOf: public LBinaryOperation {
+class LInstanceOf: public LBinaryOperation<1> {
public:
LInstanceOf(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ : LBinaryOperation<1>(left, right) { }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
@@ -1004,10 +1006,27 @@ class LInstanceOfAndBranch: public LInstanceOf {
};
-class LBoundsCheck: public LBinaryOperation {
+class LInstanceOfKnownGlobal: public LUnaryOperation<1> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* left, LOperand* temp)
+ : LUnaryOperation<1>(left), temp_(temp) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LOperand* temp() const { return temp_; }
+
+ private:
+ LOperand* temp_;
+};
+
+
+class LBoundsCheck: public LBinaryOperation<0> {
public:
LBoundsCheck(LOperand* index, LOperand* length)
- : LBinaryOperation(index, length) { }
+ : LBinaryOperation<0>(index, length) { }
LOperand* index() const { return left(); }
LOperand* length() const { return right(); }
@@ -1016,10 +1035,10 @@ class LBoundsCheck: public LBinaryOperation {
};
-class LBitI: public LBinaryOperation {
+class LBitI: public LBinaryOperation<1> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : LBinaryOperation<1>(left, right), op_(op) { }
Token::Value op() const { return op_; }
@@ -1030,10 +1049,10 @@ class LBitI: public LBinaryOperation {
};
-class LShiftI: public LBinaryOperation {
+class LShiftI: public LBinaryOperation<1> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+ : LBinaryOperation<1>(left, right), op_(op), can_deopt_(can_deopt) { }
Token::Value op() const { return op_; }
@@ -1047,17 +1066,17 @@ class LShiftI: public LBinaryOperation {
};
-class LSubI: public LBinaryOperation {
+class LSubI: public LBinaryOperation<1> {
public:
LSubI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ : LBinaryOperation<1>(left, right) { }
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
-class LConstant: public LInstruction {
+class LConstant: public LTemplateInstruction<1, 0, 0> {
DECLARE_INSTRUCTION(Constant)
};
@@ -1098,17 +1117,17 @@ class LConstantT: public LConstant {
};
-class LBranch: public LUnaryOperation {
+class LBranch: public LUnaryOperation<0> {
public:
LBranch(LOperand* input, int true_block_id, int false_block_id)
- : LUnaryOperation(input),
+ : LUnaryOperation<0>(input),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Value)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -1120,51 +1139,47 @@ class LBranch: public LUnaryOperation {
};
-class LCmpMapAndBranch: public LUnaryOperation {
+class LCmpMapAndBranch: public LUnaryOperation<0> {
public:
- LCmpMapAndBranch(LOperand* value,
- Handle<Map> map,
- int true_block_id,
- int false_block_id)
- : LUnaryOperation(value),
- map_(map),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LCmpMapAndBranch(LOperand* value) : LUnaryOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
virtual bool IsControl() const { return true; }
- Handle<Map> map() const { return map_; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- Handle<Map> map_;
- int true_block_id_;
- int false_block_id_;
+ Handle<Map> map() const { return hydrogen()->map(); }
+ int true_block_id() const {
+ return hydrogen()->true_destination()->block_id();
+ }
+ int false_block_id() const {
+ return hydrogen()->false_destination()->block_id();
+ }
};
-class LArrayLength: public LUnaryOperation {
+class LJSArrayLength: public LUnaryOperation<1> {
public:
- LArrayLength(LOperand* input, LOperand* temporary)
- : LUnaryOperation(input), temporary_(temporary) { }
+ explicit LJSArrayLength(LOperand* input) : LUnaryOperation<1>(input) { }
- LOperand* temporary() const { return temporary_; }
+ DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+};
- DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array-length")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLength)
- private:
- LOperand* temporary_;
+class LFixedArrayLength: public LUnaryOperation<1> {
+ public:
+ explicit LFixedArrayLength(LOperand* input) : LUnaryOperation<1>(input) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
};
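
Aside: the old LArrayLength, which needed a temp register and a deoptimization environment because it had to tell the two array kinds apart at runtime, is split above into LJSArrayLength and LFixedArrayLength, each valid only for the input type hydrogen already established. A toy sketch of the same specialization idea, with hypothetical types that are not V8 code:

    // One polymorphic length helper that must test what it was given is
    // replaced by two specialized helpers, each valid only for the type its
    // caller already proved.
    #include <cstdio>
    #include <vector>

    struct JSArrayLike { int length_field; };           // length kept in a field
    struct FixedArrayLike { std::vector<int> elems; };  // length is element count

    int JSArrayLength(const JSArrayLike& a) { return a.length_field; }
    int FixedArrayLength(const FixedArrayLike& a) {
      return static_cast<int>(a.elems.size());
    }

    int main() {
      JSArrayLike js{3};
      FixedArrayLike fixed{{1, 2, 3, 4}};
      std::printf("%d %d\n", JSArrayLength(js), FixedArrayLength(fixed));
      return 0;
    }
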
-class LValueOf: public LUnaryOperation {
+class LValueOf: public LUnaryOperation<1> {
public:
LValueOf(LOperand* input, LOperand* temporary)
- : LUnaryOperation(input), temporary_(temporary) { }
+ : LUnaryOperation<1>(input), temporary_(temporary) { }
LOperand* temporary() const { return temporary_; }
@@ -1176,46 +1191,46 @@ class LValueOf: public LUnaryOperation {
};
-class LThrow: public LUnaryOperation {
+class LThrow: public LUnaryOperation<0> {
public:
- explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+ explicit LThrow(LOperand* value) : LUnaryOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
-class LBitNotI: public LUnaryOperation {
+class LBitNotI: public LUnaryOperation<1> {
public:
- explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+ explicit LBitNotI(LOperand* input) : LUnaryOperation<1>(input) { }
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
-class LAddI: public LBinaryOperation {
+class LAddI: public LBinaryOperation<1> {
public:
LAddI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ : LBinaryOperation<1>(left, right) { }
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
-class LPower: public LBinaryOperation {
+class LPower: public LBinaryOperation<1> {
public:
LPower(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ : LBinaryOperation<1>(left, right) { }
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
-class LArithmeticD: public LBinaryOperation {
+class LArithmeticD: public LBinaryOperation<1> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : LBinaryOperation<1>(left, right), op_(op) { }
Token::Value op() const { return op_; }
@@ -1227,10 +1242,10 @@ class LArithmeticD: public LBinaryOperation {
};
-class LArithmeticT: public LBinaryOperation {
+class LArithmeticT: public LBinaryOperation<1> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : LBinaryOperation<1>(left, right), op_(op) { }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1242,26 +1257,26 @@ class LArithmeticT: public LBinaryOperation {
};
-class LReturn: public LUnaryOperation {
+class LReturn: public LUnaryOperation<0> {
public:
- explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+ explicit LReturn(LOperand* use) : LUnaryOperation<0>(use) { }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
-class LLoadNamedField: public LUnaryOperation {
+class LLoadNamedField: public LUnaryOperation<1> {
public:
- explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+ explicit LLoadNamedField(LOperand* object) : LUnaryOperation<1>(object) { }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
-class LLoadNamedGeneric: public LUnaryOperation {
+class LLoadNamedGeneric: public LUnaryOperation<1> {
public:
- explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+ explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation<1>(object) { }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1271,38 +1286,47 @@ class LLoadNamedGeneric: public LUnaryOperation {
};
-class LLoadElements: public LUnaryOperation {
+class LLoadFunctionPrototype: public LUnaryOperation<1> {
+ public:
+ LLoadFunctionPrototype(LOperand* function, LOperand* temporary)
+ : LUnaryOperation<1>(function), temporary_(temporary) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+
+ LOperand* function() const { return input(); }
+ LOperand* temporary() const { return temporary_; }
+
+ private:
+ LOperand* temporary_;
+};
+
+
+class LLoadElements: public LUnaryOperation<1> {
public:
- explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+ explicit LLoadElements(LOperand* obj) : LUnaryOperation<1>(obj) { }
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
-class LLoadKeyedFastElement: public LBinaryOperation {
+class LLoadKeyedFastElement: public LBinaryOperation<1> {
public:
- LLoadKeyedFastElement(LOperand* elements,
- LOperand* key,
- LOperand* load_result)
- : LBinaryOperation(elements, key),
- load_result_(load_result) { }
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key)
+ : LBinaryOperation<1>(elements, key) { }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
LOperand* elements() const { return left(); }
LOperand* key() const { return right(); }
- LOperand* load_result() const { return load_result_; }
-
- private:
- LOperand* load_result_;
};
-class LLoadKeyedGeneric: public LBinaryOperation {
+class LLoadKeyedGeneric: public LBinaryOperation<1> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key)
- : LBinaryOperation(obj, key) { }
+ : LBinaryOperation<1>(obj, key) { }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
@@ -1311,78 +1335,92 @@ class LLoadKeyedGeneric: public LBinaryOperation {
};
-class LLoadGlobal: public LInstruction {
+class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
};
-class LStoreGlobal: public LUnaryOperation {
+class LStoreGlobal: public LUnaryOperation<0> {
public:
- explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+ explicit LStoreGlobal(LOperand* value) : LUnaryOperation<0>(value) {}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
};
-class LPushArgument: public LUnaryOperation {
+class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int context_chain_length() const {
+ return hydrogen()->context_chain_length();
+ }
+ int slot_index() const { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LUnaryOperation<0> {
public:
- explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+ explicit LPushArgument(LOperand* argument) : LUnaryOperation<0>(argument) {}
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
-class LGlobalObject: public LInstruction {
+class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
};
-class LGlobalReceiver: public LInstruction {
+class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
-class LCallConstantFunction: public LInstruction {
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- Handle<JSFunction> function() const { return hydrogen()->function(); }
+ Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LInstruction {
+class LCallKeyed: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LInstruction {
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LInstruction {
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1391,44 +1429,44 @@ class LCallFunction: public LInstruction {
};
-class LCallGlobal: public LInstruction {
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const {return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LInstruction {
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> target() const { return hydrogen()->target(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LUnaryOperation {
+class LCallNew: public LUnaryOperation<1> {
public:
- explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+ explicit LCallNew(LOperand* constructor) : LUnaryOperation<1>(constructor) { }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LInstruction {
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1438,26 +1476,26 @@ class LCallRuntime: public LInstruction {
};
-class LInteger32ToDouble: public LUnaryOperation {
+class LInteger32ToDouble: public LUnaryOperation<1> {
public:
- explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+ explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation<1>(use) { }
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
-class LNumberTagI: public LUnaryOperation {
+class LNumberTagI: public LUnaryOperation<1> {
public:
- explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+ explicit LNumberTagI(LOperand* use) : LUnaryOperation<1>(use) { }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagD: public LUnaryOperation {
+class LNumberTagD: public LUnaryOperation<1> {
public:
explicit LNumberTagD(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ : LUnaryOperation<1>(value), temp_(temp) { }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
@@ -1469,22 +1507,27 @@ class LNumberTagD: public LUnaryOperation {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LUnaryOperation {
+class LDoubleToI: public LUnaryOperation<1> {
public:
- explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+ LDoubleToI(LOperand* value, LOperand* temporary)
+ : LUnaryOperation<1>(value), temporary_(temporary) { }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+ LOperand* temporary() const { return temporary_; }
+
+ private:
+ LOperand* temporary_;
};
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LUnaryOperation {
+class LTaggedToI: public LUnaryOperation<1> {
public:
LTaggedToI(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ : LUnaryOperation<1>(value), temp_(temp) { }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
@@ -1497,26 +1540,26 @@ class LTaggedToI: public LUnaryOperation {
};
-class LSmiTag: public LUnaryOperation {
+class LSmiTag: public LUnaryOperation<1> {
public:
- explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+ explicit LSmiTag(LOperand* use) : LUnaryOperation<1>(use) { }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
-class LNumberUntagD: public LUnaryOperation {
+class LNumberUntagD: public LUnaryOperation<1> {
public:
- explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+ explicit LNumberUntagD(LOperand* value) : LUnaryOperation<1>(value) { }
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
};
-class LSmiUntag: public LUnaryOperation {
+class LSmiUntag: public LUnaryOperation<1> {
public:
LSmiUntag(LOperand* use, bool needs_check)
- : LUnaryOperation(use), needs_check_(needs_check) { }
+ : LUnaryOperation<1>(use), needs_check_(needs_check) { }
DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
@@ -1527,89 +1570,69 @@ class LSmiUntag: public LUnaryOperation {
};
-class LStoreNamed: public LInstruction {
+class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
- : object_(obj), name_(name), value_(val) { }
+ LStoreNamed(LOperand* obj, LOperand* val) {
+ this->SetInputAt(0, obj);
+ this->SetInputAt(1, val);
+ }
DECLARE_INSTRUCTION(StoreNamed)
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
- virtual void PrintDataTo(StringStream* stream) const;
-
- LOperand* object() const { return object_; }
- Handle<Object> name() const { return name_; }
- LOperand* value() const { return value_; }
+ virtual void PrintDataTo(StringStream* stream);
- private:
- LOperand* object_;
- Handle<Object> name_;
- LOperand* value_;
+ LOperand* object() const { return this->InputAt(0); }
+ LOperand* value() const { return this->InputAt(1); }
+ Handle<Object> name() const { return hydrogen()->name(); }
};
class LStoreNamedField: public LStoreNamed {
public:
- LStoreNamedField(LOperand* obj,
- Handle<Object> name,
- LOperand* val,
- bool in_object,
- int offset,
- LOperand* temp,
- bool needs_write_barrier,
- Handle<Map> transition)
- : LStoreNamed(obj, name, val),
- is_in_object_(in_object),
- offset_(offset),
- temp_(temp),
- needs_write_barrier_(needs_write_barrier),
- transition_(transition) { }
+ LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
+ : LStoreNamed(obj, val), temp_(temp) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ bool is_in_object() { return hydrogen()->is_in_object(); }
+ int offset() { return hydrogen()->offset(); }
+ bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
- bool is_in_object() { return is_in_object_; }
- int offset() { return offset_; }
LOperand* temp() { return temp_; }
- bool needs_write_barrier() { return needs_write_barrier_; }
- Handle<Map> transition() const { return transition_; }
- void set_transition(Handle<Map> map) { transition_ = map; }
private:
- bool is_in_object_;
- int offset_;
LOperand* temp_;
- bool needs_write_barrier_;
- Handle<Map> transition_;
};
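
Aside: LStoreNamedField above drops its copied offset/flag/transition fields and reads them back through DECLARE_HYDROGEN_ACCESSOR, the same move this patch applies to LCheckPrototypeMaps and LCmpMapAndBranch. A small standalone sketch of that delegation pattern, with invented names rather than the V8 API:

    // Keep a pointer back to the high-level node and answer every metadata
    // query through it, so the two layers cannot drift apart.
    #include <cstdio>
    #include <string>

    struct HighLevelStore {   // stands in for the hydrogen instruction
      std::string name;
      int offset;
      bool in_object;
    };

    class LoweredStore {      // stands in for the lithium instruction
     public:
      explicit LoweredStore(const HighLevelStore* hydrogen)
          : hydrogen_(hydrogen) {}
      const std::string& name() const { return hydrogen_->name; }
      int offset() const { return hydrogen_->offset; }
      bool is_in_object() const { return hydrogen_->in_object; }
     private:
      const HighLevelStore* hydrogen_;  // single source of truth
    };

    int main() {
      HighLevelStore h{"x", 12, true};
      LoweredStore store(&h);
      std::printf("%s @%d in_object=%d\n",
                  store.name().c_str(), store.offset(), store.is_in_object());
      return 0;
    }
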
class LStoreNamedGeneric: public LStoreNamed {
public:
- LStoreNamedGeneric(LOperand* obj,
- Handle<Object> name,
- LOperand* val)
- : LStoreNamed(obj, name, val) { }
+ LStoreNamedGeneric(LOperand* obj, LOperand* val)
+ : LStoreNamed(obj, val) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
};
-class LStoreKeyed: public LInstruction {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
- : object_(obj), key_(key), value_(val) { }
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ this->SetInputAt(0, obj);
+ this->SetInputAt(1, key);
+ this->SetInputAt(2, val);
+ }
DECLARE_INSTRUCTION(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- LOperand* object() const { return object_; }
- LOperand* key() const { return key_; }
- LOperand* value() const { return value_; }
-
- private:
- LOperand* object_;
- LOperand* key_;
- LOperand* value_;
+ LOperand* object() const { return this->InputAt(0); }
+ LOperand* key() const { return this->InputAt(1); }
+ LOperand* value() const { return this->InputAt(2); }
};
@@ -1633,19 +1656,19 @@ class LStoreKeyedGeneric: public LStoreKeyed {
};
-class LCheckFunction: public LUnaryOperation {
+class LCheckFunction: public LUnaryOperation<0> {
public:
- explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+ explicit LCheckFunction(LOperand* use) : LUnaryOperation<0>(use) { }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
-class LCheckInstanceType: public LUnaryOperation {
+class LCheckInstanceType: public LUnaryOperation<0> {
public:
LCheckInstanceType(LOperand* use, LOperand* temp)
- : LUnaryOperation(use), temp_(temp) { }
+ : LUnaryOperation<0>(use), temp_(temp) { }
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
@@ -1657,41 +1680,36 @@ class LCheckInstanceType: public LUnaryOperation {
};
-class LCheckMap: public LUnaryOperation {
+class LCheckMap: public LUnaryOperation<0> {
public:
- explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+ explicit LCheckMap(LOperand* use) : LUnaryOperation<0>(use) { }
DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
DECLARE_HYDROGEN_ACCESSOR(CheckMap)
};
-class LCheckPrototypeMaps: public LInstruction {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 0> {
public:
- LCheckPrototypeMaps(LOperand* temp,
- Handle<JSObject> holder,
- Handle<Map> receiver_map)
- : temp_(temp),
- holder_(holder),
- receiver_map_(receiver_map) { }
+ explicit LCheckPrototypeMaps(LOperand* temp) : temp_(temp) { }
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+ Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+ Handle<JSObject> holder() const { return hydrogen()->holder(); }
LOperand* temp() const { return temp_; }
- Handle<JSObject> holder() const { return holder_; }
- Handle<Map> receiver_map() const { return receiver_map_; }
private:
LOperand* temp_;
- Handle<JSObject> holder_;
- Handle<Map> receiver_map_;
};
-class LCheckSmi: public LUnaryOperation {
+class LCheckSmi: public LUnaryOperation<0> {
public:
LCheckSmi(LOperand* use, Condition condition)
- : LUnaryOperation(use), condition_(condition) { }
+ : LUnaryOperation<0>(use), condition_(condition) { }
Condition condition() const { return condition_; }
@@ -1705,7 +1723,7 @@ class LCheckSmi: public LUnaryOperation {
};
-class LMaterializedLiteral: public LInstruction {
+class LMaterializedLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_INSTRUCTION(MaterializedLiteral)
};
@@ -1732,7 +1750,7 @@ class LRegExpLiteral: public LMaterializedLiteral {
};
-class LFunctionLiteral: public LInstruction {
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
@@ -1741,18 +1759,18 @@ class LFunctionLiteral: public LInstruction {
};
-class LTypeof: public LUnaryOperation {
+class LTypeof: public LUnaryOperation<1> {
public:
- explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+ explicit LTypeof(LOperand* input) : LUnaryOperation<1>(input) { }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
-class LTypeofIs: public LUnaryOperation {
+class LTypeofIs: public LUnaryOperation<1> {
public:
- explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
- virtual void PrintDataTo(StringStream* stream) const;
+ explicit LTypeofIs(LOperand* input) : LUnaryOperation<1>(input) { }
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
@@ -1772,7 +1790,7 @@ class LTypeofIsAndBranch: public LTypeofIs {
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -1784,9 +1802,10 @@ class LTypeofIsAndBranch: public LTypeofIs {
};
-class LDeleteProperty: public LBinaryOperation {
+class LDeleteProperty: public LBinaryOperation<1> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+ LDeleteProperty(LOperand* obj, LOperand* key)
+ : LBinaryOperation<1>(obj, key) { }
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
@@ -1795,7 +1814,7 @@ class LDeleteProperty: public LBinaryOperation {
};
-class LOsrEntry: public LInstruction {
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry();
@@ -1818,118 +1837,21 @@ class LOsrEntry: public LInstruction {
};
-class LStackCheck: public LInstruction {
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
};
-class LPointerMap: public ZoneObject {
- public:
- explicit LPointerMap(int position)
- : pointer_operands_(8), position_(position), lithium_position_(-1) { }
-
- const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
- int position() const { return position_; }
- int lithium_position() const { return lithium_position_; }
-
- void set_lithium_position(int pos) {
- ASSERT(lithium_position_ == -1);
- lithium_position_ = pos;
- }
-
- void RecordPointer(LOperand* op);
- void PrintTo(StringStream* stream) const;
-
- private:
- ZoneList<LOperand*> pointer_operands_;
- int position_;
- int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
- LEnvironment(Handle<JSFunction> closure,
- int ast_id,
- int parameter_count,
- int argument_count,
- int value_count,
- LEnvironment* outer)
- : closure_(closure),
- arguments_stack_height_(argument_count),
- deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
- translation_index_(-1),
- ast_id_(ast_id),
- parameter_count_(parameter_count),
- values_(value_count),
- representations_(value_count),
- spilled_registers_(NULL),
- spilled_double_registers_(NULL),
- outer_(outer) {
- }
-
- Handle<JSFunction> closure() const { return closure_; }
- int arguments_stack_height() const { return arguments_stack_height_; }
- int deoptimization_index() const { return deoptimization_index_; }
- int translation_index() const { return translation_index_; }
- int ast_id() const { return ast_id_; }
- int parameter_count() const { return parameter_count_; }
- const ZoneList<LOperand*>* values() const { return &values_; }
- LEnvironment* outer() const { return outer_; }
-
- void AddValue(LOperand* operand, Representation representation) {
- values_.Add(operand);
- representations_.Add(representation);
- }
-
- bool HasTaggedValueAt(int index) const {
- return representations_[index].IsTagged();
- }
-
- void Register(int deoptimization_index, int translation_index) {
- ASSERT(!HasBeenRegistered());
- deoptimization_index_ = deoptimization_index;
- translation_index_ = translation_index;
- }
- bool HasBeenRegistered() const {
- return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
- }
-
- void SetSpilledRegisters(LOperand** registers,
- LOperand** double_registers) {
- spilled_registers_ = registers;
- spilled_double_registers_ = double_registers;
- }
-
- // Emit frame translation commands for this environment.
- void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
-
- void PrintTo(StringStream* stream) const;
-
- private:
- Handle<JSFunction> closure_;
- int arguments_stack_height_;
- int deoptimization_index_;
- int translation_index_;
- int ast_id_;
- int parameter_count_;
- ZoneList<LOperand*> values_;
- ZoneList<Representation> representations_;
-
- // Allocation index indexed arrays of spill slot operands for registers
- // that are also in spill slots at an OSR entry. NULL for environments
- // that do not correspond to an OSR entry.
- LOperand** spilled_registers_;
- LOperand** spilled_double_registers_;
-
- LEnvironment* outer_;
-};
-
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
- explicit LChunk(HGraph* graph);
+ explicit LChunk(HGraph* graph)
+ : spill_slot_count_(0),
+ graph_(graph),
+ instructions_(32),
+ pointer_maps_(8),
+ inlined_closures_(1) { }
int AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
@@ -1976,8 +1898,6 @@ class LChunk: public ZoneObject {
inlined_closures_.Add(closure);
}
- void Verify() const;
-
private:
int spill_slot_count_;
HGraph* const graph_;
@@ -2060,14 +1980,24 @@ class LChunkBuilder BASE_EMBEDDED {
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- LInstruction* Define(LInstruction* instr, LUnallocated* result);
- LInstruction* Define(LInstruction* instr);
- LInstruction* DefineAsRegister(LInstruction* instr);
- LInstruction* DefineAsSpilled(LInstruction* instr, int index);
- LInstruction* DefineSameAsAny(LInstruction* instr);
- LInstruction* DefineSameAsFirst(LInstruction* instr);
- LInstruction* DefineFixed(LInstruction* instr, Register reg);
- LInstruction* DefineFixedDouble(LInstruction* instr, XMMRegister reg);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+ int index);
+ template<int I, int T>
+ LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg);
+ template<int I, int T>
+ LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+ XMMRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
@@ -2080,6 +2010,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+ LInstruction* MarkAsSaveDoubles(LInstruction* instr);
LInstruction* SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id);
@@ -2087,8 +2018,6 @@ class LChunkBuilder BASE_EMBEDDED {
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
- // Temporary operand that may be a memory location.
- LOperand* Temp();
// Temporary operand that must be in a register.
LUnallocated* TempRegister();
LOperand* FixedTemp(Register reg);
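
The lithium-ia32.h hunks above replace hand-written operand fields with LTemplateInstruction<R, I, T>, where the template parameters fix the result, input and temp operand counts, inputs are stored through SetInputAt()/InputAt(), and immutable data such as a property name is read back from the associated hydrogen instruction instead of being copied into the lithium node. Below is a minimal standalone sketch of that shape; the names are illustrative only (not V8's real classes), and the fixed-size container with a zero-length specialization is an assumption about how the zero-arity cases are handled.

  // Fixed-size operand storage; the zero-length specialization avoids an
  // illegal zero-sized array for instructions with no results or temps.
  template <class ElementType, int kNumElements>
  struct EmbeddedContainerSketch {
    ElementType& at(int i) { return elems_[i]; }
    ElementType elems_[kNumElements];
  };

  template <class ElementType>
  struct EmbeddedContainerSketch<ElementType, 0> {
    ElementType& at(int i);  // declared only; never called for empty containers
  };

  struct LOperandStub {};  // stand-in for LOperand

  // R, I and T fix the result, input and temp counts at compile time, so the
  // base class owns the storage and derived classes only fill the input slots.
  template <int R, int I, int T>
  class LTemplateInstructionSketch {
   public:
    void SetInputAt(int index, LOperandStub* op) { inputs_.at(index) = op; }
    LOperandStub* InputAt(int index) { return inputs_.at(index); }

   protected:
    EmbeddedContainerSketch<LOperandStub*, R> results_;
    EmbeddedContainerSketch<LOperandStub*, I> inputs_;
    EmbeddedContainerSketch<LOperandStub*, T> temps_;
  };

  // Same shape as LStoreNamed above: no result, two inputs, no temps. A name()
  // accessor would delegate to the hydrogen instruction rather than store a
  // handle here.
  class LStoreNamedSketch : public LTemplateInstructionSketch<0, 2, 0> {
   public:
    LStoreNamedSketch(LOperandStub* obj, LOperandStub* val) {
      SetInputAt(0, obj);
      SetInputAt(1, val);
    }
    LOperandStub* object() { return InputAt(0); }
    LOperandStub* value() { return InputAt(1); }
  };
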
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 7c33906527..10c942a5b3 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -877,55 +877,53 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Immediate(Factory::cons_ascii_string_map()));
}
-// All registers must be distinct. Only current_string needs valid contents
-// on entry. All registers may be invalid on exit. result_operand is
-// unchanged, padding_chars is updated correctly.
-void MacroAssembler::AppendStringToTopOfNewSpace(
- Register current_string, // Tagged pointer to string to copy.
- Register current_string_length,
- Register result_pos,
- Register scratch,
- Register new_padding_chars,
- Operand operand_result,
- Operand operand_padding_chars,
- Label* bailout) {
- mov(current_string_length,
- FieldOperand(current_string, String::kLengthOffset));
- shr(current_string_length, 1);
- sub(current_string_length, operand_padding_chars);
- mov(new_padding_chars, current_string_length);
- add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
- and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
- sub(new_padding_chars, Operand(current_string_length));
- neg(new_padding_chars);
- // We need an allocation even if current_string_length is 0, to fetch
- // result_pos. Consider using a faster fetch of result_pos in that case.
- AllocateInNewSpace(current_string_length, result_pos, scratch, no_reg,
- bailout, NO_ALLOCATION_FLAGS);
- sub(result_pos, operand_padding_chars);
- mov(operand_padding_chars, new_padding_chars);
-
- Register scratch_2 = new_padding_chars; // Used to compute total length.
- // Copy string to the end of result.
- mov(current_string_length,
- FieldOperand(current_string, String::kLengthOffset));
- mov(scratch, operand_result);
- mov(scratch_2, current_string_length);
- add(scratch_2, FieldOperand(scratch, String::kLengthOffset));
- mov(FieldOperand(scratch, String::kLengthOffset), scratch_2);
- shr(current_string_length, 1);
- lea(current_string,
- FieldOperand(current_string, SeqAsciiString::kHeaderSize));
- // Loop condition: while (--current_string_length >= 0).
- Label copy_loop;
- Label copy_loop_entry;
- jmp(&copy_loop_entry);
- bind(&copy_loop);
- mov_b(scratch, Operand(current_string, current_string_length, times_1, 0));
- mov_b(Operand(result_pos, current_string_length, times_1, 0), scratch);
- bind(&copy_loop_entry);
- sub(Operand(current_string_length), Immediate(1));
- j(greater_equal, &copy_loop);
+
+// Copy length bytes from source to destination. Not optimized for long or
+// aligned copies. The contents of scratch and length are destroyed.
+// Destination is incremented by length; source is advanced by at least the
+// dword-aligned portion of length.
+// Many variants of movsb, loop unrolling, word moves, and indexed operands
+// have been tried here already, and this is fastest.
+// A simpler loop is faster on small copies, but 30% slower on large ones.
+// The cld() instruction must have been emitted, to clear the direction flag,
+// before calling this function.
+void MacroAssembler::CopyBytes(Register source,
+ Register destination,
+ Register length,
+ Register scratch) {
+ Label loop, done, short_string, short_loop;
+ // Experimentation shows that the short string loop is faster if length < 10.
+ cmp(Operand(length), Immediate(10));
+ j(less_equal, &short_string);
+
+ ASSERT(source.is(esi));
+ ASSERT(destination.is(edi));
+ ASSERT(length.is(ecx));
+
+ // Because source is 4-byte aligned in our uses of this function,
+ // we keep source aligned for the rep_movs call by copying the odd bytes
+ // at the end of the ranges.
+ mov(scratch, Operand(source, length, times_1, -4));
+ mov(Operand(destination, length, times_1, -4), scratch);
+ mov(scratch, ecx);
+ shr(ecx, 2);
+ rep_movs();
+ and_(Operand(scratch), Immediate(0x3));
+ add(destination, Operand(scratch));
+ jmp(&done);
+
+ bind(&short_string);
+ test(length, Operand(length));
+ j(zero, &done);
+
+ bind(&short_loop);
+ mov_b(scratch, Operand(source, 0));
+ mov_b(Operand(destination, 0), scratch);
+ inc(source);
+ inc(destination);
+ dec(length);
+ j(not_zero, &short_loop);
+
+ bind(&done);
}
@@ -1715,7 +1713,7 @@ void MacroAssembler::Abort(const char* msg) {
}
#endif
// Disable stub call restrictions to always allow calls to abort.
- set_allow_stub_calls(true);
+ AllowStubCallsScope allow_scope(this, true);
push(eax);
push(Immediate(p0));
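
The fast path in CopyBytes above stores the final, possibly unaligned four bytes first, lets rep_movs move length / 4 dwords, and then bumps the destination past the remainder those trailing bytes already covered. A rough standalone C++ sketch of that control flow follows; it illustrates the idea under the same length > 10 threshold and is not the code the macro assembler emits.

  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  // Sketch of the CopyBytes strategy: write the tail dword first, bulk-copy
  // whole dwords (standing in for rep_movs), and fall back to a byte loop for
  // short inputs. Pointers advance the way the registers do in the code above.
  void CopyBytesSketch(const uint8_t*& src, uint8_t*& dst, size_t length) {
    if (length > 10) {
      // The trailing four bytes may be rewritten with the same data by the
      // dword copy, which is harmless; copying them first keeps the bulk copy
      // dword-sized without a separate tail loop.
      std::memcpy(dst + length - 4, src + length - 4, 4);
      size_t dwords = length / 4;
      std::memcpy(dst, src, dwords * 4);  // rep_movs moves length / 4 dwords
      src += dwords * 4;                  // esi stops at the aligned end
      dst += dwords * 4 + (length & 3);   // edi ends up just past the copy
      return;
    }
    while (length > 0) {                  // short_loop: one byte at a time
      *dst++ = *src++;
      --length;
    }
  }

Note that the fast path pins source, destination and length to esi, edi and ecx, the implicit rep_movs operands, and relies on cld() having cleared the direction flag, which is what the ASSERTs and the comment above the function call out.
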
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 6f5fa87297..6f180c6c2b 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -386,22 +386,13 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
- // All registers must be distinct. Only current_string needs valid contents
- // on entry. All registers may be invalid on exit. result_operand is
- // unchanged, padding_chars is updated correctly.
- // The top of new space must contain a sequential ascii string with
- // padding_chars bytes free in its top word. The sequential ascii string
- // current_string is concatenated to it, allocating the necessary amount
- // of new memory.
- void AppendStringToTopOfNewSpace(
- Register current_string, // Tagged pointer to string to copy.
- Register current_string_length,
- Register result_pos,
- Register scratch,
- Register new_padding_chars,
- Operand operand_result,
- Operand operand_padding_chars,
- Label* bailout);
+ // Copy length bytes from source to destination. Not optimized for
+ // long or aligned copies.
+ // The contents of length and scratch are destroyed.
+ void CopyBytes(Register source,
+ Register destination,
+ Register length,
+ Register scratch);
// ---------------------------------------------------------------------------
// Support functions.
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index d435a70775..1213448841 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -211,9 +211,7 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
// If input is ASCII, don't even bother calling here if the string to
// match contains a non-ascii character.
if (mode_ == ASCII) {
- for (int i = 0; i < str.length(); i++) {
- ASSERT(str[i] <= String::kMaxAsciiCharCodeU);
- }
+ ASSERT(String::IsAscii(str.start(), str.length()));
}
#endif
int byte_length = str.length() * char_size();
@@ -654,7 +652,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerIA32::Fail() {
ASSERT(FAILURE == 0); // Return value for failure is zero.
- __ xor_(eax, Operand(eax)); // zero eax.
+ __ Set(eax, Immediate(0));
__ jmp(&exit_label_);
}
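
In Fail(), the explicit xor of eax is replaced by the Set() helper. Assuming Set() special-cases a zero immediate, as such helpers typically do, the generated code keeps the two-byte xor encoding while the call site reads as a plain assignment. A minimal sketch of that idiom, using an illustrative byte emitter rather than V8's Assembler:

  #include <cstdint>
  #include <vector>

  // Illustrative byte emitter (not V8's Assembler): assigning zero keeps the
  // short two-byte "xor eax, eax" form, anything else uses mov eax, imm32.
  struct SketchEmitter {
    std::vector<uint8_t> code;
    void set_eax(int32_t value) {
      if (value == 0) {
        code.push_back(0x31);  // xor r/m32, r32
        code.push_back(0xC0);  // modrm 11 000 000: eax, eax
      } else {
        code.push_back(0xB8);  // mov eax, imm32 (little-endian immediate)
        for (int i = 0; i < 4; ++i) {
          code.push_back(static_cast<uint8_t>((value >> (8 * i)) & 0xFF));
        }
      }
    }
  };
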
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 99888b0898..bcb02ed797 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -1686,6 +1686,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label miss;
Label index_out_of_range;
+
GenerateNameCheck(name, &miss);
// Check that the maps starting from the prototype haven't changed.