path: root/deps/v8/src/x64
Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h            |  38
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc               | 209
-rw-r--r--  deps/v8/src/x64/assembler-x64.h                | 403
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc                |  88
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc              | 908
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h               |  50
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                 |  99
-rw-r--r--  deps/v8/src/x64/codegen-x64.h                  |  27
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc                     |  37
-rw-r--r--  deps/v8/src/x64/debug-x64.cc                   |  72
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc             |  43
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc                  |  27
-rw-r--r--  deps/v8/src/x64/frames-x64.cc                  |  27
-rw-r--r--  deps/v8/src/x64/frames-x64.h                   |  38
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc            | 319
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                      |  35
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc         | 553
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h          |  35
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.cc    |  27
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.h     |  27
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                 | 188
-rw-r--r--  deps/v8/src/x64/lithium-x64.h                  |  72
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc         | 500
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h          |  82
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc  |  77
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h   |  74
-rw-r--r--  deps/v8/src/x64/simulator-x64.cc               |  27
-rw-r--r--  deps/v8/src/x64/simulator-x64.h                |  27
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc              |  88
29 files changed, 1759 insertions, 2438 deletions
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index a559b62758..be99022890 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_
@@ -43,7 +20,8 @@ namespace internal {
static const byte kCallOpcode = 0xE8;
-static const int kNoCodeAgeSequenceLength = 6;
+// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
+static const int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
void Assembler::emitl(uint32_t x) {
@@ -392,12 +370,8 @@ bool RelocInfo::IsPatchedReturnSequence() {
// movq(rsp, rbp); pop(rbp); ret(n); int3 *6
// The 11th byte is int3 (0xCC) in the return sequence and
// REX.WB (0x48+register bit) for the call sequence.
-#ifdef ENABLE_DEBUGGER_SUPPORT
return pc_[Assembler::kMoveAddressIntoScratchRegisterInstructionLength] !=
0xCC;
-#else
- return false;
-#endif
}
@@ -483,14 +457,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
@@ -512,14 +484,12 @@ void RelocInfo::Visit(Heap* heap) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
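
Note: the kNoCodeAgeSequenceLength change above counts the bytes of the function
prologue that the code-aging machinery may later overwrite. A minimal sketch of
that prologue, assuming the MacroAssembler calls named in the new comment
(pushq, movp, Push) and the usual x64 encodings; this is an illustration, not a
verbatim copy of V8's code-aging helper:

  void EmitYoungPrologue(MacroAssembler* masm) {
    masm->pushq(rbp);      // 0x55              -> 1 byte
    masm->movp(rbp, rsp);  // REX.W 0x89 0xE5   -> 3 bytes
    masm->Push(rsi);       // context           -> 1 byte on native x64
    masm->Push(rdi);       // function          -> 1 byte on native x64
  }
  // Total: 6 bytes when kPointerSize == kInt64Size; the x32 ABI variant of the
  // same sequence is longer, hence the 17-byte alternative.
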
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 60383da015..306a54d82b 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -52,13 +29,13 @@ ExternalReference ExternalReference::cpu_features() {
}
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool serializer_enabled) {
ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
#ifdef DEBUG
initialized_ = true;
#endif
supported_ = kDefaultCpuFeatures;
- if (Serializer::enabled()) {
+ if (serializer_enabled) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
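
Note: with this hunk Probe() no longer consults the global Serializer::enabled()
itself; the caller passes the flag in. A hypothetical call site under the new
signature (the actual caller is outside this diff and may differ):

  // Hypothetical: isolate setup decides whether a serializer is active and
  // forwards that decision to the feature probe.
  CpuFeatures::Probe(Serializer::enabled(isolate));
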
@@ -467,24 +444,30 @@ void Assembler::emit_operand(int code, const Operand& adr) {
// Assembler Instruction implementations.
-void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
+void Assembler::arithmetic_op(byte opcode,
+ Register reg,
+ const Operand& op,
+ int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
+ emit_rex(reg, op, size);
emit(opcode);
emit_operand(reg, op);
}
-void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
+void Assembler::arithmetic_op(byte opcode,
+ Register reg,
+ Register rm_reg,
+ int size) {
EnsureSpace ensure_space(this);
ASSERT((opcode & 0xC6) == 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
- emit_rex_64(rm_reg, reg);
+ emit_rex(rm_reg, reg, size);
emit(opcode ^ 0x02);
emit_modrm(rm_reg, reg);
} else {
- emit_rex_64(reg, rm_reg);
+ emit_rex(reg, rm_reg, size);
emit(opcode);
emit_modrm(reg, rm_reg);
}
@@ -520,37 +503,45 @@ void Assembler::arithmetic_op_16(byte opcode,
}
-void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
+void Assembler::arithmetic_op_8(byte opcode, Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ if (!reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg);
+ }
+ emit(opcode);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::arithmetic_op_8(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
+ if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
- emit_optional_rex_32(rm_reg, reg);
- emit(opcode ^ 0x02); // E.g. 0x03 -> 0x01 for ADD.
+ if (!rm_reg.is_byte_register() || !reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(rm_reg, reg);
+ }
+ emit(opcode ^ 0x02);
emit_modrm(rm_reg, reg);
} else {
- emit_optional_rex_32(reg, rm_reg);
+ if (!reg.is_byte_register() || !rm_reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg, rm_reg);
+ }
emit(opcode);
emit_modrm(reg, rm_reg);
}
}
-void Assembler::arithmetic_op_32(byte opcode,
- Register reg,
- const Operand& rm_reg) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_operand(reg, rm_reg);
-}
-
-
void Assembler::immediate_arithmetic_op(byte subcode,
Register dst,
- Immediate src) {
+ Immediate src,
+ int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
if (is_int8(src.value_)) {
emit(0x83);
emit_modrm(subcode, dst);
@@ -567,9 +558,10 @@ void Assembler::immediate_arithmetic_op(byte subcode,
void Assembler::immediate_arithmetic_op(byte subcode,
const Operand& dst,
- Immediate src) {
+ Immediate src,
+ int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
if (is_int8(src.value_)) {
emit(0x83);
emit_operand(subcode, dst);
@@ -621,43 +613,6 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
}
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitl(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
void Assembler::immediate_arithmetic_op_8(byte subcode,
const Operand& dst,
Immediate src) {
@@ -675,8 +630,8 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
Immediate src) {
EnsureSpace ensure_space(this);
if (!dst.is_byte_register()) {
- // Use 64-bit mode byte registers.
- emit_rex_64(dst);
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(dst);
}
ASSERT(is_int8(src.value_) || is_uint8(src.value_));
emit(0x80);
@@ -685,15 +640,19 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
}
-void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
+void Assembler::shift(Register dst,
+ Immediate shift_amount,
+ int subcode,
+ int size) {
EnsureSpace ensure_space(this);
- ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
+ ASSERT(size == kInt64Size ? is_uint6(shift_amount.value_)
+ : is_uint5(shift_amount.value_));
if (shift_amount.value_ == 1) {
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xD1);
emit_modrm(subcode, dst);
} else {
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xC1);
emit_modrm(subcode, dst);
emit(shift_amount.value_);
@@ -701,38 +660,14 @@ void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
}
-void Assembler::shift(Register dst, int subcode) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xD3);
- emit_modrm(subcode, dst);
-}
-
-
-void Assembler::shift_32(Register dst, int subcode) {
+void Assembler::shift(Register dst, int subcode, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xD3);
emit_modrm(subcode, dst);
}
-void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(shift_amount.value_)); // illegal shift count
- if (shift_amount.value_ == 1) {
- emit_optional_rex_32(dst);
- emit(0xD1);
- emit_modrm(subcode, dst);
- } else {
- emit_optional_rex_32(dst);
- emit(0xC1);
- emit_modrm(subcode, dst);
- emit(shift_amount.value_);
- }
-}
-
-
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
@@ -1431,6 +1366,15 @@ void Assembler::movl(const Operand& dst, Label* src) {
}
+void Assembler::movsxbl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBE);
+ emit_operand(dst, src);
+}
+
+
void Assembler::movsxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
@@ -1440,6 +1384,15 @@ void Assembler::movsxbq(Register dst, const Operand& src) {
}
+void Assembler::movsxwl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBF);
+ emit_operand(dst, src);
+}
+
+
void Assembler::movsxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
@@ -2977,12 +2930,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(!RelocInfo::IsNone(rmode));
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
// Don't record external references unless the heap will be serialized.
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
+ if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
return;
}
} else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
@@ -3016,16 +2964,17 @@ void Assembler::RecordComment(const char* msg, bool force) {
}
-MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
// No out-of-line constant pool support.
- UNREACHABLE();
- return NULL;
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
}
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
// No out-of-line constant pool support.
- UNREACHABLE();
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
}
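
Note on the size-parameterized emitters introduced in this file:
arithmetic_op(), immediate_arithmetic_op() and shift() now take an explicit
operand size and choose the REX prefix accordingly, replacing the separate
*_32 variants. The emit_rex(..., size) helper they rely on is not part of this
diff; a plausible sketch of its dispatch, assuming the existing
emit_rex_64/emit_optional_rex_32 emitters:

  // Sketch only (helper not shown in this diff): pick the REX encoding from
  // the requested operand size.
  void Assembler::emit_rex(Register reg, const Operand& op, int size) {
    if (size == kInt64Size) {
      emit_rex_64(reg, op);           // force REX.W for 64-bit operands
    } else {
      ASSERT(size == kInt32Size);
      emit_optional_rex_32(reg, op);  // REX only when extended registers need it
    }
  }
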
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index d47ca32e0d..685d46c094 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -357,6 +357,10 @@ inline Condition ReverseCondition(Condition cc) {
class Immediate BASE_EMBEDDED {
public:
explicit Immediate(int32_t value) : value_(value) {}
+ explicit Immediate(Smi* value) {
+ ASSERT(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
+ value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
+ }
private:
int32_t value_;
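
Note: the new Immediate(Smi*) constructor above relies on 31-bit SMIs, where
the whole tagged value (payload plus tag bit) fits in the low 32 bits of the
pointer, so truncating to int32_t is lossless. A hedged usage sketch:

  // Illustration, assuming SmiValuesAre31Bits(): the tagged representation of
  // a small integer can be used directly as a 32-bit immediate.
  Smi* two = Smi::FromInt(2);
  Immediate imm(two);  // encodes the tagged value of 2 as an int32_t
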
@@ -446,7 +450,7 @@ class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
- static void Probe();
+ static void Probe(bool serializer_enabled);
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
@@ -459,15 +463,11 @@ class CpuFeatures : public AllStatic {
return Check(f, supported_);
}
- static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, found_by_runtime_probing_only_);
- }
-
- static bool IsSafeForSnapshot(CpuFeature f) {
+ static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return Check(f, cross_compile_) ||
(IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ !(Serializer::enabled(isolate) &&
+ Check(f, found_by_runtime_probing_only_)));
}
static bool VerifyCrossCompiling() {
@@ -480,6 +480,8 @@ class CpuFeatures : public AllStatic {
(cross_compile_ & mask) == mask;
}
+ static bool SupportsCrankshaft() { return true; }
+
private:
static bool Check(CpuFeature f, uint64_t set) {
return (set & flag2set(f)) != 0;
@@ -532,6 +534,18 @@ class CpuFeatures : public AllStatic {
V(xor)
+// Shift instructions on operands/registers with kPointerSize, kInt32Size and
+// kInt64Size.
+#define SHIFT_INSTRUCTION_LIST(V) \
+ V(rol, 0x0) \
+ V(ror, 0x1) \
+ V(rcl, 0x2) \
+ V(rcr, 0x3) \
+ V(shl, 0x4) \
+ V(shr, 0x5) \
+ V(sar, 0x7) \
+
+
class Assembler : public AssemblerBase {
private:
// We check before assembling an instruction that there is sufficient
@@ -680,6 +694,8 @@ class Assembler : public AssemblerBase {
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
// - Instructions on operands/registers with pointer size use 'p'.
+ STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
+
#define DECLARE_INSTRUCTION(instruction) \
template<class P1> \
void instruction##p(P1 p1) { \
@@ -776,7 +792,9 @@ class Assembler : public AssemblerBase {
void movq(Register dst, int64_t value);
void movq(Register dst, uint64_t value);
+ void movsxbl(Register dst, const Operand& src);
void movsxbq(Register dst, const Operand& src);
+ void movsxwl(Register dst, const Operand& src);
void movsxwq(Register dst, const Operand& src);
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
@@ -806,15 +824,15 @@ class Assembler : public AssemblerBase {
void cmpb_al(Immediate src);
void cmpb(Register dst, Register src) {
- arithmetic_op(0x3A, dst, src);
+ arithmetic_op_8(0x3A, dst, src);
}
void cmpb(Register dst, const Operand& src) {
- arithmetic_op(0x3A, dst, src);
+ arithmetic_op_8(0x3A, dst, src);
}
void cmpb(const Operand& dst, Register src) {
- arithmetic_op(0x38, src, dst);
+ arithmetic_op_8(0x38, src, dst);
}
void cmpb(const Operand& dst, Immediate src) {
@@ -856,33 +874,32 @@ class Assembler : public AssemblerBase {
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
- void rcl(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x2);
- }
-
- void rol(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x0);
- }
-
- void roll(Register dst, Immediate imm8) {
- shift_32(dst, imm8, 0x0);
- }
-
- void rcr(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x3);
- }
-
- void ror(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x1);
- }
-
- void rorl(Register dst, Immediate imm8) {
- shift_32(dst, imm8, 0x1);
- }
-
- void rorl_cl(Register dst) {
- shift_32(dst, 0x1);
- }
+#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
+ void instruction##p(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kPointerSize); \
+ } \
+ \
+ void instruction##l(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt64Size); \
+ } \
+ \
+ void instruction##p_cl(Register dst) { \
+ shift(dst, subcode, kPointerSize); \
+ } \
+ \
+ void instruction##l_cl(Register dst) { \
+ shift(dst, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q_cl(Register dst) { \
+ shift(dst, subcode, kInt64Size); \
+ }
+ SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
+#undef DECLARE_SHIFT_INSTRUCTION
// Shifts dst:src left by cl bits, affecting only dst.
void shld(Register dst, Register src);
@@ -890,60 +907,6 @@ class Assembler : public AssemblerBase {
// Shifts src:dst right by cl bits, affecting only dst.
void shrd(Register dst, Register src);
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sar(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sarl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sar_cl(Register dst) {
- shift(dst, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sarl_cl(Register dst) {
- shift_32(dst, 0x7);
- }
-
- void shl(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x4);
- }
-
- void shl_cl(Register dst) {
- shift(dst, 0x4);
- }
-
- void shll_cl(Register dst) {
- shift_32(dst, 0x4);
- }
-
- void shll(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x4);
- }
-
- void shr(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x5);
- }
-
- void shr_cl(Register dst) {
- shift(dst, 0x5);
- }
-
- void shrl_cl(Register dst) {
- shift_32(dst, 0x5);
- }
-
- void shrl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x5);
- }
-
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
@@ -1214,7 +1177,7 @@ class Assembler : public AssemblerBase {
void RecordComment(const char* msg, bool force = false);
// Allocate a constant pool of the correct size for the generated code.
- MaybeObject* AllocateConstantPool(Heap* heap);
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
@@ -1425,14 +1388,16 @@ class Assembler : public AssemblerBase {
// AND, OR, XOR, or CMP. The encodings of these operations are all
// similar, differing just in the opcode or in the reg field of the
// ModR/M byte.
+ void arithmetic_op_8(byte opcode, Register reg, Register rm_reg);
+ void arithmetic_op_8(byte opcode, Register reg, const Operand& rm_reg);
void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
- void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
- void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
+ // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
+ void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
+ void arithmetic_op(byte opcode,
+ Register reg,
+ const Operand& rm_reg,
+ int size);
// Operate on a byte in memory or register.
void immediate_arithmetic_op_8(byte subcode,
Register dst,
@@ -1447,20 +1412,20 @@ class Assembler : public AssemblerBase {
void immediate_arithmetic_op_16(byte subcode,
const Operand& dst,
Immediate src);
- // Operate on a 32-bit word in memory or register.
- void immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src);
+ // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
+ void immediate_arithmetic_op(byte subcode,
+ Register dst,
+ Immediate src,
+ int size);
+ void immediate_arithmetic_op(byte subcode,
+ const Operand& dst,
+ Immediate src,
+ int size);
// Emit machine code for a shift operation.
- void shift(Register dst, Immediate shift_amount, int subcode);
- void shift_32(Register dst, Immediate shift_amount, int subcode);
+ void shift(Register dst, Immediate shift_amount, int subcode, int size);
// Shift dst by cl % 64 bits.
- void shift(Register dst, int subcode);
- void shift_32(Register dst, int subcode);
+ void shift(Register dst, int subcode, int size);
void emit_farith(int b1, int b2, int i);
@@ -1473,138 +1438,63 @@ class Assembler : public AssemblerBase {
// Arithmetics
void emit_add(Register dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x03, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x03, dst, src);
- }
+ arithmetic_op(0x03, dst, src, size);
}
void emit_add(Register dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x0, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x0, dst, src);
- }
+ immediate_arithmetic_op(0x0, dst, src, size);
}
void emit_add(Register dst, const Operand& src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x03, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x03, dst, src);
- }
+ arithmetic_op(0x03, dst, src, size);
}
void emit_add(const Operand& dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x1, src, dst);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x1, src, dst);
- }
+ arithmetic_op(0x1, src, dst, size);
}
void emit_add(const Operand& dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x0, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x0, dst, src);
- }
+ immediate_arithmetic_op(0x0, dst, src, size);
}
void emit_and(Register dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x23, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x23, dst, src);
- }
+ arithmetic_op(0x23, dst, src, size);
}
void emit_and(Register dst, const Operand& src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x23, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x23, dst, src);
- }
+ arithmetic_op(0x23, dst, src, size);
}
void emit_and(const Operand& dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x21, src, dst);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x21, src, dst);
- }
+ arithmetic_op(0x21, src, dst, size);
}
void emit_and(Register dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x4, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x4, dst, src);
- }
+ immediate_arithmetic_op(0x4, dst, src, size);
}
void emit_and(const Operand& dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x4, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x4, dst, src);
- }
+ immediate_arithmetic_op(0x4, dst, src, size);
}
void emit_cmp(Register dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x3B, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x3B, dst, src);
- }
+ arithmetic_op(0x3B, dst, src, size);
}
void emit_cmp(Register dst, const Operand& src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x3B, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x3B, dst, src);
- }
+ arithmetic_op(0x3B, dst, src, size);
}
void emit_cmp(const Operand& dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x39, src, dst);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x39, src, dst);
- }
+ arithmetic_op(0x39, src, dst, size);
}
void emit_cmp(Register dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x7, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x7, dst, src);
- }
+ immediate_arithmetic_op(0x7, dst, src, size);
}
void emit_cmp(const Operand& dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x7, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x7, dst, src);
- }
+ immediate_arithmetic_op(0x7, dst, src, size);
}
void emit_dec(Register dst, int size);
@@ -1644,99 +1534,49 @@ class Assembler : public AssemblerBase {
void emit_not(const Operand& dst, int size);
void emit_or(Register dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x0B, dst, src);
- } else {
- arithmetic_op_32(0x0B, dst, src);
- }
+ arithmetic_op(0x0B, dst, src, size);
}
void emit_or(Register dst, const Operand& src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x0B, dst, src);
- } else {
- arithmetic_op_32(0x0B, dst, src);
- }
+ arithmetic_op(0x0B, dst, src, size);
}
void emit_or(const Operand& dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x9, src, dst);
- } else {
- arithmetic_op_32(0x9, src, dst);
- }
+ arithmetic_op(0x9, src, dst, size);
}
void emit_or(Register dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x1, dst, src);
- } else {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
+ immediate_arithmetic_op(0x1, dst, src, size);
}
void emit_or(const Operand& dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x1, dst, src);
- } else {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
+ immediate_arithmetic_op(0x1, dst, src, size);
}
void emit_repmovs(int size);
void emit_sbb(Register dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x1b, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x1b, dst, src);
- }
+ arithmetic_op(0x1b, dst, src, size);
}
void emit_sub(Register dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x2B, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x2B, dst, src);
- }
+ arithmetic_op(0x2B, dst, src, size);
}
void emit_sub(Register dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x5, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x5, dst, src);
- }
+ immediate_arithmetic_op(0x5, dst, src, size);
}
void emit_sub(Register dst, const Operand& src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x2B, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x2B, dst, src);
- }
+ arithmetic_op(0x2B, dst, src, size);
}
void emit_sub(const Operand& dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x29, src, dst);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x29, src, dst);
- }
+ arithmetic_op(0x29, src, dst, size);
}
void emit_sub(const Operand& dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x5, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x5, dst, src);
- }
+ immediate_arithmetic_op(0x5, dst, src, size);
}
void emit_test(Register dst, Register src, int size);
@@ -1748,52 +1588,29 @@ class Assembler : public AssemblerBase {
void emit_xchg(Register dst, Register src, int size);
void emit_xor(Register dst, Register src, int size) {
- if (size == kInt64Size) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
+ if (size == kInt64Size && dst.code() == src.code()) {
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
+ arithmetic_op(0x33, dst, src, kInt32Size);
} else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x33, dst, src);
+ arithmetic_op(0x33, dst, src, size);
}
}
void emit_xor(Register dst, const Operand& src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x33, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x33, dst, src);
- }
+ arithmetic_op(0x33, dst, src, size);
}
void emit_xor(Register dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x6, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x6, dst, src);
- }
+ immediate_arithmetic_op(0x6, dst, src, size);
}
void emit_xor(const Operand& dst, Immediate src, int size) {
- if (size == kInt64Size) {
- immediate_arithmetic_op(0x6, dst, src);
- } else {
- ASSERT(size == kInt32Size);
- immediate_arithmetic_op_32(0x6, dst, src);
- }
+ immediate_arithmetic_op(0x6, dst, src, size);
}
void emit_xor(const Operand& dst, Register src, int size) {
- if (size == kInt64Size) {
- arithmetic_op(0x31, src, dst);
- } else {
- ASSERT(size == kInt32Size);
- arithmetic_op_32(0x31, src, dst);
- }
+ arithmetic_op(0x31, src, dst, size);
}
friend class CodePatcher;
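
Note on the shift rewrite in this header: DECLARE_SHIFT_INSTRUCTION plus
SHIFT_INSTRUCTION_LIST replace the hand-written sar/sarl/shl/shll/... mnemonics
with generated p/l/q variants. For one entry, the expansion implied by the
macro looks like this (shown for shl, subcode 0x4; not separately verified
against preprocessor output):

  void shlp(Register dst, Immediate imm8) { shift(dst, imm8, 0x4, kPointerSize); }
  void shll(Register dst, Immediate imm8) { shift(dst, imm8, 0x4, kInt32Size); }
  void shlq(Register dst, Immediate imm8) { shift(dst, imm8, 0x4, kInt64Size); }
  void shlp_cl(Register dst)              { shift(dst, 0x4, kPointerSize); }
  void shll_cl(Register dst)              { shift(dst, 0x4, kInt32Size); }
  void shlq_cl(Register dst)              { shift(dst, 0x4, kInt64Size); }

Callers migrate accordingly; for example, builtins-x64.cc below switches
__ shl(rdi, Immediate(kPointerSizeLog2)) to __ shlp(...).
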
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index d5b1a73868..9e3b89ac61 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -163,13 +140,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (FLAG_inline_new) {
Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
__ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
-#endif
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
@@ -214,7 +189,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
__ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
+ __ shlp(rdi, Immediate(kPointerSizeLog2));
if (create_memento) {
__ addp(rdi, Immediate(AllocationMemento::kSize));
}
@@ -600,7 +575,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// No type feedback cell is available
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
// Expects rdi to hold function pointer.
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
__ CallStub(&stub);
} else {
ParameterCount actual(rax);
@@ -749,7 +724,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// Tear down internal frame.
}
- __ Pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ DropUnderReturnAddress(1); // Ignore state offset
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
@@ -926,12 +901,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&shift_arguments);
{ Label loop;
__ movp(rcx, rax);
+ StackArgumentsAccessor args(rsp, rcx);
__ bind(&loop);
- __ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ movp(args.GetArgumentOperand(0), rbx);
__ decp(rcx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ popq(rbx); // Discard copy of return address.
+ __ j(not_zero, &loop); // While non-zero.
+ __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
__ decp(rax); // One fewer argument (first argument is new receiver).
}
@@ -963,9 +939,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// expected arguments matches what we're providing. If so, jump
// (tail-call) to the code in register edx without checking arguments.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rbx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadSharedFunctionInfoSpecialField(rbx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpp(rax, rbx);
__ j(not_equal,
@@ -1018,7 +993,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ Push(Operand(rbp, kFunctionOffset));
__ Push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
// End of stack check.
@@ -1323,6 +1298,32 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- rax : actual number of arguments
+ // -- rbx : expected number of arguments
+ // -- rdi: function (passed through to callee)
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(rdx, Heap::kRealStackLimitRootIndex);
+ __ movp(rcx, rsp);
+ // Make rcx the space we have left. The stack might already be overflowed
+ // here which will cause rcx to become negative.
+ __ subp(rcx, rdx);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ movp(rdx, rbx);
+ __ shlp(rdx, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(rcx, rdx);
+ __ j(less_equal, stack_overflow); // Signed comparison.
+}
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ pushq(rbp);
__ movp(rbp, rsp);
@@ -1368,6 +1369,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->arguments_adaptors(), 1);
+ Label stack_overflow;
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
Label enough, too_few;
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpp(rax, rbx);
@@ -1440,6 +1444,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ jmp(rdx);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ int3();
+ }
}
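
Note: the ArgumentsAdaptorStackCheck added above compares the space left above
the "real" stack limit with the space the expected arguments will need. The
same predicate written out in plain C++, as a hedged restatement of the emitted
instructions:

  // Restates the emitted check: rcx = rsp - real_stack_limit (may be negative
  // if the stack is already blown), rdx = expected_args * kPointerSize, and a
  // signed "less or equal" branches to the overflow path.
  bool AdaptorWouldOverflow(intptr_t rsp, intptr_t real_stack_limit,
                            intptr_t expected_args) {
    intptr_t space_left   = rsp - real_stack_limit;
    intptr_t space_needed = expected_args << kPointerSizeLog2;
    return space_left <= space_needed;
  }
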
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index c949a423a2..546595ad41 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -40,7 +17,6 @@ namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rbx };
descriptor->register_param_count_ = 1;
@@ -51,7 +27,6 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
void FastNewContextStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdi };
descriptor->register_param_count_ = 1;
@@ -61,7 +36,6 @@ void FastNewContextStub::InitializeInterfaceDescriptor(
void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -71,7 +45,6 @@ void ToNumberStub::InitializeInterfaceDescriptor(
void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -82,7 +55,6 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx, rcx };
descriptor->register_param_count_ = 3;
@@ -94,7 +66,6 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx, rcx, rdx };
descriptor->register_param_count_ = 4;
@@ -105,7 +76,6 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rbx, rdx };
descriptor->register_param_count_ = 2;
@@ -115,7 +85,6 @@ void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
@@ -126,7 +95,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
@@ -137,7 +105,6 @@ void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rcx, rbx, rax };
descriptor->register_param_count_ = 3;
@@ -148,7 +115,6 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -158,7 +124,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx };
descriptor->register_param_count_ = 1;
@@ -168,7 +133,6 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
void StringLengthStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rcx };
descriptor->register_param_count_ = 2;
@@ -178,7 +142,6 @@ void StringLengthStub::InitializeInterfaceDescriptor(
void KeyedStringLengthStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
@@ -188,7 +151,6 @@ void KeyedStringLengthStub::InitializeInterfaceDescriptor(
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rcx, rax };
descriptor->register_param_count_ = 3;
@@ -199,7 +161,6 @@ void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx };
descriptor->register_param_count_ = 2;
@@ -210,7 +171,6 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
@@ -239,7 +199,6 @@ static void InitializeArrayConstructorDescriptor(
static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
@@ -267,49 +226,42 @@ static void InitializeInternalArrayConstructorDescriptor(
void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeArrayConstructorDescriptor(descriptor, 0);
}
void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeArrayConstructorDescriptor(descriptor, 1);
}
void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeArrayConstructorDescriptor(descriptor, -1);
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
}
void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -317,12 +269,11 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}
void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -330,12 +281,11 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}
void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rcx, rax };
descriptor->register_param_count_ = 3;
@@ -346,7 +296,6 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx, rcx, rdx };
descriptor->register_param_count_ = 4;
@@ -357,19 +306,17 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
void BinaryOpICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
}
void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rcx, rdx, rax };
descriptor->register_param_count_ = 3;
@@ -380,7 +327,6 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
void StringAddStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
@@ -479,10 +425,9 @@ void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
@@ -506,11 +451,11 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
const int argument_count = 1;
__ PrepareCallCFunction(argument_count);
__ LoadAddress(arg_reg_1,
- ExternalReference::isolate_address(masm->isolate()));
+ ExternalReference::isolate_address(isolate()));
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
__ PopCallerSaved(save_doubles_);
__ ret(0);
@@ -871,11 +816,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in rax.
@@ -893,7 +838,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(2);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 2);
+ ExternalReference::power_double_double_function(isolate()), 2);
}
// Return value is in xmm0.
__ movsd(double_result, xmm0);
@@ -914,7 +859,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->prototype_string());
+ __ Cmp(rax, isolate()->factory()->prototype_string());
__ j(not_equal, &miss);
receiver = rdx;
} else {
@@ -1000,7 +945,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// rbx: the mapped parameter count (untagged)
// rax: the allocated object (tagged).
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ SmiToInteger64(rbx, args.GetArgumentOperand(2));
@@ -1363,11 +1308,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Label runtime;
// Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ Load(kScratchRegister, address_of_regexp_stack_memory_size);
__ testp(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
@@ -1519,7 +1463,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: encoding of subject string (1 if ASCII 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
@@ -1530,7 +1474,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
__ LoadAddress(kScratchRegister,
- ExternalReference::isolate_address(masm->isolate()));
+ ExternalReference::isolate_address(isolate()));
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize),
kScratchRegister);
@@ -1556,8 +1500,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#endif
// Argument 5: static offsets vector buffer.
- __ LoadAddress(r8,
- ExternalReference::address_of_static_offsets_vector(isolate));
+ __ LoadAddress(
+ r8, ExternalReference::address_of_static_offsets_vector(isolate()));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
__ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r8);
@@ -1682,8 +1626,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
- __ LoadAddress(rcx,
- ExternalReference::address_of_static_offsets_vector(isolate));
+ __ LoadAddress(
+ rcx, ExternalReference::address_of_static_offsets_vector(isolate()));
// rbx: last_match_info backing store (FixedArray)
// rcx: offsets vector
@@ -1716,7 +1660,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate);
+ Isolate::kPendingExceptionAddress, isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address, rbx);
__ movp(rax, pending_exception_operand);
@@ -1829,7 +1773,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
Condition cc = GetCondition();
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
Label miss;
CheckInputType(masm, rdx, left_, &miss);
@@ -2163,7 +2107,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ Push(rdx);
__ Push(rbx);
- CreateAllocationSiteStub create_stub;
+ CreateAllocationSiteStub create_stub(isolate);
__ CallStub(&create_stub);
__ Pop(rbx);
@@ -2197,14 +2141,77 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
}
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, cont);
+
+ // Do not transform the receiver for natives.
+ // SharedFunctionInfo is already loaded into rcx.
+ __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, cont);
+}
+
+
+static void EmitSlowCase(Isolate* isolate,
+ MacroAssembler* masm,
+ StackArgumentsAccessor* args,
+ int argc,
+ Label* non_function) {
+ // Check for function proxy.
+ __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, non_function);
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdi); // put proxy as additional argument under return address
+ __ PushReturnAddressFrom(rcx);
+ __ Set(rax, argc + 1);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(non_function);
+ __ movp(args->GetReceiverOperand(), rdi);
+ __ Set(rax, argc);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor =
+ isolate->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm,
+ StackArgumentsAccessor* args,
+ Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ Push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Pop(rdi);
+ }
+ __ movp(args->GetReceiverOperand(), rax);
+ __ jmp(cont);
+}
+
+
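The three helpers above pull the receiver-wrapping and non-function slow paths out of CallFunctionStub::Generate so that the new CallICStub::Generate further down can share them. A self-contained C++ model of the decision they jointly encode (the enum and names are illustrative stand-ins, not V8 types):

    // Sketch only: models the control flow emitted by EmitContinueIfStrictOrNative,
    // EmitWrapCase and EmitSlowCase; it is not part of the stub code itself.
    enum class Callee { kStrictOrNative, kSloppyFunction, kFunctionProxy, kNonFunction };

    const char* DispatchCall(Callee callee, bool receiver_is_spec_object) {
      switch (callee) {
        case Callee::kStrictOrNative:  return "invoke with receiver as-is";
        case Callee::kSloppyFunction:  return receiver_is_spec_object
                                           ? "invoke with receiver as-is"
                                           : "wrap receiver via TO_OBJECT, then invoke";
        case Callee::kFunctionProxy:   return "tail-call CALL_FUNCTION_PROXY builtin";
        case Callee::kNonFunction:     return "tail-call CALL_NON_FUNCTION builtin";
      }
      return "";
    }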
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : feedback vector
- // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
// rdi : the function to call
+
+ // wrap_and_call can only be true if we are compiling a monomorphic method.
Isolate* isolate = masm->isolate();
Label slow, non_function, wrap, cont;
- StackArgumentsAccessor args(rsp, argc_);
+ int argc = argc_;
+ StackArgumentsAccessor args(rsp, argc);
if (NeedsChecks()) {
// Check that the function really is a JavaScript function.
@@ -2213,35 +2220,16 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Goto slow case if we do not have a function.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- // Type information was updated. Because we may call Array, which
- // expects either undefined or an AllocationSite in rbx we need
- // to set rbx to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- }
}
// Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
+ ParameterCount actual(argc);
if (CallAsMethod()) {
if (NeedsChecks()) {
- // Do not transform the receiver for strict mode functions.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &cont);
-
- // Do not transform the receiver for natives.
- // SharedFunctionInfo is already loaded into rcx.
- __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &cont);
+ EmitContinueIfStrictOrNative(masm, &cont);
}
-
// Load the receiver from the stack.
__ movp(rax, args.GetReceiverOperand());
@@ -2256,59 +2244,18 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ bind(&cont);
}
+
__ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
if (NeedsChecks()) {
// Slow-case: Non-function called.
__ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (megamorphic symbol) so no write barrier is needed.
- __ SmiToInteger32(rdx, rdx);
- __ Move(FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize),
- TypeFeedbackInfo::MegamorphicSentinel(isolate));
- __ Integer32ToSmi(rdx, rdx);
- }
- // Check for function proxy.
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
- __ PopReturnAddressTo(rcx);
- __ Push(rdi); // put proxy as additional argument under return address
- __ PushReturnAddressFrom(rcx);
- __ Set(rax, argc_ + 1);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ movp(args.GetReceiverOperand(), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor =
- isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ EmitSlowCase(isolate, masm, &args, argc, &non_function);
}
if (CallAsMethod()) {
__ bind(&wrap);
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(rdi);
- __ Push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Pop(rdi);
- }
- __ movp(args.GetReceiverOperand(), rax);
- __ jmp(&cont);
+ EmitWrapCase(masm, &args, &cont);
}
}
@@ -2374,11 +2321,125 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ bind(&do_call);
// Set expected number of arguments to zero (not changing rax).
__ Set(rbx, 0);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(vector, FieldOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
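EmitLoadTypeFeedbackVector reaches the feedback vector with three dependent loads: the JSFunction from the caller's frame slot, its SharedFunctionInfo, and the vector stored on the shared info. A plain-C++ sketch of that pointer chase (the structs are stand-ins for the tagged heap objects addressed by the k*Offset constants above):

    struct FeedbackVector;
    struct SharedFunctionInfo { FeedbackVector* feedback_vector; };  // kFeedbackVectorOffset
    struct JSFunction         { SharedFunctionInfo* shared; };       // kSharedFunctionInfoOffset
    struct Frame              { JSFunction* function; };             // Operand(rbp, kFunctionOffset)

    FeedbackVector* LoadTypeFeedbackVector(Frame* frame) {
      return frame->function->shared->feedback_vector;  // the three movp loads above
    }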
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // rdi - function
+ // rbx - vector
+ // rdx - slot id
+ Isolate* isolate = masm->isolate();
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ StackArgumentsAccessor args(rsp, argc);
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, rbx);
+
+ // The checks. First, does rdi match the recorded monomorphic target?
+ __ SmiToInteger32(rdx, rdx);
+ __ cmpq(rdi, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+
+ // Load the receiver from the stack.
+ __ movp(rax, args.GetReceiverOperand());
+
+ __ JumpIfSmi(rax, &wrap);
+
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &wrap);
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(isolate, masm, &args, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, &args, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ j(equal, &slow_start);
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
+ __ j(equal, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ jmp(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+ // the slow case
+ __ bind(&slow_start);
+ // Check that function is not a smi.
+ __ JumpIfSmi(rdi, &non_function);
+ // Check that function is a JSFunction.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+ __ jmp(&have_js_function);
+
+ // Unreachable
+ __ int3();
+}
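The extra_checks_or_miss block is a small state machine over the feedback slot. A hedged C++ restatement of the decisions it encodes (the sentinel comparisons mirror the Cmp calls above; everything else is an illustrative stand-in):

    enum class Action { kCallMonomorphic, kSlowStart, kMiss, kGoMegamorphicThenSlowStart };

    // 'slot' is the value read from the feedback vector, 'target' the function in rdi.
    Action ClassifyCallSite(const void* slot, const void* target,
                            const void* megamorphic_sentinel,
                            const void* uninitialized_sentinel,
                            bool trace_ic) {
      if (slot == target)                 return Action::kCallMonomorphic;  // have_js_function
      if (slot == megamorphic_sentinel)   return Action::kSlowStart;
      if (slot == uninitialized_sentinel) return Action::kMiss;             // go monomorphic via runtime
      // Monomorphic for a different target: flip the slot to megamorphic and skip
      // the runtime, unless --trace-ic wants the runtime to observe the transition.
      return trace_ic ? Action::kMiss : Action::kGoMegamorphicThenSlowStart;
    }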
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ movp(rcx, Operand(rsp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(rcx);
+ __ Push(rdi);
+ __ Push(rbx);
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdx);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCallIC_Miss),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to edi and exit the internal frame.
+ __ movp(rdi, rax);
+ }
+}
+
+
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@@ -2401,26 +2462,35 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate);
- CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode(isolate);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
+ CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+ save_doubles.GetCode();
}
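This hunk shows the construction pattern now used throughout the file: stubs receive their Isolate at construction and GetCode() no longer takes one. A minimal stand-in for the base-class shape this implies (the real declaration lives in code-stubs.h, which is not part of this diff):

    struct Isolate;  struct Code;  template <class T> struct Handle {};

    class CodeStub {  // stand-in; member names inferred from the call sites above
     public:
      explicit CodeStub(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }  // what the isolate() calls resolve to
      Handle<Code> GetCode();                        // no longer takes an Isolate*
     private:
      Isolate* isolate_;
    };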
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // rax: number of arguments including receiver
+ // rbx: pointer to C function (C callee-saved)
+ // rbp: frame pointer of calling JS frame (restored after C call)
+ // rsp: stack pointer (restored after C call)
+ // rsi: current context (restored)
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+#ifdef _WIN64
+ int arg_stack_space = (result_size_ < 2 ? 2 : 4);
+#else
+ int arg_stack_space = 0;
+#endif
+ __ EnterExitFrame(arg_stack_space, save_doubles_);
+
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r15: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
+ // r15: argv pointer (C callee-saved).
// Simple results returned in rax (both AMD64 and Win64 calling conventions).
// Complex results must be written to address passed as first argument.
@@ -2431,25 +2501,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ CheckStackAlignment();
}
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack is known to be aligned. This function takes one argument which is
- // passed in register.
- __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movp(arg_reg_1, rax);
- __ Move(kScratchRegister,
- ExternalReference::perform_gc_function(masm->isolate()));
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ incl(scope_depth_operand);
- }
-
// Call C function.
#ifdef _WIN64
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
@@ -2460,7 +2511,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Return result in single register (rax).
__ movp(rcx, r14); // argc.
__ movp(rdx, r15); // argv.
- __ Move(r8, ExternalReference::isolate_address(masm->isolate()));
+ __ Move(r8, ExternalReference::isolate_address(isolate()));
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
@@ -2468,26 +2519,18 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Pass a pointer to the Arguments object as the second argument.
__ movp(rdx, r14); // argc.
__ movp(r8, r15); // argv.
- __ Move(r9, ExternalReference::isolate_address(masm->isolate()));
+ __ Move(r9, ExternalReference::isolate_address(isolate()));
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movp(rdi, r14); // argc.
__ movp(rsi, r15); // argv.
- __ Move(rdx, ExternalReference::isolate_address(masm->isolate()));
+ __ Move(rdx, ExternalReference::isolate_address(isolate()));
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ decl(scope_depth_operand);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
#ifdef _WIN64
// If return value is on the stack, pop it to registers.
if (result_size_ > 1) {
@@ -2499,121 +2542,65 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ movq(rdx, Operand(rsp, 7 * kRegisterSize));
}
#endif
- __ leap(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ CompareRoot(rax, Heap::kExceptionRootIndex);
+ __ j(equal, &exception_returned);
+
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ Operand pending_exception_operand =
+ masm->ExternalOperand(pending_exception_address);
+ __ cmpp(r14, pending_exception_operand);
+ __ j(equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
// Exit the JavaScript to C++ exit frame.
__ LeaveExitFrame(save_doubles_);
__ ret(0);
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC continue at retry label
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, Label::kNear);
+ // Handling of exception.
+ __ bind(&exception_returned);
// Retrieve the pending exception.
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
__ movp(rax, pending_exception_operand);
// Clear the pending exception.
- pending_exception_operand =
- masm->ExternalOperand(pending_exception_address);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ movp(pending_exception_operand, rdx);
// Special handling of termination exceptions which are uncatchable
// by javascript code.
+ Label throw_termination_exception;
__ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
+ __ j(equal, &throw_termination_exception);
// Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Enter the exit frame that transitions from JavaScript to C++.
-#ifdef _WIN64
- int arg_stack_space = (result_size_ < 2 ? 2 : 4);
-#else
- int arg_stack_space = 0;
-#endif
- __ EnterExitFrame(arg_stack_space, save_doubles_);
-
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- // a failure result if the collect_garbage argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
-
- Label throw_normal_exception;
- Label throw_termination_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ Move(rax, failure, Assembler::RelocInfoNone());
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- true,
- true);
-
- { FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(0);
- __ CallCFunction(
- ExternalReference::out_of_memory_function(masm->isolate()), 0);
- }
+ __ Throw(rax);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(rax);
-
- __ bind(&throw_normal_exception);
- __ Throw(rax);
}
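With GenerateCore gone, the stub no longer inspects failure tags or retries with GC; the runtime call either returns a plain result or the exception sentinel. An illustrative-only C++ restatement of the new result handling (names are stand-ins for the root and external references used above):

    struct CallResult { const void* value; };

    const void* HandleRuntimeResult(CallResult r,
                                    const void* exception_sentinel,    // Heap::kExceptionRootIndex
                                    const void** pending_exception) {  // kPendingExceptionAddress
      if (r.value != exception_sentinel) {
        return r.value;                 // plain result: LeaveExitFrame and ret
      }
      const void* exception = *pending_exception;  // retrieve the pending exception
      *pending_exception = nullptr;                // cleared with TheHole in the stub
      return exception;                            // then Throw, or ThrowUncatchable for termination
    }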
@@ -2669,17 +2656,15 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ InitializeRootRegister();
}
- Isolate* isolate = masm->isolate();
-
// Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
+ ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
{
Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
__ Push(c_entry_fp_operand);
}
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
__ Load(rax, js_entry_sp);
__ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
@@ -2700,9 +2685,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
+ isolate());
__ Store(pending_exception, rax);
- __ Move(rax, Failure::Exception(), Assembler::RelocInfoNone());
+ __ LoadRoot(rax, Heap::kExceptionRootIndex);
__ jmp(&exit);
// Invoke: Link this frame into the handler chain. There's only one
@@ -2724,10 +2709,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// at the time this code is generated.
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
+ isolate());
__ Load(rax, construct_entry);
} else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
__ Load(rax, entry);
}
__ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
@@ -2800,17 +2785,19 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// indicate that the value is not an instance.
static const int kOffsetToMapCheckValue = 2;
- static const int kOffsetToResultValue = 18;
+ static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14;
// The last 4 bytes of the instruction sequence
- // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
+ // movp(rdi, FieldOperand(rax, HeapObject::kMapOffset))
// Move(kScratchRegister, Factory::the_hole_value())
// in front of the hole value address.
- static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
+ static const unsigned int kWordBeforeMapCheckValue =
+ kPointerSize == kInt64Size ? 0xBA49FF78 : 0xBA41FF78;
// The last 4 bytes of the instruction sequence
// __ j(not_equal, &cache_miss);
// __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
// before the offset of the hole value in the root array.
- static const unsigned int kWordBeforeResultValue = 0x458B4906;
+ static const unsigned int kWordBeforeResultValue =
+ kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106;
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
@@ -3121,7 +3108,8 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Copy from edi to esi using rep movs instruction.
__ movl(kScratchRegister, count);
- __ shr(count, Immediate(kPointerSizeLog2)); // Number of doublewords to copy.
+ // Number of doublewords to copy.
+ __ shrl(count, Immediate(kPointerSizeLog2));
__ repmovsp();
// Find number of bytes left.
@@ -3248,7 +3236,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Longer than original string's length or negative: unsafe arguments.
__ j(above, &runtime);
// Return original string.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
__ bind(&not_original_string);
@@ -3593,7 +3581,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ cmpp(rdx, rax);
__ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->string_compare_native(), 1);
__ ret(2 * kPointerSize);
@@ -3617,227 +3605,30 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
-void ArrayPushStub::Generate(MacroAssembler* masm) {
- int argc = arguments_count();
-
- StackArgumentsAccessor args(rsp, argc);
- if (argc == 0) {
- // Noop, return the length.
- __ movp(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- return;
- }
-
- Isolate* isolate = masm->isolate();
-
- if (argc != 1) {
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
- return;
- }
-
- Label call_builtin, attempt_to_grow_elements, with_write_barrier;
-
- // Get the elements array of the object.
- __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
-
- if (IsFastSmiOrObjectElementsKind(elements_kind())) {
- // Check that the elements are in fast mode and writable.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- isolate->factory()->fixed_array_map());
- __ j(not_equal, &call_builtin);
- }
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
-
- if (IsFastSmiOrObjectElementsKind(elements_kind())) {
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ movp(rcx, args.GetArgumentOperand(1));
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
- // Store the value.
- __ movp(FieldOperand(rdi,
- rax,
- times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- rcx);
- } else {
- __ j(greater, &call_builtin);
-
- __ movp(rcx, args.GetArgumentOperand(1));
- __ StoreNumberToDoubleElements(
- rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
- }
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- if (IsFastDoubleElementsKind(elements_kind())) {
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
- return;
- }
-
- __ bind(&with_write_barrier);
-
- if (IsFastSmiElementsKind(elements_kind())) {
- if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
-
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- isolate->factory()->heap_number_map());
- __ j(equal, &call_builtin);
-
- ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
- ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- __ movp(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movp(rbx, ContextOperand(rbx, Context::JS_ARRAY_MAPS_INDEX));
- const int header_size = FixedArrayBase::kHeaderSize;
- // Verify that the object can be transitioned in place.
- const int origin_offset = header_size + elements_kind() * kPointerSize;
- __ movp(rdi, FieldOperand(rbx, origin_offset));
- __ cmpp(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
- __ j(not_equal, &call_builtin);
-
- const int target_offset = header_size + target_kind * kPointerSize;
- __ movp(rbx, FieldOperand(rbx, target_offset));
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, DONT_TRACK_ALLOCATION_SITE, NULL);
- __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- }
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ leap(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ movp(Operand(rdx, 0), rcx);
-
- __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
- return;
- }
-
- __ movp(rbx, args.GetArgumentOperand(1));
- // Growing elements that are SMI-only requires special handling in case the
- // new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(rbx, &no_fast_elements_check);
- __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate);
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate);
-
- const int kAllocationDelta = 4;
- ASSERT(kAllocationDelta >= argc);
- // Load top.
- __ Load(rcx, new_space_allocation_top);
-
- // Check if it's the end of elements.
- __ leap(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpp(rdx, rcx);
- __ j(not_equal, &call_builtin);
- __ addp(rcx, Immediate(kAllocationDelta * kPointerSize));
- Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
- __ cmpp(rcx, limit_operand);
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ Store(new_space_allocation_top, rcx);
-
- // Push the argument...
- __ movp(Operand(rdx, 0), rbx);
- // ... and fill the rest with holes.
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ movp(Operand(rdx, i * kPointerSize), kScratchRegister);
- }
-
- if (IsFastObjectElementsKind(elements_kind())) {
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to tell
- // the incremental marker to rescan the object that we just grew. We don't
- // need to worry about the holes because they are in old space and already
- // marked black.
- __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
- }
-
- // Restore receiver to rdx as finish sequence assumes it's here.
- __ movp(rdx, args.GetReceiverOperand());
-
- // Increment element's and array's sizes.
- __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(kAllocationDelta));
-
- // Make new length a smi before returning it.
- __ Integer32ToSmi(rax, rax);
- __ movp(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : left
// -- rax : right
// -- rsp[0] : return address
// -----------------------------------
- Isolate* isolate = masm->isolate();
// Load rcx with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ Move(rcx, handle(isolate->heap()->undefined_value()));
+ __ Move(rcx, handle(isolate()->heap()->undefined_value()));
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
__ testb(rcx, Immediate(kSmiTagMask));
__ Assert(not_equal, kExpectedAllocationSite);
__ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- isolate->factory()->allocation_site_map());
+ isolate()->factory()->allocation_site_map());
__ Assert(equal, kExpectedAllocationSite);
}
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
__ TailCallStub(&stub);
}
@@ -3883,7 +3674,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
// Load left and right operand.
Label done, left, left_smi, right_smi;
__ JumpIfSmi(rax, &right_smi, Label::kNear);
- __ CompareMap(rax, masm->isolate()->factory()->heap_number_map());
+ __ CompareMap(rax, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
@@ -3893,7 +3684,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&left);
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
- __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map());
+ __ CompareMap(rdx, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -3918,13 +3709,13 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
CompareIC::GENERIC);
- __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rax, masm->isolate()->factory()->undefined_value());
+ __ Cmp(rax, isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
__ JumpIfSmi(rdx, &unordered);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
@@ -3934,7 +3725,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&maybe_undefined2);
if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
+ __ Cmp(rdx, isolate()->factory()->undefined_value());
__ j(equal, &unordered);
}
@@ -4160,7 +3951,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rdx);
@@ -4233,7 +4024,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ bind(&good);
}
- NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
+ NEGATIVE_LOOKUP);
__ Push(Handle<Object>(name));
__ Push(Immediate(name->Hash()));
__ CallStub(&stub);
@@ -4283,7 +4075,8 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ j(equal, done);
}
- NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), elements, r0, r1,
+ POSITIVE_LOOKUP);
__ Push(name);
__ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
__ shrl(r0, Immediate(Name::kHashShift));
@@ -4344,7 +4137,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
- __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
+ __ Cmp(scratch, isolate()->factory()->undefined_value());
__ j(equal, &not_in_dictionary);
// Stop if found the property.
@@ -4387,10 +4180,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
@@ -4489,14 +4282,13 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
// TODO(gc) Can we just set address arg2 in the beginning?
__ Move(arg_reg_2, address);
__ LoadAddress(arg_reg_3,
- ExternalReference::isolate_address(masm->isolate()));
+ ExternalReference::isolate_address(isolate()));
int argument_count = 3;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count);
__ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
+ ExternalReference::incremental_marking_record_write_function(isolate()),
argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -4674,8 +4466,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), 1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ movp(rbx, MemOperand(rbp, parameter_count_offset));
@@ -4691,7 +4483,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- ProfileEntryHookStub stub;
+ ProfileEntryHookStub stub(masm->isolate());
masm->CallStub(&stub);
}
}
@@ -4716,7 +4508,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
// Call the entry hook function.
- __ Move(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+ __ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()),
Assembler::RelocInfoNone());
AllowExternalCallThatCantCauseGC scope(masm);
@@ -4738,7 +4530,7 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(), mode);
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -4748,7 +4540,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmpl(rdx, Immediate(kind));
__ j(not_equal, &next);
- T stub(kind);
+ T stub(masm->isolate(), kind);
__ TailCallStub(&stub);
__ bind(&next);
}
@@ -4797,12 +4589,14 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
@@ -4832,7 +4626,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmpl(rdx, Immediate(kind));
__ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
__ TailCallStub(&stub);
__ bind(&next);
}
@@ -4851,11 +4645,11 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate);
+ T stub(isolate, kind);
+ stub.GetCode();
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
- T stub1(kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate);
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
}
}
}
@@ -4876,12 +4670,12 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate);
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
}
}
@@ -4964,7 +4758,7 @@ void InternalArrayConstructorStub::GenerateCase(
__ testp(rax, rax);
__ j(not_zero, &not_zero_case);
- InternalArrayNoArgumentConstructorStub stub0(kind);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0);
__ bind(&not_zero_case);
@@ -4980,16 +4774,16 @@ void InternalArrayConstructorStub::GenerateCase(
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey);
}
__ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(kind);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
__ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
__ TailCallStub(&stubN);
}
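GenerateCase dispatches on the (untagged) argument count in rax, splitting the single-argument case further on whether the requested length is zero. A sketch of that selection (the holey condition is simplified; the packed-kind guard sits in the elided context between the two hunks above):

    enum class Ctor { kNoArgument, kSingleArgument, kSingleArgumentHoley, kNArguments };

    Ctor SelectInternalArrayStub(int argc, bool length_arg_is_zero, bool kind_is_packed) {
      if (argc == 0) return Ctor::kNoArgument;
      if (argc == 1) {
        // A non-zero length on a packed kind has to produce the holey variant.
        if (kind_is_packed && !length_arg_is_zero) return Ctor::kSingleArgumentHoley;
        return Ctor::kSingleArgument;
      }
      return Ctor::kNArguments;
    }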
@@ -5024,7 +4818,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ andp(rcx, Immediate(Map::kElementsKindMask));
- __ shr(rcx, Immediate(Map::kElementsKindShift));
+ __ shrp(rcx, Immediate(Map::kElementsKindShift));
if (FLAG_debug_code) {
Label done;
@@ -5105,7 +4899,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ Push(scratch);
// isolate
__ Move(scratch,
- ExternalReference::isolate_address(masm->isolate()));
+ ExternalReference::isolate_address(isolate()));
__ Push(scratch);
// holder
__ Push(holder);
@@ -5143,7 +4937,8 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// v8::InvocationCallback's argument.
__ leap(arguments_arg, StackSpaceOperand(0));
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
// Accessor for FunctionCallbackInfo and first js arg.
StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
@@ -5155,7 +4950,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
__ CallApiFunctionAndReturn(
api_function_address,
- thunk_address,
+ thunk_ref,
callback_arg,
argc + FCA::kArgsLength + 1,
return_value_operand,
@@ -5202,7 +4997,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// could be used to pass arguments.
__ leap(accessor_info_arg, StackSpaceOperand(0));
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
// It's okay if api_function_address == getter_arg
// but not accessor_info_arg or name_arg
@@ -5215,7 +5011,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
PropertyCallbackArguments::kArgsLength - 1 -
PropertyCallbackArguments::kReturnValueOffset);
__ CallApiFunctionAndReturn(api_function_address,
- thunk_address,
+ thunk_ref,
getter_arg,
kStackSpace,
return_value_operand,
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 8c8ab691ac..2d6d21d0ab 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_CODE_STUBS_X64_H_
#define V8_X64_CODE_STUBS_X64_H_
@@ -38,8 +15,8 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
@@ -86,7 +63,7 @@ class StringHelper : public AllStatic {
class SubStringStub: public PlatformCodeStub {
public:
- SubStringStub() {}
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() { return SubString; }
@@ -98,7 +75,7 @@ class SubStringStub: public PlatformCodeStub {
class StringCompareStub: public PlatformCodeStub {
public:
- StringCompareStub() {}
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
// Compares two flat ASCII strings and returns result in rax.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -137,11 +114,16 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- NameDictionaryLookupStub(Register dictionary,
+ NameDictionaryLookupStub(Isolate* isolate,
+ Register dictionary,
Register result,
Register index,
LookupMode mode)
- : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+ : PlatformCodeStub(isolate),
+ dictionary_(dictionary),
+ result_(result),
+ index_(index),
+ mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -197,12 +179,14 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
class RecordWriteStub: public PlatformCodeStub {
public:
- RecordWriteStub(Register object,
+ RecordWriteStub(Isolate* isolate,
+ Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
- : object_(object),
+ : PlatformCodeStub(isolate),
+ object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 9b92dc8673..9903017700 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -256,12 +233,20 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
- // Check backing store for COW-ness. For COW arrays we have to
- // allocate a new backing store.
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &new_backing_store);
+ if (kPointerSize == kDoubleSize) {
+ // Check backing store for COW-ness. For COW arrays we have to
+ // allocate a new backing store.
+ __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &new_backing_store);
+ } else {
+    // For the x32 port we have to allocate a new backing store, as the Smi
+    // size is not equal to the double size.
+ ASSERT(kDoubleSize == 2 * kPointerSize);
+ __ jmp(&new_backing_store);
+ }
+
// Check if the backing store is in new-space. If not, we need to allocate
// a new one since the old one is in pointer-space.
// If in new space, we can reuse the old backing store because it is
@@ -608,10 +593,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
__ leaq(temp1, Operand(temp2, 0x1ff800));
__ andq(temp2, Immediate(0x7ff));
- __ shr(temp1, Immediate(11));
+ __ shrq(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
- __ shl(temp1, Immediate(52));
+ __ shlq(temp1, Immediate(52));
__ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
@@ -631,37 +616,36 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->pushq(rbp);
- patcher.masm()->movp(rbp, rsp);
- patcher.masm()->Push(rsi);
- patcher.masm()->Push(rdi);
- initialized = true;
- }
- return sequence;
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ patcher.masm()->pushq(rbp);
+ patcher.masm()->movp(rbp, rsp);
+ patcher.masm()->Push(rsi);
+ patcher.masm()->Push(rdi);
}
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return *candidate == kCallOpcode;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
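Code::IsYoungSequence now defers to a per-isolate CodeAgingHelper instead of rebuilding a static copy of the young prologue on first use. A hedged sketch of the comparison the helper presumably performs, equivalent to the memcmp in the removed GetNoCodeAgeSequence() path:

    #include <cstddef>
    #include <cstring>

    struct SequenceRef { const unsigned char* start; std::size_t length; };

    // Young code still begins with the unpatched prologue:
    //   pushq rbp; movp rbp, rsp; Push rsi; Push rdi
    bool IsYoungSequence(const SequenceRef& young, const unsigned char* candidate) {
      return std::memcmp(candidate, young.start, young.length) == 0;
    }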
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
+ if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
@@ -678,10 +662,9 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
- CopyBytes(sequence, young_sequence, young_length);
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index e637ff0061..540bba77c8 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 4fa290a8b5..9243e2fb5e 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// CPU specific code for x64 independent of OS goes here.
@@ -41,16 +18,6 @@
namespace v8 {
namespace internal {
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return true; // Yay!
-}
-
-
void CPU::FlushICache(void* start, size_t size) {
// No need to flush the instruction cache on Intel. On Intel instruction
// cache flushing is only necessary when multiple cores running the same
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 36d5df678e..6e0e05fda8 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -37,8 +14,6 @@
namespace v8 {
namespace internal {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
@@ -50,7 +25,7 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
}
@@ -80,7 +55,7 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(),
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
}
@@ -124,7 +99,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Push(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
- __ PushInt64AsTwoSmis(reg);
+ __ PushRegisterAsTwoSmis(reg);
}
}
@@ -134,7 +109,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(rax, 0); // No arguments (argc == 0).
__ Move(rbx, ExternalReference::debug_break(masm->isolate()));
- CEntryStub ceb(1);
+ CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
@@ -149,7 +124,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ PopInt64AsTwoSmis(reg);
+ __ PopRegisterAsTwoSmis(reg);
}
}
@@ -177,6 +152,16 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
+void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- rdx : type feedback slot (smi)
+ // -- rdi : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, rdx.bit() | rdi.bit(), 0, false);
+}
+
+
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
@@ -230,15 +215,6 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for IC call call (from ic-x64.cc)
- // ----------- S t a t e -------------
- // -- rcx: function name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rcx.bit(), 0, false);
-}
-
-
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-x64.cc).
// ----------- S t a t e -------------
@@ -257,18 +233,6 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-x64.cc).
- // ----------- S t a t e -------------
- // -- rdi : function
- // -- rbx: feedback array
- // -- rdx: slot in feedback array
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
- 0, false);
-}
-
-
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Register state for CallConstructStub (from code-stubs-x64.cc).
// rax is the actual number of arguments not encoded as a smi, see comment
@@ -348,8 +312,6 @@ const bool Debug::kFrameDropperSupported = true;
#undef __
-#endif // ENABLE_DEBUGGER_SUPPORT
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
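
Note on the debug-x64.cc hunk above: Generate_DebugBreakCallHelper saves raw (non-object) register values across the debugger call by splitting each 64-bit value into two smi-tagged halves (the renamed PushRegisterAsTwoSmis / PopRegisterAsTwoSmis), so the GC only ever sees tagged words on the expression stack. A minimal stand-alone sketch of that idea; the shift constant and helper names here are illustrative, not V8's actual declarations:

#include <cstdint>

// Illustrative only: x64 smis keep their 32-bit payload in the upper half
// of a 64-bit word, i.e. smi = value << kSmiShift.
static const int kSmiShift = 32;

// Split a raw 64-bit value into two words that are each valid smis.
inline void SplitToTwoSmis(uint64_t value, uint64_t* high, uint64_t* low) {
  *high = (value >> 32) << kSmiShift;          // upper 32 bits, smi-tagged
  *low = (value & 0xFFFFFFFFu) << kSmiShift;   // lower 32 bits, smi-tagged
}

// Reconstruct the original 64-bit value from the two smi-tagged halves.
inline uint64_t JoinFromTwoSmis(uint64_t high, uint64_t low) {
  return ((high >> kSmiShift) << 32) | (low >> kSmiShift);
}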
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 4bc644defe..9016d4b754 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -230,7 +207,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ Pop(Operand(rbx, offset));
+ __ PopQuad(Operand(rbx, offset));
}
// Fill in the double input registers.
@@ -307,13 +284,13 @@ void Deoptimizer::EntryGenerator::Generate() {
// Push state, pc, and continuation from the last output frame.
__ Push(Operand(rbx, FrameDescription::state_offset()));
- __ Push(Operand(rbx, FrameDescription::pc_offset()));
- __ Push(Operand(rbx, FrameDescription::continuation_offset()));
+ __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
+ __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ Push(Operand(rbx, offset));
+ __ PushQuad(Operand(rbx, offset));
}
// Restore the registers from the stack.
@@ -352,11 +329,19 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ if (kPCOnStackSize == 2 * kPointerSize) {
+ // Zero out the high-32 bit of PC for x32 port.
+ SetFrameSlot(offset + kPointerSize, 0);
+ }
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ if (kFPOnStackSize == 2 * kPointerSize) {
+ // Zero out the high-32 bit of FP for x32 port.
+ SetFrameSlot(offset + kPointerSize, 0);
+ }
SetFrameSlot(offset, value);
}
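
Note on the SetCallerPc/SetCallerFp change above: on the x32 port a pointer is 4 bytes but the PC/FP stack slot is still 8 bytes (kPCOnStackSize/kFPOnStackSize equal twice kPointerSize), so the upper half of the slot is cleared before the value is stored. A rough stand-alone illustration of the pattern; the slot layout and helper names are made up for the example:

#include <cstdint>
#include <cstring>

// Hypothetical x32-style frame: 4-byte pointers in 8-byte PC/FP slots.
const unsigned kPointerSize = 4;
const unsigned kFPOnStackSize = 8;

void SetFrameSlot(uint8_t* frame, unsigned offset, uintptr_t value) {
  std::memcpy(frame + offset, &value, kPointerSize);
}

void SetCallerFp(uint8_t* frame, unsigned offset, uintptr_t value) {
  if (kFPOnStackSize == 2 * kPointerSize) {
    // The slot is wider than a pointer: zero the high half so anything that
    // later reads the full 8-byte slot never sees stale data.
    SetFrameSlot(frame, offset + kPointerSize, 0);
  }
  SetFrameSlot(frame, offset, value);
}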
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index b870eae854..bef2f82dfa 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <assert.h>
#include <stdio.h>
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 3154d80a60..7121d68cd3 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 1fb77ffa6c..43c11961c3 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_FRAMES_X64_H_
#define V8_X64_FRAMES_X64_H_
@@ -56,11 +33,12 @@ class EntryFrameConstants : public AllStatic {
static const int kXMMRegistersBlockSize =
kXMMRegisterSize * kCalleeSaveXMMRegisters;
static const int kCallerFPOffset =
- -10 * kPointerSize - kXMMRegistersBlockSize;
+ -3 * kPointerSize + -7 * kRegisterSize - kXMMRegistersBlockSize;
#else
- static const int kCallerFPOffset = -8 * kPointerSize;
+ // We have 3 Push and 5 pushq in the JSEntryStub::GenerateBody.
+ static const int kCallerFPOffset = -3 * kPointerSize + -5 * kRegisterSize;
#endif
- static const int kArgvOffset = 6 * kPointerSize;
+ static const int kArgvOffset = 6 * kPointerSize;
};
@@ -132,6 +110,10 @@ inline Object* JavaScriptFrame::function_slot_object() const {
inline void StackHandler::SetFp(Address slot, Address fp) {
+ if (kFPOnStackSize == 2 * kPointerSize) {
+ // Zero out the high-32 bit of FP for x32 port.
+ Memory::Address_at(slot + kPointerSize) = 0;
+ }
Memory::Address_at(slot) = fp;
}
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index f0b9438626..475080553a 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -74,7 +51,7 @@ class JumpPatchSite BASE_EMBEDDED {
void EmitPatchInfo() {
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
+ ASSERT(is_uint8(delta_to_patch_site));
__ testl(rax, Immediate(delta_to_patch_site));
#ifdef DEBUG
info_emitted_ = true;
@@ -107,11 +84,15 @@ static void EmitStackCheck(MacroAssembler* masm_,
Isolate* isolate = masm_->isolate();
Label ok;
ASSERT(scratch.is(rsp) == (pointers == 0));
+ Heap::RootListIndex index;
if (pointers != 0) {
- __ movq(scratch, rsp);
- __ subq(scratch, Immediate(pointers * kPointerSize));
+ __ movp(scratch, rsp);
+ __ subp(scratch, Immediate(pointers * kPointerSize));
+ index = Heap::kRealStackLimitRootIndex;
+ } else {
+ index = Heap::kStackLimitRootIndex;
}
- __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
+ __ CompareRoot(scratch, index);
__ j(above_equal, &ok, Label::kNear);
__ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
@@ -136,8 +117,6 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- InitializeFeedbackVector();
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -195,7 +174,7 @@ void FullCodeGenerator::Generate() {
const int kMaxPushes = 32;
if (locals_count >= kMaxPushes) {
int loop_iterations = locals_count / kMaxPushes;
- __ movq(rcx, Immediate(loop_iterations));
+ __ movp(rcx, Immediate(loop_iterations));
Label loop_header;
__ bind(&loop_header);
// Do pushes.
@@ -203,7 +182,7 @@ void FullCodeGenerator::Generate() {
__ Push(rdx);
}
// Continue loop if not done.
- __ decq(rcx);
+ __ decp(rcx);
__ j(not_zero, &loop_header, Label::kNear);
}
int remaining = locals_count % kMaxPushes;
@@ -226,7 +205,7 @@ void FullCodeGenerator::Generate() {
__ Push(info->scope()->GetScopeInfo());
__ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
} else {
__ Push(rdi);
@@ -287,7 +266,7 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(type);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, rax, rbx, rdx);
@@ -359,6 +338,9 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
+static const byte kJnsOffset = kPointerSize == kInt64Size ? 0x1d : 0x14;
+
+
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Back edge bookkeeping");
@@ -369,17 +351,22 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
int weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
+ __ j(positive, &ok, Label::kNear);
+ {
+ PredictableCodeSizeScope predictible_code_size_scope(masm_, kJnsOffset);
+ DontEmitDebugCodeScope dont_emit_debug_code_scope(masm_);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
- EmitProfilingCounterReset();
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+ EmitProfilingCounterReset();
+ }
__ bind(&ok);
+
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
@@ -432,11 +419,11 @@ void FullCodeGenerator::EmitReturnSequence() {
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, rcx);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated at least 7 bytes: "movq rsp, rbp; pop rbp; ret k"
- // (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
+ // have just generated at least 7 bytes: "movp rsp, rbp; pop rbp; ret k"
+ // (3 + 1 + 3) for x64 and at least 6 (2 + 1 + 3) bytes for x32.
+ const int kPadding = Assembler::kJSReturnSequenceLength -
+ kPointerSize == kInt64Size ? 7 : 6;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
@@ -444,7 +431,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
+
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -1159,15 +1146,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Object> feedback = Handle<Object>(
- Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
- isolate());
- StoreFeedbackVectorSlot(slot, feedback);
-
// No need for a write barrier, we are storing a Smi in the feedback vector.
__ Move(rbx, FeedbackVector());
__ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
- Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker));
+ TypeFeedbackInfo::MegamorphicSentinel(isolate()));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -1324,7 +1306,9 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode(), info->is_generator());
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
__ Move(rbx, info);
__ CallStub(&stub);
} else {
@@ -1643,8 +1627,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -1658,7 +1642,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
- FastCloneShallowObjectStub stub(properties_count);
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
@@ -1793,11 +1777,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(
+ isolate(),
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
allocation_site_mode,
length);
__ CallStub(&stub);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
+ } else if (expr->depth() > 1 || Serializer::enabled(isolate()) ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
@@ -1821,7 +1806,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ FastCloneShallowArrayStub stub(isolate(),
+ mode,
+ allocation_site_mode, length);
__ CallStub(&stub);
}
@@ -1858,7 +1845,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
// Store the subexpression value in the array's elements.
__ Move(rcx, Smi::FromInt(i));
- StoreArrayLiteralElementStub stub;
+ StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1875,7 +1862,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- ASSERT(expr->target()->IsValidLeftHandSide());
+ ASSERT(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
@@ -2108,7 +2095,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
- CallFunctionStub stub(1, CALL_AS_METHOD);
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2164,9 +2151,8 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// Push holes for arguments to generator function.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rdx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadSharedFunctionInfoSpecialField(rdx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
__ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
@@ -2252,7 +2238,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
- Handle<Map> map(isolate()->native_context()->generator_result_map());
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
__ Allocate(map->instance_size(), rax, rcx, rdx, &gc_required, TAG_OBJECT);
__ jmp(&allocated);
@@ -2317,8 +2303,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movp(rax, rcx);
- BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2365,16 +2351,16 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ Pop(rdx);
- BinaryOpICStub stub(op, mode);
+ BinaryOpICStub stub(isolate(), op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- ASSERT(expr->IsValidLeftHandSide());
+ ASSERT(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2568,14 +2554,14 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
// Code common for calls using the IC.
-void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- CallFunctionFlags flags;
- // Get the target function;
- if (callee->IsVariableProxy()) {
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2583,7 +2569,6 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
- flags = NO_CALL_FUNCTION_FLAGS;
} else {
// Load the function from the receiver.
ASSERT(callee->IsProperty());
@@ -2593,40 +2578,19 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
// Push the target function under the receiver.
__ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
- flags = CALL_AS_METHOD;
- }
-
- // Load the arguments.
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, flags);
- __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
-
- RecordJSReturnSite(expr);
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, rax);
+ EmitCall(expr, call_type);
}
// Common code for calls using the IC.
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
Expression* callee = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
// Load the function from the receiver.
ASSERT(callee->IsProperty());
@@ -2638,29 +2602,12 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
__ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
- // Load the arguments.
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
-
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, CALL_AS_METHOD);
- __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
-
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, rax);
+ EmitCall(expr, CallIC::METHOD);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
- // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2668,20 +2615,19 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
VisitForStackValue(args->at(i));
}
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
- __ Move(rbx, FeedbackVector());
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
__ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot()));
-
- // Record call targets in unoptimized code.
- CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+
RecordJSReturnSite(expr);
+
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -2750,7 +2696,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
@@ -2758,7 +2704,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, rax);
} else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithIC(expr);
+ EmitCallWithLoadIC(expr);
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
@@ -2795,16 +2741,16 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// The receiver is either the global receiver or an object found by
// LoadContextSlot.
- EmitCallWithStub(expr);
+ EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr);
+ EmitCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithIC(expr, property->key());
+ EmitKeyedCallWithLoadIC(expr, property->key());
}
} else {
ASSERT(call_type == Call::OTHER_CALL);
@@ -2814,7 +2760,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
__ PushRoot(Heap::kUndefinedValueRootIndex);
// Emit function call.
- EmitCallWithStub(expr);
+ EmitCall(expr);
}
#ifdef DEBUG
@@ -2851,12 +2797,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ movp(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
- Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
if (FLAG_pretenuring_call_new) {
- StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
- isolate()->factory()->NewAllocationSite());
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
ASSERT(expr->AllocationSiteFeedbackSlot() ==
expr->CallNewFeedbackSlot() + 1);
}
@@ -2864,8 +2806,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ Move(rbx, FeedbackVector());
__ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot()));
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(rax);
}
@@ -3236,7 +3178,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3326,30 +3268,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kHiddenLog, 2);
- }
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- SubStringStub stub;
+ SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -3362,7 +3283,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
+ RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
@@ -3512,7 +3433,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub(MathPowStub::ON_STACK);
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3553,7 +3474,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
// Load the argument into rax and call the stub.
VisitForAccumulatorValue(args->at(0));
- NumberToStringStub stub;
+ NumberToStringStub stub(isolate());
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3679,7 +3600,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(1));
__ Pop(rdx);
- StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3692,32 +3613,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringCompareStub stub;
+ StringCompareStub stub(isolate());
__ CallStub(&stub);
context()->Plug(rax);
}
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_log, 1);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
@@ -3751,7 +3652,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
+ RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -4177,7 +4078,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4313,7 +4214,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- ASSERT(expr->expression()->IsValidLeftHandSide());
+ ASSERT(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -4400,7 +4301,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&slow);
}
- ToNumberStub convert_stub;
+ ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
// Save result for postfix expressions.
@@ -4430,8 +4331,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
- BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4546,12 +4447,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(rax, if_true);
__ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
+ } else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => false.
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
@@ -4559,20 +4461,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ } else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, SYMBOL_TYPE, rdx);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ } else if (String::Equals(check, factory->boolean_string())) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kFalseValueRootIndex);
Split(equal, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
+ String::Equals(check, factory->null_string())) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ } else if (String::Equals(check, factory->undefined_string())) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
@@ -4581,14 +4483,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
+ } else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(rax, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
__ j(equal, if_true);
__ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
+ } else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(rax, if_false);
if (!FLAG_harmony_typeof) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
@@ -4639,7 +4541,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ testp(rax, rax);
@@ -4859,7 +4761,6 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1d;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
#ifdef DEBUG
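
Note on the EmitStackCheck change earlier in this file's diff: when the prologue is about to push a known number of slots it probes rsp minus that amount against the "real" stack limit root, otherwise it compares rsp itself against the ordinary limit, and calls the StackCheck builtin if the probe is below the limit. A hedged C-style rendering of that control flow; the limit variables and builtin call are stand-ins, not V8 APIs:

#include <cstdint>

extern uintptr_t stack_limit;       // stand-in for Heap::kStackLimitRootIndex
extern uintptr_t real_stack_limit;  // stand-in for Heap::kRealStackLimitRootIndex

void CallStackCheckBuiltin();        // stand-in for the StackCheck builtin

// pointers = number of slots the prologue is about to push (0 if none).
void EmitStackCheck(uintptr_t rsp, int pointers, int pointer_size) {
  uintptr_t probe = rsp;
  uintptr_t limit = stack_limit;
  if (pointers != 0) {
    probe = rsp - pointers * pointer_size;  // where rsp will end up
    limit = real_stack_limit;               // stricter limit, no slack zone
  }
  if (probe < limit) CallStackCheckBuiltin();
}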
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index ea118d0763..90a303dbae 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -421,9 +398,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// based on 32 bits of the map pointer and the string hash.
__ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ movl(rcx, rbx);
- __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
+ __ shrl(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
- __ shr(rdi, Immediate(String::kHashShift));
+ __ shrl(rdi, Immediate(String::kHashShift));
__ xorp(rcx, rdi);
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
__ andp(rcx, Immediate(mask));
@@ -439,7 +416,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
__ movp(rdi, rcx);
- __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+ __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
__ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
@@ -1303,7 +1280,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction and the
// condition code uses at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+ uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, test=%p, delta=%d\n",
address, test_instruction_address, delta);
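
Note on the KeyedLoadIC::GenerateGeneric hunk above: the keyed-lookup-cache probe index is built from 32 bits of the map pointer and the string's hash field (shifted, xor-ed, then masked with the cache capacity); the change only switches the hash shifts to the explicit 32-bit shrl forms and the index scaling to shlp. A hedged C++ rendering of that index computation; the constant values below are placeholders, not V8's real ones:

#include <cstdint>

// Placeholder constants; the real values live on KeyedLookupCache/String.
const uint32_t kMapHashShift = 5;
const uint32_t kHashShift = 2;
const uint32_t kCapacityMask = 0x7F;
const uint32_t kHashMask = 0x3F;

// Probe index: mix the low bits of the map word with the string hash field.
uint32_t ProbeIndex(uint32_t map_word, uint32_t hash_field) {
  uint32_t index = (map_word >> kMapHashShift) ^ (hash_field >> kHashShift);
  return index & (kCapacityMask & kHashMask);
}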
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 894a4dd3a7..c3dc8ac30c 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -89,13 +66,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
- info()->CommitDependencies(code);
-}
-
-
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
}
@@ -219,7 +189,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
} else {
__ Push(rdi);
@@ -284,6 +254,12 @@ void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
+ instr->hydrogen_value()->representation().IsInteger32() &&
+ instr->result()->IsRegister()) {
+ __ AssertZeroExtended(ToRegister(instr->result()));
+ }
+
if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
if (instr->result()->IsRegister()) {
Register result_reg = ToRegister(instr->result());
@@ -687,6 +663,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -806,7 +783,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -982,18 +959,18 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -1138,22 +1115,28 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
}
// If the divisor is negative, we have to negate and handle edge cases.
- Label not_kmin_int, done;
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr->environment());
}
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- // Note that we could emit branch-free code, but that would need one more
- // register.
- __ j(no_overflow, &not_kmin_int, Label::kNear);
- if (divisor == -1) {
- DeoptimizeIf(no_condition, instr->environment());
- } else {
- __ movl(dividend, Immediate(kMinInt / divisor));
- __ jmp(&done, Label::kNear);
- }
+
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ sarl(dividend, Immediate(shift));
+ return;
}
+
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ if (divisor == -1) {
+ DeoptimizeIf(overflow, instr->environment());
+ return;
+ }
+
+ Label not_kmin_int, done;
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ __ movl(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
__ bind(&not_kmin_int);
__ sarl(dividend, Immediate(shift));
__ bind(&done);
@@ -1205,11 +1188,64 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
}
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(rax));
+ ASSERT(remainder.is(rdx));
+ ASSERT(result.is(rax));
+ ASSERT(!divisor.is(rax));
+ ASSERT(!divisor.is(rdx));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(divisor, divisor);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ testl(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ testl(divisor, divisor);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmpl(dividend, Immediate(kMinInt));
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmpl(divisor, Immediate(-1));
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&dividend_not_min_int);
+ }
+
+ // Sign extend to rdx (= remainder).
+ __ cdq();
+ __ idivl(divisor);
+
+ Label done;
+ __ testl(remainder, remainder);
+ __ j(zero, &done, Label::kNear);
+ __ xorl(remainder, divisor);
+ __ sarl(remainder, Immediate(31));
+ __ addl(result, remainder);
+ __ bind(&done);
+}
+
+
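The three instructions after idivl turn the truncating x86 quotient into a flooring one: when the remainder is non-zero and its sign differs from the divisor's, the quotient is one too large and must be decremented. Equivalent scalar C++ (a sketch, assuming the usual arithmetic right shift for negative values):

#include <cstdint>
int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
  int32_t q = dividend / divisor;   // truncating quotient (rax after idivl)
  int32_t r = dividend % divisor;   // remainder (rdx after idivl)
  if (r != 0) {
    // (r ^ divisor) >> 31 is -1 when the signs differ, 0 otherwise.
    q += (r ^ divisor) >> 31;
  }
  return q;
}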
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
ASSERT(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
@@ -1261,7 +1297,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
}
__ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ negp(rdx);
+ if (divisor < 0) __ negl(rdx);
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ movl(rax, rdx);
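TruncatingDiv is the usual multiply-by-magic-constant replacement for idiv, leaving the quotient in rdx. Purely as an illustration (constants for divisor 3 from the standard Granlund-Montgomery derivation; this is not the V8 helper itself), truncating division by 3 without a divide looks like:

#include <cstdint>
int32_t TruncatingDivBy3(int32_t n) {
  int64_t m = 0x55555556;                             // magic multiplier for d == 3
  int32_t q = static_cast<int32_t>((m * n) >> 32);    // high 32 bits of the product (arithmetic shift)
  q += static_cast<uint32_t>(n) >> 31;                // +1 correction for negative n
  return q;                                           // n / 3, truncated toward zero
}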
@@ -1272,15 +1308,15 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
}
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->left());
- Register divisor = ToRegister(instr->right());
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
Register remainder = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
ASSERT(dividend.is(rax));
ASSERT(remainder.is(rdx));
- ASSERT(result.is(rax));
+ ASSERT(ToRegister(instr->result()).is(rax));
ASSERT(!divisor.is(rax));
ASSERT(!divisor.is(rdx));
@@ -1314,15 +1350,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cdq();
__ idivl(divisor);
- if (hdiv->IsMathFloorOfDiv()) {
- Label done;
- __ testl(remainder, remainder);
- __ j(zero, &done, Label::kNear);
- __ xorl(remainder, divisor);
- __ sarl(remainder, Immediate(31));
- __ addl(result, remainder);
- __ bind(&done);
- } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
@@ -1473,13 +1501,25 @@ void LCodeGen::DoBitI(LBitI* instr) {
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
- __ andp(ToRegister(left), ToOperand(right));
+ if (instr->IsInteger32()) {
+ __ andl(ToRegister(left), ToOperand(right));
+ } else {
+ __ andp(ToRegister(left), ToOperand(right));
+ }
break;
case Token::BIT_OR:
- __ orp(ToRegister(left), ToOperand(right));
+ if (instr->IsInteger32()) {
+ __ orl(ToRegister(left), ToOperand(right));
+ } else {
+ __ orp(ToRegister(left), ToOperand(right));
+ }
break;
case Token::BIT_XOR:
- __ xorp(ToRegister(left), ToOperand(right));
+ if (instr->IsInteger32()) {
+ __ xorl(ToRegister(left), ToOperand(right));
+ } else {
+ __ xorp(ToRegister(left), ToOperand(right));
+ }
break;
default:
UNREACHABLE();
@@ -1489,13 +1529,25 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
- __ andp(ToRegister(left), ToRegister(right));
+ if (instr->IsInteger32()) {
+ __ andl(ToRegister(left), ToRegister(right));
+ } else {
+ __ andp(ToRegister(left), ToRegister(right));
+ }
break;
case Token::BIT_OR:
- __ orp(ToRegister(left), ToRegister(right));
+ if (instr->IsInteger32()) {
+ __ orl(ToRegister(left), ToRegister(right));
+ } else {
+ __ orp(ToRegister(left), ToRegister(right));
+ }
break;
case Token::BIT_XOR:
- __ xorp(ToRegister(left), ToRegister(right));
+ if (instr->IsInteger32()) {
+ __ xorl(ToRegister(left), ToRegister(right));
+ } else {
+ __ xorp(ToRegister(left), ToRegister(right));
+ }
break;
default:
UNREACHABLE();
@@ -1559,7 +1611,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHL:
if (shift_count != 0) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ shl(ToRegister(left), Immediate(shift_count));
+ __ shlp(ToRegister(left), Immediate(shift_count));
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
@@ -1602,7 +1654,12 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- __ Set(ToRegister(instr->result()), instr->value());
+ Register dst = ToRegister(instr->result());
+ if (instr->value() == 0) {
+ __ xorl(dst, dst);
+ } else {
+ __ movl(dst, Immediate(instr->value()));
+ }
}
@@ -1634,8 +1691,16 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value(isolate());
- __ Move(ToRegister(instr->result()), value);
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ if (instr->hydrogen()->HasObjectMap()) {
+ Handle<Map> object_map = instr->hydrogen()->ObjectMap().handle();
+ ASSERT(object->IsHeapObject());
+ ASSERT(!object_map->is_stable() ||
+ *object_map == Handle<HeapObject>::cast(object)->map());
+ USE(object_map);
+ }
+ __ Move(ToRegister(instr->result()), object);
}
@@ -1775,6 +1840,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
+ ASSERT(!target_rep.IsSmi()); // No support for smi-immediates.
int32_t offset = ToInteger32(LConstantOperand::cast(right));
if (is_p) {
__ leap(ToRegister(instr->result()),
@@ -1793,6 +1859,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
} else {
if (right->IsConstantOperand()) {
+ ASSERT(!target_rep.IsSmi()); // No support for smi-immediates.
if (is_p) {
__ addp(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
@@ -1940,8 +2007,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2534,10 +2601,10 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ Push(ToRegister(instr->left()));
__ Push(ToRegister(instr->right()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
@@ -2620,7 +2687,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
- InstanceofStub stub(flags);
+ InstanceofStub stub(isolate(), flags);
__ Push(ToRegister(instr->value()));
__ Push(instr->function());
@@ -2635,7 +2702,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// safepoint with two arguments because stub is going to
// remove the third argument from the stack before jumping
// to instanceof builtin on the slow path.
- CallCodeGeneric(stub.GetCode(isolate()),
+ CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
@@ -2706,7 +2773,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ SmiToInteger32(reg, reg);
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
__ PopReturnAddressTo(return_addr_reg);
- __ shl(reg, Immediate(kPointerSizeLog2));
+ __ shlp(reg, Immediate(kPointerSizeLog2));
__ addp(rsp, reg);
__ jmp(return_addr_reg);
}
@@ -2848,17 +2915,17 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Representation representation = access.representation();
- if (representation.IsSmi() &&
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
instr->hydrogen()->representation().IsInteger32()) {
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch, FieldOperand(object, offset), representation);
- __ AssertSmi(scratch);
-#endif
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(object, offset), representation);
+ __ AssertSmi(scratch);
+ }
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
representation = Representation::Integer32();
}
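This fast path works because, with 32-bit smi values (SmiValuesAre32Bits), a smi n occupies its 64-bit field as n << 32, so on a little-endian machine the int32 payload sits in the upper half of the slot; bumping the offset by kPointerSize / 2 and doing a 32-bit load reads it directly. Roughly, in C++ terms (a sketch of the layout, not V8 code):

#include <cstdint>
#include <cstring>
int64_t EncodeSmi32(int32_t n) { return static_cast<int64_t>(n) << 32; }  // low half is all tag/zero bits
int32_t LoadSmiPayload(const void* field) {  // field points at the 8-byte slot
  int32_t payload;
  std::memcpy(&payload, static_cast<const char*>(field) + 4, sizeof(payload));  // offset + kPointerSize/2
  return payload;  // little-endian: the upper half of the word
}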
@@ -2981,25 +3048,25 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
switch (elements_kind) {
case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
- __ movsxbq(result, operand);
+ __ movsxbl(result, operand);
break;
case EXTERNAL_UINT8_ELEMENTS:
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
- __ movzxbp(result, operand);
+ __ movzxbl(result, operand);
break;
case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
- __ movsxwq(result, operand);
+ __ movsxwl(result, operand);
break;
case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
- __ movzxwp(result, operand);
+ __ movzxwl(result, operand);
break;
case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
- __ movsxlq(result, operand);
+ __ movl(result, operand);
break;
case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
@@ -3062,23 +3129,23 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->representation();
- if (representation.IsInteger32() &&
+ if (representation.IsInteger32() && SmiValuesAre32Bits() &&
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
ASSERT(!requires_hole_check);
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- offset,
- instr->additional_index()),
- Representation::Smi());
- __ AssertSmi(scratch);
-#endif
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+ }
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
}
@@ -3470,8 +3537,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ bind(&allocated);
__ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ shl(tmp2, Immediate(1));
- __ shr(tmp2, Immediate(1));
+ __ shlq(tmp2, Immediate(1));
+ __ shrq(tmp2, Immediate(1));
__ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
@@ -3573,7 +3640,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ testq(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
__ Set(output_reg, 0);
- __ jmp(&done, Label::kNear);
+ __ jmp(&done);
__ bind(&positive_sign);
}
@@ -3713,7 +3780,7 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
@@ -3721,14 +3788,14 @@ void LCodeGen::DoPower(LPower* instr) {
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
@@ -3819,8 +3886,8 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3832,8 +3899,8 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ Set(rax, instr->arity());
// No cell in ebx for construct type feedback in optimized code
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3851,8 +3918,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
: DONT_OVERRIDE;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -3864,18 +3931,20 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -3929,7 +3998,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Register object = ToRegister(instr->object());
- Handle<Map> transition = instr->transition();
SmiCheck check_needed = hinstr->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
@@ -3948,20 +4016,22 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Condition cc = masm()->CheckSmi(value);
DeoptimizeIf(cc, instr->environment());
- // We know that value is a smi now, so we can omit the check below.
+ // We know now that value is not a smi, so we can omit the check below.
check_needed = OMIT_SMI_CHECK;
}
}
} else if (representation.IsDouble()) {
- ASSERT(transition.is_null());
ASSERT(access.IsInobject());
+ ASSERT(!hinstr->has_transition());
ASSERT(!hinstr->NeedsWriteBarrier());
XMMRegister value = ToDoubleRegister(instr->value());
__ movsd(FieldOperand(object, offset), value);
return;
}
- if (!transition.is_null()) {
+ if (hinstr->has_transition()) {
+ Handle<Map> transition = hinstr->transition_map();
+ AddDeprecationDependency(transition);
if (!hinstr->NeedsWriteBarrierForMap()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
@@ -3986,17 +4056,17 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
- if (representation.IsSmi() &&
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
hinstr->value()->representation().IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch, FieldOperand(write_register, offset), representation);
- __ AssertSmi(scratch);
-#endif
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(write_register, offset), representation);
+ __ AssertSmi(scratch);
+ }
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
representation = Representation::Integer32();
}
@@ -4051,65 +4121,64 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
- Label done;
- __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, check->environment());
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- HBoundsCheck* hinstr = instr->hydrogen();
- if (hinstr->skip_check()) return;
-
- Representation representation = hinstr->length()->representation();
- ASSERT(representation.Equals(hinstr->index()->representation()));
+ Representation representation = instr->hydrogen()->length()->representation();
+ ASSERT(representation.Equals(instr->hydrogen()->index()->representation()));
ASSERT(representation.IsSmiOrInteger32());
- if (instr->length()->IsRegister()) {
- Register reg = ToRegister(instr->length());
-
- if (instr->index()->IsConstantOperand()) {
- int32_t constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
+ Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
+ if (instr->length()->IsConstantOperand()) {
+ int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+ Register index = ToRegister(instr->index());
+ if (representation.IsSmi()) {
+ __ Cmp(index, Smi::FromInt(length));
+ } else {
+ __ cmpl(index, Immediate(length));
+ }
+ cc = ReverseCondition(cc);
+ } else if (instr->index()->IsConstantOperand()) {
+ int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->length()->IsRegister()) {
+ Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ Cmp(reg, Smi::FromInt(constant_index));
+ __ Cmp(length, Smi::FromInt(index));
} else {
- __ cmpl(reg, Immediate(constant_index));
+ __ cmpl(length, Immediate(index));
}
} else {
- Register reg2 = ToRegister(instr->index());
+ Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
- __ cmpp(reg, reg2);
+ __ Cmp(length, Smi::FromInt(index));
} else {
- __ cmpl(reg, reg2);
+ __ cmpl(length, Immediate(index));
}
}
} else {
- Operand length = ToOperand(instr->length());
- if (instr->index()->IsConstantOperand()) {
- int32_t constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
+ Register index = ToRegister(instr->index());
+ if (instr->length()->IsRegister()) {
+ Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ Cmp(length, Smi::FromInt(constant_index));
+ __ cmpp(length, index);
} else {
- __ cmpl(length, Immediate(constant_index));
+ __ cmpl(length, index);
}
} else {
+ Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
- __ cmpp(length, ToRegister(instr->index()));
+ __ cmpp(length, index);
} else {
- __ cmpl(length, ToRegister(instr->index()));
+ __ cmpl(length, index);
}
}
}
- Condition condition = hinstr->allow_equality() ? below : below_equal;
- ApplyCheckIf(condition, instr);
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr->environment());
+ }
}
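The reworked DoBoundsCheck always computes the deoptimization condition as if the comparison were cmp(length, index); when the operands have to be emitted the other way round (constant length against a register index), ReverseCondition flips it. In scalar terms the check reduces to this (sketch; the unsigned comparison also rejects negative indices):

#include <cstdint>
bool MustDeoptimize(uint32_t index, uint32_t length, bool allow_equality) {
  // cmp(length, index) + below / below_equal, or the reversed form with
  // swapped operands, are both equivalent to:
  return allow_equality ? index > length : index >= length;
}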
@@ -4209,23 +4278,23 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->value()->representation();
- if (representation.IsInteger32()) {
+ if (representation.IsInteger32() && SmiValuesAre32Bits()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- offset,
- instr->additional_index()),
- Representation::Smi());
- __ AssertSmi(scratch);
-#endif
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+ }
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
}
@@ -4318,17 +4387,14 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp()), kDontSaveFPRegs);
} else {
+ ASSERT(object_reg.is(rax));
ASSERT(ToRegister(instr->context()).is(rsi));
PushSafepointRegistersScope scope(this);
- if (!object_reg.is(rax)) {
- __ movp(rax, object_reg);
- }
__ Move(rbx, to_map);
bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
}
__ bind(&not_applicable);
}
@@ -4348,9 +4414,10 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->left()).is(rdx));
ASSERT(ToRegister(instr->right()).is(rax));
- StringAddStub stub(instr->hydrogen()->flags(),
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4954,29 +5021,35 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register object_;
};
- if (instr->hydrogen()->CanOmitMapChecks()) return;
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- for (int i = 0; i < map_set.size() - 1; i++) {
- Handle<Map> map = map_set.at(i).handle();
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
__ CompareMap(reg, map);
__ j(equal, &success, Label::kNear);
}
- Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
__ CompareMap(reg, map);
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr->environment());
@@ -5042,7 +5115,7 @@ void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
Register result_reg = ToRegister(instr->result());
if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
__ movq(result_reg, value_reg);
- __ shr(result_reg, Immediate(32));
+ __ shrq(result_reg, Immediate(32));
} else {
__ movd(result_reg, value_reg);
}
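For reference, the HIGH branch extracts the upper 32 bits of the double's IEEE-754 representation (movq into a general-purpose register, then a 64-bit shift), while the LOW branch's movd keeps the lower word. A portable C++ sketch of the HIGH case:

#include <cstdint>
#include <cstring>
uint32_t DoubleHighBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));       // movq xmm -> general register
  return static_cast<uint32_t>(bits >> 32);   // shrq result_reg, 32
}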
@@ -5114,7 +5187,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ movl(temp, Immediate((size / kPointerSize) - 1));
} else {
temp = ToRegister(instr->size());
- __ sar(temp, Immediate(kPointerSizeLog2));
+ __ sarp(temp, Immediate(kPointerSizeLog2));
__ decl(temp);
}
Label loop;
@@ -5229,10 +5302,11 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ Move(rbx, instr->hydrogen()->shared_info());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ Push(rsi);
__ Push(instr->hydrogen()->shared_info());
@@ -5285,14 +5359,15 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Label::Distance false_distance = right_block == next_block ? Label::kNear
: Label::kFar;
Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label, true_distance);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->string_string())) {
+ } else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label, false_distance);
@@ -5300,22 +5375,23 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
- } else if (type_name->Equals(heap()->symbol_string())) {
+ } else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->boolean_string())) {
+ } else if (String::Equals(type_name, factory->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label, true_distance);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->undefined_string())) {
+ } else if (String::Equals(type_name, factory->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label, true_distance);
__ JumpIfSmi(input, false_label, false_distance);
@@ -5325,7 +5401,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
- } else if (type_name->Equals(heap()->function_string())) {
+ } else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
@@ -5333,7 +5409,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->object_string())) {
+ } else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
@@ -5567,11 +5643,55 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ Push(object);
+ __ Push(index);
+ __ xorp(rsi, rsi);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(object, rax);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
+
Label out_of_object, done;
+ __ Move(kScratchRegister, Smi::FromInt(1));
+ __ testp(index, kScratchRegister);
+ __ j(not_zero, deferred->entry());
+
+ __ sarp(index, Immediate(1));
+
__ SmiToInteger32(index, index);
__ cmpl(index, Immediate(0));
__ j(less, &out_of_object, Label::kNear);
@@ -5589,6 +5709,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
index,
times_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
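The new test/sar pair inspects the low bit of the untagged index: when it is set, the field holds a mutable heap number and the value has to be boxed through the deferred runtime call; the remaining bits (and their sign) select the in-object or backing-store slot as before. A decoding sketch of that packing (an assumption read off the emitted sequence, not quoted from the hydrogen side):

struct FieldIndexParts { bool is_mutable_double; int32_t field_index; };
FieldIndexParts DecodeFieldByIndex(int32_t packed) {
  FieldIndexParts parts;
  parts.is_mutable_double = (packed & 1) != 0;  // low bit -> take the deferred runtime path
  parts.field_index = packed >> 1;              // sign still selects in-object vs. properties array
  return parts;
}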
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 37807ede0d..686dc857aa 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
#define V8_X64_LITHIUM_CODEGEN_X64_H_
@@ -35,7 +12,7 @@
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
-#include "v8utils.h"
+#include "utils.h"
#include "x64/lithium-gap-resolver-x64.h"
namespace v8 {
@@ -116,6 +93,9 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -147,8 +127,6 @@ class LCodeGen: public LCodeGenBase {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(BailoutReason reason);
-
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -224,7 +202,6 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void ApplyCheckIf(Condition cc, LBoundsCheck* check);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index 7c7fc29e03..7827abd168 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.h b/deps/v8/src/x64/lithium-gap-resolver-x64.h
index f218455b67..5ceacb17d4 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 8c4f24e8fb..eb9e7dd00a 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -179,12 +156,9 @@ template<int R>
bool LTemplateResultInstruction<R>::MustSignExtendResult(
LPlatformChunk* chunk) const {
HValue* hvalue = this->hydrogen_value();
-
- if (hvalue == NULL) return false;
- if (!hvalue->representation().IsInteger32()) return false;
- if (hvalue->HasRange() && !hvalue->range()->CanBeNegative()) return false;
-
- return chunk->GetDehoistedKeyIds()->Contains(hvalue->id());
+ return hvalue != NULL &&
+ hvalue->representation().IsInteger32() &&
+ chunk->GetDehoistedKeyIds()->Contains(hvalue->id());
}
@@ -465,7 +439,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(BailoutReason reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -652,6 +626,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
!hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
}
return instr;
@@ -895,7 +871,8 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
// it was just a plain use), so it is free to move the split child into
// the same register that is used for the use-at-start.
// See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
int fixed = 0;
int used_at_start = 0;
for (UseIterator it(instr); !it.Done(); it.Advance()) {
@@ -1310,7 +1287,7 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
}
-LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
ASSERT(instr->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
@@ -1322,8 +1299,7 @@ LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
instr->CheckFlag(HValue::kCanOverflow) ||
- (!instr->IsMathFloorOfDiv() &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
result = AssignEnvironment(result);
}
return result;
@@ -1387,13 +1363,31 @@ LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
}
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
+ dividend, divisor, temp), rax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
if (instr->RightIsPowerOf2()) {
return DoFlooringDivByPowerOf2I(instr);
} else if (instr->right()->IsConstant()) {
return DoFlooringDivByConstI(instr);
} else {
- return DoDivI(instr);
+ return DoFlooringDivI(instr);
}
}
@@ -1515,14 +1509,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
HValue* right_candidate = instr->BetterRightOperand();
- LOperand* right = use_lea
- ? UseRegisterOrConstantAtStart(right_candidate)
- : UseOrConstantAtStart(right_candidate);
+ LOperand* right;
+ if (instr->representation().IsSmi()) {
+ // We cannot add a tagged immediate to a tagged value,
+ // so we request it in a register.
+ right = UseRegisterAtStart(right_candidate);
+ } else {
+ right = use_lea ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ }
LAddI* add = new(zone()) LAddI(left, right);
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- LInstruction* result = use_lea
- ? DefineAsRegister(add)
- : DefineSameAsFirst(add);
+ LInstruction* result = use_lea ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
if (can_overflow) {
result = AssignEnvironment(result);
}
@@ -1600,6 +1599,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(r));
@@ -1768,9 +1769,16 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = Use(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseOrConstantAtStart(instr->length())
+ : UseAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1804,26 +1812,21 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ HValue* val = instr->value();
if (from.IsSmi()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
from = Representation::Tagged();
}
- // Only mark conversions that might need to allocate as calling rather than
- // all changes. This makes simple, non-allocating conversion not have to force
- // building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LInstruction* res = DefineAsRegister(new(zone()) LNumberUntagD(value));
- if (!instr->value()->representation().IsSmi()) {
- res = AssignEnvironment(res);
- }
- return res;
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineSameAsFirst(new(zone()) LDummyUse(value));
@@ -1831,78 +1834,70 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
+ LOperand* value = UseRegister(val);
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LInstruction* res =
+ LInstruction* result =
DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
- if (!instr->value()->representation().IsSmi()) {
- // Note: Only deopts in deferred code.
- res = AssignEnvironment(res);
- }
- return res;
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
}
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
LOperand* temp = TempRegister();
-
- // Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else if (to.IsSmi()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return AssignEnvironment(
DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
- if (!instr->CanTruncateToInt32()) {
- result = AssignEnvironment(result);
- }
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegister(val);
return DefineAsRegister(new(zone()) LSmiTag(value));
} else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegister(val);
LOperand* temp1 = TempRegister();
LOperand* temp2 = FixedTemp(xmm1);
LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
return AssignPointerMap(DefineSameAsFirst(result));
} else {
+ LOperand* value = UseRegister(val);
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignPointerMap(DefineSameAsFirst(result));
}
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
if (instr->CheckFlag(HValue::kCanOverflow)) {
- ASSERT(val->CheckFlag(HValue::kUint32));
result = AssignEnvironment(result);
}
return result;
} else {
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ ASSERT(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
LOperand* temp = FixedTemp(xmm1);
return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
+ new(zone()) LUint32ToDouble(UseRegister(val), temp));
} else {
- ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
+ LOperand* value = Use(val);
return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
}
}
@@ -1914,7 +1909,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->IsHeapObject()) result = AssignEnvironment(result);
+ return result;
}
@@ -1938,16 +1935,12 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) {
- value = UseRegisterAtStart(instr->value());
- if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
- }
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (!instr->CanOmitMapChecks()) {
- // Note: Only deopts in deferred code.
- AssignEnvironment(result);
- if (instr->has_migration_target()) return AssignPointerMap(result);
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
}
return result;
}
@@ -2243,7 +2236,6 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
@@ -2252,10 +2244,11 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
object, NULL, new_map_reg, temp_reg);
return result;
} else {
+ LOperand* object = UseFixed(instr->object(), rax);
LOperand* context = UseFixed(instr->context(), rsi);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return AssignPointerMap(result);
+ return MarkAsCall(result, instr);
}
}
@@ -2411,7 +2404,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ info()->code_stub()->GetInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
@@ -2528,6 +2521,7 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
@@ -2589,7 +2583,9 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 9d9ac1ea17..093b95b4dd 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_LITHIUM_X64_H_
#define V8_X64_LITHIUM_X64_H_
@@ -97,6 +74,7 @@ class LCodeGen;
V(Dummy) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -256,7 +234,9 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
@@ -732,14 +712,14 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
@@ -794,6 +774,23 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
};
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
@@ -1228,6 +1225,9 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* right() { return inputs_[1]; }
Token::Value op() const { return hydrogen()->op(); }
+ bool IsInteger32() const {
+ return hydrogen()->representation().IsInteger32();
+ }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
DECLARE_HYDROGEN_ACCESSOR(Bitwise)
@@ -1944,7 +1944,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2130,7 +2130,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
@@ -2339,7 +2338,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMaps(LOperand* value) {
+ explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
}
@@ -2661,6 +2660,8 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
next_block_(NULL),
allocator_(allocator) { }
+ Isolate* isolate() const { return graph_->isolate(); }
+
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2681,12 +2682,13 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathClz32(HUnaryMathOperation* instr);
LInstruction* DoDivByPowerOf2I(HDiv* instr);
LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoDivI(HDiv* instr);
LInstruction* DoModByPowerOf2I(HMod* instr);
LInstruction* DoModByConstI(HMod* instr);
LInstruction* DoModI(HMod* instr);
LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
enum Status {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 6f313f7a66..17db9bf1a5 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -54,10 +31,10 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
-static const int kInvalidRootRegisterDelta = -1;
+static const int64_t kInvalidRootRegisterDelta = -1;
-intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
if (predictable_code_size() &&
(other.address() < reinterpret_cast<Address>(isolate()) ||
other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
@@ -65,17 +42,27 @@ intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
}
Address roots_register_value = kRootRegisterBias +
reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
- intptr_t delta = other.address() - roots_register_value;
+
+ int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
+ if (kPointerSize == kInt64Size) {
+ delta = other.address() - roots_register_value;
+ } else {
+ // For x32, zero extend the address to 64-bit and calculate the delta.
+ uint64_t o = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(other.address()));
+ uint64_t r = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(roots_register_value));
+ delta = o - r;
+ }
return delta;
}
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target);
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
+ int64_t delta = RootRegisterDelta(target);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
}
@@ -85,10 +72,9 @@ Operand MacroAssembler::ExternalOperand(ExternalReference target,
void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
@@ -104,10 +90,9 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination);
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
+ int64_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
}
@@ -124,10 +109,9 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
@@ -138,13 +122,12 @@ void MacroAssembler::LoadAddress(Register destination,
int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
// This calculation depends on the internals of LoadAddress.
// Its correctness is ensured by the asserts in the Call
// instruction below.
- intptr_t delta = RootRegisterDelta(source);
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
// Operand is leap(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
int size = 4;
@@ -161,7 +144,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
- if (is_int32(address) && !Serializer::enabled()) {
+ if (is_int32(address) && !Serializer::enabled(isolate())) {
if (emit_debug_code()) {
Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
@@ -253,7 +236,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
j(equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
+ StoreBufferOverflowStub(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
@@ -269,7 +252,7 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch,
Label::Distance distance) {
- if (Serializer::enabled()) {
+ if (Serializer::enabled(isolate())) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
@@ -431,7 +414,8 @@ void MacroAssembler::RecordWrite(Register object,
&done,
Label::kNear);
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
bind(&done);
@@ -538,12 +522,12 @@ void MacroAssembler::Abort(BailoutReason reason) {
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
@@ -558,14 +542,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addp(rsp, Immediate(num_arguments * kPointerSize));
- }
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-}
-
-
void MacroAssembler::IndexFromHash(Register hash, Register index) {
// The assert checks that the constants for the maximum number of digits
// for an array index cached in the hash field and the number of bits
@@ -577,7 +553,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
// key: string key
// hash: key's hash field, including its array index value.
andp(hash, Immediate(String::kArrayIndexValueMask));
- shr(hash, Immediate(String::kHashShift));
+ shrp(hash, Immediate(String::kHashShift));
// Here we actually clobber the key which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
// there is no difference in using either key.
@@ -591,10 +567,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -602,7 +575,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(rax, num_arguments);
LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size, save_doubles);
+ CEntryStub ces(isolate(), f->result_size, save_doubles);
CallStub(&ces);
}
@@ -612,7 +585,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
Set(rax, num_arguments);
LoadAddress(rbx, ext);
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
@@ -660,7 +633,7 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
void MacroAssembler::CallApiFunctionAndReturn(
Register function_address,
- Address thunk_address,
+ ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
Operand return_value_operand,
@@ -707,16 +680,13 @@ void MacroAssembler::CallApiFunctionAndReturn(
Label profiler_disabled;
Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- Move(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, ExternalReference::is_profiling_address(isolate()));
cmpb(Operand(rax, 0), Immediate(0));
j(zero, &profiler_disabled);
// Third parameter is the address of the actual getter function.
Move(thunk_last_arg, function_address);
- Move(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, thunk_ref);
jmp(&end_profiler_check);
bind(&profiler_disabled);
@@ -821,8 +791,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), result_size);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -981,7 +951,6 @@ void MacroAssembler::Set(const Operand& dst, intptr_t x) {
movp(dst, kScratchRegister);
}
} else {
- ASSERT(kPointerSize == kInt32Size);
movp(dst, Immediate(static_cast<int32_t>(x)));
}
}
@@ -998,11 +967,18 @@ bool MacroAssembler::IsUnsafeInt(const int32_t x) {
void MacroAssembler::SafeMove(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
- ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xorq(dst, kScratchRegister);
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(dst, kScratchRegister);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ movp(dst, Immediate(value ^ jit_cookie()));
+ xorp(dst, Immediate(jit_cookie()));
+ }
} else {
Move(dst, src);
}
@@ -1010,11 +986,18 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
void MacroAssembler::SafePush(Smi* src) {
- ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Push(Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xorq(Operand(rsp, 0), kScratchRegister);
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Push(Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(Operand(rsp, 0), kScratchRegister);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ Push(Immediate(value ^ jit_cookie()));
+ xorp(Operand(rsp, 0), Immediate(jit_cookie()));
+ }
} else {
Push(src);
}
@@ -1096,7 +1079,7 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
if (!dst.is(src)) {
movl(dst, src);
}
- shl(dst, Immediate(kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
}
@@ -1108,8 +1091,15 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
bind(&ok);
}
- ASSERT(kSmiShift % kBitsPerByte == 0);
- movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+
+ if (SmiValuesAre32Bits()) {
+ ASSERT(kSmiShift % kBitsPerByte == 0);
+ movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ Integer32ToSmi(kScratchRegister, src);
+ movp(dst, kScratchRegister);
+ }
}
@@ -1121,7 +1111,7 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
} else {
leal(dst, Operand(src, constant));
}
- shl(dst, Immediate(kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
}
@@ -1130,12 +1120,24 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) {
if (!dst.is(src)) {
movp(dst, src);
}
- shr(dst, Immediate(kSmiShift));
+
+ if (SmiValuesAre32Bits()) {
+ shrp(dst, Immediate(kSmiShift));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ sarl(dst, Immediate(kSmiShift));
+ }
}
void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
- movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movl(dst, src);
+ sarl(dst, Immediate(kSmiShift));
+ }
}
@@ -1144,12 +1146,22 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
if (!dst.is(src)) {
movp(dst, src);
}
- sar(dst, Immediate(kSmiShift));
+ sarp(dst, Immediate(kSmiShift));
+ if (kPointerSize == kInt32Size) {
+ // Sign extend to 64-bit.
+ movsxlq(dst, dst);
+ }
}
void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
- movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movp(dst, src);
+ SmiToInteger64(dst, dst);
+ }
}
@@ -1199,7 +1211,12 @@ void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
AssertSmi(dst);
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ cmpl(dst, Immediate(src));
+ }
}
@@ -1212,7 +1229,13 @@ void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, dst);
+ cmpl(kScratchRegister, src);
+ }
}
@@ -1229,9 +1252,9 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
movp(dst, src);
}
if (power < kSmiShift) {
- sar(dst, Immediate(kSmiShift - power));
+ sarp(dst, Immediate(kSmiShift - power));
} else if (power > kSmiShift) {
- shl(dst, Immediate(power - kSmiShift));
+ shlp(dst, Immediate(power - kSmiShift));
}
}
@@ -1241,7 +1264,7 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
int power) {
ASSERT((0 <= power) && (power < 32));
if (dst.is(src)) {
- shr(dst, Immediate(power + kSmiShift));
+ shrp(dst, Immediate(power + kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
@@ -1284,7 +1307,7 @@ Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
// Test that both bits of the mask 0x8000000000000001 are zero.
movp(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
+ rolp(kScratchRegister, Immediate(1));
testb(kScratchRegister, Immediate(3));
return zero;
}
@@ -1295,8 +1318,15 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
return CheckSmi(first);
}
STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
- leal(kScratchRegister, Operand(first, second, times_1, 0));
- testb(kScratchRegister, Immediate(0x03));
+ if (SmiValuesAre32Bits()) {
+ leal(kScratchRegister, Operand(first, second, times_1, 0));
+ testb(kScratchRegister, Immediate(0x03));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ }
return zero;
}
@@ -1308,7 +1338,7 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
}
movp(kScratchRegister, first);
orp(kScratchRegister, second);
- rol(kScratchRegister, Immediate(1));
+ rolp(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
return zero;
}
@@ -1342,16 +1372,28 @@ Condition MacroAssembler::CheckIsMinSmi(Register src) {
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
- // A 32-bit integer value can always be converted to a smi.
- return always;
+ if (SmiValuesAre32Bits()) {
+ // A 32-bit integer value can always be converted to a smi.
+ return always;
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ cmpl(src, Immediate(0xc0000000));
+ return positive;
+ }
}
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
- // An unsigned 32-bit integer value is valid as long as the high bit
- // is not set.
- testl(src, src);
- return positive;
+ if (SmiValuesAre32Bits()) {
+ // An unsigned 32-bit integer value is valid as long as the high bit
+ // is not set.
+ testl(src, src);
+ return positive;
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ testl(src, Immediate(0xc0000000));
+ return zero;
+ }
}
@@ -1494,7 +1536,13 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
- addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
+ if (SmiValuesAre32Bits()) {
+ addl(Operand(dst, kSmiShift / kBitsPerByte),
+ Immediate(constant->value()));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ addp(dst, Immediate(constant));
+ }
}
}
@@ -1952,8 +2000,14 @@ void MacroAssembler::SmiMod(Register dst,
void MacroAssembler::SmiNot(Register dst, Register src) {
ASSERT(!dst.is(kScratchRegister));
ASSERT(!src.is(kScratchRegister));
- // Set tag and padding bits before negating, so that they are zero afterwards.
- movl(kScratchRegister, Immediate(~0));
+ if (SmiValuesAre32Bits()) {
+ // Set tag and padding bits before negating, so that they are zero
+ // afterwards.
+ movl(kScratchRegister, Immediate(~0));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movl(kScratchRegister, Immediate(1));
+ }
if (dst.is(src)) {
xorp(dst, kScratchRegister);
} else {
@@ -2034,8 +2088,8 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
ASSERT(is_uint5(shift_value));
if (shift_value > 0) {
if (dst.is(src)) {
- sar(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ sarp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
@@ -2050,7 +2104,7 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
movp(dst, src);
}
if (shift_value > 0) {
- shl(dst, Immediate(shift_value));
+ shlp(dst, Immediate(shift_value));
}
}
@@ -2067,8 +2121,8 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
testp(dst, dst);
j(negative, on_not_smi_result, near_jump);
}
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ shrq(dst, Immediate(shift_value + kSmiShift));
+ shlq(dst, Immediate(kSmiShift));
}
}
@@ -2084,7 +2138,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
SmiToInteger32(rcx, src2);
// Shift amount specified by lower 5 bits, not six as the shl opcode.
andq(rcx, Immediate(0x1f));
- shl_cl(dst);
+ shlq_cl(dst);
}
@@ -2107,8 +2161,8 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
+ shrq_cl(dst); // Shift is rcx modulo 0x1f + 32.
+ shlq(dst, Immediate(kSmiShift));
testq(dst, dst);
if (src1.is(rcx) || src2.is(rcx)) {
Label positive_result;
@@ -2144,8 +2198,8 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
- sar_cl(dst); // Shift 32 + original rcx & 0x1f.
- shl(dst, Immediate(kSmiShift));
+ sarp_cl(dst); // Shift 32 + original rcx & 0x1f.
+ shlp(dst, Immediate(kSmiShift));
if (src1.is(rcx)) {
movp(src1, kScratchRegister);
} else if (src2.is(rcx)) {
@@ -2194,41 +2248,78 @@ void MacroAssembler::SelectNonSmi(Register dst,
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
- ASSERT(is_uint6(shift));
- // There is a possible optimization if shift is in the range 60-63, but that
- // will (and must) never happen.
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ ASSERT(is_uint6(shift));
+ // There is a possible optimization if shift is in the range 60-63, but that
+ // will (and must) never happen.
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ // We have to sign extend the index register to 64-bit as the SMI might
+ // be negative.
+ movsxlq(dst, dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
+
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
Register src,
int shift) {
- // Register src holds a positive smi.
- ASSERT(is_uint6(shift));
- if (!dst.is(src)) {
- movq(dst, src);
- }
- negq(dst);
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ // Register src holds a positive smi.
+ ASSERT(is_uint6(shift));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negp(dst);
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negq(dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
- ASSERT_EQ(0, kSmiShift % kBitsPerByte);
- addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ ASSERT_EQ(0, kSmiShift % kBitsPerByte);
+ addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, src);
+ addl(dst, kScratchRegister);
+ }
}
@@ -2243,32 +2334,39 @@ void MacroAssembler::Push(Smi* source) {
}
-void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
+void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
+ ASSERT(!src.is(scratch));
movp(scratch, src);
// High bits.
- shr(src, Immediate(64 - kSmiShift));
- shl(src, Immediate(kSmiShift));
+ shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
+ shlp(src, Immediate(kSmiShift));
Push(src);
// Low bits.
- shl(scratch, Immediate(kSmiShift));
+ shlp(scratch, Immediate(kSmiShift));
Push(scratch);
}
-void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
+void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
+ ASSERT(!dst.is(scratch));
Pop(scratch);
// Low bits.
- shr(scratch, Immediate(kSmiShift));
+ shrp(scratch, Immediate(kSmiShift));
Pop(dst);
- shr(dst, Immediate(kSmiShift));
+ shrp(dst, Immediate(kSmiShift));
// High bits.
- shl(dst, Immediate(64 - kSmiShift));
+ shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
orp(dst, scratch);
}
void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
+ if (SmiValuesAre32Bits()) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ testl(src, Immediate(source));
+ }
}
@@ -2315,7 +2413,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
// So we have to premultiply entry index before lookup.
- shl(scratch, Immediate(kPointerSizeLog2 + 1));
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
Register index = scratch;
Register probe = mask;
@@ -2338,7 +2436,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
// So we have to premultiply entry index before lookup.
- shl(scratch, Immediate(kPointerSizeLog2 + 1));
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
cmpp(object,
@@ -2576,11 +2674,24 @@ void MacroAssembler::Drop(int stack_elements) {
}
+void MacroAssembler::DropUnderReturnAddress(int stack_elements,
+ Register scratch) {
+ ASSERT(stack_elements > 0);
+ if (kPointerSize == kInt64Size && stack_elements == 1) {
+ popq(MemOperand(rsp, 0));
+ return;
+ }
+
+ PopReturnAddressTo(scratch);
+ Drop(stack_elements);
+ PushReturnAddressFrom(scratch);
+}
+
+
void MacroAssembler::Push(Register src) {
if (kPointerSize == kInt64Size) {
pushq(src);
} else {
- ASSERT(kPointerSize == kInt32Size);
// x32 uses 64-bit push for rbp in the prologue.
ASSERT(src.code() != rbp.code());
leal(rsp, Operand(rsp, -4));
@@ -2593,7 +2704,6 @@ void MacroAssembler::Push(const Operand& src) {
if (kPointerSize == kInt64Size) {
pushq(src);
} else {
- ASSERT(kPointerSize == kInt32Size);
movp(kScratchRegister, src);
leal(rsp, Operand(rsp, -4));
movp(Operand(rsp, 0), kScratchRegister);
@@ -2601,11 +2711,20 @@ void MacroAssembler::Push(const Operand& src) {
}
+void MacroAssembler::PushQuad(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ movp(kScratchRegister, src);
+ pushq(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Push(Immediate value) {
if (kPointerSize == kInt64Size) {
pushq(value);
} else {
- ASSERT(kPointerSize == kInt32Size);
leal(rsp, Operand(rsp, -4));
movp(Operand(rsp, 0), value);
}
@@ -2616,7 +2735,6 @@ void MacroAssembler::PushImm32(int32_t imm32) {
if (kPointerSize == kInt64Size) {
pushq_imm32(imm32);
} else {
- ASSERT(kPointerSize == kInt32Size);
leal(rsp, Operand(rsp, -4));
movp(Operand(rsp, 0), Immediate(imm32));
}
@@ -2627,7 +2745,6 @@ void MacroAssembler::Pop(Register dst) {
if (kPointerSize == kInt64Size) {
popq(dst);
} else {
- ASSERT(kPointerSize == kInt32Size);
// x32 uses 64-bit pop for rbp in the epilogue.
ASSERT(dst.code() != rbp.code());
movp(dst, Operand(rsp, 0));
@@ -2640,7 +2757,6 @@ void MacroAssembler::Pop(const Operand& dst) {
if (kPointerSize == kInt64Size) {
popq(dst);
} else {
- ASSERT(kPointerSize == kInt32Size);
Register scratch = dst.AddressUsesRegister(kScratchRegister)
? kSmiConstantRegister : kScratchRegister;
movp(scratch, Operand(rsp, 0));
@@ -2656,10 +2772,44 @@ void MacroAssembler::Pop(const Operand& dst) {
}
-void MacroAssembler::TestBit(const Operand& src, int bits) {
+void MacroAssembler::PopQuad(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ popq(kScratchRegister);
+ movp(dst, kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
+ Register base,
+ int offset) {
+ ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt64Size) {
+ movsxlq(dst, FieldOperand(base, offset));
+ } else {
+ movp(dst, FieldOperand(base, offset));
+ SmiToInteger32(dst, dst);
+ }
+}
+
+
+void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
+ int offset,
+ int bits) {
+ ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt32Size) {
+ // On x32, this field is represented by SMI.
+ bits += kSmiShift;
+ }
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
- testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+ testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
@@ -2673,7 +2823,6 @@ void MacroAssembler::Jump(const Operand& op) {
if (kPointerSize == kInt64Size) {
jmp(op);
} else {
- ASSERT(kPointerSize == kInt32Size);
movp(kScratchRegister, op);
jmp(kScratchRegister);
}
@@ -2715,7 +2864,6 @@ void MacroAssembler::Call(const Operand& op) {
if (kPointerSize == kInt64Size) {
call(op);
} else {
- ASSERT(kPointerSize == kInt32Size);
movp(kScratchRegister, op);
call(kScratchRegister);
}
@@ -2893,7 +3041,7 @@ void MacroAssembler::JumpToHandlerEntry() {
// a fixed array of (smi-tagged) code offsets.
// rax = exception, rdi = code object, rdx = state.
movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
- shr(rdx, Immediate(StackHandler::kKindWidth));
+ shrp(rdx, Immediate(StackHandler::kKindWidth));
movp(rdx,
FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
@@ -3188,8 +3336,8 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
void MacroAssembler::SlowTruncateToI(Register result_reg,
Register input_reg,
int offset) {
- DoubleToIStub stub(input_reg, result_reg, offset, true);
- call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
+ call(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -3212,6 +3360,8 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
}
bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
}
@@ -3228,6 +3378,8 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
addp(rsp, Immediate(kDoubleSize));
bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
}
@@ -3498,9 +3650,9 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
// It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
// field).
- TestBit(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kCompilerHintsOffset),
- SharedFunctionInfo::kBoundFunction);
+ TestBitSharedFunctionInfoSpecialField(kScratchRegister,
+ SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
j(not_zero, miss);
}
@@ -3573,15 +3725,13 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
+ CEntryStub ces(isolate(), 1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::InvokeCode(Register code,
@@ -3627,8 +3777,8 @@ void MacroAssembler::InvokeFunction(Register function,
ASSERT(function.is(rdi));
movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ LoadSharedFunctionInfoSpecialField(rbx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
@@ -4882,7 +5032,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
shrl(rcx, Immediate(kPointerSizeLog2));
andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
- shl_cl(mask_reg);
+ shlp_cl(mask_reg);
}
@@ -4966,7 +5116,7 @@ void MacroAssembler::EnsureNotWhite(
addp(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
imulp(length, FieldOperand(value, String::kLengthOffset));
- shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+ shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
andp(length, Immediate(~kObjectAlignmentMask));
@@ -5065,7 +5215,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
movp(current, FieldOperand(current, HeapObject::kMapOffset));
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
andp(scratch1, Immediate(Map::kElementsKindMask));
- shr(scratch1, Immediate(Map::kElementsKindShift));
+ shrp(scratch1, Immediate(Map::kElementsKindShift));
cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
movp(current, FieldOperand(current, Map::kPrototypeOffset));
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index af65a65465..d9893d6210 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_
@@ -291,12 +268,10 @@ class MacroAssembler: public Assembler {
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
void DebugBreak();
-#endif
// Generates function and stub prologue code.
void Prologue(PrologueFrameMode frame_mode);
@@ -657,10 +632,10 @@ class MacroAssembler: public Assembler {
Register src,
int shift_value);
void SmiShiftLogicalRightConstant(Register dst,
- Register src,
- int shift_value,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ Register src,
+ int shift_value,
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
@@ -726,12 +701,12 @@ class MacroAssembler: public Assembler {
void Push(Smi* smi);
- // Save away a 64-bit integer on the stack as two 32-bit integers
+ // Save away a raw integer with pointer size on the stack as two integers
// masquerading as smis so that the garbage collector skips visiting them.
- void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister);
- // Reconstruct a 64-bit integer from two 32-bit integers masquerading as
- // smis on the top of stack.
- void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister);
+ void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
+ // Reconstruct a raw integer with pointer size from two integers masquerading
+ // as smis on the top of stack.
+ void PopRegisterAsTwoSmis(Register dst, Register scratch = kScratchRegister);
void Test(const Operand& dst, Smi* source);
@@ -813,8 +788,13 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
- // Bit-field support.
- void TestBit(const Operand& dst, int bit_index);
+ // TestBit and Load SharedFunctionInfo special field.
+ void TestBitSharedFunctionInfoSpecialField(Register base,
+ int offset,
+ int bit_index);
+ void LoadSharedFunctionInfoSpecialField(Register dst,
+ Register base,
+ int offset);
// Handle support
void Move(Register dst, Handle<Object> source);
@@ -835,14 +815,21 @@ class MacroAssembler: public Assembler {
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
+ // Emit code to discard a positive number of pointer-sized elements
+ // from the stack under the return address which remains on the top,
+ // clobbering the rsp register.
+ void DropUnderReturnAddress(int stack_elements,
+ Register scratch = kScratchRegister);
void Call(Label* target) { call(target); }
void Push(Register src);
void Push(const Operand& src);
+ void PushQuad(const Operand& src);
void Push(Immediate value);
void PushImm32(int32_t imm32);
void Pop(Register dst);
void Pop(const Operand& dst);
+ void PopQuad(const Operand& dst);
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
void Move(Register dst, ExternalReference ext) {
@@ -1026,9 +1013,9 @@ class MacroAssembler: public Assembler {
void DecodeField(Register reg) {
static const int shift = Field::kShift + kSmiShift;
static const int mask = Field::kMask >> Field::kShift;
- shr(reg, Immediate(shift));
+ shrp(reg, Immediate(shift));
andp(reg, Immediate(mask));
- shl(reg, Immediate(kSmiShift));
+ shlp(reg, Immediate(kSmiShift));
}
// Abort execution if argument is not a number, enabled via --debug-code.
@@ -1218,10 +1205,6 @@ class MacroAssembler: public Assembler {
Label* miss,
bool miss_on_bound_function = false);
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
@@ -1310,7 +1293,7 @@ class MacroAssembler: public Assembler {
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
void CallApiFunctionAndReturn(Register function_address,
- Address thunk_address,
+ ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
Operand return_value_operand,
@@ -1460,7 +1443,7 @@ class MacroAssembler: public Assembler {
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);
- intptr_t RootRegisterDelta(ExternalReference other);
+ int64_t RootRegisterDelta(ExternalReference other);
// Moves the smi value to the destination register.
void LoadSmiConstant(Register dst, Smi* value);
@@ -1501,13 +1484,6 @@ class MacroAssembler: public Assembler {
Register scratch,
AllocationFlags flags);
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- Object* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index c819c71cb9..6a9a264f97 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -241,8 +218,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
int start_reg,
Label* on_no_match) {
Label fallthrough;
- __ movq(rdx, register_location(start_reg)); // Offset of start of capture
- __ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
+ ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
+ ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
__ subp(rbx, rdx); // Length of capture.
// -----------------------
@@ -390,8 +367,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
Label fallthrough;
// Find length of back-referenced capture.
- __ movq(rdx, register_location(start_reg));
- __ movq(rax, register_location(start_reg + 1));
+ ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
+ ReadPositionFromRegister(rax, start_reg + 1); // Offset of end of capture
__ subp(rax, rdx); // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
@@ -692,12 +669,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#else
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
// Push register parameters on stack for reference.
- ASSERT_EQ(kInputString, -1 * kPointerSize);
- ASSERT_EQ(kStartIndex, -2 * kPointerSize);
- ASSERT_EQ(kInputStart, -3 * kPointerSize);
- ASSERT_EQ(kInputEnd, -4 * kPointerSize);
- ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
+ ASSERT_EQ(kInputString, -1 * kRegisterSize);
+ ASSERT_EQ(kStartIndex, -2 * kRegisterSize);
+ ASSERT_EQ(kInputStart, -3 * kRegisterSize);
+ ASSERT_EQ(kInputEnd, -4 * kRegisterSize);
+ ASSERT_EQ(kRegisterOutput, -5 * kRegisterSize);
+ ASSERT_EQ(kNumOutputRegisters, -6 * kRegisterSize);
__ pushq(rdi);
__ pushq(rsi);
__ pushq(rdx);
@@ -747,7 +724,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Load input position.
__ movp(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
- __ subp(rdi, rsi);
+ __ subq(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
__ movp(rbx, Operand(rbp, kStartIndex));
@@ -831,14 +808,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ addp(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(rax, register_location(i));
+ __ movp(rax, register_location(i));
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in rdx for the zero-length check later.
__ movp(rdx, rax);
}
__ addp(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
- __ sar(rax, Immediate(1)); // Convert byte index to character index.
+ __ sarp(rax, Immediate(1)); // Convert byte index to character index.
}
__ movl(Operand(rbx, i * kIntSize), rax);
}
@@ -1084,13 +1061,31 @@ void RegExpMacroAssemblerX64::PushRegister(int register_index,
}
+STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
+
+
void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
- __ movq(rdi, register_location(reg));
+ if (kPointerSize == kInt64Size) {
+ __ movq(rdi, register_location(reg));
+ } else {
+ // Need sign extension for x32 as rdi might be used as an index register.
+ __ movsxlq(rdi, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) {
+ if (kPointerSize == kInt64Size) {
+ __ movq(dst, register_location(reg));
+ } else {
+ // Need sign extension for x32 as dst might be used as an index register.
+ __ movsxlq(dst, register_location(reg));
+ }
}
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
- __ movq(backtrack_stackpointer(), register_location(reg));
+ __ movp(backtrack_stackpointer(), register_location(reg));
__ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
}
@@ -1215,7 +1210,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+ Object* result = Execution::HandleStackGuardInterrupt(isolate);
if (*code_handle != re_code) { // Return address no longer valid
intptr_t delta = code_handle->address() - re_code->address();
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index b230ea47fc..e9f6a35ddb 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
@@ -135,8 +112,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Offsets from rbp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
+ static const int kReturn_eip = kFramePointer + kRegisterSize;
+ static const int kFrameAlign = kReturn_eip + kRegisterSize;
#ifdef _WIN64
// Parameters (first four passed as registers, but with room on stack).
@@ -145,49 +122,50 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// use this space to store the register passed parameters.
static const int kInputString = kFrameAlign;
// StartIndex is passed as 32 bit int.
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
+ static const int kStartIndex = kInputString + kRegisterSize;
+ static const int kInputStart = kStartIndex + kRegisterSize;
+ static const int kInputEnd = kInputStart + kRegisterSize;
+ static const int kRegisterOutput = kInputEnd + kRegisterSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value. NumOutputRegisters is passed as 32-bit value. The upper
// 32 bit of this 64-bit stack slot may contain garbage.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput + kRegisterSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kRegisterSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kRegisterSize;
+ static const int kIsolate = kDirectCall + kRegisterSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
// are passed as registers, and caller must allocate space on the stack
// if it wants them stored. We push the parameters after the frame pointer.
- static const int kInputString = kFramePointer - kPointerSize;
- static const int kStartIndex = kInputString - kPointerSize;
- static const int kInputStart = kStartIndex - kPointerSize;
- static const int kInputEnd = kInputStart - kPointerSize;
- static const int kRegisterOutput = kInputEnd - kPointerSize;
+ static const int kInputString = kFramePointer - kRegisterSize;
+ static const int kStartIndex = kInputString - kRegisterSize;
+ static const int kInputStart = kStartIndex - kRegisterSize;
+ static const int kInputEnd = kInputStart - kRegisterSize;
+ static const int kRegisterOutput = kInputEnd - kRegisterSize;
+
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
- static const int kNumOutputRegisters = kRegisterOutput - kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput - kRegisterSize;
static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kRegisterSize;
+ static const int kIsolate = kDirectCall + kRegisterSize;
#endif
#ifdef _WIN64
// Microsoft calling convention has three callee-saved registers
// (that we are using). We push these after the frame pointer.
- static const int kBackup_rsi = kFramePointer - kPointerSize;
- static const int kBackup_rdi = kBackup_rsi - kPointerSize;
- static const int kBackup_rbx = kBackup_rdi - kPointerSize;
+ static const int kBackup_rsi = kFramePointer - kRegisterSize;
+ static const int kBackup_rdi = kBackup_rsi - kRegisterSize;
+ static const int kBackup_rbx = kBackup_rdi - kRegisterSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#else
// AMD64 Calling Convention has only one callee-save register that
// we use. We push this after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kNumOutputRegisters - kPointerSize;
+ static const int kBackup_rbx = kNumOutputRegisters - kRegisterSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
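Editor's note on the kPointerSize -> kRegisterSize switch above: each of these frame slots is written by a push or a call, so it is one machine register (8 bytes) wide on x64 regardless of how wide a tagged pointer happens to be on a given configuration; expressing the offsets in kRegisterSize keeps the layout correct if pointers are narrower than a word. A minimal standalone sketch of how the offsets chain, using assumed values rather than the real globals.h constants (kFramePointer = 0 is an assumption here):

#include <cstdio>

static const int kRegisterSize = 8;                              // width of a pushed slot on x64
static const int kFramePointer = 0;                              // assumed rbp-relative frame base
static const int kReturn_eip   = kFramePointer + kRegisterSize;  // saved return address
static const int kFrameAlign   = kReturn_eip + kRegisterSize;    // first stack-passed parameter

int main() {
  std::printf("return address at rbp+%d, first parameter slot at rbp+%d\n",
              kReturn_eip, kFrameAlign);
  return 0;
}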
@@ -268,6 +246,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
+ inline void ReadPositionFromRegister(Register dst, int reg);
+
Isolate* isolate() const { return masm_.isolate(); }
MacroAssembler masm_;
diff --git a/deps/v8/src/x64/simulator-x64.cc b/deps/v8/src/x64/simulator-x64.cc
index 448b025a6b..f7f2fb4bb4 100644
--- a/deps/v8/src/x64/simulator-x64.cc
+++ b/deps/v8/src/x64/simulator-x64.cc
@@ -1,26 +1,3 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index 8aba70181f..a43728f01d 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_SIMULATOR_X64_H_
#define V8_X64_SIMULATOR_X64_H_
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 13e822da2b..537f412351 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
@@ -420,7 +397,7 @@ void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
- CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -495,6 +472,21 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
} else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, slow);
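Editor's note on the map-check loop added in this hunk (and repeated in GenerateStoreField below): the emitted code compares the stored heap object's map against every class admitted by the field's HeapType, falls through to do_store on the first match, and jumps to the miss label only after the last candidate fails. A rough, self-contained C++ rendering of that control flow, with stand-in types rather than the real HeapType iterator API:

#include <cstdio>
#include <vector>

struct Map {};  // stand-in for an object's hidden class

// Mirrors the emitted loop: accept the value if its map equals any class the
// field type admits; report a miss only after the last candidate has failed.
static bool FieldTypeAdmits(const Map* value_map,
                            const std::vector<const Map*>& classes) {
  for (const Map* candidate : classes) {
    if (candidate == value_map) return true;   // __ j(equal, &do_store)
  }
  return false;                                // __ j(not_equal, miss_label)
}

int main() {
  Map admitted_class, other_class;
  std::vector<const Map*> classes = {&admitted_class};
  std::printf("admitted: %d, miss: %d\n",
              FieldTypeAdmits(&admitted_class, classes),
              !FieldTypeAdmits(&other_class, classes));
  return 0;
}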
@@ -639,6 +631,21 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
} else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
@@ -741,7 +748,9 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ }
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder->map());
@@ -764,7 +773,7 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
- current->property_dictionary()->FindEntry(*name) ==
+ current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -908,15 +917,17 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
Representation representation) {
if (!reg.is(receiver())) __ movp(receiver(), reg);
if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(field.is_inobject(holder),
+ LoadFieldStub stub(isolate(),
+ field.is_inobject(holder),
field.translate(holder),
representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ GenerateTailCall(masm(), stub.GetCode());
} else {
- KeyedLoadFieldStub stub(field.is_inobject(holder),
+ KeyedLoadFieldStub stub(isolate(),
+ field.is_inobject(holder),
field.translate(holder),
representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ GenerateTailCall(masm(), stub.GetCode());
}
}
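Editor's note: the LoadFieldStub/KeyedLoadFieldStub edits above follow the same pattern as the CallApiFunctionStub and CallApiGetterStub changes elsewhere in this file: the stub constructor now receives the isolate up front, and GetCode() no longer takes it. A hedged stand-in sketch of the before/after call shape, using toy classes rather than the real CodeStub hierarchy:

struct Isolate {};
struct Code {};

// Old shape: the isolate is supplied when code is requested.
class OldStyleStub {
 public:
  explicit OldStyleStub(bool in_object) { (void)in_object; }
  Code* GetCode(Isolate* isolate) { (void)isolate; return nullptr; }
};

// New shape: the isolate is captured by the constructor; GetCode() takes no argument.
class NewStyleStub {
 public:
  NewStyleStub(Isolate* isolate, bool in_object) : isolate_(isolate) { (void)in_object; }
  Code* GetCode() { return nullptr; }
 private:
  Isolate* isolate_;
};

int main() {
  Isolate isolate;
  OldStyleStub old_stub(true);
  NewStyleStub new_stub(&isolate, true);
  return (old_stub.GetCode(&isolate) == nullptr &&
          new_stub.GetCode() == nullptr) ? 0 : 1;
}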
@@ -961,7 +972,7 @@ void LoadStubCompiler::GenerateLoadCallback(
Address getter_address = v8::ToCData<Address>(callback->getter());
__ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
- CallApiGetterStub stub;
+ CallApiGetterStub stub(isolate());
__ TailCallStub(&stub);
}
@@ -1064,17 +1075,6 @@ void LoadStubCompiler::GenerateLoadInterceptor(
}
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
- Label success;
- // Check that the object is a boolean.
- __ Cmp(object, factory()->true_value());
- __ j(equal, &success);
- __ Cmp(object, factory()->false_value());
- __ j(not_equal, miss);
- __ bind(&success);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,