Diffstat (limited to 'deps/v8/src')
-rw-r--r--  deps/v8/src/api.cc | 21
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 21
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 94
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 45
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 46
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 8
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 2
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 2
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 90
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 46
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 234
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 7
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.cc | 16
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 117
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 30
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 9
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 23
-rw-r--r--  deps/v8/src/ast.cc | 13
-rw-r--r--  deps/v8/src/bootstrapper.cc | 11
-rw-r--r--  deps/v8/src/code-stubs-hydrogen.cc | 92
-rw-r--r--  deps/v8/src/code-stubs.h | 10
-rw-r--r--  deps/v8/src/codegen.cc | 1
-rw-r--r--  deps/v8/src/compiler.cc | 15
-rw-r--r--  deps/v8/src/cpu-profiler.cc | 2
-rw-r--r--  deps/v8/src/cpu-profiler.h | 18
-rw-r--r--  deps/v8/src/d8.cc | 24
-rw-r--r--  deps/v8/src/d8.h | 2
-rw-r--r--  deps/v8/src/debug-debugger.js | 14
-rw-r--r--  deps/v8/src/debug.cc | 4
-rw-r--r--  deps/v8/src/deoptimizer.cc | 193
-rw-r--r--  deps/v8/src/deoptimizer.h | 14
-rw-r--r--  deps/v8/src/extensions/i18n/i18n-utils.cc | 34
-rw-r--r--  deps/v8/src/factory.cc | 1
-rw-r--r--  deps/v8/src/flag-definitions.h | 10
-rw-r--r--  deps/v8/src/frames-inl.h | 6
-rw-r--r--  deps/v8/src/frames.cc | 44
-rw-r--r--  deps/v8/src/frames.h | 13
-rw-r--r--  deps/v8/src/global-handles.cc | 1
-rw-r--r--  deps/v8/src/globals.h | 15
-rw-r--r--  deps/v8/src/heap-inl.h | 13
-rw-r--r--  deps/v8/src/heap-snapshot-generator.cc | 4
-rw-r--r--  deps/v8/src/heap.cc | 198
-rw-r--r--  deps/v8/src/heap.h | 64
-rw-r--r--  deps/v8/src/hydrogen-bce.cc | 4
-rw-r--r--  deps/v8/src/hydrogen-bch.cc | 408
-rw-r--r--  deps/v8/src/hydrogen-bch.h (renamed from deps/v8/src/platform-tls.h) | 41
-rw-r--r--  deps/v8/src/hydrogen-canonicalize.cc | 17
-rw-r--r--  deps/v8/src/hydrogen-deoptimizing-mark.cc | 2
-rw-r--r--  deps/v8/src/hydrogen-environment-liveness.cc | 9
-rw-r--r--  deps/v8/src/hydrogen-gvn.cc | 2
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 904
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 707
-rw-r--r--  deps/v8/src/hydrogen-minus-zero.cc | 4
-rw-r--r--  deps/v8/src/hydrogen-osr.cc | 2
-rw-r--r--  deps/v8/src/hydrogen-representation-changes.cc | 33
-rw-r--r--  deps/v8/src/hydrogen.cc | 494
-rw-r--r--  deps/v8/src/hydrogen.h | 138
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 31
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 46
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 2
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 203
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 22
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc | 16
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 76
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 34
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 16
-rw-r--r--  deps/v8/src/ic.cc | 23
-rw-r--r--  deps/v8/src/isolate.cc | 3
-rw-r--r--  deps/v8/src/isolate.h | 23
-rw-r--r--  deps/v8/src/lithium.cc | 2
-rw-r--r--  deps/v8/src/liveedit.cc | 1
-rw-r--r--  deps/v8/src/log.cc | 58
-rw-r--r--  deps/v8/src/mark-compact.cc | 127
-rw-r--r--  deps/v8/src/mark-compact.h | 2
-rw-r--r--  deps/v8/src/messages.js | 75
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 38
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 51
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 151
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h | 10
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.cc | 16
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 82
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 41
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 11
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 15
-rw-r--r--  deps/v8/src/mirror-debugger.js | 2
-rw-r--r--  deps/v8/src/objects-inl.h | 25
-rw-r--r--  deps/v8/src/objects-printer.cc | 14
-rw-r--r--  deps/v8/src/objects.cc | 141
-rw-r--r--  deps/v8/src/objects.h | 52
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.cc | 67
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.h | 10
-rw-r--r--  deps/v8/src/platform-cygwin.cc | 176
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 153
-rw-r--r--  deps/v8/src/platform-linux.cc | 176
-rw-r--r--  deps/v8/src/platform-macos.cc | 239
-rw-r--r--  deps/v8/src/platform-openbsd.cc | 184
-rw-r--r--  deps/v8/src/platform-posix.cc | 338
-rw-r--r--  deps/v8/src/platform-posix.h | 3
-rw-r--r--  deps/v8/src/platform-solaris.cc | 145
-rw-r--r--  deps/v8/src/platform-tls-mac.h | 62
-rw-r--r--  deps/v8/src/platform-tls-win32.h | 62
-rw-r--r--  deps/v8/src/platform.h | 57
-rw-r--r--  deps/v8/src/profile-generator.cc | 3
-rw-r--r--  deps/v8/src/property-details.h | 2
-rw-r--r--  deps/v8/src/property.cc | 10
-rw-r--r--  deps/v8/src/property.h | 46
-rw-r--r--  deps/v8/src/runtime.cc | 80
-rw-r--r--  deps/v8/src/runtime.h | 9
-rw-r--r--  deps/v8/src/sampler.cc | 11
-rw-r--r--  deps/v8/src/spaces.h | 5
-rw-r--r--  deps/v8/src/stub-cache.cc | 23
-rw-r--r--  deps/v8/src/stub-cache.h | 8
-rw-r--r--  deps/v8/src/token.h | 13
-rw-r--r--  deps/v8/src/type-info.cc | 3
-rw-r--r--  deps/v8/src/typedarray.js | 52
-rw-r--r--  deps/v8/src/types.cc | 2
-rw-r--r--  deps/v8/src/v8-counters.h | 2
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/vm-state-inl.h | 23
-rw-r--r--  deps/v8/src/vm-state.h | 11
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 10
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 8
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 1
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 30
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 48
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 187
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 5
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 96
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 42
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 7
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 16
132 files changed, 4399 insertions(+), 3833 deletions(-)
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index c93b23c471..91174d69e9 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -770,7 +770,6 @@ void Context::Exit() {
i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
isolate->set_context(last_context);
- isolate->set_context_exit_happened(true);
}
@@ -780,8 +779,8 @@ static void* DecodeSmiToAligned(i::Object* value, const char* location) {
}
-static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
- i::Smi* smi = reinterpret_cast<i::Smi*>(value);
+static i::Smi* EncodeAlignedAsSmi(const void* value, const char* location) {
+ i::Smi* smi = const_cast<i::Smi*>(reinterpret_cast<const i::Smi*>(value));
ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
return smi;
}
@@ -5938,6 +5937,10 @@ Local<String> v8::String::NewExternal(
LOG_API(isolate, "String::NewExternal");
ENTER_V8(isolate);
CHECK(resource && resource->data());
+ // Resource pointers need to look like Smis since ExternalString objects
+ // are sometimes put into old pointer space (see i::String::MakeExternal).
+ CHECK(EncodeAlignedAsSmi(resource, "v8::String::NewExternal()"));
+ CHECK(EncodeAlignedAsSmi(resource->data(), "v8::String::NewExternal()"));
i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
@@ -5959,6 +5962,10 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
return false;
}
CHECK(resource && resource->data());
+ // Resource pointers need to look like Smis since ExternalString objects
+ // are sometimes put into old pointer space (see i::String::MakeExternal).
+ CHECK(EncodeAlignedAsSmi(resource, "v8::String::MakeExternal()"));
+ CHECK(EncodeAlignedAsSmi(resource->data(), "v8::String::MakeExternal()"));
bool result = obj->MakeExternal(resource);
if (result && !obj->IsInternalizedString()) {
isolate->heap()->external_string_table()->AddString(*obj);
@@ -5974,6 +5981,10 @@ Local<String> v8::String::NewExternal(
LOG_API(isolate, "String::NewExternal");
ENTER_V8(isolate);
CHECK(resource && resource->data());
+ // Resource pointers need to look like Smis since ExternalString objects
+ // are sometimes put into old pointer space (see i::String::MakeExternal).
+ CHECK(EncodeAlignedAsSmi(resource, "v8::String::NewExternal()"));
+ CHECK(EncodeAlignedAsSmi(resource->data(), "v8::String::NewExternal()"));
i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
@@ -5996,6 +6007,10 @@ bool v8::String::MakeExternal(
return false;
}
CHECK(resource && resource->data());
+ // Resource pointers need to look like Smis since ExternalString objects
+ // are sometimes put into old pointer space (see i::String::MakeExternal).
+ CHECK(EncodeAlignedAsSmi(resource, "v8::String::MakeExternal()"));
+ CHECK(EncodeAlignedAsSmi(resource->data(), "v8::String::MakeExternal()"));
bool result = obj->MakeExternal(resource);
if (result && !obj->IsInternalizedString()) {
isolate->heap()->external_string_table()->AddString(*obj);
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index d95946e964..ba0dc4b81d 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -2373,15 +2373,16 @@ void Assembler::vmov(const DwVfpRegister dst,
if (scratch.is(no_reg)) {
if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
// Move the low part of the double into the lower of the corresponsing S
// registers of D register dst.
mov(ip, Operand(lo));
- vmov(dst.low(), ip);
+ vmov(loc.low(), ip);
// Move the high part of the double into the higher of the
// corresponsing S registers of D register dst.
mov(ip, Operand(hi));
- vmov(dst.high(), ip);
+ vmov(loc.high(), ip);
} else {
// D16-D31 does not have S registers, so move the low and high parts
// directly to the D register using vmov.32.
@@ -2446,6 +2447,22 @@ void Assembler::vmov(const DwVfpRegister dst,
}
+void Assembler::vmov(const Register dst,
+ const VmovIndex index,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // Dd[index] = Rt
+ // Instruction details available in ARM DDI 0406C.b, A8.8.342.
+ // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
+ // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(index.index == 0 || index.index == 1);
+ int vn, n;
+ src.split_code(&vn, &n);
+ emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
+ 0xB*B8 | n*B7 | B4);
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 62dd94c078..496eb3e880 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -267,22 +267,6 @@ struct DwVfpRegister {
return 0 <= code_ && code_ < kMaxNumRegisters;
}
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- SwVfpRegister low() const {
- ASSERT(code_ < 16);
- SwVfpRegister reg;
- reg.code_ = code_ * 2;
-
- ASSERT(reg.is_valid());
- return reg;
- }
- SwVfpRegister high() const {
- ASSERT(code_ < 16);
- SwVfpRegister reg;
- reg.code_ = (code_ * 2) + 1;
-
- ASSERT(reg.is_valid());
- return reg;
- }
int code() const {
ASSERT(is_valid());
return code_;
@@ -304,6 +288,47 @@ struct DwVfpRegister {
typedef DwVfpRegister DoubleRegister;
+// Double word VFP register d0-15.
+struct LowDwVfpRegister {
+ public:
+ static const int kMaxNumLowRegisters = 16;
+ operator DwVfpRegister() const {
+ DwVfpRegister r = { code_ };
+ return r;
+ }
+ static LowDwVfpRegister from_code(int code) {
+ LowDwVfpRegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kMaxNumLowRegisters;
+ }
+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ SwVfpRegister low() const {
+ SwVfpRegister reg;
+ reg.code_ = code_ * 2;
+
+ ASSERT(reg.is_valid());
+ return reg;
+ }
+ SwVfpRegister high() const {
+ SwVfpRegister reg;
+ reg.code_ = (code_ * 2) + 1;
+
+ ASSERT(reg.is_valid());
+ return reg;
+ }
+
+ int code_;
+};
+
+
// Quad word NEON register.
struct QwNeonRegister {
static const int kMaxNumRegisters = 16;
@@ -370,22 +395,22 @@ const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };
const DwVfpRegister no_dreg = { -1 };
-const DwVfpRegister d0 = { 0 };
-const DwVfpRegister d1 = { 1 };
-const DwVfpRegister d2 = { 2 };
-const DwVfpRegister d3 = { 3 };
-const DwVfpRegister d4 = { 4 };
-const DwVfpRegister d5 = { 5 };
-const DwVfpRegister d6 = { 6 };
-const DwVfpRegister d7 = { 7 };
-const DwVfpRegister d8 = { 8 };
-const DwVfpRegister d9 = { 9 };
-const DwVfpRegister d10 = { 10 };
-const DwVfpRegister d11 = { 11 };
-const DwVfpRegister d12 = { 12 };
-const DwVfpRegister d13 = { 13 };
-const DwVfpRegister d14 = { 14 };
-const DwVfpRegister d15 = { 15 };
+const LowDwVfpRegister d0 = { 0 };
+const LowDwVfpRegister d1 = { 1 };
+const LowDwVfpRegister d2 = { 2 };
+const LowDwVfpRegister d3 = { 3 };
+const LowDwVfpRegister d4 = { 4 };
+const LowDwVfpRegister d5 = { 5 };
+const LowDwVfpRegister d6 = { 6 };
+const LowDwVfpRegister d7 = { 7 };
+const LowDwVfpRegister d8 = { 8 };
+const LowDwVfpRegister d9 = { 9 };
+const LowDwVfpRegister d10 = { 10 };
+const LowDwVfpRegister d11 = { 11 };
+const LowDwVfpRegister d12 = { 12 };
+const LowDwVfpRegister d13 = { 13 };
+const LowDwVfpRegister d14 = { 14 };
+const LowDwVfpRegister d15 = { 15 };
const DwVfpRegister d16 = { 16 };
const DwVfpRegister d17 = { 17 };
const DwVfpRegister d18 = { 18 };
@@ -420,6 +445,7 @@ const QwNeonRegister q13 = { 13 };
const QwNeonRegister q14 = { 14 };
const QwNeonRegister q15 = { 15 };
+
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.
@@ -1109,6 +1135,10 @@ class Assembler : public AssemblerBase {
const VmovIndex index,
const Register src,
const Condition cond = al);
+ void vmov(const Register dst,
+ const VmovIndex index,
+ const DwVfpRegister src,
+ const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 7773667b7e..ba98b96315 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -38,6 +38,16 @@ namespace v8 {
namespace internal {
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -286,17 +296,6 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
- Register scratch1, Register scratch2,
- Label* not_a_heap_number) {
- __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
- __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, scratch2);
- __ b(ne, not_a_heap_number);
-}
-
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
Isolate* isolate = masm->isolate();
@@ -321,22 +320,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(r0, &check_heap_number);
- __ Ret();
-
- __ bind(&check_heap_number);
- EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
- __ Ret();
-
- __ bind(&call_builtin);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
@@ -1914,7 +1897,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Register right = r0;
Register scratch1 = r7;
Register scratch2 = r9;
- DwVfpRegister double_scratch = d0;
+ LowDwVfpRegister double_scratch = d0;
Register heap_number_result = no_reg;
Register heap_number_map = r6;
@@ -1989,7 +1972,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label not_zero;
ASSERT(kSmiTag == 0);
__ b(ne, &not_zero);
- __ vmov(scratch2, d5.high());
+ __ VmovHigh(scratch2, d5);
__ tst(scratch2, Operand(HeapNumber::kSignMask));
__ b(ne, &transition);
__ bind(&not_zero);
@@ -3834,7 +3817,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
- __ CopyFields(r0, r4, d0, s0, JSObject::kHeaderSize / kPointerSize);
+ __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
@@ -6821,7 +6804,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r5, r6, &slow_elements);
+ __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
__ Ret();
}
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 780bafb755..5b42116ad4 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -44,22 +44,8 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- DisallowHeapAllocation no_allocation;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // Get the optimized code.
- Code* code = function->code();
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
-
- // The optimized code is going to be patched, so we cannot use it any more.
- function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
-
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
@@ -92,25 +78,6 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
prev_call_address = call_address;
#endif
}
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
}
@@ -635,6 +602,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index fd986fd656..ecdf638a1d 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1345,6 +1345,14 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
} else {
Format(instr, "vmov'cond.32 'Dd[1], 'rt");
}
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
+ } else {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+ }
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 6a5845de43..ea7b73f2fe 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -1623,7 +1623,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// r0: Newly allocated regexp.
// r5: Materialized regexp.
// r2: temp.
- __ CopyFields(r0, r5, d0, s0, size / kPointerSize);
+ __ CopyFields(r0, r5, d0, size / kPointerSize);
context()->Plug(r0);
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index ee28d28198..f43846ce58 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1361,7 +1361,7 @@ static void KeyedStoreGenerateGenericHelper(
__ b(ne, slow);
}
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, r3,
+ __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index b68d22f336..00001460d9 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -701,11 +701,6 @@ LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -783,8 +778,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsSmiOrTagged());
- ASSERT(right->representation().IsSmiOrTagged());
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
LArithmeticT* result =
@@ -1318,17 +1313,17 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
@@ -1350,7 +1345,9 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
+ } else if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
@@ -1441,9 +1438,9 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
- if (instr->representation().IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
@@ -1483,7 +1480,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsSmiOrTagged()) {
+ } else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
@@ -1499,9 +1496,9 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left;
LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
@@ -1550,9 +1547,9 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->left()->IsConstant()) {
// If lhs is constant, do reverse subtraction instead.
@@ -1580,9 +1577,9 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
// Note: The lhs of the subtraction becomes the rhs of the
// reverse-subtraction.
@@ -1618,9 +1615,9 @@ LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
@@ -1641,7 +1638,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1650,9 +1647,9 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
@@ -1731,13 +1728,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpConstantEqAndBranch(value);
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -2035,9 +2025,14 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
+ LUnallocated* temp1 = NULL;
+ LOperand* temp2 = NULL;
+ if (!instr->CanOmitPrototypeChecks()) {
+ temp1 = TempRegister();
+ temp2 = TempRegister();
+ }
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+ if (instr->CanOmitPrototypeChecks()) return result;
return AssignEnvironment(result);
}
@@ -2049,8 +2044,10 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = NULL;
+ if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckMaps(value);
+ if (instr->CanOmitMapChecks()) return result;
return AssignEnvironment(result);
}
@@ -2060,7 +2057,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
@@ -2206,8 +2203,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsSmi());
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 5165f1bbb6..6faa7813d2 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -40,12 +40,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
@@ -73,13 +67,13 @@ class LCodeGen;
V(CheckInstanceType) \
V(CheckNonSmi) \
V(CheckMaps) \
+ V(CheckMapValue) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
@@ -89,14 +83,18 @@ class LCodeGen;
V(ConstantS) \
V(ConstantT) \
V(Context) \
+ V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(DoubleToSmi) \
+ V(Drop) \
V(DummyUse) \
V(ElementsKind) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -104,13 +102,13 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
- V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -123,6 +121,7 @@ class LCodeGen;
V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
@@ -187,16 +186,10 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
V(UnknownOSRValue) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop) \
- V(InnerAllocatedObject)
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -433,6 +426,7 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -894,20 +888,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -2440,15 +2420,13 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+ explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
- temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 9e0d59f8ec..41636a8171 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -277,6 +277,7 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
return !is_aborted();
}
@@ -425,11 +426,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Abort("EmitLoadRegister: Unsupported double immediate.");
} else {
ASSERT(r.IsTagged());
- if (literal->IsSmi()) {
- __ mov(scratch, Operand(literal));
- } else {
- __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
- }
+ __ LoadObject(scratch, literal);
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
@@ -497,9 +494,18 @@ bool LCodeGen::IsSmi(LConstantOperand* op) const {
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
@@ -521,7 +527,10 @@ Operand LCodeGen::ToOperand(LOperand* op) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
ASSERT(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
@@ -676,6 +685,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode) {
+ EnsureSpaceForLazyDeopt();
ASSERT(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
@@ -808,12 +818,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
- ASSERT(!info()->IsStub());
- DeoptimizeIf(al, environment, Deoptimizer::SOFT);
-}
-
-
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1186,7 +1190,7 @@ void LCodeGen::DoModI(LModI* instr) {
DwVfpRegister dividend = ToDoubleRegister(instr->temp());
DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
ASSERT(!divisor.is(dividend));
- DwVfpRegister quotient = double_scratch0();
+ LowDwVfpRegister quotient = double_scratch0();
ASSERT(!quotient.is(dividend));
ASSERT(!quotient.is(divisor));
@@ -1202,11 +1206,10 @@ void LCodeGen::DoModI(LModI* instr) {
// Load the arguments in VFP registers. The divisor value is preloaded
// before. Be careful that 'right_reg' is only live on entry.
// TODO(svenpanne) The last comments seems to be wrong nowadays.
- __ vmov(dividend.low(), left_reg);
- __ vmov(divisor.low(), right_reg);
-
- __ vcvt_f64_s32(dividend, dividend.low());
- __ vcvt_f64_s32(divisor, divisor.low());
+ __ vmov(double_scratch0().low(), left_reg);
+ __ vcvt_f64_s32(dividend, double_scratch0().low());
+ __ vmov(double_scratch0().low(), right_reg);
+ __ vcvt_f64_s32(divisor, double_scratch0().low());
// We do not care about the sign of the divisor. Note that we still handle
// the kMinInt % -1 case correctly, though.
@@ -1217,10 +1220,9 @@ void LCodeGen::DoModI(LModI* instr) {
__ vcvt_f64_s32(quotient, quotient.low());
// Compute the remainder in result.
- DwVfpRegister double_scratch = dividend;
- __ vmul(double_scratch, divisor, quotient);
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
- __ vmov(scratch, double_scratch.low());
+ __ vmul(double_scratch0(), divisor, quotient);
+ __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
+ __ vmov(scratch, double_scratch0().low());
__ sub(result_reg, left_reg, scratch, SetCC);
// If we care about -0, test if the dividend is <0 and the result is 0.
@@ -1420,20 +1422,20 @@ void LCodeGen::DoDivI(LDivI* instr) {
} else {
const DoubleRegister vleft = ToDoubleRegister(instr->temp());
const DoubleRegister vright = double_scratch0();
- __ vmov(vleft.low(), left);
- __ vmov(vright.low(), right);
- __ vcvt_f64_s32(vleft, vleft.low());
- __ vcvt_f64_s32(vright, vright.low());
+ __ vmov(double_scratch0().low(), left);
+ __ vcvt_f64_s32(vleft, double_scratch0().low());
+ __ vmov(double_scratch0().low(), right);
+ __ vcvt_f64_s32(vright, double_scratch0().low());
__ vdiv(vleft, vleft, vright); // vleft now contains the result.
- __ vcvt_s32_f64(vright.low(), vleft);
- __ vmov(result, vright.low());
+ __ vcvt_s32_f64(double_scratch0().low(), vleft);
+ __ vmov(result, double_scratch0().low());
if (!instr->hydrogen()->CheckFlag(
HInstruction::kAllUsesTruncatingToInt32)) {
// Deopt if exact conversion to integer was not possible.
// Use vright as scratch register.
- __ vcvt_f64_s32(vright, vright.low());
- __ VFPCompareAndSetFlags(vleft, vright);
+ __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
+ __ VFPCompareAndSetFlags(vleft, double_scratch0());
DeoptimizeIf(ne, instr->environment());
}
}
@@ -1548,7 +1550,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (right_op->IsConstantOperand() && !can_overflow) {
// Use optimized code for specific constants.
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+ int32_t constant = ToRepresentation(
+ LConstantOperand::cast(right_op),
+ instr->hydrogen()->right()->representation());
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
@@ -1612,11 +1616,21 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (can_overflow) {
// scratch:result = left * right.
- __ smull(result, scratch, left, right);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ smull(result, scratch, result, right);
+ } else {
+ __ smull(result, scratch, left, right);
+ }
__ cmp(scratch, Operand(result, ASR, 31));
DeoptimizeIf(ne, instr->environment());
} else {
- __ mul(result, left, right);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ mul(result, result, right);
+ } else {
+ __ mul(result, left, right);
+ }
}
if (bailout_on_minus_zero) {
@@ -1804,12 +1818,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- __ mov(ToRegister(instr->result()), Operand(value));
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
+ __ LoadObject(ToRegister(instr->result()), value);
}
@@ -1971,7 +1980,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
Register left_reg = ToRegister(left);
Operand right_op = (right->IsRegister() || right->IsConstantOperand())
@@ -2370,14 +2379,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
-
- __ cmp(left, Operand(instr->hydrogen()->right()));
- EmitBranch(instr, eq);
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -3030,9 +3031,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
+ } else if (lookup.IsConstant()) {
+ Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+ __ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
@@ -3207,8 +3208,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, kScratchDoubleReg.low());
+ __ vldr(double_scratch0().low(), scratch0(), additional_offset);
+ __ vcvt_f64_f32(result, double_scratch0().low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
@@ -3776,7 +3777,6 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
Register input_high = scratch0();
Label done, exact;
- __ vmov(input_high, input.high());
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
DeoptimizeIf(al, instr->environment());
@@ -3809,7 +3809,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// If the input is +0.5, the result is 1.
__ b(hi, &convert); // Out of [-0.5, +0.5].
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ vmov(input_high, input.high());
+ __ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
DeoptimizeIf(mi, instr->environment()); // [-0.5, -0].
}
@@ -3822,7 +3822,6 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&convert);
__ vadd(input_plus_dot_five, input, dot_five);
- __ vmov(input_high, input_plus_dot_five.high());
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
@@ -4264,6 +4263,18 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ b(NegateCondition(cc), &done);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, check->environment());
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->hydrogen()->skip_check()) return;
@@ -4279,7 +4290,8 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
} else {
__ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
}
- DeoptimizeIf(hs, instr->environment());
+ Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
+ ApplyCheckIf(condition, instr);
}
@@ -4745,8 +4757,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- DwVfpRegister dbl_scratch = double_scratch0();
- SwVfpRegister flt_scratch = dbl_scratch.low();
+ LowDwVfpRegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -4760,11 +4771,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ __ vmov(dbl_scratch.low(), src);
+ __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
} else {
- __ vmov(flt_scratch, src);
- __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+ __ vmov(dbl_scratch.low(), src);
+ __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
}
if (FLAG_inline_new) {
@@ -4825,7 +4836,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DwVfpRegister input_reg = ToDoubleRegister(instr->value());
__ VFPCompareAndSetFlags(input_reg, input_reg);
__ b(vc, &no_special_nan_handling);
- __ vmov(scratch, input_reg.high());
+ __ VmovHigh(scratch, input_reg);
__ cmp(scratch, Operand(kHoleNanUpper32));
// If not the hole NaN, force the NaN to be canonical.
__ VFPCanonicalizeNaN(input_reg, ne);
@@ -4925,22 +4936,20 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(ne, env);
__ bind(&convert);
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
__ bind(&heap_number);
}
// Heap number to double register conversion.
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
if (deoptimize_on_minus_zero) {
- __ vmov(ip, result_reg.low());
- __ cmp(ip, Operand::Zero());
+ __ VmovLow(scratch, result_reg);
+ __ cmp(scratch, Operand::Zero());
__ b(ne, &done);
- __ vmov(ip, result_reg.high());
- __ cmp(ip, Operand(HeapNumber::kSignMask));
+ __ VmovHigh(scratch, result_reg);
+ __ cmp(scratch, Operand(HeapNumber::kSignMask));
DeoptimizeIf(eq, env);
}
__ jmp(&done);
@@ -4962,7 +4971,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(instr->value());
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister double_scratch = double_scratch0();
+ LowDwVfpRegister double_scratch = double_scratch0();
DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
@@ -5010,14 +5019,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
DeoptimizeIf(ne, instr->environment());
__ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
- __ TryDoubleToInt32Exact(input_reg, double_scratch, double_scratch2);
+ __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
+ __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
DeoptimizeIf(ne, instr->environment());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch1, double_scratch.high());
+ __ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
}
@@ -5090,7 +5099,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
- DwVfpRegister double_scratch = double_scratch0();
+ LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
@@ -5104,7 +5113,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch1, double_input.high());
+ __ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
@@ -5118,7 +5127,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
- DwVfpRegister double_scratch = double_scratch0();
+ LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
@@ -5132,7 +5141,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch1, double_input.high());
+ __ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
@@ -5230,6 +5239,7 @@ void LCodeGen::DoCheckMapCommon(Register map_reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
@@ -5252,8 +5262,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
- __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+ __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}
@@ -5288,9 +5297,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ vldr(double_scratch0(), FieldMemOperand(input_reg,
- HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+ __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
__ jmp(&done);
// smi
@@ -5302,6 +5310,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+
Register prototype_reg = ToRegister(instr->temp());
Register map_reg = ToRegister(instr->temp2());
@@ -5310,12 +5320,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(prototype_reg, prototypes->at(i));
- __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
- }
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(prototype_reg, prototypes->at(i));
+ __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+ DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
}
}
@@ -5364,6 +5372,25 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
__ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ mov(scratch, Operand(size));
+ } else {
+ scratch = ToRegister(instr->size());
+ }
+ __ sub(scratch, scratch, Operand(kPointerSize));
+ __ sub(result, result, Operand(kHeapObjectTag));
+ Label loop;
+ __ bind(&loop);
+ __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ str(scratch2, MemOperand(result, scratch));
+ __ sub(scratch, scratch, Operand(kPointerSize));
+ __ cmp(scratch, Operand(0));
+ __ b(ge, &loop);
+ __ add(result, result, Operand(kHeapObjectTag));
+ }
}
@@ -5444,8 +5471,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&allocated);
// Copy the content into the newly allocated memory.
- __ CopyFields(r0, r1, double_scratch0(), double_scratch0().low(),
- size / kPointerSize);
+ __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}
@@ -5610,12 +5636,12 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
padding_size -= Assembler::kInstrSize;
}
}
- last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5624,11 +5650,15 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- if (instr->hydrogen_value()->IsSoftDeoptimize()) {
- SoftDeoptimize(instr->environment());
- } else {
- DeoptimizeIf(al, instr->environment());
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
}
+ DeoptimizeIf(al, instr->environment(), type);
}
@@ -5673,6 +5703,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5685,6 +5716,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index b0390ee445..21f792153b 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -115,7 +115,8 @@ class LCodeGen BASE_EMBEDDED {
DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DwVfpRegister dbl_scratch);
- int ToInteger32(LConstantOperand* op) const;
+ int ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
@@ -199,7 +200,7 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk()->graph(); }
Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
+ LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
int GetNextEmittedBlock() const;
LInstruction* GetNextInstruction();
@@ -284,7 +285,7 @@ class LCodeGen BASE_EMBEDDED {
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void SoftDeoptimize(LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
void AddToTranslation(Translation* translation,
LOperand* op,
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index 902817e140..7a3c96892c 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -247,10 +247,10 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsSmi(constant_source)) {
- __ mov(dst, Operand(cgen_->ToSmi(constant_source)));
- } else if (cgen_->IsInteger32(constant_source)) {
- __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
@@ -261,11 +261,11 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsSmi(constant_source)) {
- __ mov(kSavedValueRegister, Operand(cgen_->ToSmi(constant_source)));
- } else if (cgen_->IsInteger32(constant_source)) {
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
__ mov(kSavedValueRegister,
- Operand(cgen_->ToInteger32(constant_source)));
+ Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(kSavedValueRegister,
cgen_->ToHandle(constant_source));
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 8416926b46..cd124610f9 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -792,6 +792,46 @@ void MacroAssembler::Vmov(const DwVfpRegister dst,
}
+void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
+ if (src.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
+ vmov(dst, loc.high());
+ } else {
+ vmov(dst, VmovIndexHi, src);
+ }
+}
+
+
+void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+ vmov(loc.high(), src);
+ } else {
+ vmov(dst, VmovIndexHi, src);
+ }
+}
+
+
+void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
+ if (src.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
+ vmov(dst, loc.low());
+ } else {
+ vmov(dst, VmovIndexLo, src);
+ }
+}
+
+
+void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+ vmov(loc.low(), src);
+ } else {
+ vmov(dst, VmovIndexLo, src);
+ }
+}
+
+
void MacroAssembler::ConvertNumberToInt32(Register object,
Register dst,
Register heap_number_map,
@@ -799,7 +839,7 @@ void MacroAssembler::ConvertNumberToInt32(Register object,
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
+ LowDwVfpRegister double_scratch2,
Label* not_number) {
Label done;
UntagAndJumpIfSmi(dst, object, &done);
@@ -813,7 +853,7 @@ void MacroAssembler::ConvertNumberToInt32(Register object,
void MacroAssembler::LoadNumber(Register object,
- DwVfpRegister dst,
+ LowDwVfpRegister dst,
Register heap_number_map,
Register scratch,
Label* not_number) {
@@ -838,7 +878,7 @@ void MacroAssembler::LoadNumberAsInt32Double(Register object,
DwVfpRegister double_dst,
Register heap_number_map,
Register scratch,
- DwVfpRegister double_scratch,
+ LowDwVfpRegister double_scratch,
Label* not_int32) {
ASSERT(!scratch.is(object));
ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
@@ -870,7 +910,7 @@ void MacroAssembler::LoadNumberAsInt32(Register object,
Register heap_number_map,
Register scratch,
DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
+ LowDwVfpRegister double_scratch1,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch.is(object));
@@ -1625,6 +1665,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2065,12 +2106,14 @@ void MacroAssembler::CheckFastSmiElements(Register map,
}
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Label* fail,
- int elements_offset) {
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ LowDwVfpRegister double_scratch,
+ Label* fail,
+ int elements_offset) {
Label smi_value, store;
// Handle smi values specially.
@@ -2083,23 +2126,24 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
fail,
DONT_DO_SMI_CHECK);
- vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
// Force a canonical NaN.
if (emit_debug_code()) {
vmrs(ip);
tst(ip, Operand(kVFPDefaultNaNModeControlBit));
Assert(ne, "Default NaN mode not set");
}
- VFPCanonicalizeNaN(d0);
+ VFPCanonicalizeNaN(double_scratch);
b(&store);
bind(&smi_value);
- SmiToDouble(d0, value_reg);
+ SmiToDouble(double_scratch, value_reg);
bind(&store);
add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
- vstr(d0, FieldMemOperand(scratch1,
- FixedDoubleArray::kHeaderSize - elements_offset));
+ vstr(double_scratch,
+ FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
@@ -2405,8 +2449,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::SmiToDouble(DwVfpRegister value, Register smi) {
- ASSERT(value.code() < 16);
+void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
if (CpuFeatures::IsSupported(VFP3)) {
vmov(value.low(), smi);
vcvt_f64_s32(value, 1);
@@ -2419,7 +2462,7 @@ void MacroAssembler::SmiToDouble(DwVfpRegister value, Register smi) {
void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
- DwVfpRegister double_scratch) {
+ LowDwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
vcvt_s32_f64(double_scratch.low(), double_input);
vcvt_f64_s32(double_scratch, double_scratch.low());
@@ -2429,7 +2472,7 @@ void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
- DwVfpRegister double_scratch) {
+ LowDwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
@@ -2441,13 +2484,15 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
void MacroAssembler::TryInt32Floor(Register result,
DwVfpRegister double_input,
Register input_high,
- DwVfpRegister double_scratch,
+ LowDwVfpRegister double_scratch,
Label* done,
Label* exact) {
ASSERT(!result.is(input_high));
ASSERT(!double_input.is(double_scratch));
Label negative, exception;
+ VmovHigh(input_high, double_input);
+
// Test for NaN and infinities.
Sbfx(result, input_high,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
@@ -2488,7 +2533,7 @@ void MacroAssembler::ECMAToInt32(Register result,
Register scratch,
Register scratch_high,
Register scratch_low,
- DwVfpRegister double_scratch) {
+ LowDwVfpRegister double_scratch) {
ASSERT(!scratch_high.is(result));
ASSERT(!scratch_low.is(result));
ASSERT(!scratch_low.is(scratch_high));
@@ -3142,8 +3187,7 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
Register src,
- DwVfpRegister double_scratch,
- SwVfpRegister single_scratch,
+ LowDwVfpRegister double_scratch,
int field_count) {
int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
for (int i = 0; i < double_count; i++) {
@@ -3156,9 +3200,9 @@ void MacroAssembler::CopyFields(Register dst,
int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
if (remain != 0) {
- vldr(single_scratch,
+ vldr(double_scratch.low(),
FieldMemOperand(src, (field_count - 1) * kPointerSize));
- vstr(single_scratch,
+ vstr(double_scratch.low(),
FieldMemOperand(dst, (field_count - 1) * kPointerSize));
}
}
@@ -3260,9 +3304,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register scratch1,
Register scratch2,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
and_(scratch1, first, Operand(kFlatAsciiStringMask));
and_(scratch2, second, Operand(kFlatAsciiStringMask));
cmp(scratch1, Operand(kFlatAsciiStringTag));
@@ -3275,9 +3320,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
and_(scratch, type, Operand(kFlatAsciiStringMask));
cmp(scratch, Operand(kFlatAsciiStringTag));
b(ne, failure);
@@ -3657,13 +3703,12 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
DwVfpRegister input_reg,
- DwVfpRegister temp_double_reg) {
+ LowDwVfpRegister double_scratch) {
Label above_zero;
Label done;
Label in_bounds;
- Vmov(temp_double_reg, 0.0);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
+ VFPCompareAndSetFlags(input_reg, 0.0);
b(gt, &above_zero);
// Double value is less than zero, NaN or Inf, return 0.
@@ -3672,8 +3717,8 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// Double value is >= 255, return 255.
bind(&above_zero);
- Vmov(temp_double_reg, 255.0, result_reg);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
+ Vmov(double_scratch, 255.0, result_reg);
+ VFPCompareAndSetFlags(input_reg, double_scratch);
b(le, &in_bounds);
mov(result_reg, Operand(255));
b(al, &done);
@@ -3685,8 +3730,8 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// Set rounding mode to round to the nearest integer by clearing bits[23:22].
bic(result_reg, ip, Operand(kVFPRoundingModeMask));
vmsr(result_reg);
- vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
- vmov(result_reg, input_reg.low());
+ vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
+ vmov(result_reg, double_scratch.low());
// Restore FPSCR.
vmsr(ip);
bind(&done);
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 747dd3b882..38308e5cde 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -484,6 +484,11 @@ class MacroAssembler: public Assembler {
const double imm,
const Register scratch = no_reg);
+ void VmovHigh(Register dst, DwVfpRegister src);
+ void VmovHigh(DwVfpRegister dst, Register src);
+ void VmovLow(Register dst, DwVfpRegister src);
+ void VmovLow(DwVfpRegister dst, Register src);
+
// Converts the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
@@ -494,14 +499,14 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
+ LowDwVfpRegister double_scratch2,
Label* not_int32);
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
// with |object| still intact.
void LoadNumber(Register object,
- DwVfpRegister dst,
+ LowDwVfpRegister dst,
Register heap_number_map,
Register scratch,
Label* not_number);
@@ -515,7 +520,7 @@ class MacroAssembler: public Assembler {
DwVfpRegister double_dst,
Register heap_number_map,
Register scratch,
- DwVfpRegister double_scratch,
+ LowDwVfpRegister double_scratch,
Label* not_int32);
// Loads the number from object into dst as a 32-bit integer.
@@ -528,7 +533,7 @@ class MacroAssembler: public Assembler {
Register heap_number_map,
Register scratch,
DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
+ LowDwVfpRegister double_scratch1,
Label* not_int32);
@@ -796,8 +801,7 @@ class MacroAssembler: public Assembler {
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst,
Register src,
- DwVfpRegister double_scratch,
- SwVfpRegister single_scratch,
+ LowDwVfpRegister double_scratch,
int field_count);
// Copies a number of bytes from src to dst. All registers are clobbered. On
@@ -874,6 +878,7 @@ class MacroAssembler: public Assembler {
Register key_reg,
Register elements_reg,
Register scratch1,
+ LowDwVfpRegister double_scratch,
Label* fail,
int elements_offset = 0);
@@ -957,26 +962,27 @@ class MacroAssembler: public Assembler {
// Load the value of a smi object into a double register.
// The register value must be between d0 and d15.
- void SmiToDouble(DwVfpRegister value, Register smi);
+ void SmiToDouble(LowDwVfpRegister value, Register smi);
// Check if a double can be exactly represented as a signed 32-bit integer.
// Z flag set to one if true.
void TestDoubleIsInt32(DwVfpRegister double_input,
- DwVfpRegister double_scratch);
+ LowDwVfpRegister double_scratch);
// Try to convert a double to a signed 32-bit integer.
// Z flag set to one and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
- DwVfpRegister double_scratch);
+ LowDwVfpRegister double_scratch);
// Floor a double and writes the value to the result register.
// Go to exact if the conversion is exact (to be able to test -0),
// fall through calling code if an overflow occurred, else go to done.
+  // On return, input_high is loaded with the high bits of the input.
void TryInt32Floor(Register result,
DwVfpRegister double_input,
Register input_high,
- DwVfpRegister double_scratch,
+ LowDwVfpRegister double_scratch,
Label* done,
Label* exact);
@@ -989,7 +995,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Register scratch_high,
Register scratch_low,
- DwVfpRegister double_scratch);
+ LowDwVfpRegister double_scratch);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
@@ -1311,7 +1317,7 @@ class MacroAssembler: public Assembler {
void ClampDoubleToUint8(Register result_reg,
DwVfpRegister input_reg,
- DwVfpRegister temp_double_reg);
+ LowDwVfpRegister double_scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index c47f2ab80c..c9e3616d9d 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -3111,6 +3111,15 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
OS::MemCopy(&dd_value, data, 8);
set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ // vmov (scalar to ARM core register)
+ int vn = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ double dn_value = get_double_from_d_register(vn);
+ int32_t data[2];
+ OS::MemCopy(data, &dn_value, 8);
+ set_register(instr->RtValue(), data[instr->Bit(21)]);
+ } else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index d7b1b55c20..f7fa9efaca 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -479,10 +479,9 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- if (details.type() == CONSTANT_FUNCTION) {
- Handle<HeapObject> constant(
- HeapObject::cast(descriptors->GetValue(descriptor)));
- __ LoadHeapObject(scratch1, constant);
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ LoadObject(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
@@ -543,7 +542,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- if (details.type() == CONSTANT_FUNCTION) {
+ if (details.type() == CONSTANT) {
ASSERT(value_reg.is(r0));
__ Ret();
return;
@@ -1399,9 +1398,9 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadHeapObject(r0, value);
+ __ LoadObject(r0, value);
__ Ret();
}
@@ -1814,7 +1813,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ b(gt, &call_builtin);
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(r4, r0, elements, r5,
+ __ StoreNumberToDoubleElements(r4, r0, elements, r5, d0,
&call_builtin, argc * kDoubleSize);
// Save new length.
@@ -2670,7 +2669,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
Handle<Code> code = CompileCustomCall(object, holder,
Handle<Cell>::null(),
function, Handle<String>::cast(name),
- Code::CONSTANT_FUNCTION);
+ Code::CONSTANT);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -3195,7 +3194,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
+ LowDwVfpRegister double_scratch1,
Label* fail) {
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
@@ -3604,7 +3603,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg,
- scratch1, &transition_elements_kind);
+ scratch1, d0, &transition_elements_kind);
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.
@@ -3652,7 +3651,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ mov(scratch1, elements_reg);
__ StoreNumberToDoubleElements(value_reg, key_reg, scratch1,
- scratch2, &transition_elements_kind);
+ scratch2, d0, &transition_elements_kind);
__ mov(scratch1, Operand(kHoleNanLower32));
__ mov(scratch2, Operand(kHoleNanUpper32));
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index f34c7bb24a..e0bca67aab 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -565,11 +565,16 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
type->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
- case CONSTANT_FUNCTION:
+ case CONSTANT: {
// We surely know the target for a constant function.
- target_ =
- Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
- return true;
+ Handle<Object> constant(lookup.GetConstantFromMap(*type),
+ type->GetIsolate());
+ if (constant->IsJSFunction()) {
+ target_ = Handle<JSFunction>::cast(constant);
+ return true;
+ }
+ // Fall through.
+ }
case NORMAL:
case FIELD:
case CALLBACKS:
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 281f8b918e..dda5fe42a5 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1095,12 +1095,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->length_string(),
factory->undefined_value(), DONT_ENUM,
- Object::FORCE_TAGGED));
+ Object::FORCE_TAGGED, JSReceiver::FORCE_FIELD));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->callee_string(),
factory->undefined_value(), DONT_ENUM,
- Object::FORCE_TAGGED));
+ Object::FORCE_TAGGED, JSReceiver::FORCE_FIELD));
#ifdef DEBUG
LookupResult lookup(isolate);
@@ -2475,14 +2475,13 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
to, key, value, details.attributes()));
break;
}
- case CONSTANT_FUNCTION: {
+ case CONSTANT: {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
- Handle<JSFunction> fun =
- Handle<JSFunction>(descs->GetConstantFunction(i));
+ Handle<Object> constant(descs->GetConstant(i), isolate());
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
- to, key, fun, details.attributes()));
+ to, key, constant, details.attributes()));
break;
}
case CALLBACKS: {
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 324dfa9f76..651ce0a0e8 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -129,7 +129,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
- if (FLAG_trace_hydrogen) {
+ if (FLAG_trace_hydrogen_stubs) {
const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling stub %s using hydrogen\n", name);
@@ -178,7 +178,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
AddInstruction(context_);
start_environment->BindContext(context_);
- AddSimulate(BailoutId::StubEntry());
+ Add<HSimulate>(BailoutId::StubEntry());
NoObservableSideEffectsScope no_effects(this);
@@ -307,6 +307,37 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
template <>
+HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
+ HValue* value = GetParameter(0);
+
+ // Check if the parameter is already a SMI or heap number.
+ IfBuilder if_number(this);
+ if_number.If<HIsSmiAndBranch>(value);
+ if_number.OrIf<HCompareMap>(value, isolate()->factory()->heap_number_map());
+ if_number.Then();
+
+ // Return the number.
+ Push(value);
+
+ if_number.Else();
+
+ // Convert the parameter to number using the builtin.
+ HValue* function = AddLoadJSBuiltin(Builtins::TO_NUMBER, context());
+ Add<HPushArgument>(value);
+ Push(Add<HInvokeFunction>(context(), function, 1));
+
+ if_number.End();
+
+ return Pop();
+}
+
+
+Handle<Code> ToNumberStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
@@ -366,9 +397,10 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
length));
}
- HValue* result = environment()->Pop();
checker.ElseDeopt();
- return result;
+ checker.End();
+
+ return environment()->Pop();
}
@@ -416,8 +448,11 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
AddStore(object, access, AddLoad(boilerplate, access));
}
+ environment()->Push(object);
checker.ElseDeopt();
- return object;
+ checker.End();
+
+ return environment()->Pop();
}
@@ -486,11 +521,11 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
+ Representation rep = casted_stub()->representation();
HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForJSObjectOffset(casted_stub()->offset()) :
- HObjectAccess::ForBackingStoreOffset(casted_stub()->offset());
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access,
- casted_stub()->representation()));
+ HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
+ HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
+ return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
}
@@ -501,11 +536,11 @@ Handle<Code> LoadFieldStub::GenerateCode() {
template<>
HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
+ Representation rep = casted_stub()->representation();
HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForJSObjectOffset(casted_stub()->offset()) :
- HObjectAccess::ForBackingStoreOffset(casted_stub()->offset());
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access,
- casted_stub()->representation()));
+ HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
+ HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
+ return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
}
@@ -850,23 +885,26 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
HParameter* receiver = GetParameter(0);
HParameter* value = GetParameter(2);
- if (stub->is_constant()) {
- // Assume every store to a constant value changes it.
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
- set_current_block(NULL);
- } else {
- HValue* cell = Add<HConstant>(placeholder_cell, Representation::Tagged());
+ // Check that the map of the global has not changed: use a placeholder map
+ // that will be replaced later with the global object's map.
+ Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+ AddInstruction(HCheckMaps::New(
+ receiver, placeholder_map, zone(), top_info()));
- // Check that the map of the global has not changed: use a placeholder map
- // that will be replaced later with the global object's map.
- Handle<Map> placeholder_map = isolate()->factory()->meta_map();
- AddInstruction(HCheckMaps::New(receiver, placeholder_map, zone()));
+ HValue* cell = Add<HConstant>(placeholder_cell, Representation::Tagged());
+ HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
+ HValue* cell_contents = Add<HLoadNamedField>(cell, access);
+ if (stub->is_constant()) {
+ IfBuilder builder(this);
+ builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
+ builder.Then();
+ builder.ElseDeopt();
+ builder.End();
+ } else {
// Load the payload of the global parameter cell. A hole indicates that the
// property has been deleted and that the store must be handled by the
// runtime.
- HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
- HValue* cell_contents = Add<HLoadNamedField>(cell, access);
IfBuilder builder(this);
HValue* hole_value = Add<HConstant>(hole, Representation::Tagged());
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
@@ -876,6 +914,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Add<HStoreNamedField>(cell, access, value);
builder.End();
}
+
return value;
}
@@ -894,8 +933,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
- set_current_block(NULL);
+ Add<HDeoptimize>(Deoptimizer::EAGER);
} else {
info()->MarkAsSavesCallerDoubles();
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 33593544d6..bc581d837d 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -474,15 +474,19 @@ class InterruptStub : public PlatformCodeStub {
};
-class ToNumberStub: public PlatformCodeStub {
+class ToNumberStub: public HydrogenCodeStub {
public:
ToNumberStub() { }
- void Generate(MacroAssembler* masm);
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
private:
Major MajorKey() { return ToNumber; }
- int MinorKey() { return 0; }
+ int NotMissMinorKey() { return 0; }
};
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 8029d2f882..2031b321a4 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -125,6 +125,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
+ AllowDeferredHandleDereference allow_deference_for_print_code;
bool print_code = Isolate::Current()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code ||
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index c2995773e3..ebd126659b 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -969,7 +969,9 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_parallel_recompilation) {
- PrintF(" ** Compilation queue, will retry opting on next run.\n");
+ PrintF(" ** Compilation queue full, will retry optimizing ");
+ closure->PrintName();
+ PrintF(" on next run.\n");
}
return;
}
@@ -1247,10 +1249,13 @@ CompilationPhase::~CompilationPhase() {
bool CompilationPhase::ShouldProduceTraceOutput() const {
- // Produce trace output if flag is set so that the first letter of the
- // phase name matches the command line parameter FLAG_trace_phase.
- return (FLAG_trace_hydrogen &&
- OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
+ // Trace if the appropriate trace flag is set and the phase name's first
+ // character is in the FLAG_trace_phase command line parameter.
+ bool tracing_on = info()->IsStub() ?
+ FLAG_trace_hydrogen_stubs :
+ FLAG_trace_hydrogen;
+ return (tracing_on &&
+ OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
}
} } // namespace v8::internal
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index d3fadb52d4..0d226cfb3f 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -264,7 +264,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- String* source, int line) {
+ Name* source, int line) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 44e63fed49..66e2b8bd18 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -173,14 +173,14 @@ class ProfilerEventsProcessor : public Thread {
};
-#define PROFILE(IsolateGetter, Call) \
- do { \
- Isolate* cpu_profiler_isolate = (IsolateGetter); \
- LOG_CODE_EVENT(cpu_profiler_isolate, Call); \
- CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler(); \
- if (cpu_profiler->is_profiling()) { \
- cpu_profiler->Call; \
- } \
+#define PROFILE(IsolateGetter, Call) \
+ do { \
+ Isolate* cpu_profiler_isolate = (IsolateGetter); \
+ v8::internal::Logger* logger = cpu_profiler_isolate->logger(); \
+ CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler(); \
+ if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
+ logger->Call; \
+ } \
} while (false)
@@ -223,7 +223,7 @@ class CpuProfiler {
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- String* source, int line);
+ Name* source, int line);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
void CodeMovingGCEvent() {}
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index e576e9cb37..1efe2ae0b4 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -457,16 +457,6 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
-void Shell::EnableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args) {
- V8::ResumeProfiler();
-}
-
-
-void Shell::DisableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args) {
- V8::PauseProfiler();
-}
-
-
void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value file(args[0]);
if (*file == NULL) {
@@ -857,10 +847,6 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
global_template->Set(String::New("load"), FunctionTemplate::New(Load));
global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
global_template->Set(String::New("version"), FunctionTemplate::New(Version));
- global_template->Set(String::New("enableProfiler"),
- FunctionTemplate::New(EnableProfiler));
- global_template->Set(String::New("disableProfiler"),
- FunctionTemplate::New(DisableProfiler));
// Bind the Realm object.
Handle<ObjectTemplate> realm_template = ObjectTemplate::New();
@@ -1561,11 +1547,12 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#ifdef V8_SHARED
-static void EnableHarmonyTypedArraysViaCommandLine() {
- int fake_argc = 2;
- char **fake_argv = new char*[2];
+static void SetStandaloneFlagsViaCommandLine() {
+ int fake_argc = 3;
+ char **fake_argv = new char*[3];
fake_argv[0] = NULL;
fake_argv[1] = strdup("--harmony-typed-arrays");
+ fake_argv[2] = strdup("--trace-hydrogen-file=hydrogen.cfg");
v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false);
free(fake_argv[1]);
delete[] fake_argv;
@@ -1586,8 +1573,9 @@ int Shell::Main(int argc, char* argv[]) {
#ifndef V8_SHARED
i::FLAG_harmony_array_buffer = true;
i::FLAG_harmony_typed_arrays = true;
+ i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
#else
- EnableHarmonyTypedArraysViaCommandLine();
+ SetStandaloneFlagsViaCommandLine();
#endif
ShellArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 804cc4655f..4f04342cf4 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -317,8 +317,6 @@ class Shell : public i::AllStatic {
static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void EnableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void DisableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
static Handle<String> ReadFromStdin(Isolate* isolate);
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index 88efbe212a..a588b4c21d 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -1469,8 +1469,6 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.suspendRequest_(request, response);
} else if (request.command == 'version') {
this.versionRequest_(request, response);
- } else if (request.command == 'profile') {
- this.profileRequest_(request, response);
} else if (request.command == 'changelive') {
this.changeLiveRequest_(request, response);
} else if (request.command == 'restartframe') {
@@ -2400,18 +2398,6 @@ DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
- if (request.arguments.command == 'resume') {
- %ProfilerResume();
- } else if (request.arguments.command == 'pause') {
- %ProfilerPause();
- } else {
- return response.failed('Unknown command');
- }
- response.body = {};
-};
-
-
DebugCommandProcessor.prototype.changeLiveRequest_ = function(
request, response) {
if (!request.arguments) {
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 04f8a7a027..8454438815 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -2044,6 +2044,10 @@ void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
+ if (FLAG_parallel_recompilation) {
+ isolate_->optimizing_compiler_thread()->Flush();
+ }
+
Deoptimizer::DeoptimizeAll(isolate_);
Handle<Code> lazy_compile =
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index fd7c2829ad..53b9b76377 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -331,34 +331,47 @@ void Deoptimizer::VisitAllOptimizedFunctions(
// Removes the functions selected by the given filter from the optimized
-// function list of the given context and partitions the removed functions
-// into one or more lists such that all functions in a list share the same
-// code. The head of each list is written in the deoptimizing_functions field
-// of the corresponding code object.
-// The found code objects are returned in the given zone list.
-static void PartitionOptimizedFunctions(Context* context,
- OptimizedFunctionFilter* filter,
- ZoneList<Code*>* partitions,
- Zone* zone,
- Object* undefined) {
+// function list of the given context and adds their code to the list of
+// code objects to be deoptimized.
+static void SelectCodeToDeoptimize(Context* context,
+ OptimizedFunctionFilter* filter,
+ ZoneList<Code*>* codes,
+ Zone* zone,
+ Object* undefined) {
DisallowHeapAllocation no_allocation;
Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
Object* remainder_head = undefined;
Object* remainder_tail = undefined;
- ASSERT_EQ(0, partitions->length());
+
+ // TODO(titzer): rewrite to not modify unselected functions.
while (current != undefined) {
JSFunction* function = JSFunction::cast(current);
current = function->next_function_link();
if (filter->TakeFunction(function)) {
+ // Extract this function from the context's list and remember the code.
Code* code = function->code();
- if (code->deoptimizing_functions() == undefined) {
- partitions->Add(code, zone);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ if (code->marked_for_deoptimization()) {
+ ASSERT(codes->Contains(code));
} else {
- ASSERT(partitions->Contains(code));
+ code->set_marked_for_deoptimization(true);
+ codes->Add(code, zone);
+ }
+ SharedFunctionInfo* shared = function->shared();
+ // Replace the function's code with the shared code.
+ function->set_code(shared->code());
+ // Evict the code from the optimized code map.
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
+ // Remove the function from the optimized functions list.
+ function->set_next_function_link(undefined);
+
+ if (FLAG_trace_deopt) {
+ PrintF("[forced deoptimization: ");
+ function->PrintName();
+ PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
}
- function->set_next_function_link(code->deoptimizing_functions());
- code->set_deoptimizing_functions(function);
} else {
+ // Don't select this function; link it back into the list.
if (remainder_head == undefined) {
remainder_head = function;
} else {
@@ -393,6 +406,14 @@ class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter {
};
+class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
+ public:
+ virtual bool TakeFunction(JSFunction* function) {
+ return function->code()->marked_for_deoptimization();
+ }
+};
+
+
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
DisallowHeapAllocation no_allocation;
@@ -421,19 +442,11 @@ void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- if (!function->IsOptimized()) return;
Code* code = function->code();
- Context* context = function->context()->native_context();
- Isolate* isolate = context->GetIsolate();
- Object* undefined = isolate->heap()->undefined_value();
- Zone zone(isolate);
- ZoneList<Code*> codes(1, &zone);
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
DeoptimizeWithMatchingCodeFilter filter(code);
- PartitionOptimizedFunctions(context, &filter, &codes, &zone, undefined);
- ASSERT_EQ(1, codes.length());
- DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction::cast(codes.at(0)->deoptimizing_functions()));
- codes.at(0)->set_deoptimizing_functions(undefined);
+ DeoptimizeAllFunctionsForContext(
+ function->context()->native_context(), &filter);
}
@@ -443,12 +456,10 @@ void Deoptimizer::DeoptimizeAllFunctionsForContext(
Isolate* isolate = context->GetIsolate();
Object* undefined = isolate->heap()->undefined_value();
Zone zone(isolate);
- ZoneList<Code*> codes(1, &zone);
- PartitionOptimizedFunctions(context, filter, &codes, &zone, undefined);
- for (int i = 0; i < codes.length(); ++i) {
- DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction::cast(codes.at(i)->deoptimizing_functions()));
- codes.at(i)->set_deoptimizing_functions(undefined);
+ ZoneList<Code*> codes(4, &zone);
+ SelectCodeToDeoptimize(context, filter, &codes, &zone, undefined);
+ for (int i = 0; i < codes.length(); i++) {
+ DeoptimizeCode(isolate, codes.at(i));
}
}
@@ -466,6 +477,55 @@ void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
}
+void Deoptimizer::DeoptimizeCodeList(Isolate* isolate, ZoneList<Code*>* codes) {
+ if (codes->length() == 0) return; // Nothing to do.
+
+  // Mark the code; any functions referring to this code will be selected.
+ for (int i = 0; i < codes->length(); i++) {
+ ASSERT(!codes->at(i)->marked_for_deoptimization());
+ codes->at(i)->set_marked_for_deoptimization(true);
+ }
+
+ // For all contexts, remove optimized functions that refer to the selected
+ // code from the optimized function lists.
+ Object* undefined = isolate->heap()->undefined_value();
+ Zone zone(isolate);
+ Object* list = isolate->heap()->native_contexts_list();
+ DeoptimizeMarkedCodeFilter filter;
+ while (!list->IsUndefined()) {
+ Context* context = Context::cast(list);
+ // Note that selecting code unlinks the functions that refer to it.
+ SelectCodeToDeoptimize(context, &filter, codes, &zone, undefined);
+ list = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ }
+
+ // Now deoptimize all the code.
+ for (int i = 0; i < codes->length(); i++) {
+ DeoptimizeCode(isolate, codes->at(i));
+ }
+}
+
+
+void Deoptimizer::DeoptimizeCode(Isolate* isolate, Code* code) {
+ HandleScope scope(isolate);
+ DisallowHeapAllocation nha;
+
+ // Do platform-specific patching of the optimized code.
+ PatchCodeForDeoptimization(isolate, code);
+
+ // Add the deoptimizing code to the list.
+ DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+ DeoptimizerData* data = isolate->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
+
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+}
+
+
void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate,
v8::Persistent<v8::Value>* obj,
void* parameter) {
@@ -900,15 +960,15 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// input frame. For all subsequent output frames, it can be read from the
// previous one. This frame's pc can be computed from the non-optimized
// function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
+ output_offset -= kPCOnStackSize;
+ input_offset -= kPCOnStackSize;
intptr_t value;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = output_[frame_index - 1]->GetPc();
}
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerPc(output_offset, value);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
@@ -919,14 +979,14 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// as in the input frame. For all subsequent output frames, it can be
// read from the previous one. Also compute and set this frame's frame
// pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
+ output_offset -= kFPOnStackSize;
+ input_offset -= kFPOnStackSize;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = output_[frame_index - 1]->GetFp();
}
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
has_alignment_padding_ * kPointerSize) == fp_value);
@@ -1049,9 +1109,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
// Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
+ output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
+ output_frame->SetCallerPc(output_offset, callers_pc);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
@@ -1059,9 +1119,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
// Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
+ output_offset -= kFPOnStackSize;
intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (trace_) {
@@ -1152,9 +1212,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
// Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
+ output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
+ output_frame->SetCallerPc(output_offset, callers_pc);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
@@ -1162,9 +1222,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
// Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
+ output_offset -= kFPOnStackSize;
intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (trace_) {
@@ -1265,7 +1325,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
// MacroAssembler::EnterFrame). For a setter stub frame we need one additional
// entry for the implicit return value, see
// StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_entries = (kPCOnStackSize / kPointerSize) +
+ (kFPOnStackSize / kPointerSize) + 3 +
+ (is_setter_stub_frame ? 1 : 0);
unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
@@ -1287,9 +1349,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
unsigned output_offset = output_frame_size;
// Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
+ output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
+ output_frame->SetCallerPc(output_offset, callers_pc);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; caller's pc\n",
@@ -1297,9 +1359,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
}
// Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
+ output_offset -= kFPOnStackSize;
intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (trace_) {
@@ -1435,10 +1497,10 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
output_frame->SetTop(top_address);
// Read caller's PC (JSFunction continuation) from the input frame.
- unsigned input_frame_offset = input_frame_size - kPointerSize;
- unsigned output_frame_offset = output_frame_size - kPointerSize;
+ unsigned input_frame_offset = input_frame_size - kPCOnStackSize;
+ unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
intptr_t value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetFrameSlot(output_frame_offset, value);
+ output_frame->SetCallerPc(output_frame_offset, value);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
@@ -1446,10 +1508,10 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
// Read caller's FP from the input frame, and set this frame's FP.
- input_frame_offset -= kPointerSize;
+ input_frame_offset -= kFPOnStackSize;
value = input_->GetFrameSlot(input_frame_offset);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
+ output_frame_offset -= kFPOnStackSize;
+ output_frame->SetCallerFp(output_frame_offset, value);
intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
output_frame->SetRegister(fp_reg.code(), frame_ptr);
output_frame->SetFp(frame_ptr);
@@ -2569,21 +2631,6 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
}
-void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
- Code* code) {
- SharedFunctionInfo* shared = function->shared();
- Object* undefined = function->GetHeap()->undefined_value();
- Object* current = function;
-
- while (current != undefined) {
- JSFunction* func = JSFunction::cast(current);
- current = func->next_function_link();
- func->set_code(shared->code());
- func->set_next_function_link(undefined);
- }
-}
-
-
FrameDescription::FrameDescription(uint32_t frame_size,
JSFunction* function)
: frame_size_(frame_size),
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index d28be236ed..7ad1ab0b2e 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -197,6 +197,8 @@ class Deoptimizer : public Malloced {
static void DeoptimizeAllFunctionsWith(Isolate* isolate,
OptimizedFunctionFilter* filter);
+ static void DeoptimizeCodeList(Isolate* isolate, ZoneList<Code*>* codes);
+
static void DeoptimizeAllFunctionsForContext(
Context* context, OptimizedFunctionFilter* filter);
@@ -411,9 +413,11 @@ class Deoptimizer : public Malloced {
v8::Persistent<v8::Value>* obj,
void* data);
- // Deoptimize function assuming that function->next_function_link() points
- // to a list that contains all functions that share the same optimized code.
- static void DeoptimizeFunctionWithPreparedFunctionList(JSFunction* function);
+ // Deoptimize the given code and add to appropriate deoptimization lists.
+ static void DeoptimizeCode(Isolate* isolate, Code* code);
+
+ // Patch the given code so that it will deoptimize itself.
+ static void PatchCodeForDeoptimization(Isolate* isolate, Code* code);
  // Fill the input from a JavaScript frame. This is used when
// the debugger needs to inspect an optimized frame. For normal
@@ -510,6 +514,10 @@ class FrameDescription {
*GetFrameSlotPointer(offset) = value;
}
+ void SetCallerPc(unsigned offset, intptr_t value);
+
+ void SetCallerFp(unsigned offset, intptr_t value);
+
intptr_t GetRegister(unsigned n) const {
ASSERT(n < ARRAY_SIZE(registers_));
return registers_[n];
diff --git a/deps/v8/src/extensions/i18n/i18n-utils.cc b/deps/v8/src/extensions/i18n/i18n-utils.cc
index b720329f8b..eac1166904 100644
--- a/deps/v8/src/extensions/i18n/i18n-utils.cc
+++ b/deps/v8/src/extensions/i18n/i18n-utils.cc
@@ -142,40 +142,34 @@ void Utils::AsciiToUChar(const char* source,
// static
-// Chrome Linux doesn't like static initializers in class, so we create
-// template on demand.
v8::Local<v8::ObjectTemplate> Utils::GetTemplate(v8::Isolate* isolate) {
- static v8::Persistent<v8::ObjectTemplate> icu_template;
-
- if (icu_template.IsEmpty()) {
+ i::Isolate* internal = reinterpret_cast<i::Isolate*>(isolate);
+ if (internal->heap()->i18n_template_one() ==
+ internal->heap()->the_hole_value()) {
v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
-
- // Set aside internal field for ICU class.
raw_template->SetInternalFieldCount(1);
-
- icu_template.Reset(isolate, raw_template);
+ internal->heap()
+ ->SetI18nTemplateOne(*v8::Utils::OpenHandle(*raw_template));
}
- return v8::Local<v8::ObjectTemplate>::New(isolate, icu_template);
+ return v8::Utils::ToLocal(i::Handle<i::ObjectTemplateInfo>::cast(
+ internal->factory()->i18n_template_one()));
}
// static
-// Chrome Linux doesn't like static initializers in class, so we create
-// template on demand. This one has 2 internal fields.
v8::Local<v8::ObjectTemplate> Utils::GetTemplate2(v8::Isolate* isolate) {
- static v8::Persistent<v8::ObjectTemplate> icu_template_2;
-
- if (icu_template_2.IsEmpty()) {
+ i::Isolate* internal = reinterpret_cast<i::Isolate*>(isolate);
+ if (internal->heap()->i18n_template_two() ==
+ internal->heap()->the_hole_value()) {
v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
-
- // Set aside internal field for ICU class and additional data.
raw_template->SetInternalFieldCount(2);
-
- icu_template_2.Reset(isolate, raw_template);
+ internal->heap()
+ ->SetI18nTemplateTwo(*v8::Utils::OpenHandle(*raw_template));
}
- return v8::Local<v8::ObjectTemplate>::New(isolate, icu_template_2);
+ return v8::Utils::ToLocal(i::Handle<i::ObjectTemplateInfo>::cast(
+ internal->factory()->i18n_template_two()));
}
} // namespace v8_i18n
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index b135a9c670..52a65887a3 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -1221,6 +1221,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
shared->set_num_literals(literals_array_size);
if (is_generator) {
shared->set_instance_class_name(isolate()->heap()->Generator_string());
+ shared->DisableOptimization("generator");
}
return shared;
}
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 63cf66313c..8888aed26c 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -208,6 +208,7 @@ DEFINE_bool(track_computed_fields, true, "track computed boilerplate fields")
DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)
DEFINE_implication(track_computed_fields, track_fields)
+DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -236,7 +237,9 @@ DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
"crankshaft harvests type feedback from stub cache")
DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
-DEFINE_string(trace_phase, "Z", "trace generated IR for specified phases")
+DEFINE_bool(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
+DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
+DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
@@ -265,6 +268,8 @@ DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(idefs, false, "use informative definitions")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
+DEFINE_bool(array_bounds_checks_hoisting, false,
+ "perform array bounds checks hoisting")
DEFINE_bool(array_index_dehoisting, true,
"perform array index dehoisting")
DEFINE_bool(analyze_environment_liveness, true,
@@ -307,6 +312,9 @@ DEFINE_int(parallel_recompilation_delay, 0,
DEFINE_bool(omit_prototype_checks_for_leaf_maps, true,
"do not emit prototype checks if all prototypes have leaf maps, "
"deoptimize the optimized code if the layout of the maps changes.")
+DEFINE_bool(omit_map_checks_for_leaf_maps, true,
+ "do not emit check maps for constant values that have a leaf map, "
+ "deoptimize the optimized code if the layout of the maps changes.")
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index d097ed1dbb..2b15bfffab 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -334,10 +334,10 @@ inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
}
-inline JavaScriptFrame* SafeStackFrameIterator::frame() const {
+inline StackFrame* SafeStackFrameIterator::frame() const {
ASSERT(!done());
- ASSERT(frame_->is_java_script());
- return static_cast<JavaScriptFrame*>(frame_);
+ ASSERT(frame_->is_java_script() || frame_->is_exit());
+ return frame_;
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 890e77ad63..61792a628c 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -36,6 +36,7 @@
#include "safepoint-table.h"
#include "scopeinfo.h"
#include "string-stream.h"
+#include "vm-state-inl.h"
#include "allocation-inl.h"
@@ -221,7 +222,8 @@ SafeStackFrameIterator::SafeStackFrameIterator(
: StackFrameIteratorBase(isolate, false),
low_bound_(sp),
high_bound_(js_entry_sp),
- top_frame_type_(StackFrame::NONE) {
+ top_frame_type_(StackFrame::NONE),
+ external_callback_scope_(isolate->external_callback_scope()) {
StackFrame::State state;
StackFrame::Type type;
ThreadLocalTop* top = isolate->thread_local_top();
@@ -256,16 +258,28 @@ SafeStackFrameIterator::SafeStackFrameIterator(
}
if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
+ if (frame_ == NULL) return;
+
+ Advance();
- if (!done()) Advance();
+ if (frame_ != NULL && !frame_->is_exit() &&
+ external_callback_scope_ != NULL &&
+ external_callback_scope_->scope_address() < frame_->fp()) {
+ // Skip top ExternalCallbackScope if we already advanced to a JS frame
+    // under it. The sampler will take this top external callback anyway.
+ external_callback_scope_ = external_callback_scope_->previous();
+ }
}
bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
- Address fp = Isolate::c_entry_fp(top);
- if (!IsValidExitFrame(fp)) return false;
+ Address c_entry_fp = Isolate::c_entry_fp(top);
+ if (!IsValidExitFrame(c_entry_fp)) return false;
// There should be at least one JS_ENTRY stack handler.
- return Isolate::handler(top) != NULL;
+ Address handler = Isolate::handler(top);
+ if (handler == NULL) return false;
+ // Check that there are no js frames on top of the native frames.
+ return c_entry_fp < handler;
}
@@ -340,6 +354,24 @@ void SafeStackFrameIterator::Advance() {
AdvanceOneFrame();
if (done()) return;
if (frame_->is_java_script()) return;
+ if (frame_->is_exit() && external_callback_scope_) {
+ // Some of the EXIT frames may have ExternalCallbackScope allocated on
+ // top of them. In that case the scope corresponds to the first EXIT
+ // frame beneath it. There may be other EXIT frames on top of the
+ // ExternalCallbackScope, just skip them as we cannot collect any useful
+ // information about them.
+ if (external_callback_scope_->scope_address() < frame_->fp()) {
+ Address* callback_address =
+ external_callback_scope_->callback_address();
+ if (*callback_address != NULL) {
+ frame_->state_.pc_address = callback_address;
+ }
+ external_callback_scope_ = external_callback_scope_->previous();
+ ASSERT(external_callback_scope_ == NULL ||
+ external_callback_scope_->scope_address() > frame_->fp());
+ return;
+ }
+ }
}
}
@@ -540,7 +572,7 @@ void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->sp = sp;
state->fp = fp;
state->pc_address = ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(sp - 1 * kPointerSize));
+ reinterpret_cast<Address*>(sp - 1 * kPCOnStackSize));
}
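
The SafeStackFrameIterator changes above make the profiler attribute samples taken in EXIT frames to the external (C++) callback whose ExternalCallbackScope sits above that frame, instead of to the frame's own return address. A minimal standalone sketch of that attribution step, using hypothetical Frame and CallbackScope structs rather than the real V8 types:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for V8's StackFrame and ExternalCallbackScope.
struct CallbackScope {
  uintptr_t scope_address;   // where the scope object lives on the C++ stack
  void* callback;            // the external callback being executed
  CallbackScope* previous;   // scopes form a linked list, innermost first
};

struct Frame {
  uintptr_t fp;              // frame pointer
  bool is_exit;              // true for frames that left JS for C++
  void* pc;                  // sampled "program counter" for this frame
};

// Attribute each EXIT frame to the innermost callback scope entered above it
// (lower address, since the stack grows down), then pop that scope so outer
// EXIT frames pick up outer scopes. This mirrors the skipping logic added to
// SafeStackFrameIterator::Advance in the hunk above.
void AttributeExitFrames(Frame* frames, int count, CallbackScope* scope) {
  for (int i = 0; i < count; i++) {
    Frame* f = &frames[i];
    if (!f->is_exit || scope == NULL) continue;
    if (scope->scope_address < f->fp) {
      f->pc = scope->callback;
      scope = scope->previous;
    }
  }
}

int main() {
  CallbackScope inner = { 0x1000, reinterpret_cast<void*>(0xAAAA), NULL };
  Frame frames[] = { { 0x1100, true, NULL },     // EXIT frame under the scope
                     { 0x1200, false, NULL } };  // ordinary JS frame
  AttributeExitFrames(frames, 2, &inner);
  std::printf("exit frame pc = %p\n", frames[0].pc);
  return 0;
}
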
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 7e667a6acd..634ff8a7cb 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -47,6 +47,7 @@ int JSCallerSavedCode(int n);
// Forward declarations.
+class ExternalCallbackScope;
class StackFrameIteratorBase;
class ThreadLocalTop;
class Isolate;
@@ -92,7 +93,7 @@ class StackHandlerConstants : public AllStatic {
static const int kContextOffset = 3 * kPointerSize;
static const int kFPOffset = 4 * kPointerSize;
- static const int kSize = kFPOffset + kPointerSize;
+ static const int kSize = kFPOffset + kFPOnStackSize;
static const int kSlotCount = kSize >> kPointerSizeLog2;
};
@@ -168,13 +169,14 @@ class StandardFrameConstants : public AllStatic {
// context and function.
// StandardFrame::IterateExpressions assumes that kContextOffset is the last
// object pointer.
- static const int kFixedFrameSize = 4 * kPointerSize;
+ static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
+ 2 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kFPOnStackSize;
+ static const int kCallerSPOffset = +2 * kPCOnStackSize;
};
@@ -883,7 +885,7 @@ class SafeStackFrameIterator: public StackFrameIteratorBase {
Address fp, Address sp,
Address js_entry_sp);
- inline JavaScriptFrame* frame() const;
+ inline StackFrame* frame() const;
void Advance();
StackFrame::Type top_frame_type() const { return top_frame_type_; }
@@ -902,6 +904,7 @@ class SafeStackFrameIterator: public StackFrameIteratorBase {
const Address low_bound_;
const Address high_bound_;
StackFrame::Type top_frame_type_;
+ ExternalCallbackScope* external_callback_scope_;
};
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 5c65635d0f..88ebe31647 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -262,6 +262,7 @@ class GlobalHandles::Node {
ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
VMState<EXTERNAL> state(isolate);
+ HandleScope handle_scope(isolate);
weak_reference_callback_(reinterpret_cast<v8::Isolate*>(isolate),
reinterpret_cast<Persistent<Value>*>(&object),
par);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index e695e94d4e..26fd53114c 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -239,12 +239,15 @@ const int kMinInt = -kMaxInt - 1;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
-const int kCharSize = sizeof(char); // NOLINT
-const int kShortSize = sizeof(short); // NOLINT
-const int kIntSize = sizeof(int); // NOLINT
-const int kDoubleSize = sizeof(double); // NOLINT
-const int kIntptrSize = sizeof(intptr_t); // NOLINT
-const int kPointerSize = sizeof(void*); // NOLINT
+const int kCharSize = sizeof(char); // NOLINT
+const int kShortSize = sizeof(short); // NOLINT
+const int kIntSize = sizeof(int); // NOLINT
+const int kDoubleSize = sizeof(double); // NOLINT
+const int kIntptrSize = sizeof(intptr_t); // NOLINT
+const int kPointerSize = sizeof(void*); // NOLINT
+const int kRegisterSize = kPointerSize;
+const int kPCOnStackSize = kRegisterSize;
+const int kFPOnStackSize = kRegisterSize;
const int kDoubleSizeLog2 = 3;
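
With kRegisterSize, kPCOnStackSize and kFPOnStackSize all defined as kPointerSize, the revised StandardFrameConstants in frames.h stay numerically identical to the old 4 * kPointerSize layout; the split only matters on targets where the return-address or saved-FP slot is not register-sized. A small worked check of that arithmetic, with the constants copied as assumed stand-ins:

#include <cstdio>

// Assumed stand-ins for the constants defined in globals.h above.
const int kPointerSize   = static_cast<int>(sizeof(void*));
const int kRegisterSize  = kPointerSize;
const int kPCOnStackSize = kRegisterSize;
const int kFPOnStackSize = kRegisterSize;

// Old and new formulas for StandardFrameConstants::kFixedFrameSize.
const int kFixedFrameSizeOld = 4 * kPointerSize;
const int kFixedFrameSizeNew =
    kPCOnStackSize + kFPOnStackSize + 2 * kPointerSize;

int main() {
  // Prints 32 and 32 on a 64-bit build, 16 and 16 on a 32-bit build.
  std::printf("old=%d new=%d\n", kFixedFrameSizeOld, kFixedFrameSizeNew);
  return 0;
}
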
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 97c56df0c0..3c1d4d274b 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -712,19 +712,6 @@ void ExternalStringTable::ShrinkNewStrings(int position) {
}
-void ErrorObjectList::Add(JSObject* object) {
- list_.Add(object);
-}
-
-
-void ErrorObjectList::Iterate(ObjectVisitor* v) {
- if (!list_.is_empty()) {
- Object** start = &list_[0];
- v->VisitPointers(start, start + list_.length());
- }
-}
-
-
void Heap::ClearInstanceofCache() {
set_instanceof_cache_function(the_hole_value());
}
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index 3b1f235e75..9f9f84a01d 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -1339,10 +1339,10 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
}
break;
}
- case CONSTANT_FUNCTION:
+ case CONSTANT:
SetPropertyReference(
js_obj, entry,
- descs->GetKey(i), descs->GetConstantFunction(i));
+ descs->GetKey(i), descs->GetConstant(i));
break;
case CALLBACKS:
ExtractAccessorPairProperty(
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index dff217a991..5cd85445b4 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -583,8 +583,6 @@ void Heap::GarbageCollectionEpilogue() {
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->AfterGarbageCollection();
#endif // ENABLE_DEBUGGER_SUPPORT
-
- error_object_list_.DeferredFormatStackTrace(isolate());
}
@@ -705,6 +703,16 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
+int Heap::NotifyContextDisposed() {
+ if (FLAG_parallel_recompilation) {
+ // Flush the queued recompilation tasks.
+ isolate()->optimizing_compiler_thread()->Flush();
+ }
+ flush_monomorphic_ics_ = true;
+ return ++contexts_disposed_;
+}
+
+
void Heap::PerformScavenge() {
GCTracer tracer(this, NULL, NULL);
if (incremental_marking()->IsStopped()) {
@@ -922,6 +930,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
{
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
@@ -1027,6 +1036,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
{
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type);
}
@@ -1428,8 +1438,6 @@ void Heap::Scavenge() {
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
- error_object_list_.UpdateReferencesInNewSpace(this);
-
promotion_queue_.Destroy();
if (!FLAG_watch_ic_patching) {
@@ -3215,6 +3223,9 @@ bool Heap::CreateInitialObjects() {
}
set_observed_symbol(Symbol::cast(obj));
+ set_i18n_template_one(the_hole_value());
+ set_i18n_template_two(the_hole_value());
+
// Handling of script id generation is in Factory::NewScript.
set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
@@ -5353,25 +5364,16 @@ MaybeObject* Heap::AllocateRawOneByteString(int length,
if (length < 0 || length > SeqOneByteString::kMaxLength) {
return Failure::OutOfMemoryException(0xb);
}
-
int size = SeqOneByteString::SizeFor(length);
ASSERT(size <= SeqOneByteString::kMaxSize);
-
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
AllocationSpace retry_space = OLD_DATA_SPACE;
- if (space == NEW_SPACE) {
- if (size > kMaxObjectSizeInNewSpace) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- } else if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in new space, retry in large object space.
- retry_space = LO_SPACE;
- }
- } else if (space == OLD_DATA_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
+ if (size > Page::kMaxNonCodeHeapObjectSize) {
+ // Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
}
+
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -5397,18 +5399,11 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
AllocationSpace retry_space = OLD_DATA_SPACE;
- if (space == NEW_SPACE) {
- if (size > kMaxObjectSizeInNewSpace) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- } else if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in new space, retry in large object space.
- retry_space = LO_SPACE;
- }
- } else if (space == OLD_DATA_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
+ if (size > Page::kMaxNonCodeHeapObjectSize) {
+ // Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
}
+
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -5482,7 +5477,7 @@ MaybeObject* Heap::AllocateRawFixedArray(int length) {
if (always_allocate()) return AllocateFixedArray(length, TENURED);
// Allocate the raw data for a fixed array.
int size = FixedArray::SizeFor(length);
- return size <= kMaxObjectSizeInNewSpace
+ return size <= Page::kMaxNonCodeHeapObjectSize
? new_space_.AllocateRaw(size)
: lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
}
@@ -5553,22 +5548,16 @@ MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
return Failure::OutOfMemoryException(0xe);
}
-
+ int size = FixedArray::SizeFor(length);
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- int size = FixedArray::SizeFor(length);
- if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
- // Too big for new space.
- space = LO_SPACE;
- } else if (space == OLD_POINTER_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- // Too big for old pointer space.
+ AllocationSpace retry_space = OLD_POINTER_SPACE;
+
+ if (size > Page::kMaxNonCodeHeapObjectSize) {
+ // Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
}
- AllocationSpace retry_space =
- (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
-
return AllocateRaw(size, space, retry_space);
}
@@ -5686,27 +5675,19 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
return Failure::OutOfMemoryException(0xf);
}
-
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = FixedDoubleArray::SizeFor(length);
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace retry_space = OLD_DATA_SPACE;
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
#endif
- if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
- // Too big for new space.
- space = LO_SPACE;
- } else if (space == OLD_DATA_SPACE &&
- size > Page::kMaxNonCodeHeapObjectSize) {
- // Too big for old data space.
+ if (size > Page::kMaxNonCodeHeapObjectSize) {
+ // Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
}
- AllocationSpace retry_space =
- (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
-
HeapObject* object;
{ MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
@@ -6575,7 +6556,6 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
external_string_table_.Iterate(v);
- error_object_list_.Iterate(v);
}
v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}
@@ -6975,8 +6955,6 @@ void Heap::TearDown() {
external_string_table_.TearDown();
- error_object_list_.TearDown();
-
new_space_.TearDown();
if (old_pointer_space_ != NULL) {
@@ -7929,120 +7907,6 @@ void ExternalStringTable::TearDown() {
}
-// Update all references.
-void ErrorObjectList::UpdateReferences() {
- for (int i = 0; i < list_.length(); i++) {
- HeapObject* object = HeapObject::cast(list_[i]);
- MapWord first_word = object->map_word();
- if (first_word.IsForwardingAddress()) {
- list_[i] = first_word.ToForwardingAddress();
- }
- }
-}
-
-
-// Unforwarded objects in new space are dead and removed from the list.
-void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
- if (list_.is_empty()) return;
- if (!nested_) {
- int write_index = 0;
- for (int i = 0; i < list_.length(); i++) {
- MapWord first_word = HeapObject::cast(list_[i])->map_word();
- if (first_word.IsForwardingAddress()) {
- list_[write_index++] = first_word.ToForwardingAddress();
- }
- }
- list_.Rewind(write_index);
- } else {
- // If a GC is triggered during DeferredFormatStackTrace, we do not move
- // objects in the list, just remove dead ones, as to not confuse the
- // loop in DeferredFormatStackTrace.
- for (int i = 0; i < list_.length(); i++) {
- MapWord first_word = HeapObject::cast(list_[i])->map_word();
- list_[i] = first_word.IsForwardingAddress()
- ? first_word.ToForwardingAddress()
- : heap->the_hole_value();
- }
- }
-}
-
-
-void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
- // If formatting the stack trace causes a GC, this method will be
- // recursively called. In that case, skip the recursive call, since
- // the loop modifies the list while iterating over it.
- if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
- nested_ = true;
- HandleScope scope(isolate);
- Handle<String> stack_key = isolate->factory()->stack_string();
- int write_index = 0;
- int budget = kBudgetPerGC;
- for (int i = 0; i < list_.length(); i++) {
- Object* object = list_[i];
- JSFunction* getter_fun;
-
- { DisallowHeapAllocation no_gc;
- // Skip possible holes in the list.
- if (object->IsTheHole()) continue;
- if (isolate->heap()->InNewSpace(object) || budget == 0) {
- list_[write_index++] = object;
- continue;
- }
-
- // Check whether the stack property is backed by the original getter.
- LookupResult lookup(isolate);
- JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
- if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
- Object* callback = lookup.GetCallbackObject();
- if (!callback->IsAccessorPair()) continue;
- Object* getter_obj = AccessorPair::cast(callback)->getter();
- if (!getter_obj->IsJSFunction()) continue;
- getter_fun = JSFunction::cast(getter_obj);
- String* key = isolate->heap()->hidden_stack_trace_string();
- Object* value = getter_fun->GetHiddenProperty(key);
- if (key != value) continue;
- }
-
- budget--;
- HandleScope scope(isolate);
- bool has_exception = false;
-#ifdef DEBUG
- Handle<Map> map(HeapObject::cast(object)->map(), isolate);
-#endif
- Handle<Object> object_handle(object, isolate);
- Handle<Object> getter_handle(getter_fun, isolate);
- Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
- ASSERT(*map == HeapObject::cast(*object_handle)->map());
- if (has_exception) {
- // Hit an exception (most likely a stack overflow).
- // Wrap up this pass and retry after another GC.
- isolate->clear_pending_exception();
- // We use the handle since calling the getter might have caused a GC.
- list_[write_index++] = *object_handle;
- budget = 0;
- }
- }
- list_.Rewind(write_index);
- list_.Trim();
- nested_ = false;
-}
-
-
-void ErrorObjectList::RemoveUnmarked(Heap* heap) {
- for (int i = 0; i < list_.length(); i++) {
- HeapObject* object = HeapObject::cast(list_[i]);
- if (!Marking::MarkBitFrom(object).Get()) {
- list_[i] = heap->the_hole_value();
- }
- }
-}
-
-
-void ErrorObjectList::TearDown() {
- list_.Free();
-}
-
-
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
chunk->set_next_chunk(chunks_queued_for_free_);
chunks_queued_for_free_ = chunk;
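
The AllocateRawOneByteString / TwoByteString / FixedArray / FixedDoubleArray hunks above all collapse the old three-way size test (new-space limit, page limit, large-object space) into a single comparison against Page::kMaxNonCodeHeapObjectSize, letting the retry space cover the tenured fallback. A standalone sketch of the simplified decision, with a made-up size limit standing in for the real page constant:

#include <cstdio>

enum Space { NEW_SPACE, OLD_DATA_SPACE, LO_SPACE };
enum Pretenure { NOT_TENURED, TENURED };

// Illustrative stand-in for Page::kMaxNonCodeHeapObjectSize.
const int kMaxRegularObjectSize = 16 * 1024;

// Mirrors the simplified logic: pick the preferred space from the pretenure
// flag, fall back to OLD_DATA_SPACE on retry, and route oversized objects
// straight to large object space (where the retry space is ignored).
void ChooseSpaces(int size, Pretenure pretenure, Space* space, Space* retry) {
  *space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  *retry = OLD_DATA_SPACE;
  if (size > kMaxRegularObjectSize) {
    *space = LO_SPACE;
  }
}

int main() {
  Space space, retry;
  ChooseSpaces(64 * 1024, NOT_TENURED, &space, &retry);
  std::printf("space=%d retry=%d\n", space, retry);  // LO_SPACE, OLD_DATA_SPACE
  return 0;
}
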
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 6b0236330f..5e8a2e516d 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -188,7 +188,9 @@ namespace internal {
V(Symbol, frozen_symbol, FrozenSymbol) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
- V(Symbol, observed_symbol, ObservedSymbol)
+ V(Symbol, observed_symbol, ObservedSymbol) \
+ V(HeapObject, i18n_template_one, I18nTemplateOne) \
+ V(HeapObject, i18n_template_two, I18nTemplateTwo)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -475,41 +477,6 @@ class ExternalStringTable {
};
-// The stack property of an error object is implemented as a getter that
-// formats the attached raw stack trace into a string. This raw stack trace
-// keeps code and function objects alive until the getter is called the first
-// time. To release those objects, we call the getter after each GC for
-// newly tenured error objects that are kept in a list.
-class ErrorObjectList {
- public:
- inline void Add(JSObject* object);
-
- inline void Iterate(ObjectVisitor* v);
-
- void TearDown();
-
- void RemoveUnmarked(Heap* heap);
-
- void DeferredFormatStackTrace(Isolate* isolate);
-
- void UpdateReferences();
-
- void UpdateReferencesInNewSpace(Heap* heap);
-
- private:
- static const int kBudgetPerGC = 16;
-
- ErrorObjectList() : nested_(false) { }
-
- friend class Heap;
-
- List<Object*> list_;
- bool nested_;
-
- DISALLOW_COPY_AND_ASSIGN(ErrorObjectList);
-};
-
-
enum ArrayStorageAllocationMode {
DONT_INITIALIZE_ARRAY_ELEMENTS,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -1287,10 +1254,7 @@ class Heap {
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed.
- int NotifyContextDisposed() {
- flush_monomorphic_ics_ = true;
- return ++contexts_disposed_;
- }
+ int NotifyContextDisposed();
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
@@ -1333,6 +1297,12 @@ class Heap {
ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
global_gc_epilogue_callback_ = callback;
}
+ void SetI18nTemplateOne(ObjectTemplateInfo* tmpl) {
+ set_i18n_template_one(tmpl);
+ }
+ void SetI18nTemplateTwo(ObjectTemplateInfo* tmpl) {
+ set_i18n_template_two(tmpl);
+ }
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
@@ -1716,8 +1686,6 @@ class Heap {
// we try to promote this object.
inline bool ShouldBePromoted(Address old_address, int object_size);
- int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
-
void ClearJSFunctionResultCaches();
void ClearNormalizedMapCaches();
@@ -1798,10 +1766,6 @@ class Heap {
return &external_string_table_;
}
- ErrorObjectList* error_object_list() {
- return &error_object_list_;
- }
-
// Returns the current sweep generation.
int sweep_generation() {
return sweep_generation_;
@@ -1966,12 +1930,6 @@ class Heap {
int scan_on_scavenge_pages_;
-#if V8_TARGET_ARCH_X64
- static const int kMaxObjectSizeInNewSpace = 1024*KB;
-#else
- static const int kMaxObjectSizeInNewSpace = 512*KB;
-#endif
-
NewSpace new_space_;
OldSpace* old_pointer_space_;
OldSpace* old_data_space_;
@@ -2406,8 +2364,6 @@ class Heap {
ExternalStringTable external_string_table_;
- ErrorObjectList error_object_list_;
-
VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
MemoryChunk* chunks_queued_for_free_;
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
index e50cd7aaf0..ff0b072ce0 100644
--- a/deps/v8/src/hydrogen-bce.cc
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -189,6 +189,8 @@ class BoundsCheckBbData: public ZoneObject {
}
if (!keep_new_check) {
+ new_check->block()->graph()->isolate()->counters()->
+ bounds_checks_eliminated()->Increment();
new_check->DeleteAndReplaceWith(new_check->ActualValue());
}
@@ -347,6 +349,8 @@ void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
NULL);
*data_p = bb_data_list;
} else if (data->OffsetIsCovered(offset)) {
+ bb->graph()->isolate()->counters()->
+ bounds_checks_eliminated()->Increment();
check->DeleteAndReplaceWith(check->ActualValue());
} else if (data->BasicBlock() != bb ||
!data->CoverCheck(check, offset)) {
diff --git a/deps/v8/src/hydrogen-bch.cc b/deps/v8/src/hydrogen-bch.cc
new file mode 100644
index 0000000000..8646747caf
--- /dev/null
+++ b/deps/v8/src/hydrogen-bch.cc
@@ -0,0 +1,408 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-bch.h"
+
+namespace v8 {
+namespace internal {
+
+/*
+ * This class is a table with one element for each basic block.
+ *
+ * It is used to check if, inside one loop, all execution paths contain
+ * a bounds check for a particular [index, length] combination.
+ * The reason is that if there is a path that stays in the loop without
+ * executing a check then the check cannot be hoisted out of the loop (it
+ * would likely fail and cause a deopt for no good reason).
+ * We also check if there are paths that exit the loop early, and if so we
+ * perform the hoisting only if graph()->use_optimistic_licm() is true.
+ * The reason is that such paths are relatively common and harmless (like in
+ * a "search" method that scans an array until an element is found), but in
+ * some cases they could cause a deopt if we hoist the check, so this is a
+ * situation we need to detect.
+ */
+class InductionVariableBlocksTable BASE_EMBEDDED {
+ public:
+ class Element {
+ public:
+ static const int kNoBlock = -1;
+
+ HBasicBlock* block() { return block_; }
+ void set_block(HBasicBlock* block) { block_ = block; }
+ bool is_start() { return is_start_; }
+ bool is_proper_exit() { return is_proper_exit_; }
+ bool is_in_loop() { return is_in_loop_; }
+ bool has_check() { return has_check_; }
+ void set_has_check() { has_check_ = true; }
+ InductionVariableLimitUpdate* additional_limit() {
+ return &additional_limit_;
+ }
+
+ /*
+ * Initializes the table element for a given loop (identified by its
+ * induction variable).
+ */
+ void InitializeLoop(InductionVariableData* data) {
+ ASSERT(data->limit() != NULL);
+ HLoopInformation* loop = data->phi()->block()->current_loop();
+ is_start_ = (block() == loop->loop_header());
+ is_proper_exit_ = (block() == data->induction_exit_target());
+ is_in_loop_ = loop->IsNestedInThisLoop(block()->current_loop());
+ has_check_ = false;
+ }
+
+ // Utility methods to iterate over dominated blocks.
+ void ResetCurrentDominatedBlock() { current_dominated_block_ = kNoBlock; }
+ HBasicBlock* CurrentDominatedBlock() {
+ ASSERT(current_dominated_block_ != kNoBlock);
+ return current_dominated_block_ < block()->dominated_blocks()->length() ?
+ block()->dominated_blocks()->at(current_dominated_block_) : NULL;
+ }
+ HBasicBlock* NextDominatedBlock() {
+ current_dominated_block_++;
+ return CurrentDominatedBlock();
+ }
+
+ Element()
+ : block_(NULL), is_start_(false), is_proper_exit_(false),
+ has_check_(false), additional_limit_(),
+ current_dominated_block_(kNoBlock) {}
+
+ private:
+ HBasicBlock* block_;
+ bool is_start_;
+ bool is_proper_exit_;
+ bool is_in_loop_;
+ bool has_check_;
+ InductionVariableLimitUpdate additional_limit_;
+ int current_dominated_block_;
+ };
+
+ HGraph* graph() { return graph_; }
+ HBasicBlock* loop_header() { return loop_header_; }
+ Element* at(int index) { return &(elements_.at(index)); }
+ Element* at(HBasicBlock* block) { return at(block->block_id()); }
+
+ void AddCheckAt(HBasicBlock* block) {
+ at(block->block_id())->set_has_check();
+ }
+
+ /*
+ * Initializes the table for a given loop (identified by its induction
+ * variable).
+ */
+ void InitializeLoop(InductionVariableData* data) {
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ at(i)->InitializeLoop(data);
+ }
+ loop_header_ = data->phi()->block()->current_loop()->loop_header();
+ }
+
+
+ enum Hoistability {
+ HOISTABLE,
+ OPTIMISTICALLY_HOISTABLE,
+ NOT_HOISTABLE
+ };
+
+ /*
+ * This method checks if it is appropriate to hoist the bounds checks on an
+ * induction variable out of the loop.
+ * The problem is that in the loop code graph there could be execution paths
+ * where the check is not performed, but hoisting the check has the same
+ * semantics as performing it at every loop iteration, which could cause
+ * unnecessary check failures (which would mean unnecessary deoptimizations).
+ * The method returns HOISTABLE if there are no paths that perform an
+ * iteration (loop back to the header) without meeting a check, and
+ * OPTIMISTICALLY_HOISTABLE if early exit paths are found.
+ */
+ Hoistability CheckHoistability() {
+ for (int i = 0; i < elements_.length(); i++) {
+ at(i)->ResetCurrentDominatedBlock();
+ }
+ bool unsafe = false;
+
+ HBasicBlock* current = loop_header();
+ while (current != NULL) {
+ HBasicBlock* next;
+
+ if (at(current)->has_check() || !at(current)->is_in_loop()) {
+ // We found a check or we reached a dominated block out of the loop,
+ // therefore this block is safe and we can backtrack.
+ next = NULL;
+ } else {
+ for (int i = 0; i < current->end()->SuccessorCount(); i ++) {
+ Element* successor = at(current->end()->SuccessorAt(i));
+
+ if (!successor->is_in_loop()) {
+ if (!successor->is_proper_exit()) {
+ // We found a path that exits the loop early, and is not the exit
+ // related to the induction limit, therefore hoisting checks is
+ // an optimistic assumption.
+ unsafe = true;
+ }
+ }
+
+ if (successor->is_start()) {
+ // We found a path that does one loop iteration without meeting any
+ // check, therefore hoisting checks would be likely to cause
+ // unnecessary deopts.
+ return NOT_HOISTABLE;
+ }
+ }
+
+ next = at(current)->NextDominatedBlock();
+ }
+
+ // If we have no next block we need to backtrack the tree traversal.
+ while (next == NULL) {
+ current = current->dominator();
+ if (current != NULL) {
+ next = at(current)->NextDominatedBlock();
+ } else {
+ // We reached the root: next stays NULL.
+ next = NULL;
+ break;
+ }
+ }
+
+ current = next;
+ }
+
+ return unsafe ? OPTIMISTICALLY_HOISTABLE : HOISTABLE;
+ }
+
+ explicit InductionVariableBlocksTable(HGraph* graph)
+ : graph_(graph), loop_header_(NULL),
+ elements_(graph->blocks()->length(), graph->zone()) {
+ for (int i = 0; i < graph->blocks()->length(); i++) {
+ Element element;
+ element.set_block(graph->blocks()->at(i));
+ elements_.Add(element, graph->zone());
+ ASSERT(at(i)->block()->block_id() == i);
+ }
+ }
+
+ // Tries to hoist a check out of its induction loop.
+ void ProcessRelatedChecks(
+ InductionVariableData::InductionVariableCheck* check,
+ InductionVariableData* data) {
+ HValue* length = check->check()->length();
+ check->set_processed();
+ HBasicBlock* header =
+ data->phi()->block()->current_loop()->loop_header();
+ HBasicBlock* pre_header = header->predecessors()->at(0);
+ // Check that the limit is defined in the loop preheader.
+ if (!data->limit()->IsInteger32Constant()) {
+ HBasicBlock* limit_block = data->limit()->block();
+ if (limit_block != pre_header &&
+ !limit_block->Dominates(pre_header)) {
+ return;
+ }
+ }
+ // Check that the length and limit have compatible representations.
+ if (!(data->limit()->representation().Equals(
+ length->representation()) ||
+ data->limit()->IsInteger32Constant())) {
+ return;
+ }
+ // Check that the length is defined in the loop preheader.
+ if (check->check()->length()->block() != pre_header &&
+ !check->check()->length()->block()->Dominates(pre_header)) {
+ return;
+ }
+
+ // Add checks to the table.
+ for (InductionVariableData::InductionVariableCheck* current_check = check;
+ current_check != NULL;
+ current_check = current_check->next()) {
+ if (current_check->check()->length() != length) continue;
+
+ AddCheckAt(current_check->check()->block());
+ current_check->set_processed();
+ }
+
+ // Check that we will not cause unwanted deoptimizations.
+ Hoistability hoistability = CheckHoistability();
+ if (hoistability == NOT_HOISTABLE ||
+ (hoistability == OPTIMISTICALLY_HOISTABLE &&
+ !graph()->use_optimistic_licm())) {
+ return;
+ }
+
+ // We will do the hoisting, but we must see if the limit is "limit" or if
+    // all checks are done on constants: if all checks are done against the
+    // same
+ // constant limit we will use that instead of the induction limit.
+ bool has_upper_constant_limit = true;
+ InductionVariableData::InductionVariableCheck* current_check = check;
+ int32_t upper_constant_limit =
+ current_check != NULL && current_check->HasUpperLimit() ?
+ current_check->upper_limit() : 0;
+ while (current_check != NULL) {
+ if (check->HasUpperLimit()) {
+ if (check->upper_limit() != upper_constant_limit) {
+ has_upper_constant_limit = false;
+ }
+ } else {
+ has_upper_constant_limit = false;
+ }
+
+ current_check->check()->block()->graph()->isolate()->counters()->
+ bounds_checks_eliminated()->Increment();
+ current_check->check()->set_skip_check();
+ current_check = current_check->next();
+ }
+
+ // Choose the appropriate limit.
+ HValue* limit = data->limit();
+ if (has_upper_constant_limit) {
+ HConstant* new_limit = new(pre_header->graph()->zone()) HConstant(
+ upper_constant_limit, length->representation());
+ new_limit->InsertBefore(pre_header->end());
+ limit = new_limit;
+ }
+
+ // If necessary, redefine the limit in the preheader.
+ if (limit->IsInteger32Constant() &&
+ limit->block() != pre_header &&
+ !limit->block()->Dominates(pre_header)) {
+ HConstant* new_limit = new(pre_header->graph()->zone()) HConstant(
+ limit->GetInteger32Constant(), length->representation());
+ new_limit->InsertBefore(pre_header->end());
+ limit = new_limit;
+ }
+
+ // Do the hoisting.
+ HBoundsCheck* hoisted_check = new(pre_header->zone()) HBoundsCheck(
+ limit, check->check()->length());
+ hoisted_check->InsertBefore(pre_header->end());
+ hoisted_check->set_allow_equality(true);
+ hoisted_check->block()->graph()->isolate()->counters()->
+ bounds_checks_hoisted()->Increment();
+ }
+
+ void CollectInductionVariableData(HBasicBlock* bb) {
+ bool additional_limit = false;
+
+ for (int i = 0; i < bb->phis()->length(); i++) {
+ HPhi* phi = bb->phis()->at(i);
+ phi->DetectInductionVariable();
+ }
+
+ additional_limit = InductionVariableData::ComputeInductionVariableLimit(
+ bb, at(bb)->additional_limit());
+
+ if (additional_limit) {
+ at(bb)->additional_limit()->updated_variable->
+ UpdateAdditionalLimit(at(bb)->additional_limit());
+ }
+
+ for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
+ if (!i->IsBoundsCheck()) continue;
+ HBoundsCheck* check = HBoundsCheck::cast(i);
+ InductionVariableData::BitwiseDecompositionResult decomposition;
+ InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
+ if (!decomposition.base->IsPhi()) continue;
+ HPhi* phi = HPhi::cast(decomposition.base);
+
+ if (!phi->IsInductionVariable()) continue;
+ InductionVariableData* data = phi->induction_variable_data();
+
+ // For now ignore loops decrementing the index.
+ if (data->increment() <= 0) continue;
+ if (!data->LowerLimitIsNonNegativeConstant()) continue;
+
+ // TODO(mmassi): skip OSR values for check->length().
+ if (check->length() == data->limit() ||
+ check->length() == data->additional_upper_limit()) {
+ check->block()->graph()->isolate()->counters()->
+ bounds_checks_eliminated()->Increment();
+ check->set_skip_check();
+ continue;
+ }
+
+ if (!phi->IsLimitedInductionVariable()) continue;
+
+ int32_t limit = data->ComputeUpperLimit(decomposition.and_mask,
+ decomposition.or_mask);
+ phi->induction_variable_data()->AddCheck(check, limit);
+ }
+
+ for (int i = 0; i < bb->dominated_blocks()->length(); i++) {
+ CollectInductionVariableData(bb->dominated_blocks()->at(i));
+ }
+
+ if (additional_limit) {
+ at(bb->block_id())->additional_limit()->updated_variable->
+ UpdateAdditionalLimit(at(bb->block_id())->additional_limit());
+ }
+ }
+
+ void EliminateRedundantBoundsChecks(HBasicBlock* bb) {
+ for (int i = 0; i < bb->phis()->length(); i++) {
+ HPhi* phi = bb->phis()->at(i);
+ if (!phi->IsLimitedInductionVariable()) continue;
+
+ InductionVariableData* induction_data = phi->induction_variable_data();
+ InductionVariableData::ChecksRelatedToLength* current_length_group =
+ induction_data->checks();
+ while (current_length_group != NULL) {
+ current_length_group->CloseCurrentBlock();
+ InductionVariableData::InductionVariableCheck* current_base_check =
+ current_length_group->checks();
+ InitializeLoop(induction_data);
+
+ while (current_base_check != NULL) {
+ ProcessRelatedChecks(current_base_check, induction_data);
+ while (current_base_check != NULL &&
+ current_base_check->processed()) {
+ current_base_check = current_base_check->next();
+ }
+ }
+
+ current_length_group = current_length_group->next();
+ }
+ }
+ }
+
+ private:
+ HGraph* graph_;
+ HBasicBlock* loop_header_;
+ ZoneList<Element> elements_;
+};
+
+
+void HBoundsCheckHoistingPhase::HoistRedundantBoundsChecks() {
+ InductionVariableBlocksTable table(graph());
+ table.CollectInductionVariableData(graph()->entry_block());
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ table.EliminateRedundantBoundsChecks(graph()->blocks()->at(i));
+ }
+}
+
+} } // namespace v8::internal
+
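Conceptually, the new phase turns a per-iteration bounds check whose index is a limited induction variable into a single check against the loop limit in the preheader. The following standalone sketch shows the source-level effect of that transformation on an example loop; it is an illustration of the idea, not the Hydrogen API:

#include <cassert>
#include <vector>

// Before: the bounds check (the assert) runs on every iteration.
int SumChecked(const std::vector<int>& a, int n) {
  int sum = 0;
  for (int i = 0; i < n; i++) {
    assert(i < static_cast<int>(a.size()));  // per-iteration bounds check
    sum += a[i];
  }
  return sum;
}

// After hoisting: one check against the induction limit in the "preheader";
// the loop body can then use unchecked accesses. Valid only because every
// completed iteration would have executed the check (increment > 0, lower
// limit non-negative, limit defined before the loop).
int SumHoisted(const std::vector<int>& a, int n) {
  assert(n <= static_cast<int>(a.size()));  // hoisted check, allow_equality
  int sum = 0;
  for (int i = 0; i < n; i++) {
    sum += a[i];
  }
  return sum;
}

int main() {
  std::vector<int> v;
  v.push_back(1); v.push_back(2); v.push_back(3); v.push_back(4);
  return SumChecked(v, 4) == SumHoisted(v, 4) ? 0 : 1;
}
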
diff --git a/deps/v8/src/platform-tls.h b/deps/v8/src/hydrogen-bch.h
index 32516636be..a22dacdd42 100644
--- a/deps/v8/src/platform-tls.h
+++ b/deps/v8/src/hydrogen-bch.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,26 +25,31 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform and architecture specific thread local store functions.
+#ifndef V8_HYDROGEN_BCH_H_
+#define V8_HYDROGEN_BCH_H_
-#ifndef V8_PLATFORM_TLS_H_
-#define V8_PLATFORM_TLS_H_
+#include "hydrogen.h"
-#ifndef V8_NO_FAST_TLS
+namespace v8 {
+namespace internal {
-// When fast TLS is requested we include the appropriate
-// implementation header.
-//
-// The implementation header defines V8_FAST_TLS_SUPPORTED if it
-// provides fast TLS support for the current platform and architecture
-// combination.
-#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
-#include "platform-tls-win32.h"
-#elif defined(__APPLE__)
-#include "platform-tls-mac.h"
-#endif
+class HBoundsCheckHoistingPhase : public HPhase {
+ public:
+ explicit HBoundsCheckHoistingPhase(HGraph* graph)
+ : HPhase("H_Bounds checks hoisting", graph) { }
+
+ void Run() {
+ HoistRedundantBoundsChecks();
+ }
+
+ private:
+ void HoistRedundantBoundsChecks();
+
+ DISALLOW_COPY_AND_ASSIGN(HBoundsCheckHoistingPhase);
+};
+
-#endif
+} } // namespace v8::internal
-#endif // V8_PLATFORM_TLS_H_
+#endif // V8_HYDROGEN_BCH_H_
diff --git a/deps/v8/src/hydrogen-canonicalize.cc b/deps/v8/src/hydrogen-canonicalize.cc
index 40cbe4c065..643234392d 100644
--- a/deps/v8/src/hydrogen-canonicalize.cc
+++ b/deps/v8/src/hydrogen-canonicalize.cc
@@ -38,11 +38,18 @@ void HCanonicalizePhase::Run() {
for (int i = 0; i < blocks->length(); ++i) {
for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- if (instr->IsArithmeticBinaryOperation() &&
- instr->representation().IsInteger32() &&
- instr->HasAtLeastOneUseWithFlagAndNoneWithout(
- HInstruction::kTruncatingToInt32)) {
- instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+ if (instr->IsArithmeticBinaryOperation()) {
+ if (instr->representation().IsInteger32()) {
+ if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+ HInstruction::kTruncatingToInt32)) {
+ instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+ }
+ } else if (instr->representation().IsSmi()) {
+ if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+ HInstruction::kTruncatingToSmi)) {
+ instr->SetFlag(HInstruction::kAllUsesTruncatingToSmi);
+ }
+ }
}
}
}
diff --git a/deps/v8/src/hydrogen-deoptimizing-mark.cc b/deps/v8/src/hydrogen-deoptimizing-mark.cc
index 804d94753a..626848e012 100644
--- a/deps/v8/src/hydrogen-deoptimizing-mark.cc
+++ b/deps/v8/src/hydrogen-deoptimizing-mark.cc
@@ -107,7 +107,7 @@ void HPropagateDeoptimizingMarkPhase::NullifyUnreachableInstructions() {
instr->DeleteAndReplaceWith(last_dummy);
continue;
}
- if (instr->IsSoftDeoptimize()) {
+ if (instr->IsDeoptimize()) {
ASSERT(block->IsDeoptimizing());
nullify = true;
}
diff --git a/deps/v8/src/hydrogen-environment-liveness.cc b/deps/v8/src/hydrogen-environment-liveness.cc
index 20e680c145..9efa47bd34 100644
--- a/deps/v8/src/hydrogen-environment-liveness.cc
+++ b/deps/v8/src/hydrogen-environment-liveness.cc
@@ -172,15 +172,6 @@ void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtInstruction(
last_simulate_ = NULL;
break;
}
- case HValue::kDeoptimize: {
- // Keep all environment slots alive.
- HDeoptimize* deopt = HDeoptimize::cast(instr);
- for (int i = deopt->first_local_index();
- i < deopt->first_expression_index(); ++i) {
- live->Add(i);
- }
- break;
- }
case HValue::kSimulate:
last_simulate_ = HSimulate::cast(instr);
went_live_since_last_simulate_.Clear();
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index 09bea5bb18..9a02a1dcf4 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -401,7 +401,7 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
side_effects.Add(instr->ChangesFlags());
- if (instr->IsSoftDeoptimize()) {
+ if (instr->IsDeoptimize()) {
block_side_effects_[id].RemoveAll();
side_effects.RemoveAll();
break;
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 880de29aca..5fe3af1f5b 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -84,9 +84,9 @@ void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) {
UpdateRepresentation(new_rep, h_infer, "inputs");
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
- new_rep = RepresentationFromUseRequirements();
- if (new_rep.fits_into(Representation::Integer32())) {
- UpdateRepresentation(new_rep, h_infer, "use requirements");
+ if (representation().IsSmi() && HasNonSmiUse()) {
+ UpdateRepresentation(
+ Representation::Integer32(), h_infer, "use requirements");
}
}
@@ -259,34 +259,56 @@ HValue* RangeEvaluationContext::ConvertGuarantee(HValue* guarantee) {
}
-static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
- if (result > kMaxInt) {
- *overflow = true;
- return kMaxInt;
- }
- if (result < kMinInt) {
- *overflow = true;
- return kMinInt;
+static int32_t ConvertAndSetOverflow(Representation r,
+ int64_t result,
+ bool* overflow) {
+ if (r.IsSmi()) {
+ if (result > Smi::kMaxValue) {
+ *overflow = true;
+ return Smi::kMaxValue;
+ }
+ if (result < Smi::kMinValue) {
+ *overflow = true;
+ return Smi::kMinValue;
+ }
+ } else {
+ if (result > kMaxInt) {
+ *overflow = true;
+ return kMaxInt;
+ }
+ if (result < kMinInt) {
+ *overflow = true;
+ return kMinInt;
+ }
}
return static_cast<int32_t>(result);
}
-static int32_t AddWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+static int32_t AddWithoutOverflow(Representation r,
+ int32_t a,
+ int32_t b,
+ bool* overflow) {
int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
+ return ConvertAndSetOverflow(r, result, overflow);
}
-static int32_t SubWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+static int32_t SubWithoutOverflow(Representation r,
+ int32_t a,
+ int32_t b,
+ bool* overflow) {
int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
+ return ConvertAndSetOverflow(r, result, overflow);
}
-static int32_t MulWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+static int32_t MulWithoutOverflow(const Representation& r,
+ int32_t a,
+ int32_t b,
+ bool* overflow) {
int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
- return ConvertAndSetOverflow(result, overflow);
+ return ConvertAndSetOverflow(r, result, overflow);
}
@@ -306,8 +328,9 @@ int32_t Range::Mask() const {
void Range::AddConstant(int32_t value) {
if (value == 0) return;
bool may_overflow = false; // Overflow is ignored here.
- lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
- upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
+ Representation r = Representation::Integer32();
+ lower_ = AddWithoutOverflow(r, lower_, value, &may_overflow);
+ upper_ = AddWithoutOverflow(r, upper_, value, &may_overflow);
#ifdef DEBUG
Verify();
#endif
@@ -366,10 +389,10 @@ void Range::Shl(int32_t value) {
}
-bool Range::AddAndCheckOverflow(Range* other) {
+bool Range::AddAndCheckOverflow(const Representation& r, Range* other) {
bool may_overflow = false;
- lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
- upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
+ lower_ = AddWithoutOverflow(r, lower_, other->lower(), &may_overflow);
+ upper_ = AddWithoutOverflow(r, upper_, other->upper(), &may_overflow);
KeepOrder();
#ifdef DEBUG
Verify();
@@ -378,10 +401,10 @@ bool Range::AddAndCheckOverflow(Range* other) {
}
-bool Range::SubAndCheckOverflow(Range* other) {
+bool Range::SubAndCheckOverflow(const Representation& r, Range* other) {
bool may_overflow = false;
- lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
- upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
+ lower_ = SubWithoutOverflow(r, lower_, other->upper(), &may_overflow);
+ upper_ = SubWithoutOverflow(r, upper_, other->lower(), &may_overflow);
KeepOrder();
#ifdef DEBUG
Verify();
@@ -406,12 +429,12 @@ void Range::Verify() const {
#endif
-bool Range::MulAndCheckOverflow(Range* other) {
+bool Range::MulAndCheckOverflow(const Representation& r, Range* other) {
bool may_overflow = false;
- int v1 = MulWithoutOverflow(lower_, other->lower(), &may_overflow);
- int v2 = MulWithoutOverflow(lower_, other->upper(), &may_overflow);
- int v3 = MulWithoutOverflow(upper_, other->lower(), &may_overflow);
- int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
+ int v1 = MulWithoutOverflow(r, lower_, other->lower(), &may_overflow);
+ int v2 = MulWithoutOverflow(r, lower_, other->upper(), &may_overflow);
+ int v3 = MulWithoutOverflow(r, upper_, other->lower(), &may_overflow);
+ int v4 = MulWithoutOverflow(r, upper_, other->upper(), &may_overflow);
lower_ = Min(Min(v1, v2), Min(v3, v4));
upper_ = Max(Max(v1, v2), Max(v3, v4));
#ifdef DEBUG
@@ -1034,6 +1057,7 @@ void HBoundsCheck::TryGuaranteeRangeChanging(RangeEvaluationContext* context) {
offset_ = context->offset();
SetResponsibilityForRange(DIRECTION_UPPER);
context->set_upper_bound_guarantee(this);
+ isolate()->counters()->bounds_checks_eliminated()->Increment();
} else if (context->upper_bound_guarantee() != NULL &&
context->upper_bound_guarantee() != this &&
context->upper_bound_guarantee()->block() != block() &&
@@ -1043,6 +1067,7 @@ void HBoundsCheck::TryGuaranteeRangeChanging(RangeEvaluationContext* context) {
offset_ = context->offset();
SetResponsibilityForRange(DIRECTION_LOWER);
context->set_lower_bound_guarantee(this);
+ isolate()->counters()->bounds_checks_eliminated()->Increment();
}
}
@@ -1103,7 +1128,7 @@ void HBoundsCheck::AddInformativeDefinitions() {
// is a hack. Move it to some other HPhase.
if (FLAG_array_bounds_checks_elimination) {
if (index()->TryGuaranteeRange(length())) {
- set_skip_check(true);
+ set_skip_check();
}
if (DetectCompoundIndex()) {
HBoundsCheckBaseIndexInformation* base_index_info =
@@ -1429,7 +1454,7 @@ void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
HValue* HBitwise::Canonicalize() {
- if (!representation().IsInteger32()) return this;
+ if (!representation().IsSmiOrInteger32()) return this;
// If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
if (left()->EqualsInteger32Constant(nop_constant) &&
@@ -1549,7 +1574,7 @@ HValue* HUnaryMathOperation::Canonicalize() {
// If the input is integer32 then we replace the floor instruction
// with its input.
- if (val->representation().IsInteger32()) return val;
+ if (val->representation().IsSmiOrInteger32()) return val;
if (val->IsDiv() && (val->UseCount() == 1)) {
HDiv* hdiv = HDiv::cast(val);
@@ -1559,8 +1584,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
HValue* new_left = SimplifiedDividendForMathFloorOfDiv(left);
if (new_left == NULL &&
hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
- new_left = new(block()->zone())
- HChange(left, Representation::Integer32(), false, false);
+ new_left = new(block()->zone()) HChange(
+ left, Representation::Integer32(), false, false, false);
HChange::cast(new_left)->InsertBefore(this);
}
HValue* new_right =
@@ -1570,8 +1595,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
CpuFeatures::IsSupported(SUDIV) &&
#endif
hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
- new_right = new(block()->zone())
- HChange(right, Representation::Integer32(), false, false);
+ new_right = new(block()->zone()) HChange(
+ right, Representation::Integer32(), false, false, false);
HChange::cast(new_right)->InsertBefore(this);
}
@@ -1680,7 +1705,7 @@ void HCheckMaps::PrintDataTo(StringStream* stream) {
for (int i = 1; i < map_set()->length(); ++i) {
stream->Add(",%p", *map_set()->at(i));
}
- stream->Add("]");
+ stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : "");
}
@@ -1741,7 +1766,7 @@ void HInstanceOf::PrintDataTo(StringStream* stream) {
Range* HValue::InferRange(Zone* zone) {
Range* result;
- if (type().IsSmi()) {
+ if (representation().IsSmi() || type().IsSmi()) {
result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue);
result->set_can_be_minus_zero(false);
} else {
@@ -1756,10 +1781,11 @@ Range* HValue::InferRange(Zone* zone) {
Range* HChange::InferRange(Zone* zone) {
Range* input_range = value()->range();
- if (from().IsInteger32() &&
- to().IsSmiOrTagged() &&
- !value()->CheckFlag(HInstruction::kUint32) &&
- input_range != NULL && input_range->IsInSmiRange()) {
+ if (from().IsInteger32() && !value()->CheckFlag(HInstruction::kUint32) &&
+ (to().IsSmi() ||
+ (to().IsTagged() &&
+ input_range != NULL &&
+ input_range->IsInSmiRange()))) {
set_type(HType::Smi());
ClearGVNFlag(kChangesNewSpacePromotion);
}
@@ -1767,7 +1793,9 @@ Range* HChange::InferRange(Zone* zone) {
? input_range->Copy(zone)
: HValue::InferRange(zone);
result->set_can_be_minus_zero(!to().IsSmiOrInteger32() ||
- !CheckFlag(kAllUsesTruncatingToInt32));
+ !(CheckFlag(kAllUsesTruncatingToInt32) ||
+ CheckFlag(kAllUsesTruncatingToSmi)));
+ if (to().IsSmi()) result->ClampToSmi();
return result;
}
@@ -1804,15 +1832,18 @@ Range* HPhi::InferRange(Zone* zone) {
Range* HAdd::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->AddAndCheckOverflow(b) ||
- CheckFlag(kAllUsesTruncatingToInt32)) {
+ if (!res->AddAndCheckOverflow(r, b) ||
+ (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+ (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
ClearFlag(kCanOverflow);
}
- res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
+ !CheckFlag(kAllUsesTruncatingToInt32) &&
a->CanBeMinusZero() && b->CanBeMinusZero());
return res;
} else {
@@ -1822,15 +1853,18 @@ Range* HAdd::InferRange(Zone* zone) {
Range* HSub::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->SubAndCheckOverflow(b) ||
- CheckFlag(kAllUsesTruncatingToInt32)) {
+ if (!res->SubAndCheckOverflow(r, b) ||
+ (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+ (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
ClearFlag(kCanOverflow);
}
- res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
+ !CheckFlag(kAllUsesTruncatingToInt32) &&
a->CanBeMinusZero() && b->CanBeZero());
return res;
} else {
@@ -1840,17 +1874,19 @@ Range* HSub::InferRange(Zone* zone) {
Range* HMul::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->MulAndCheckOverflow(b)) {
+ if (!res->MulAndCheckOverflow(r, b)) {
// Clearing the kCanOverflow flag when kAllUsesAreTruncatingToInt32
// would be wrong, because truncated integer multiplication is too
// precise and therefore not the same as converting to Double and back.
ClearFlag(kCanOverflow);
}
- res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
+ !CheckFlag(kAllUsesTruncatingToInt32) &&
((a->CanBeZero() && b->CanBeNegative()) ||
(a->CanBeNegative() && b->CanBeZero())));
return res;
@@ -1968,8 +2004,452 @@ bool HPhi::IsRelationTrueInternal(NumericRelation relation,
}
+InductionVariableData* InductionVariableData::ExaminePhi(HPhi* phi) {
+ if (phi->block()->loop_information() == NULL) return NULL;
+ if (phi->OperandCount() != 2) return NULL;
+ int32_t candidate_increment;
+
+ candidate_increment = ComputeIncrement(phi, phi->OperandAt(0));
+ if (candidate_increment != 0) {
+ return new(phi->block()->graph()->zone())
+ InductionVariableData(phi, phi->OperandAt(1), candidate_increment);
+ }
+
+ candidate_increment = ComputeIncrement(phi, phi->OperandAt(1));
+ if (candidate_increment != 0) {
+ return new(phi->block()->graph()->zone())
+ InductionVariableData(phi, phi->OperandAt(0), candidate_increment);
+ }
+
+ return NULL;
+}
+
+
+/*
+ * This function tries to match the following patterns (and all the relevant
+ * variants related to |, & and + being commutative):
+ * base | constant_or_mask
+ * base & constant_and_mask
+ * (base + constant_offset) & constant_and_mask
+ * (base - constant_offset) & constant_and_mask
+ */
+void InductionVariableData::DecomposeBitwise(
+ HValue* value,
+ BitwiseDecompositionResult* result) {
+ HValue* base = IgnoreOsrValue(value);
+ result->base = value;
+
+ if (!base->representation().IsInteger32()) return;
+
+ if (base->IsBitwise()) {
+ bool allow_offset = false;
+ int32_t mask = 0;
+
+ HBitwise* bitwise = HBitwise::cast(base);
+ if (bitwise->right()->IsInteger32Constant()) {
+ mask = bitwise->right()->GetInteger32Constant();
+ base = bitwise->left();
+ } else if (bitwise->left()->IsInteger32Constant()) {
+ mask = bitwise->left()->GetInteger32Constant();
+ base = bitwise->right();
+ } else {
+ return;
+ }
+ if (bitwise->op() == Token::BIT_AND) {
+ result->and_mask = mask;
+ allow_offset = true;
+ } else if (bitwise->op() == Token::BIT_OR) {
+ result->or_mask = mask;
+ } else {
+ return;
+ }
+
+ result->context = bitwise->context();
+
+ if (allow_offset) {
+ if (base->IsAdd()) {
+ HAdd* add = HAdd::cast(base);
+ if (add->right()->IsInteger32Constant()) {
+ base = add->left();
+ } else if (add->left()->IsInteger32Constant()) {
+ base = add->right();
+ }
+ } else if (base->IsSub()) {
+ HSub* sub = HSub::cast(base);
+ if (sub->right()->IsInteger32Constant()) {
+ base = sub->left();
+ }
+ }
+ }
+
+ result->base = base;
+ }
+}
+
+
+void InductionVariableData::AddCheck(HBoundsCheck* check,
+ int32_t upper_limit) {
+ ASSERT(limit_validity() != NULL);
+ if (limit_validity() != check->block() &&
+ !limit_validity()->Dominates(check->block())) return;
+ if (!phi()->block()->current_loop()->IsNestedInThisLoop(
+ check->block()->current_loop())) return;
+
+ ChecksRelatedToLength* length_checks = checks();
+ while (length_checks != NULL) {
+ if (length_checks->length() == check->length()) break;
+ length_checks = length_checks->next();
+ }
+ if (length_checks == NULL) {
+ length_checks = new(check->block()->zone())
+ ChecksRelatedToLength(check->length(), checks());
+ checks_ = length_checks;
+ }
+
+ length_checks->AddCheck(check, upper_limit);
+}
+
+
+void InductionVariableData::ChecksRelatedToLength::CloseCurrentBlock() {
+ if (checks() != NULL) {
+ InductionVariableCheck* c = checks();
+ HBasicBlock* current_block = c->check()->block();
+ while (c != NULL && c->check()->block() == current_block) {
+ c->set_upper_limit(current_upper_limit_);
+ c = c->next();
+ }
+ }
+}
+
+
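+// Rewrites the index of the first bounds check in the current block to
+// "index_base <token> mask" (for example "i & 0xff"), reusing the HBitwise
+// and HConstant already inserted for this block when possible.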
+void InductionVariableData::ChecksRelatedToLength::UseNewIndexInCurrentBlock(
+ Token::Value token,
+ int32_t mask,
+ HValue* index_base,
+ HValue* context) {
+ ASSERT(first_check_in_block() != NULL);
+ HValue* previous_index = first_check_in_block()->index();
+ ASSERT(context != NULL);
+
+ set_added_constant(new(index_base->block()->graph()->zone()) HConstant(
+ mask, index_base->representation()));
+ if (added_index() != NULL) {
+ added_constant()->InsertBefore(added_index());
+ } else {
+ added_constant()->InsertBefore(first_check_in_block());
+ }
+
+ if (added_index() == NULL) {
+ first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index());
+ HInstruction* new_index = HBitwise::New(
+ index_base->block()->graph()->zone(),
+ token, context, index_base, added_constant());
+ ASSERT(new_index->IsBitwise());
+ new_index->ClearAllSideEffects();
+ new_index->AssumeRepresentation(Representation::Integer32());
+ set_added_index(HBitwise::cast(new_index));
+ added_index()->InsertBefore(first_check_in_block());
+ }
+ ASSERT(added_index()->op() == token);
+
+ added_index()->SetOperandAt(1, index_base);
+ added_index()->SetOperandAt(2, added_constant());
+ first_check_in_block()->SetOperandAt(0, added_index());
+ if (previous_index->UseCount() == 0) {
+ previous_index->DeleteAndReplaceWith(NULL);
+ }
+}
+
+void InductionVariableData::ChecksRelatedToLength::AddCheck(
+ HBoundsCheck* check,
+ int32_t upper_limit) {
+ BitwiseDecompositionResult decomposition;
+ InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
+
+ if (first_check_in_block() == NULL ||
+ first_check_in_block()->block() != check->block()) {
+ CloseCurrentBlock();
+
+ first_check_in_block_ = check;
+ set_added_index(NULL);
+ set_added_constant(NULL);
+ current_and_mask_in_block_ = decomposition.and_mask;
+ current_or_mask_in_block_ = decomposition.or_mask;
+ current_upper_limit_ = upper_limit;
+
+ InductionVariableCheck* new_check = new(check->block()->graph()->zone())
+ InductionVariableCheck(check, checks_, upper_limit);
+ checks_ = new_check;
+ return;
+ }
+
+ if (upper_limit > current_upper_limit()) {
+ current_upper_limit_ = upper_limit;
+ }
+
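+ // If this check's index only differs from the block's shared index by a
+ // wider AND mask (or a larger OR mask), widen the shared index instead
+ // and mark this check as redundant.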
+ if (decomposition.and_mask != 0 &&
+ current_or_mask_in_block() == 0) {
+ if (current_and_mask_in_block() == 0 ||
+ decomposition.and_mask > current_and_mask_in_block()) {
+ UseNewIndexInCurrentBlock(Token::BIT_AND,
+ decomposition.and_mask,
+ decomposition.base,
+ decomposition.context);
+ current_and_mask_in_block_ = decomposition.and_mask;
+ }
+ check->set_skip_check();
+ }
+ if (current_and_mask_in_block() == 0) {
+ if (decomposition.or_mask > current_or_mask_in_block()) {
+ UseNewIndexInCurrentBlock(Token::BIT_OR,
+ decomposition.or_mask,
+ decomposition.base,
+ decomposition.context);
+ current_or_mask_in_block_ = decomposition.or_mask;
+ }
+ check->set_skip_check();
+ }
+
+ if (!check->skip_check()) {
+ InductionVariableCheck* new_check = new(check->block()->graph()->zone())
+ InductionVariableCheck(check, checks_, upper_limit);
+ checks_ = new_check;
+ }
+}
+
+
+/*
+ * This method detects if phi is an induction variable, with phi_operand as
+ * its "incremented" value (the other operand would be the "base" value).
+ *
+ * It checks if phi_operand has the form "phi + constant".
+ * If yes, the constant is the increment that the induction variable gets at
+ * every loop iteration.
+ * Otherwise it returns 0.
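+ *
+ * For example, a phi_operand of the form "phi + 1" yields an increment of 1
+ * and "phi - 2" yields -2; any other shape yields 0.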
+ */
+int32_t InductionVariableData::ComputeIncrement(HPhi* phi,
+ HValue* phi_operand) {
+ if (!phi_operand->representation().IsInteger32()) return 0;
+
+ if (phi_operand->IsAdd()) {
+ HAdd* operation = HAdd::cast(phi_operand);
+ if (operation->left() == phi &&
+ operation->right()->IsInteger32Constant()) {
+ return operation->right()->GetInteger32Constant();
+ } else if (operation->right() == phi &&
+ operation->left()->IsInteger32Constant()) {
+ return operation->left()->GetInteger32Constant();
+ }
+ } else if (phi_operand->IsSub()) {
+ HSub* operation = HSub::cast(phi_operand);
+ if (operation->left() == phi &&
+ operation->right()->IsInteger32Constant()) {
+ return -operation->right()->GetInteger32Constant();
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Swaps the information in "update" with the one contained in "this".
+ * The swapping is important because this method is used while doing a
+ * dominator tree traversal, and "update" will retain the old data that
+ * will be restored while backtracking.
+ */
+void InductionVariableData::UpdateAdditionalLimit(
+ InductionVariableLimitUpdate* update) {
+ ASSERT(update->updated_variable == this);
+ if (update->limit_is_upper) {
+ swap(&additional_upper_limit_, &update->limit);
+ swap(&additional_upper_limit_is_included_, &update->limit_is_included);
+ } else {
+ swap(&additional_lower_limit_, &update->limit);
+ swap(&additional_lower_limit_is_included_, &update->limit_is_included);
+ }
+}
+
+
+int32_t InductionVariableData::ComputeUpperLimit(int32_t and_mask,
+ int32_t or_mask) {
+ // Should be Smi::kMaxValue, but it must fit in 32 bits; lower is safe anyway.
+ const int32_t MAX_LIMIT = 1 << 30;
+
+ int32_t result = MAX_LIMIT;
+
+ if (limit() != NULL &&
+ limit()->IsInteger32Constant()) {
+ int32_t limit_value = limit()->GetInteger32Constant();
+ if (!limit_included()) {
+ limit_value--;
+ }
+ if (limit_value < result) result = limit_value;
+ }
+
+ if (additional_upper_limit() != NULL &&
+ additional_upper_limit()->IsInteger32Constant()) {
+ int32_t limit_value = additional_upper_limit()->GetInteger32Constant();
+ if (!additional_upper_limit_is_included()) {
+ limit_value--;
+ }
+ if (limit_value < result) result = limit_value;
+ }
+
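+ // A positive and_mask bounds the value directly: for example, "i & 0xff"
+ // can never exceed 0xff, whatever the loop limit is.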
+ if (and_mask > 0 && and_mask < MAX_LIMIT) {
+ if (and_mask < result) result = and_mask;
+ return result;
+ }
+
+ // Add the effect of the or_mask.
+ result |= or_mask;
+
+ return result >= MAX_LIMIT ? kNoLimit : result;
+}
+
+
+HValue* InductionVariableData::IgnoreOsrValue(HValue* v) {
+ if (!v->IsPhi()) return v;
+ HPhi* phi = HPhi::cast(v);
+ if (phi->OperandCount() != 2) return v;
+ if (phi->OperandAt(0)->block()->is_osr_entry()) {
+ return phi->OperandAt(1);
+ } else if (phi->OperandAt(1)->block()->is_osr_entry()) {
+ return phi->OperandAt(0);
+ } else {
+ return v;
+ }
+}
+
+
+InductionVariableData* InductionVariableData::GetInductionVariableData(
+ HValue* v) {
+ v = IgnoreOsrValue(v);
+ if (v->IsPhi()) {
+ return HPhi::cast(v)->induction_variable_data();
+ }
+ return NULL;
+}
+
+
+/*
+ * Check if a conditional branch to "current_branch" with token "token" is
+ * the branch that keeps the induction loop running (and, conversely, will
+ * terminate it if the "other_branch" is taken).
+ *
+ * Three conditions must be met:
+ * - "current_branch" must be in the induction loop.
+ * - "other_branch" must be out of the induction loop.
+ * - "token" and the induction increment must be "compatible": the token should
+ * be a condition that keeps the execution inside the loop until the limit is
+ * reached.
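+ *
+ * For example, with increment 1 and token LT (as in "for (i = 0; i < n; i++)")
+ * the branch into the loop body is the guard, and the other branch exits.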
+ */
+bool InductionVariableData::CheckIfBranchIsLoopGuard(
+ Token::Value token,
+ HBasicBlock* current_branch,
+ HBasicBlock* other_branch) {
+ if (!phi()->block()->current_loop()->IsNestedInThisLoop(
+ current_branch->current_loop())) {
+ return false;
+ }
+
+ if (phi()->block()->current_loop()->IsNestedInThisLoop(
+ other_branch->current_loop())) {
+ return false;
+ }
+
+ if (increment() > 0 && (token == Token::LT || token == Token::LTE)) {
+ return true;
+ }
+ if (increment() < 0 && (token == Token::GT || token == Token::GTE)) {
+ return true;
+ }
+ if (Token::IsInequalityOp(token) && (increment() == 1 || increment() == -1)) {
+ return true;
+ }
+
+ return false;
+}
+
+
+void InductionVariableData::ComputeLimitFromPredecessorBlock(
+ HBasicBlock* block,
+ LimitFromPredecessorBlock* result) {
+ if (block->predecessors()->length() != 1) return;
+ HBasicBlock* predecessor = block->predecessors()->at(0);
+ HInstruction* end = predecessor->last();
+
+ if (!end->IsCompareNumericAndBranch()) return;
+ HCompareNumericAndBranch* branch = HCompareNumericAndBranch::cast(end);
+
+ Token::Value token = branch->token();
+ if (!Token::IsArithmeticCompareOp(token)) return;
+
+ HBasicBlock* other_target;
+ if (block == branch->SuccessorAt(0)) {
+ other_target = branch->SuccessorAt(1);
+ } else {
+ other_target = branch->SuccessorAt(0);
+ token = Token::NegateCompareOp(token);
+ ASSERT(block == branch->SuccessorAt(1));
+ }
+
+ InductionVariableData* data;
+
+ data = GetInductionVariableData(branch->left());
+ HValue* limit = branch->right();
+ if (data == NULL) {
+ data = GetInductionVariableData(branch->right());
+ token = Token::ReverseCompareOp(token);
+ limit = branch->left();
+ }
+
+ if (data != NULL) {
+ result->variable = data;
+ result->token = token;
+ result->limit = limit;
+ result->other_target = other_target;
+ }
+}
+
+
+/*
+ * Compute the limit that is imposed on an induction variable when entering
+ * "block" (if any).
+ * If the limit is the "proper" induction limit (the one that makes the loop
+ * terminate when the induction variable reaches it), it is stored directly in
+ * the induction variable data.
+ * Otherwise the limit is written in "additional_limit" and the method
+ * returns true.
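+ *
+ * For example, the loop guard "i < n" stores n as the proper (not included)
+ * limit, while a comparison that does not terminate the loop only produces
+ * an additional limit.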
+ */
+bool InductionVariableData::ComputeInductionVariableLimit(
+ HBasicBlock* block,
+ InductionVariableLimitUpdate* additional_limit) {
+ LimitFromPredecessorBlock limit;
+ ComputeLimitFromPredecessorBlock(block, &limit);
+ if (!limit.LimitIsValid()) return false;
+
+ if (limit.variable->CheckIfBranchIsLoopGuard(limit.token,
+ block,
+ limit.other_target)) {
+ limit.variable->limit_ = limit.limit;
+ limit.variable->limit_included_ = limit.LimitIsIncluded();
+ limit.variable->limit_validity_ = block;
+ limit.variable->induction_exit_block_ = block->predecessors()->at(0);
+ limit.variable->induction_exit_target_ = limit.other_target;
+ return false;
+ } else {
+ additional_limit->updated_variable = limit.variable;
+ additional_limit->limit = limit.limit;
+ additional_limit->limit_is_upper = limit.LimitIsUpper();
+ additional_limit->limit_is_included = limit.LimitIsIncluded();
+ return true;
+ }
+}
+
+
Range* HMathMinMax::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
+ if (representation().IsSmiOrInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
@@ -2054,6 +2534,7 @@ void HPhi::InitRealUses(int phi_id) {
// Compute a conservative approximation of truncating uses before inferring
// representations. The proper, exact computation will be done later, when
// inserting representation changes.
+ SetFlag(kTruncatingToSmi);
SetFlag(kTruncatingToInt32);
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
@@ -2064,8 +2545,13 @@ void HPhi::InitRealUses(int phi_id) {
PrintF("#%d Phi is used by real #%d %s as %s\n",
id(), value->id(), value->Mnemonic(), rep.Mnemonic());
}
- if (!value->IsSimulate() && !value->CheckFlag(kTruncatingToInt32)) {
- ClearFlag(kTruncatingToInt32);
+ if (!value->IsSimulate()) {
+ if (!value->CheckFlag(kTruncatingToSmi)) {
+ ClearFlag(kTruncatingToSmi);
+ }
+ if (!value->CheckFlag(kTruncatingToInt32)) {
+ ClearFlag(kTruncatingToInt32);
+ }
}
}
}
@@ -2136,16 +2622,6 @@ void HSimulate::PrintDataTo(StringStream* stream) {
}
-void HDeoptimize::PrintDataTo(StringStream* stream) {
- if (OperandCount() == 0) return;
- OperandAt(0)->PrintNameTo(stream);
- for (int i = 1; i < OperandCount(); ++i) {
- stream->Add(" ");
- OperandAt(i)->PrintNameTo(stream);
- }
-}
-
-
void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
Zone* zone) {
ASSERT(return_target->IsInlineReturnTarget());
@@ -2262,7 +2738,7 @@ HConstant::HConstant(double double_value,
void HConstant::Initialize(Representation r) {
if (r.IsNone()) {
- if (has_smi_value_) {
+ if (has_smi_value_ && kSmiValueSize == 31) {
r = Representation::Smi();
} else if (has_int32_value_) {
r = Representation::Integer32();
@@ -2310,20 +2786,38 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
}
-HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
+Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
+ HConstant* res = NULL;
if (has_int32_value_) {
- return new(zone) HConstant(int32_value_,
- Representation::Integer32(),
- is_not_in_new_space_,
- handle_);
+ res = new(zone) HConstant(int32_value_,
+ Representation::Integer32(),
+ is_not_in_new_space_,
+ handle_);
+ } else if (has_double_value_) {
+ res = new(zone) HConstant(DoubleToInt32(double_value_),
+ Representation::Integer32(),
+ is_not_in_new_space_,
+ handle_);
+ } else {
+ ASSERT(!HasNumberValue());
+ Maybe<HConstant*> number = CopyToTruncatedNumber(zone);
+ if (number.has_value) return number.value->CopyToTruncatedInt32(zone);
}
- if (has_double_value_) {
- return new(zone) HConstant(DoubleToInt32(double_value_),
- Representation::Integer32(),
- is_not_in_new_space_,
- handle_);
+ return Maybe<HConstant*>(res != NULL, res);
+}
+
+
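+// Converts non-number constants the way ToNumber would: true -> 1,
+// false -> 0, undefined -> NaN and null -> 0.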
+Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Zone* zone) {
+ HConstant* res = NULL;
+ if (handle()->IsBoolean()) {
+ res = handle()->BooleanValue() ?
+ new(zone) HConstant(1) : new(zone) HConstant(0);
+ } else if (handle()->IsUndefined()) {
+ res = new(zone) HConstant(OS::nan_value());
+ } else if (handle()->IsNull()) {
+ res = new(zone) HConstant(0);
}
- return NULL;
+ return Maybe<HConstant*>(res != NULL, res);
}
@@ -2351,25 +2845,18 @@ void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
- // When the operation has information about its own output type, don't look
- // at uses.
- if (!observed_output_representation_.IsNone()) return;
- new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
- new_rep = RepresentationFromUseRequirements();
- if (new_rep.fits_into(Representation::Integer32())) {
- UpdateRepresentation(new_rep, h_infer, "use requirements");
+ if (observed_output_representation_.IsNone()) {
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+ } else {
+ new_rep = RepresentationFromOutput();
+ UpdateRepresentation(new_rep, h_infer, "output");
}
-}
-
-bool HBinaryOperation::IgnoreObservedOutputRepresentation(
- Representation current_rep) {
- return observed_output_representation_.IsDouble() &&
- current_rep.IsInteger32() &&
- // Mul in Integer32 mode would be too precise.
- !this->IsMul() &&
- CheckUsesForFlag(kTruncatingToInt32);
+ if (representation().IsSmi() && HasNonSmiUse()) {
+ UpdateRepresentation(
+ Representation::Integer32(), h_infer, "use requirements");
+ }
}
@@ -2378,28 +2865,38 @@ Representation HBinaryOperation::RepresentationFromInputs() {
// the currently assumed output representation.
Representation rep = representation();
for (int i = 1; i <= 2; ++i) {
- Representation input_rep = observed_input_representation(i);
- if (input_rep.is_more_general_than(rep)) rep = input_rep;
+ rep = rep.generalize(observed_input_representation(i));
}
// If any of the actual input representation is more general than what we
// have so far but not Tagged, use that representation instead.
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
+ if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
+ if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
- if (left_rep.is_more_general_than(rep) && !left_rep.IsTagged()) {
- rep = left_rep;
- }
- if (right_rep.is_more_general_than(rep) && !right_rep.IsTagged()) {
- rep = right_rep;
- }
+ return rep;
+}
+
+
+bool HBinaryOperation::IgnoreObservedOutputRepresentation(
+ Representation current_rep) {
+ return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) ||
+ (current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) &&
+ // Mul in Integer32 mode would be too precise.
+ !this->IsMul();
+}
+
+
+Representation HBinaryOperation::RepresentationFromOutput() {
+ Representation rep = representation();
// Consider observed output representation, but ignore it if it's Double,
// this instruction is not a division, and all its uses are truncating
// to Integer32.
if (observed_output_representation_.is_more_general_than(rep) &&
!IgnoreObservedOutputRepresentation(rep)) {
- rep = observed_output_representation_;
+ return observed_output_representation_;
}
- return rep;
+ return Representation::None();
}
@@ -2715,7 +3212,7 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
types_.Add(types->at(i), zone);
break;
}
- case CONSTANT_FUNCTION:
+ case CONSTANT:
types_.Add(types->at(i), zone);
break;
case CALLBACKS:
@@ -2757,6 +3254,55 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
}
+HCheckMaps* HCheckMaps::New(HValue* value,
+ Handle<Map> map,
+ Zone* zone,
+ CompilationInfo* info,
+ HValue* typecheck) {
+ HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
+ check_map->map_set_.Add(map, zone);
+ if (map->CanOmitMapChecks() &&
+ value->IsConstant() &&
+ HConstant::cast(value)->InstanceOf(map)) {
+ check_map->omit(info);
+ }
+ return check_map;
+}
+
+
+HCheckMaps* HCheckMaps::NewWithTransitions(HValue* value,
+ Handle<Map> map,
+ Zone* zone,
+ CompilationInfo* info) {
+ HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, value);
+ check_map->map_set_.Add(map, zone);
+
+ // Since transitioned elements maps of the initial map don't fail the map
+ // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
+ check_map->ClearGVNFlag(kDependsOnElementsKind);
+
+ ElementsKind kind = map->elements_kind();
+ bool packed = IsFastPackedElementsKind(kind);
+ while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
+ kind = GetNextMoreGeneralFastElementsKind(kind, packed);
+ Map* transitioned_map =
+ map->LookupElementsTransitionMap(kind);
+ if (transitioned_map) {
+ check_map->map_set_.Add(Handle<Map>(transitioned_map), zone);
+ }
+ }
+
+ if (map->CanOmitMapChecks() &&
+ value->IsConstant() &&
+ HConstant::cast(value)->InstanceOf(map)) {
+ check_map->omit(info);
+ }
+
+ check_map->map_set_.Sort();
+ return check_map;
+}
+
+
void HCheckMaps::FinalizeUniqueValueId() {
if (!map_unique_ids_.is_empty()) return;
Zone* zone = block()->zone();
@@ -3187,11 +3733,6 @@ HType HStringCharFromCode::CalculateInferredType() {
}
-HType HAllocate::CalculateInferredType() {
- return type_;
-}
-
-
void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
ASSERT(side_effect == kChangesNewSpacePromotion);
@@ -3210,12 +3751,9 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator_size = dominator_allocate_instr->size();
HValue* current_size = size();
// We can just fold allocations that are guaranteed in new space.
- // TODO(hpayer): Support double aligned allocations.
// TODO(hpayer): Add support for non-constant allocation in dominator.
- if (!GuaranteedInNewSpace() || MustAllocateDoubleAligned() ||
- !current_size->IsInteger32Constant() ||
+ if (!GuaranteedInNewSpace() || !current_size->IsInteger32Constant() ||
!dominator_allocate_instr->GuaranteedInNewSpace() ||
- dominator_allocate_instr->MustAllocateDoubleAligned() ||
!dominator_size->IsInteger32Constant()) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s)\n",
@@ -3229,43 +3767,37 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HConstant::cast(dominator_size)->GetInteger32Constant();
int32_t current_size_constant =
HConstant::cast(current_size)->GetInteger32Constant();
+ int32_t new_dominator_size = dominator_size_constant + current_size_constant;
+
+ if (MustAllocateDoubleAligned()) {
+ if (!dominator_allocate_instr->MustAllocateDoubleAligned()) {
+ dominator_allocate_instr->SetFlags(HAllocate::ALLOCATE_DOUBLE_ALIGNED);
+ }
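+ // If the dominator's size is not double-aligned, pad it so that the
+ // folded allocation starts on a double-aligned boundary.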
+ if ((dominator_size_constant & kDoubleAlignmentMask) != 0) {
+ dominator_size_constant += kDoubleSize / 2;
+ new_dominator_size += kDoubleSize / 2;
+ }
+ }
+
+ if (new_dominator_size > Page::kMaxNonCodeHeapObjectSize) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic(),
+ new_dominator_size);
+ }
+ return;
+ }
HBasicBlock* block = dominator->block();
Zone* zone = block->zone();
- HInstruction* new_dominator_size = new(zone) HConstant(
- dominator_size_constant + current_size_constant);
- new_dominator_size->InsertBefore(dominator_allocate_instr);
- dominator_allocate_instr->UpdateSize(new_dominator_size);
+ HInstruction* new_dominator_size_constant = new(zone) HConstant(
+ new_dominator_size);
+ new_dominator_size_constant->InsertBefore(dominator_allocate_instr);
+ dominator_allocate_instr->UpdateSize(new_dominator_size_constant);
#ifdef VERIFY_HEAP
- HInstruction* free_space_instr =
- new(zone) HInnerAllocatedObject(dominator_allocate_instr,
- dominator_size_constant,
- type());
- free_space_instr->InsertAfter(dominator_allocate_instr);
- HConstant* filler_map = new(zone) HConstant(
- isolate()->factory()->free_space_map(),
- UniqueValueId(isolate()->heap()->free_space_map()),
- Representation::Tagged(),
- HType::Tagged(),
- false,
- true,
- false,
- false);
- filler_map->InsertAfter(free_space_instr);
-
- HInstruction* store_map = new(zone) HStoreNamedField(
- free_space_instr, HObjectAccess::ForMap(), filler_map);
- store_map->SetFlag(HValue::kHasNoObservableSideEffects);
- store_map->InsertAfter(filler_map);
-
- HInstruction* free_space_size = new(zone) HConstant(current_size_constant);
- free_space_size->InsertAfter(store_map);
- HObjectAccess access =
- HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset);
- HInstruction* store_size = new(zone) HStoreNamedField(
- free_space_instr, access, free_space_size);
- store_size->SetFlag(HValue::kHasNoObservableSideEffects);
- store_size->InsertAfter(free_space_size);
+ if (FLAG_verify_heap) {
+ dominator_allocate_instr->SetFlags(HAllocate::PREFILL_WITH_FILLER);
+ }
#endif
// After that replace the dominated allocate instruction.
@@ -3301,29 +3833,28 @@ HType HFunctionLiteral::CalculateInferredType() {
HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
BitVector* visited) {
visited->Add(id());
- if (representation().IsInteger32() &&
- !value()->representation().IsInteger32()) {
+ if (representation().IsSmiOrInteger32() &&
+ !value()->representation().Equals(representation())) {
if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
SetFlag(kBailoutOnMinusZero);
}
}
- if (RequiredInputRepresentation(0).IsInteger32() &&
- representation().IsInteger32()) {
+ if (RequiredInputRepresentation(0).IsSmiOrInteger32() &&
+ representation().Equals(RequiredInputRepresentation(0))) {
return value();
}
return NULL;
}
-
HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
visited->Add(id());
- if (from().IsInteger32()) return NULL;
+ if (from().IsSmiOrInteger32()) return NULL;
if (CanTruncateToInt32()) return NULL;
if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
SetFlag(kBailoutOnMinusZero);
}
- ASSERT(!from().IsInteger32() || !to().IsInteger32());
+ ASSERT(!from().IsSmiOrInteger32() || !to().IsSmiOrInteger32());
return NULL;
}
@@ -3410,7 +3941,7 @@ bool HStoreKeyed::NeedsCanonicalization() {
}
if (value()->IsChange()) {
- if (HChange::cast(value())->from().IsInteger32()) {
+ if (HChange::cast(value())->from().IsSmiOrInteger32()) {
return false;
}
if (HChange::cast(value())->value()->type().IsSmi()) {
@@ -3421,8 +3952,8 @@ bool HStoreKeyed::NeedsCanonicalization() {
}
-#define H_CONSTANT_INT32(val) \
-new(zone) HConstant(static_cast<int32_t>(val), Representation::Integer32())
+#define H_CONSTANT_INT(val) \
+new(zone) HConstant(static_cast<int32_t>(val))
#define H_CONSTANT_DOUBLE(val) \
new(zone) HConstant(static_cast<double>(val), Representation::Double())
@@ -3435,7 +3966,7 @@ HInstruction* HInstr::New( \
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
if (TypeInfo::IsInt32Double(double_res)) { \
- return H_CONSTANT_INT32(double_res); \
+ return H_CONSTANT_INT(double_res); \
} \
return H_CONSTANT_DOUBLE(double_res); \
} \
@@ -3634,7 +4165,7 @@ HInstruction* HMod::New(Zone* zone,
if ((res == 0) && (dividend < 0)) {
return H_CONSTANT_DOUBLE(-0.0);
}
- return H_CONSTANT_INT32(res);
+ return H_CONSTANT_INT(res);
}
}
}
@@ -3652,7 +4183,7 @@ HInstruction* HDiv::New(
if (c_right->DoubleValue() != 0) {
double double_res = c_left->DoubleValue() / c_right->DoubleValue();
if (TypeInfo::IsInt32Double(double_res)) {
- return H_CONSTANT_INT32(double_res);
+ return H_CONSTANT_INT(double_res);
}
return H_CONSTANT_DOUBLE(double_res);
} else {
@@ -3689,7 +4220,7 @@ HInstruction* HBitwise::New(
result = 0; // Please the compiler.
UNREACHABLE();
}
- return H_CONSTANT_INT32(result);
+ return H_CONSTANT_INT(result);
}
}
return new(zone) HBitwise(op, context, left, right);
@@ -3703,7 +4234,7 @@ HInstruction* HInstr::New( \
HConstant* c_left = HConstant::cast(left); \
HConstant* c_right = HConstant::cast(right); \
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- return H_CONSTANT_INT32(result); \
+ return H_CONSTANT_INT(result); \
} \
} \
return new(zone) HInstr(context, left, right); \
@@ -3729,14 +4260,14 @@ HInstruction* HShr::New(
if ((right_val == 0) && (left_val < 0)) {
return H_CONSTANT_DOUBLE(static_cast<uint32_t>(left_val));
}
- return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val);
+ return H_CONSTANT_INT(static_cast<uint32_t>(left_val) >> right_val);
}
}
return new(zone) HShr(context, left, right);
}
-#undef H_CONSTANT_INT32
+#undef H_CONSTANT_INT
#undef H_CONSTANT_DOUBLE
@@ -3761,8 +4292,7 @@ void HPhi::SimplifyConstantInputs() {
continue;
} else if (operand->HasDoubleValue()) {
HConstant* integer_input =
- new(graph->zone()) HConstant(DoubleToInt32(operand->DoubleValue()),
- Representation::Integer32());
+ new(graph->zone()) HConstant(DoubleToInt32(operand->DoubleValue()));
integer_input->InsertAfter(operand);
SetOperandAt(i, integer_input);
} else if (operand == graph->GetConstantTrue()) {
@@ -3777,7 +4307,7 @@ void HPhi::SimplifyConstantInputs() {
HValue* use = it.value();
if (use->IsBinaryOperation()) {
HBinaryOperation::cast(use)->set_observed_input_representation(
- it.index(), Representation::Integer32());
+ it.index(), Representation::Smi());
}
}
}
@@ -3826,6 +4356,17 @@ Representation HValue::RepresentationFromUseRequirements() {
}
+bool HValue::HasNonSmiUse() {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ // We check for observed_input_representation elsewhere.
+ Representation use_rep =
+ it.value()->RequiredInputRepresentation(it.index());
+ if (!use_rep.IsNone() && !use_rep.IsSmi()) return true;
+ }
+ return false;
+}
+
+
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
@@ -3869,7 +4410,8 @@ HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
}
-HObjectAccess HObjectAccess::ForJSObjectOffset(int offset) {
+HObjectAccess HObjectAccess::ForJSObjectOffset(int offset,
+ Representation representation) {
ASSERT(offset >= 0);
Portion portion = kInobject;
@@ -3878,7 +4420,7 @@ HObjectAccess HObjectAccess::ForJSObjectOffset(int offset) {
} else if (offset == JSObject::kMapOffset) {
portion = kMaps;
}
- return HObjectAccess(portion, offset, Handle<String>::null());
+ return HObjectAccess(portion, offset, representation);
}
@@ -3893,13 +4435,14 @@ HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
} else if (offset == JSObject::kMapOffset) {
portion = kMaps;
}
- return HObjectAccess(portion, offset, Handle<String>::null());
+ return HObjectAccess(portion, offset);
}
-HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset) {
+HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
+ Representation representation) {
ASSERT(offset >= 0);
- return HObjectAccess(kBackingStore, offset, Handle<String>::null());
+ return HObjectAccess(kBackingStore, offset, representation);
}
@@ -3907,30 +4450,35 @@ HObjectAccess HObjectAccess::ForField(Handle<Map> map,
LookupResult *lookup, Handle<String> name) {
ASSERT(lookup->IsField() || lookup->IsTransitionToField(*map));
int index;
+ Representation representation;
if (lookup->IsField()) {
index = lookup->GetLocalFieldIndexFromMap(*map);
+ representation = lookup->representation();
} else {
Map* transition = lookup->GetTransitionMapFromMap(*map);
int descriptor = transition->LastAdded();
index = transition->instance_descriptors()->GetFieldIndex(descriptor) -
map->inobject_properties();
+ PropertyDetails details =
+ transition->instance_descriptors()->GetDetails(descriptor);
+ representation = details.representation();
}
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
int offset = (index * kPointerSize) + map->instance_size();
- return HObjectAccess(kInobject, offset);
+ return HObjectAccess(kInobject, offset, representation);
} else {
// Non-negative property indices are in the properties array.
int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return HObjectAccess(kBackingStore, offset, name);
+ return HObjectAccess(kBackingStore, offset, representation, name);
}
}
HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
return HObjectAccess(
- kInobject, Cell::kValueOffset,
+ kInobject, Cell::kValueOffset, Representation::Tagged(),
Handle<String>(isolate->heap()->cell_value_string()));
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 5fba5f2c63..40bbc90245 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -33,6 +33,7 @@
#include "allocation.h"
#include "code-stubs.h"
#include "data-flow.h"
+#include "deoptimizer.h"
#include "small-pointer-list.h"
#include "string-stream.h"
#include "v8conversions.h"
@@ -91,17 +92,18 @@ class LChunkBuilder;
V(CheckHeapObject) \
V(CheckInstanceType) \
V(CheckMaps) \
- V(CheckSmi) \
+ V(CheckMapValue) \
V(CheckPrototypeMaps) \
+ V(CheckSmi) \
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
V(CompareGeneric) \
V(CompareObjectEqAndBranch) \
V(CompareMap) \
- V(CompareConstantEqAndBranch) \
V(Constant) \
V(Context) \
+ V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -111,6 +113,8 @@ class LChunkBuilder;
V(EnterInlined) \
V(EnvironmentMarker) \
V(ForceRepresentation) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -134,6 +138,7 @@ class LChunkBuilder;
V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
@@ -162,7 +167,6 @@ class LChunkBuilder;
V(Shl) \
V(Shr) \
V(Simulate) \
- V(SoftDeoptimize) \
V(StackCheck) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
@@ -188,11 +192,6 @@ class LChunkBuilder;
V(UnknownOSRValue) \
V(UseConst) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
V(WrapReceiver)
#define GVN_TRACKED_FLAG_LIST(V) \
@@ -200,19 +199,20 @@ class LChunkBuilder;
V(NewSpacePromotion)
#define GVN_UNTRACKED_FLAG_LIST(V) \
- V(Calls) \
- V(InobjectFields) \
+ V(ArrayElements) \
+ V(ArrayLengths) \
V(BackingStoreFields) \
+ V(Calls) \
+ V(ContextSlots) \
+ V(DoubleArrayElements) \
V(DoubleFields) \
V(ElementsKind) \
V(ElementsPointer) \
- V(ArrayElements) \
- V(DoubleArrayElements) \
- V(SpecializedArrayElements) \
V(GlobalVars) \
- V(ArrayLengths) \
- V(ContextSlots) \
- V(OsrEntries)
+ V(InobjectFields) \
+ V(OsrEntries) \
+ V(SpecializedArrayElements)
+
#define DECLARE_ABSTRACT_INSTRUCTION(type) \
virtual bool Is##type() const { return true; } \
@@ -294,9 +294,9 @@ class Range: public ZoneObject {
void AddConstant(int32_t value);
void Sar(int32_t value);
void Shl(int32_t value);
- bool AddAndCheckOverflow(Range* other);
- bool SubAndCheckOverflow(Range* other);
- bool MulAndCheckOverflow(Range* other);
+ bool AddAndCheckOverflow(const Representation& r, Range* other);
+ bool SubAndCheckOverflow(const Representation& r, Range* other);
+ bool MulAndCheckOverflow(const Representation& r, Range* other);
private:
int32_t lower_;
@@ -407,6 +407,11 @@ class HType {
return ((type_ & kString) == kString);
}
+ bool IsNonString() const {
+ return IsTaggedPrimitive() || IsSmi() || IsHeapNumber() ||
+ IsBoolean() || IsJSArray();
+ }
+
bool IsBoolean() const {
ASSERT(type_ != kUninitialized);
return ((type_ & kBoolean) == kBoolean);
@@ -800,6 +805,8 @@ class HValue: public ZoneObject {
kIsArguments,
kTruncatingToInt32,
kAllUsesTruncatingToInt32,
+ kTruncatingToSmi,
+ kAllUsesTruncatingToSmi,
// Set after an instruction is killed.
kIsDead,
// Instructions that are allowed to produce full range unsigned integer
@@ -886,6 +893,7 @@ class HValue: public ZoneObject {
HUseIterator uses() const { return HUseIterator(use_list_); }
virtual bool EmitAtUses() { return false; }
+
Representation representation() const { return representation_; }
void ChangeRepresentation(Representation r) {
ASSERT(CheckFlag(kFlexibleRepresentation));
@@ -1161,6 +1169,7 @@ class HValue: public ZoneObject {
}
Representation RepresentationFromUses();
Representation RepresentationFromUseRequirements();
+ bool HasNonSmiUse();
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
const char* reason);
@@ -1493,16 +1502,20 @@ class HNumericConstraint : public HTemplateInstruction<2> {
};
-// We insert soft-deoptimize when we hit code with unknown typefeedback,
-// so that we get a chance of re-optimizing with useful typefeedback.
-// HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
-class HSoftDeoptimize: public HTemplateInstruction<0> {
+class HDeoptimize: public HTemplateInstruction<0> {
public:
+ explicit HDeoptimize(Deoptimizer::BailoutType type) : type_(type) {}
+
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(SoftDeoptimize)
+ Deoptimizer::BailoutType type() { return type_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+
+ private:
+ Deoptimizer::BailoutType type_;
};
@@ -1517,59 +1530,6 @@ class HDebugBreak: public HTemplateInstruction<0> {
};
-class HDeoptimize: public HControlInstruction {
- public:
- HDeoptimize(int environment_length,
- int first_local_index,
- int first_expression_index,
- Zone* zone)
- : values_(environment_length, zone),
- first_local_index_(first_local_index),
- first_expression_index_(first_expression_index) { }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
- virtual void PrintDataTo(StringStream* stream);
-
- virtual int SuccessorCount() { return 0; }
- virtual HBasicBlock* SuccessorAt(int i) {
- UNREACHABLE();
- return NULL;
- }
- virtual void SetSuccessorAt(int i, HBasicBlock* block) {
- UNREACHABLE();
- }
-
- void AddEnvironmentValue(HValue* value, Zone* zone) {
- values_.Add(NULL, zone);
- SetOperandAt(values_.length() - 1, value);
- }
- int first_local_index() { return first_local_index_; }
- int first_expression_index() { return first_expression_index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
-
- enum UseEnvironment {
- kNoUses,
- kUseAll
- };
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- values_[index] = value;
- }
-
- private:
- ZoneList<HValue*> values_;
- int first_local_index_;
- int first_expression_index_;
-};
-
-
class HGoto: public HTemplateControlInstruction<1, 0> {
public:
explicit HGoto(HBasicBlock* target) {
@@ -1758,7 +1718,8 @@ class HChange: public HUnaryOperation {
public:
HChange(HValue* value,
Representation to,
- bool is_truncating,
+ bool is_truncating_to_smi,
+ bool is_truncating_to_int32,
bool allow_undefined_as_nan)
: HUnaryOperation(value) {
ASSERT(!value->representation().IsNone());
@@ -1767,7 +1728,8 @@ class HChange: public HUnaryOperation {
set_representation(to);
SetFlag(kUseGVN);
if (allow_undefined_as_nan) SetFlag(kAllowUndefinedAsNaN);
- if (is_truncating) SetFlag(kTruncatingToInt32);
+ if (is_truncating_to_smi) SetFlag(kTruncatingToSmi);
+ if (is_truncating_to_int32) SetFlag(kTruncatingToInt32);
if (value->representation().IsSmi() || value->type().IsSmi()) {
set_type(HType::Smi());
} else {
@@ -2668,6 +2630,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
switch (op) {
case kMathFloor:
case kMathRound:
+ // TODO(verwaest): Set representation to flexible int starting as smi.
set_representation(Representation::Integer32());
break;
case kMathAbs:
@@ -2732,12 +2695,7 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
class HCheckMaps: public HTemplateInstruction<2> {
public:
static HCheckMaps* New(HValue* value, Handle<Map> map, Zone* zone,
- HValue *typecheck = NULL) {
- HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
- check_map->map_set_.Add(map, zone);
- return check_map;
- }
-
+ CompilationInfo* info, HValue *typecheck = NULL);
static HCheckMaps* New(HValue* value, SmallMapList* maps, Zone* zone,
HValue *typecheck = NULL) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
@@ -2749,27 +2707,9 @@ class HCheckMaps: public HTemplateInstruction<2> {
}
static HCheckMaps* NewWithTransitions(HValue* value, Handle<Map> map,
- Zone* zone) {
- HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, value);
- check_map->map_set_.Add(map, zone);
-
- // Since transitioned elements maps of the initial map don't fail the map
- // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
- check_map->ClearGVNFlag(kDependsOnElementsKind);
-
- ElementsKind kind = map->elements_kind();
- bool packed = IsFastPackedElementsKind(kind);
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- Map* transitioned_map =
- map->LookupElementsTransitionMap(kind);
- if (transitioned_map) {
- check_map->map_set_.Add(Handle<Map>(transitioned_map), zone);
- }
- };
- check_map->map_set_.Sort();
- return check_map;
- }
+ Zone* zone, CompilationInfo* info);
+
+ bool CanOmitMapChecks() { return omit_; }
virtual bool HasEscapingOperandAt(int index) { return false; }
virtual Representation RequiredInputRepresentation(int index) {
@@ -2806,7 +2746,7 @@ class HCheckMaps: public HTemplateInstruction<2> {
private:
// Clients should use one of the static New* methods above.
HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
- : map_unique_ids_(0, zone) {
+ : omit_(false), map_unique_ids_(0, zone) {
SetOperandAt(0, value);
// Use the object value for the dependency if NULL is passed.
// TODO(titzer): do GVN flags already express this dependency?
@@ -2818,6 +2758,16 @@ class HCheckMaps: public HTemplateInstruction<2> {
SetGVNFlag(kDependsOnElementsKind);
}
+ void omit(CompilationInfo* info) {
+ omit_ = true;
+ for (int i = 0; i < map_set_.length(); i++) {
+ Handle<Map> map = map_set_.at(i);
+ map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
+ info);
+ }
+ }
+
+ bool omit_;
SmallMapList map_set_;
ZoneList<UniqueValueId> map_unique_ids_;
};
@@ -3074,12 +3024,234 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
};
+class InductionVariableData;
+
+
+struct InductionVariableLimitUpdate {
+ InductionVariableData* updated_variable;
+ HValue* limit;
+ bool limit_is_upper;
+ bool limit_is_included;
+
+ InductionVariableLimitUpdate()
+ : updated_variable(NULL), limit(NULL),
+ limit_is_upper(false), limit_is_included(false) {}
+};
+
+
+class HBoundsCheck;
+class HPhi;
+class HConstant;
+class HBitwise;
+
+
+class InductionVariableData : public ZoneObject {
+ public:
+ class InductionVariableCheck : public ZoneObject {
+ public:
+ HBoundsCheck* check() { return check_; }
+ InductionVariableCheck* next() { return next_; }
+ bool HasUpperLimit() { return upper_limit_ >= 0; }
+ int32_t upper_limit() {
+ ASSERT(HasUpperLimit());
+ return upper_limit_;
+ }
+ void set_upper_limit(int32_t upper_limit) {
+ upper_limit_ = upper_limit;
+ }
+
+ bool processed() { return processed_; }
+ void set_processed() { processed_ = true; }
+
+ InductionVariableCheck(HBoundsCheck* check,
+ InductionVariableCheck* next,
+ int32_t upper_limit = kNoLimit)
+ : check_(check), next_(next), upper_limit_(upper_limit),
+ processed_(false) {}
+
+ private:
+ HBoundsCheck* check_;
+ InductionVariableCheck* next_;
+ int32_t upper_limit_;
+ bool processed_;
+ };
+
+ class ChecksRelatedToLength : public ZoneObject {
+ public:
+ HValue* length() { return length_; }
+ ChecksRelatedToLength* next() { return next_; }
+ InductionVariableCheck* checks() { return checks_; }
+
+ void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
+ void CloseCurrentBlock();
+
+ ChecksRelatedToLength(HValue* length, ChecksRelatedToLength* next)
+ : length_(length), next_(next), checks_(NULL),
+ first_check_in_block_(NULL),
+ added_index_(NULL),
+ added_constant_(NULL),
+ current_and_mask_in_block_(0),
+ current_or_mask_in_block_(0) {}
+
+ private:
+ void UseNewIndexInCurrentBlock(Token::Value token,
+ int32_t mask,
+ HValue* index_base,
+ HValue* context);
+
+ HBoundsCheck* first_check_in_block() { return first_check_in_block_; }
+ HBitwise* added_index() { return added_index_; }
+ void set_added_index(HBitwise* index) { added_index_ = index; }
+ HConstant* added_constant() { return added_constant_; }
+ void set_added_constant(HConstant* constant) { added_constant_ = constant; }
+ int32_t current_and_mask_in_block() { return current_and_mask_in_block_; }
+ int32_t current_or_mask_in_block() { return current_or_mask_in_block_; }
+ int32_t current_upper_limit() { return current_upper_limit_; }
+
+ HValue* length_;
+ ChecksRelatedToLength* next_;
+ InductionVariableCheck* checks_;
+
+ HBoundsCheck* first_check_in_block_;
+ HBitwise* added_index_;
+ HConstant* added_constant_;
+ int32_t current_and_mask_in_block_;
+ int32_t current_or_mask_in_block_;
+ int32_t current_upper_limit_;
+ };
+
+ struct LimitFromPredecessorBlock {
+ InductionVariableData* variable;
+ Token::Value token;
+ HValue* limit;
+ HBasicBlock* other_target;
+
+ bool LimitIsValid() { return token != Token::ILLEGAL; }
+
+ bool LimitIsIncluded() {
+ return Token::IsEqualityOp(token) ||
+ token == Token::GTE || token == Token::LTE;
+ }
+ bool LimitIsUpper() {
+ return token == Token::LTE || token == Token::LT || token == Token::NE;
+ }
+
+ LimitFromPredecessorBlock()
+ : variable(NULL),
+ token(Token::ILLEGAL),
+ limit(NULL),
+ other_target(NULL) {}
+ };
+
+ static const int32_t kNoLimit = -1;
+
+ static InductionVariableData* ExaminePhi(HPhi* phi);
+ static void ComputeLimitFromPredecessorBlock(
+ HBasicBlock* block,
+ LimitFromPredecessorBlock* result);
+ static bool ComputeInductionVariableLimit(
+ HBasicBlock* block,
+ InductionVariableLimitUpdate* additional_limit);
+
+ struct BitwiseDecompositionResult {
+ HValue* base;
+ int32_t and_mask;
+ int32_t or_mask;
+ HValue* context;
+
+ BitwiseDecompositionResult()
+ : base(NULL), and_mask(0), or_mask(0), context(NULL) {}
+ };
+ static void DecomposeBitwise(HValue* value,
+ BitwiseDecompositionResult* result);
+
+ void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
+
+ bool CheckIfBranchIsLoopGuard(Token::Value token,
+ HBasicBlock* current_branch,
+ HBasicBlock* other_branch);
+
+ void UpdateAdditionalLimit(InductionVariableLimitUpdate* update);
+
+ HPhi* phi() { return phi_; }
+ HValue* base() { return base_; }
+ int32_t increment() { return increment_; }
+ HValue* limit() { return limit_; }
+ bool limit_included() { return limit_included_; }
+ HBasicBlock* limit_validity() { return limit_validity_; }
+ HBasicBlock* induction_exit_block() { return induction_exit_block_; }
+ HBasicBlock* induction_exit_target() { return induction_exit_target_; }
+ ChecksRelatedToLength* checks() { return checks_; }
+ HValue* additional_upper_limit() { return additional_upper_limit_; }
+ bool additional_upper_limit_is_included() {
+ return additional_upper_limit_is_included_;
+ }
+ HValue* additional_lower_limit() { return additional_lower_limit_; }
+ bool additional_lower_limit_is_included() {
+ return additional_lower_limit_is_included_;
+ }
+
+ bool LowerLimitIsNonNegativeConstant() {
+ if (base()->IsInteger32Constant() && base()->GetInteger32Constant() >= 0) {
+ return true;
+ }
+ if (additional_lower_limit() != NULL &&
+ additional_lower_limit()->IsInteger32Constant() &&
+ additional_lower_limit()->GetInteger32Constant() >= 0) {
+ // Ignoring the corner case of !additional_lower_limit_is_included()
+ // is safe; handling it adds unneeded complexity.
+ return true;
+ }
+ return false;
+ }
+
+ int32_t ComputeUpperLimit(int32_t and_mask, int32_t or_mask);
+
+ private:
+ template <class T> void swap(T* a, T* b) {
+ T c(*a);
+ *a = *b;
+ *b = c;
+ }
+
+ InductionVariableData(HPhi* phi, HValue* base, int32_t increment)
+ : phi_(phi), base_(IgnoreOsrValue(base)), increment_(increment),
+ limit_(NULL), limit_included_(false), limit_validity_(NULL),
+ induction_exit_block_(NULL), induction_exit_target_(NULL),
+ checks_(NULL),
+ additional_upper_limit_(NULL),
+ additional_upper_limit_is_included_(false),
+ additional_lower_limit_(NULL),
+ additional_lower_limit_is_included_(false) {}
+
+ static int32_t ComputeIncrement(HPhi* phi, HValue* phi_operand);
+
+ static HValue* IgnoreOsrValue(HValue* v);
+ static InductionVariableData* GetInductionVariableData(HValue* v);
+
+ HPhi* phi_;
+ HValue* base_;
+ int32_t increment_;
+ HValue* limit_;
+ bool limit_included_;
+ HBasicBlock* limit_validity_;
+ HBasicBlock* induction_exit_block_;
+ HBasicBlock* induction_exit_target_;
+ ChecksRelatedToLength* checks_;
+ HValue* additional_upper_limit_;
+ bool additional_upper_limit_is_included_;
+ HValue* additional_lower_limit_;
+ bool additional_lower_limit_is_included_;
+};
+
+
class HPhi: public HValue {
public:
HPhi(int merged_index, Zone* zone)
: inputs_(2, zone),
merged_index_(merged_index),
- phi_id_(-1) {
+ phi_id_(-1),
+ induction_variable_data_(NULL) {
for (int i = 0; i < Representation::kNumRepresentations; i++) {
non_phi_uses_[i] = 0;
indirect_uses_[i] = 0;
@@ -3110,6 +3282,21 @@ class HPhi: public HValue {
int merged_index() const { return merged_index_; }
+ InductionVariableData* induction_variable_data() {
+ return induction_variable_data_;
+ }
+ bool IsInductionVariable() {
+ return induction_variable_data_ != NULL;
+ }
+ bool IsLimitedInductionVariable() {
+ return IsInductionVariable() &&
+ induction_variable_data_->limit() != NULL;
+ }
+ void DetectInductionVariable() {
+ ASSERT(induction_variable_data_ == NULL);
+ induction_variable_data_ = InductionVariableData::ExaminePhi(this);
+ }
+
virtual void AddInformativeDefinitions();
virtual void PrintTo(StringStream* stream);
@@ -3177,6 +3364,7 @@ class HPhi: public HValue {
int non_phi_uses_[Representation::kNumRepresentations];
int indirect_uses_[Representation::kNumRepresentations];
int phi_id_;
+ InductionVariableData* induction_variable_data_;
};
@@ -3296,6 +3484,11 @@ class HConstant: public HTemplateInstruction<0> {
return handle_;
}
+ bool InstanceOf(Handle<Map> map) {
+ return handle_->IsJSObject() &&
+ Handle<JSObject>::cast(handle_)->map() == *map;
+ }
+
bool IsSpecialDouble() const {
return has_double_value_ &&
(BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
@@ -3339,7 +3532,7 @@ class HConstant: public HTemplateInstruction<0> {
}
virtual Representation KnownOptimalRepresentation() {
- if (HasSmiValue()) return Representation::Smi();
+ if (HasSmiValue() && kSmiValueSize == 31) return Representation::Smi();
if (HasInteger32Value()) return Representation::Integer32();
if (HasNumberValue()) return Representation::Double();
return Representation::Tagged();
@@ -3350,7 +3543,8 @@ class HConstant: public HTemplateInstruction<0> {
virtual HType CalculateInferredType();
bool IsInteger() { return handle()->IsSmi(); }
HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
- HConstant* CopyToTruncatedInt32(Zone* zone) const;
+ Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
+ Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone);
bool HasInteger32Value() const { return has_int32_value_; }
int32_t Integer32Value() const {
ASSERT(HasInteger32Value());
@@ -3500,7 +3694,7 @@ class HBinaryOperation: public HTemplateInstruction<3> {
// Otherwise, if there is only one use of the right operand, it would be
// better off on the left for platforms that only have 2-arg arithmetic
// ops (e.g ia32, x64) that clobber the left operand.
- return (right()->UseCount() == 1);
+ return right()->UseCount() == 1;
}
HValue* BetterLeftOperand() {
@@ -3525,24 +3719,28 @@ class HBinaryOperation: public HTemplateInstruction<3> {
return observed_input_representation_[index - 1];
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
- virtual Representation RepresentationFromInputs();
- virtual void AssumeRepresentation(Representation r);
-
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
const char* reason) {
- // By default, binary operations don't handle Smis.
- if (new_rep.IsSmi()) {
- new_rep = Representation::Integer32();
- }
- HValue::UpdateRepresentation(new_rep, h_infer, reason);
+ Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
+ ? Representation::Integer32() : new_rep;
+ HValue::UpdateRepresentation(rep, h_infer, reason);
}
+ virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
+ virtual Representation RepresentationFromInputs();
+ Representation RepresentationFromOutput();
+ virtual void AssumeRepresentation(Representation r);
+
virtual bool IsCommutative() const { return false; }
virtual void PrintDataTo(StringStream* stream);
+ virtual Representation RequiredInputRepresentation(int index) {
+ if (index == 0) return Representation::Tagged();
+ return representation();
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
private:
@@ -3695,15 +3893,16 @@ class HBoundsCheck: public HTemplateInstruction<2> {
HBoundsCheck(HValue* index, HValue* length)
: skip_check_(false),
base_(NULL), offset_(0), scale_(0),
- responsibility_direction_(DIRECTION_NONE) {
+ responsibility_direction_(DIRECTION_NONE),
+ allow_equality_(false) {
SetOperandAt(0, index);
SetOperandAt(1, length);
SetFlag(kFlexibleRepresentation);
SetFlag(kUseGVN);
}
- bool skip_check() { return skip_check_; }
- void set_skip_check(bool skip_check) { skip_check_ = skip_check; }
+ bool skip_check() const { return skip_check_; }
+ void set_skip_check() { skip_check_ = true; }
HValue* base() { return base_; }
int offset() { return offset_; }
int scale() { return scale_; }
@@ -3735,6 +3934,9 @@ class HBoundsCheck: public HTemplateInstruction<2> {
virtual Representation RequiredInputRepresentation(int arg_index) {
return representation();
}
+ virtual bool IsDeletable() const {
+ return skip_check() && !FLAG_debug_code;
+ }
virtual bool IsRelationTrueInternal(NumericRelation relation,
HValue* related_value,
@@ -3746,6 +3948,8 @@ class HBoundsCheck: public HTemplateInstruction<2> {
HValue* index() { return OperandAt(0); }
HValue* length() { return OperandAt(1); }
+ bool allow_equality() { return allow_equality_; }
+ void set_allow_equality(bool v) { allow_equality_ = v; }
virtual int RedefinedOperandIndex() { return 0; }
virtual bool IsPurelyInformativeDefinition() { return skip_check(); }
@@ -3768,6 +3972,7 @@ class HBoundsCheck: public HTemplateInstruction<2> {
int offset_;
int scale_;
RangeGuaranteeDirection responsibility_direction_;
+ bool allow_equality_;
};
@@ -3821,15 +4026,9 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
-
virtual void RepresentationChanged(Representation to) {
if (!to.IsTagged()) {
- ASSERT(to.IsInteger32());
+ ASSERT(to.IsSmiOrInteger32());
ClearAllSideEffects();
SetFlag(kUseGVN);
} else {
@@ -3842,10 +4041,14 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
HInferRepresentationPhase* h_infer,
const char* reason) {
// We only generate either int32 or generic tagged bitwise operations.
- if (new_rep.IsSmi() || new_rep.IsDouble()) {
- new_rep = Representation::Integer32();
- }
- HValue::UpdateRepresentation(new_rep, h_infer, reason);
+ if (new_rep.IsDouble()) new_rep = Representation::Integer32();
+ HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
+ virtual Representation observed_input_representation(int index) {
+ Representation r = HBinaryOperation::observed_input_representation(index);
+ if (r.IsDouble()) return Representation::Integer32();
+ return r;
}
virtual void initialize_output_representation(Representation observed) {
@@ -3911,11 +4114,6 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
}
virtual HType CalculateInferredType();
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
@@ -4018,29 +4216,6 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
};
-class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
- public:
- HCompareConstantEqAndBranch(HValue* left, int right, Token::Value op)
- : HUnaryControlInstruction(left, NULL, NULL), op_(op), right_(right) {
- ASSERT(op == Token::EQ_STRICT);
- }
-
- Token::Value op() const { return op_; }
- HValue* left() { return value(); }
- int right() const { return right_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareConstantEqAndBranch);
-
- private:
- const Token::Value op_;
- const int right_;
-};
-
-
class HIsObjectAndBranch: public HUnaryControlInstruction {
public:
explicit HIsObjectAndBranch(HValue* value)
@@ -4475,6 +4650,13 @@ class HMul: public HArithmeticBinaryOperation {
return !representation().IsTagged();
}
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
@@ -4514,6 +4696,13 @@ class HMod: public HArithmeticBinaryOperation {
virtual HValue* Canonicalize();
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Mod)
protected:
@@ -4556,6 +4745,13 @@ class HDiv: public HArithmeticBinaryOperation {
virtual HValue* Canonicalize();
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Div)
protected:
@@ -4596,11 +4792,12 @@ class HMathMinMax: public HArithmeticBinaryOperation {
virtual Representation RepresentationFromInputs() {
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
- if ((left_rep.IsNone() || left_rep.IsInteger32()) &&
- (right_rep.IsNone() || right_rep.IsInteger32())) {
- return Representation::Integer32();
- }
- return Representation::Double();
+ // TODO(verwaest): Initialize to Smi once lithium-codegen has been fixed.
+ Representation result = Representation::Integer32();
+ result = result.generalize(left_rep);
+ result = result.generalize(right_rep);
+ if (result.IsTagged()) return Representation::Double();
+ return result;
}
virtual bool IsCommutative() const { return true; }
@@ -4655,6 +4852,27 @@ class HBitwise: public HBitwiseBinaryOperation {
HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
: HBitwiseBinaryOperation(context, left, right), op_(op) {
ASSERT(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
+ // BIT_AND with a smi-range positive value will always unset the
+ // entire sign-extension of the smi-sign.
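+ // For example, "x & 0xff" always yields a value in [0, 0xff], which fits
+ // in a smi.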
+ if (op == Token::BIT_AND &&
+ ((left->IsConstant() &&
+ left->representation().IsSmi() &&
+ HConstant::cast(left)->Integer32Value() >= 0) ||
+ (right->IsConstant() &&
+ right->representation().IsSmi() &&
+ HConstant::cast(right)->Integer32Value() >= 0))) {
+ SetFlag(kTruncatingToSmi);
+      // BIT_OR with a negative smi-range value always sets every bit above
+      // the smi payload (the sign-extension of the smi sign bit), so the
+      // result again fits in a smi.
+ } else if (op == Token::BIT_OR &&
+ ((left->IsConstant() &&
+ left->representation().IsSmi() &&
+ HConstant::cast(left)->Integer32Value() < 0) ||
+ (right->IsConstant() &&
+ right->representation().IsSmi() &&
+ HConstant::cast(right)->Integer32Value() < 0))) {
+ SetFlag(kTruncatingToSmi);
+ }
}
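  // Worked example of the truncation reasoning above (assuming 31-bit smis):
  //   x & 0x7  clears every bit above bit 2, so the result always fits in a
  //            smi regardless of x's upper (sign-extension) bits;
  //   x | -4   sets bits 2..31, so the upper bits form a valid sign
  //            extension and the result (-4..-1) is again a smi.
  // In both cases the instruction can be marked kTruncatingToSmi.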
Token::Value op_;
@@ -4670,6 +4888,13 @@ class HShl: public HBitwiseBinaryOperation {
virtual Range* InferRange(Zone* zone);
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Shl)
protected:
@@ -4702,6 +4927,13 @@ class HShr: public HBitwiseBinaryOperation {
virtual Range* InferRange(Zone* zone);
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Shr)
protected:
@@ -4734,6 +4966,13 @@ class HSar: public HBitwiseBinaryOperation {
virtual Range* InferRange(Zone* zone);
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Sar)
protected:
@@ -4752,6 +4991,13 @@ class HRor: public HBitwiseBinaryOperation {
ChangeRepresentation(Representation::Integer32());
}
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) {
+ if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Ror)
protected:
@@ -4964,14 +5210,15 @@ class HAllocate: public HTemplateInstruction<2> {
CAN_ALLOCATE_IN_NEW_SPACE = 1 << 0,
CAN_ALLOCATE_IN_OLD_DATA_SPACE = 1 << 1,
CAN_ALLOCATE_IN_OLD_POINTER_SPACE = 1 << 2,
- ALLOCATE_DOUBLE_ALIGNED = 1 << 3
+ ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
+ PREFILL_WITH_FILLER = 1 << 4
};
HAllocate(HValue* context, HValue* size, HType type, Flags flags)
- : type_(type),
- flags_(flags) {
+ : flags_(flags) {
SetOperandAt(0, context);
SetOperandAt(1, size);
+ set_type(type);
set_representation(Representation::Tagged());
SetFlag(kTrackSideEffectDominators);
SetGVNFlag(kChangesNewSpacePromotion);
@@ -4996,7 +5243,6 @@ class HAllocate: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* size() { return OperandAt(1); }
- HType type() { return type_; }
virtual Representation RequiredInputRepresentation(int index) {
if (index == 0) {
@@ -5014,8 +5260,6 @@ class HAllocate: public HTemplateInstruction<2> {
known_initial_map_ = known_initial_map;
}
- virtual HType CalculateInferredType();
-
bool CanAllocateInNewSpace() const {
return (flags_ & CAN_ALLOCATE_IN_NEW_SPACE) != 0;
}
@@ -5041,6 +5285,14 @@ class HAllocate: public HTemplateInstruction<2> {
return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
}
+ bool MustPrefillWithFiller() const {
+ return (flags_ & PREFILL_WITH_FILLER) != 0;
+ }
+
+ void SetFlags(Flags flags) {
+ flags_ = static_cast<HAllocate::Flags>(flags_ | flags);
+ }
+
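  // Usage sketch (hypothetical values): flags can be widened after
  // construction, e.g.
  //   HAllocate* alloc = new(zone) HAllocate(
  //       context, size, HType::Tagged(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE);
  //   alloc->SetFlags(HAllocate::PREFILL_WITH_FILLER);
  //   // MustPrefillWithFiller() now returns true.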
void UpdateSize(HValue* size) {
SetOperandAt(1, size);
}
@@ -5053,7 +5305,6 @@ class HAllocate: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(Allocate)
private:
- HType type_;
Flags flags_;
Handle<Map> known_initial_map_;
};
@@ -5062,10 +5313,10 @@ class HAllocate: public HTemplateInstruction<2> {
class HInnerAllocatedObject: public HTemplateInstruction<1> {
public:
HInnerAllocatedObject(HValue* value, int offset, HType type = HType::Tagged())
- : offset_(offset),
- type_(type) {
+ : offset_(offset) {
ASSERT(value->IsAllocate());
SetOperandAt(0, value);
+ set_type(type);
set_representation(Representation::Tagged());
}
@@ -5076,15 +5327,12 @@ class HInnerAllocatedObject: public HTemplateInstruction<1> {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() { return type_; }
-
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
private:
int offset_;
- HType type_;
};
@@ -5313,20 +5561,32 @@ class HObjectAccess {
return OffsetField::decode(value_);
}
+ inline Representation representation() const {
+ return Representation::FromKind(RepresentationField::decode(value_));
+ }
+
inline Handle<String> name() const {
return name_;
}
+ inline HObjectAccess WithRepresentation(Representation representation) {
+ return HObjectAccess(portion(), offset(), representation, name());
+ }
+
static HObjectAccess ForHeapNumberValue() {
- return HObjectAccess(kDouble, HeapNumber::kValueOffset);
+ return HObjectAccess(
+ kDouble, HeapNumber::kValueOffset, Representation::Double());
}
static HObjectAccess ForElementsPointer() {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
- static HObjectAccess ForArrayLength() {
- return HObjectAccess(kArrayLengths, JSArray::kLengthOffset);
+ static HObjectAccess ForArrayLength(ElementsKind elements_kind) {
+ return HObjectAccess(
+ kArrayLengths, JSArray::kLengthOffset,
+ IsFastElementsKind(elements_kind) && FLAG_track_fields ?
+ Representation::Smi() : Representation::Tagged());
}
static HObjectAccess ForAllocationSiteTransitionInfo() {
@@ -5338,7 +5598,10 @@ class HObjectAccess {
}
static HObjectAccess ForFixedArrayLength() {
- return HObjectAccess(kArrayLengths, FixedArray::kLengthOffset);
+ return HObjectAccess(
+ kArrayLengths, FixedArray::kLengthOffset,
+ FLAG_track_fields ?
+ Representation::Smi() : Representation::Tagged());
}
static HObjectAccess ForPropertiesPointer() {
@@ -5369,13 +5632,15 @@ class HObjectAccess {
static HObjectAccess ForFixedArrayHeader(int offset);
// Create an access to an in-object property in a JSObject.
- static HObjectAccess ForJSObjectOffset(int offset);
+ static HObjectAccess ForJSObjectOffset(int offset,
+ Representation representation = Representation::Tagged());
// Create an access to an in-object property in a JSArray.
static HObjectAccess ForJSArrayOffset(int offset);
// Create an access to the backing store of an object.
- static HObjectAccess ForBackingStoreOffset(int offset);
+ static HObjectAccess ForBackingStoreOffset(int offset,
+ Representation representation = Representation::Tagged());
// Create an access to a resolved field (in-object or backing store).
static HObjectAccess ForField(Handle<Map> map,
@@ -5405,17 +5670,23 @@ class HObjectAccess {
};
HObjectAccess(Portion portion, int offset,
- Handle<String> name = Handle<String>::null())
- : value_(PortionField::encode(portion) | OffsetField::encode(offset)),
+ Representation representation = Representation::Tagged(),
+ Handle<String> name = Handle<String>::null())
+ : value_(PortionField::encode(portion) |
+ RepresentationField::encode(representation.kind()) |
+ OffsetField::encode(offset)),
name_(name) {
- ASSERT(this->offset() == offset); // offset should decode correctly
- ASSERT(this->portion() == portion); // portion should decode correctly
+    // Assert that the fields decode correctly.
+ ASSERT(this->offset() == offset);
+ ASSERT(this->portion() == portion);
+ ASSERT(RepresentationField::decode(value_) == representation.kind());
}
class PortionField : public BitField<Portion, 0, 3> {};
- class OffsetField : public BitField<int, 3, 29> {};
+ class RepresentationField : public BitField<Representation::Kind, 3, 3> {};
+ class OffsetField : public BitField<int, 6, 26> {};
- uint32_t value_; // encodes both portion and offset
+ uint32_t value_; // encodes portion, representation, and offset
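  // Encoding sketch: with the bit fields above, value_ packs
  //   bits 0..2   Portion
  //   bits 3..5   Representation::Kind
  //   bits 6..31  offset
  // so e.g. HObjectAccess(kDouble, HeapNumber::kValueOffset,
  // Representation::Double()) stores all three in one uint32_t and each
  // accessor decodes its own slice.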
Handle<String> name_;
friend class HLoadNamedField;
@@ -5463,22 +5734,20 @@ class HLoadNamedField: public HTemplateInstruction<2> {
public:
HLoadNamedField(HValue* object,
HObjectAccess access,
- HValue* typecheck = NULL,
- Representation field_representation
- = Representation::Tagged())
- : access_(access),
- field_representation_(field_representation) {
+ HValue* typecheck = NULL)
+ : access_(access) {
ASSERT(object != NULL);
SetOperandAt(0, object);
SetOperandAt(1, typecheck != NULL ? typecheck : object);
- if (FLAG_track_fields && field_representation.IsSmi()) {
+ Representation representation = access.representation();
+ if (representation.IsSmi()) {
set_type(HType::Smi());
- set_representation(field_representation);
- } else if (FLAG_track_double_fields && field_representation.IsDouble()) {
- set_representation(field_representation);
+ set_representation(representation);
+ } else if (representation.IsDouble()) {
+ set_representation(representation);
} else if (FLAG_track_heap_object_fields &&
- field_representation.IsHeapObject()) {
+ representation.IsHeapObject()) {
set_type(HType::NonPrimitive());
set_representation(Representation::Tagged());
} else {
@@ -5495,7 +5764,9 @@ class HLoadNamedField: public HTemplateInstruction<2> {
bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
HObjectAccess access() const { return access_; }
- Representation field_representation() const { return representation_; }
+ Representation field_representation() const {
+ return access_.representation();
+ }
virtual bool HasEscapingOperandAt(int index) { return false; }
virtual Representation RequiredInputRepresentation(int index) {
@@ -5515,7 +5786,6 @@ class HLoadNamedField: public HTemplateInstruction<2> {
virtual bool IsDeletable() const { return true; }
HObjectAccess access_;
- Representation field_representation_;
};
@@ -5616,8 +5886,8 @@ class ArrayInstructionInterface {
virtual ~ArrayInstructionInterface() { };
static Representation KeyedAccessIndexRequirement(Representation r) {
- return r.IsInteger32() ? Representation::Integer32()
- : Representation::Smi();
+ return r.IsInteger32() || kSmiValueSize != 31
+ ? Representation::Integer32() : Representation::Smi();
}
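  // Example (sketch): on targets whose smis do not have a 31-bit payload
  // (kSmiValueSize == 32, typically 64-bit builds) keyed indices are always
  // required as Integer32; on 31-bit-smi targets a Smi-represented key may
  // stay Smi.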
};
@@ -5815,11 +6085,8 @@ class HStoreNamedField: public HTemplateInstruction<2> {
public:
HStoreNamedField(HValue* obj,
HObjectAccess access,
- HValue* val,
- Representation field_representation
- = Representation::Tagged())
+ HValue* val)
: access_(access),
- field_representation_(field_representation),
transition_(),
transition_unique_id_(),
new_space_dominator_(NULL),
@@ -5833,12 +6100,10 @@ class HStoreNamedField: public HTemplateInstruction<2> {
virtual bool HasEscapingOperandAt(int index) { return index == 1; }
virtual Representation RequiredInputRepresentation(int index) {
- if (FLAG_track_double_fields &&
- index == 1 && field_representation_.IsDouble()) {
- return field_representation_;
- } else if (FLAG_track_fields &&
- index == 1 && field_representation_.IsSmi()) {
- return field_representation_;
+ if (index == 1 && field_representation().IsDouble()) {
+ return field_representation();
+ } else if (index == 1 && field_representation().IsSmi()) {
+ return field_representation();
}
return Representation::Tagged();
}
@@ -5870,13 +6135,12 @@ class HStoreNamedField: public HTemplateInstruction<2> {
HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
- ASSERT(!(FLAG_track_double_fields && field_representation_.IsDouble()) ||
+ ASSERT(!(FLAG_track_double_fields && field_representation().IsDouble()) ||
transition_.is_null());
if (IsSkipWriteBarrier()) return false;
- return (!FLAG_track_fields || !field_representation_.IsSmi()) &&
- // If there is a transition, a new storage object needs to be allocated.
- !(FLAG_track_double_fields && field_representation_.IsDouble()) &&
- StoringValueNeedsWriteBarrier(value()) &&
+ if (field_representation().IsDouble()) return false;
+ if (field_representation().IsSmi()) return false;
+ return StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
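  // Reading of the early returns above (hedged): a smi is never a heap
  // pointer, and a double-represented field stores a raw double into its
  // HeapNumber box (see BuildStoreNamedField in hydrogen.cc), so neither
  // store introduces a pointer that would require a write barrier.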
@@ -5890,12 +6154,11 @@ class HStoreNamedField: public HTemplateInstruction<2> {
}
Representation field_representation() const {
- return field_representation_;
+ return access_.representation();
}
private:
HObjectAccess access_;
- Representation field_representation_;
Handle<Map> transition_;
UniqueValueId transition_unique_id_;
HValue* new_space_dominator_;
diff --git a/deps/v8/src/hydrogen-minus-zero.cc b/deps/v8/src/hydrogen-minus-zero.cc
index e9628959de..28ae6eba40 100644
--- a/deps/v8/src/hydrogen-minus-zero.cc
+++ b/deps/v8/src/hydrogen-minus-zero.cc
@@ -41,10 +41,10 @@ void HComputeMinusZeroChecksPhase::Run() {
// int32-to-tagged and int32-to-double.
Representation from = change->value()->representation();
ASSERT(from.Equals(change->from()));
- if (from.IsInteger32()) {
+ if (from.IsSmiOrInteger32()) {
ASSERT(change->to().IsTagged() ||
change->to().IsDouble() ||
- change->to().IsSmi());
+ change->to().IsSmiOrInteger32());
ASSERT(visited_.IsEmpty());
PropagateMinusZeroChecks(change->value());
visited_.Clear();
diff --git a/deps/v8/src/hydrogen-osr.cc b/deps/v8/src/hydrogen-osr.cc
index a2fa0bfb20..6c3d6ae3e0 100644
--- a/deps/v8/src/hydrogen-osr.cc
+++ b/deps/v8/src/hydrogen-osr.cc
@@ -94,7 +94,7 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
}
}
- builder_->AddSimulate(osr_entry_id);
+ builder_->Add<HSimulate>(osr_entry_id);
builder_->Add<HOsrEntry>(osr_entry_id);
HContext* context = builder_->Add<HContext>();
environment->BindContext(context);
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
index e8f0140f66..63b7b4d6ec 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -45,20 +45,25 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
// information we treat constants like normal instructions and insert the
// change instructions for them.
HInstruction* new_value = NULL;
- bool is_truncating = use_value->CheckFlag(HValue::kTruncatingToInt32);
+ bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi);
+ bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32);
bool allow_undefined_as_nan =
use_value->CheckFlag(HValue::kAllowUndefinedAsNaN);
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
- new_value = (is_truncating && to.IsInteger32())
- ? constant->CopyToTruncatedInt32(graph()->zone())
- : constant->CopyToRepresentation(to, graph()->zone());
+ if (is_truncating_to_int && to.IsInteger32()) {
+ Maybe<HConstant*> res = constant->CopyToTruncatedInt32(graph()->zone());
+ if (res.has_value) new_value = res.value;
+ } else {
+ new_value = constant->CopyToRepresentation(to, graph()->zone());
+ }
}
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(value, to,
- is_truncating,
+ is_truncating_to_smi,
+ is_truncating_to_int,
allow_undefined_as_nan);
}
@@ -105,6 +110,8 @@ void HRepresentationChangesPhase::Run() {
HPhi* phi = phi_list->at(i);
if (phi->representation().IsInteger32()) {
phi->SetFlag(HValue::kTruncatingToInt32);
+ } else if (phi->representation().IsSmi()) {
+ phi->SetFlag(HValue::kTruncatingToSmi);
}
}
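  // Sketch of the overall flow: phis already in Integer32 or Smi
  // representation start out optimistically flagged as truncating; the
  // worklist pass below clears the flag again (and re-queues the phi)
  // whenever a use, or a flagged phi operand, turns out to need the exact,
  // non-truncated value.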
@@ -116,13 +123,18 @@ void HRepresentationChangesPhase::Run() {
HValue* use = it.value();
Representation input_representation =
use->RequiredInputRepresentation(it.index());
- if (!input_representation.IsInteger32() ||
- !use->CheckFlag(HValue::kTruncatingToInt32)) {
+ if ((phi->representation().IsInteger32() &&
+ !(input_representation.IsInteger32() &&
+ use->CheckFlag(HValue::kTruncatingToInt32))) ||
+ (phi->representation().IsSmi() &&
+ !(input_representation.IsSmi() ||
+ use->CheckFlag(HValue::kTruncatingToSmi)))) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating because of #%d %s\n",
phi->id(), it.value()->id(), it.value()->Mnemonic());
}
phi->ClearFlag(HValue::kTruncatingToInt32);
+ phi->ClearFlag(HValue::kTruncatingToSmi);
worklist.Add(phi, zone());
break;
}
@@ -134,13 +146,16 @@ void HRepresentationChangesPhase::Run() {
for (int i = 0; i < current->OperandCount(); ++i) {
HValue* input = current->OperandAt(i);
if (input->IsPhi() &&
- input->representation().IsInteger32() &&
- input->CheckFlag(HValue::kTruncatingToInt32)) {
+ ((input->representation().IsInteger32() &&
+ input->CheckFlag(HValue::kTruncatingToInt32)) ||
+ (input->representation().IsSmi() &&
+ input->CheckFlag(HValue::kTruncatingToSmi)))) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating because of #%d %s\n",
input->id(), current->id(), current->Mnemonic());
}
input->ClearFlag(HValue::kTruncatingToInt32);
+ input->ClearFlag(HValue::kTruncatingToSmi);
worklist.Add(HPhi::cast(input), zone());
}
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 57220e0de1..e34688051d 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -34,6 +34,7 @@
#include "full-codegen.h"
#include "hashmap.h"
#include "hydrogen-bce.h"
+#include "hydrogen-bch.h"
#include "hydrogen-canonicalize.h"
#include "hydrogen-dce.h"
#include "hydrogen-dehoist.h"
@@ -146,26 +147,6 @@ void HBasicBlock::AddInstruction(HInstruction* instr) {
}
-HDeoptimize* HBasicBlock::CreateDeoptimize(
- HDeoptimize::UseEnvironment has_uses) {
- ASSERT(HasEnvironment());
- if (has_uses == HDeoptimize::kNoUses)
- return new(zone()) HDeoptimize(0, 0, 0, zone());
-
- HEnvironment* environment = last_environment();
- int first_local_index = environment->first_local_index();
- int first_expression_index = environment->first_expression_index();
- HDeoptimize* instr = new(zone()) HDeoptimize(
- environment->length(), first_local_index, first_expression_index, zone());
- for (int i = 0; i < environment->length(); i++) {
- HValue* val = environment->values()->at(i);
- instr->AddEnvironmentValue(val, zone());
- }
-
- return instr;
-}
-
-
HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
RemovableSimulate removable) {
ASSERT(HasEnvironment());
@@ -700,13 +681,16 @@ HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, int position)
: builder_(builder),
position_(position),
finished_(false),
+ deopt_then_(false),
+ deopt_else_(false),
did_then_(false),
did_else_(false),
did_and_(false),
did_or_(false),
captured_(false),
needs_compare_(true),
- split_edge_merge_block_(NULL) {
+ split_edge_merge_block_(NULL),
+ merge_block_(NULL) {
HEnvironment* env = builder->environment();
first_true_block_ = builder->CreateBasicBlock(env->Copy());
last_true_block_ = NULL;
@@ -720,6 +704,8 @@ HGraphBuilder::IfBuilder::IfBuilder(
: builder_(builder),
position_(RelocInfo::kNoPosition),
finished_(false),
+ deopt_then_(false),
+ deopt_else_(false),
did_then_(false),
did_else_(false),
did_and_(false),
@@ -836,14 +822,13 @@ void HGraphBuilder::IfBuilder::Else() {
void HGraphBuilder::IfBuilder::Deopt() {
- HBasicBlock* block = builder_->current_block();
- block->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
- builder_->set_current_block(NULL);
+ ASSERT(did_then_);
if (did_else_) {
- first_false_block_ = NULL;
+ deopt_else_ = true;
} else {
- first_true_block_ = NULL;
+ deopt_then_ = true;
}
+ builder_->Add<HDeoptimize>(Deoptimizer::EAGER);
}
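// Usage sketch (hedged): a deopting branch is now recorded and its
// environment is padded when the branches join in End(), e.g.
//   IfBuilder cond(this);
//   cond.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantUndefined());
//   cond.Then();
//   cond.Deopt();   // emits HDeoptimize(EAGER) and marks the then-branch
//   cond.Else();
//   ...             // normal path
//   cond.End();     // pads the deopt branch's environment at the merge block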
@@ -868,20 +853,30 @@ void HGraphBuilder::IfBuilder::End() {
last_true_block_ = builder_->current_block();
}
if (first_true_block_ == NULL) {
- // Deopt on true. Nothing to do, just continue the false block.
+ // Return on true. Nothing to do, just continue the false block.
} else if (first_false_block_ == NULL) {
// Deopt on false. Nothing to do except switching to the true block.
builder_->set_current_block(last_true_block_);
} else {
- HEnvironment* merge_env = last_true_block_->last_environment()->Copy();
- merge_block_ = builder_->CreateBasicBlock(merge_env);
+ merge_block_ = builder_->graph()->CreateBasicBlock();
ASSERT(!finished_);
if (!did_else_) Else();
ASSERT(!last_true_block_->IsFinished());
HBasicBlock* last_false_block = builder_->current_block();
ASSERT(!last_false_block->IsFinished());
- last_true_block_->GotoNoSimulate(merge_block_);
- last_false_block->GotoNoSimulate(merge_block_);
+ if (deopt_then_) {
+ last_false_block->GotoNoSimulate(merge_block_);
+ builder_->PadEnvironmentForContinuation(last_true_block_,
+ merge_block_);
+ last_true_block_->GotoNoSimulate(merge_block_);
+ } else {
+ last_true_block_->GotoNoSimulate(merge_block_);
+ if (deopt_else_) {
+ builder_->PadEnvironmentForContinuation(last_false_block,
+ merge_block_);
+ }
+ last_false_block->GotoNoSimulate(merge_block_);
+ }
builder_->set_current_block(merge_block_);
}
}
@@ -991,36 +986,6 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
}
-void HGraphBuilder::AddSimulate(BailoutId id,
- RemovableSimulate removable) {
- ASSERT(current_block() != NULL);
- ASSERT(no_side_effects_scope_count_ == 0);
- current_block()->AddSimulate(id, removable);
-}
-
-
-HReturn* HGraphBuilder::AddReturn(HValue* value) {
- HValue* context = environment()->LookupContext();
- int num_parameters = graph()->info()->num_parameters();
- HValue* params = Add<HConstant>(num_parameters);
- HReturn* return_instruction = new(graph()->zone())
- HReturn(value, context, params);
- current_block()->FinishExit(return_instruction);
- return return_instruction;
-}
-
-
-void HGraphBuilder::AddSoftDeoptimize(SoftDeoptimizeMode mode) {
- isolate()->counters()->soft_deopts_requested()->Increment();
- if (FLAG_always_opt && mode == CAN_OMIT_SOFT_DEOPT) return;
- if (current_block()->IsDeoptimizing()) return;
- Add<HSoftDeoptimize>();
- isolate()->counters()->soft_deopts_inserted()->Increment();
- current_block()->MarkAsDeoptimizing();
- graph()->set_has_soft_deoptimize(true);
-}
-
-
HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
HBasicBlock* b = graph()->CreateBasicBlock();
b->SetInitialEnvironment(env);
@@ -1043,14 +1008,52 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
}
-HValue* HGraphBuilder::BuildCheckMap(HValue* obj,
- Handle<Map> map) {
- HCheckMaps* check = HCheckMaps::New(obj, map, zone());
+void HGraphBuilder::FinishExitWithHardDeoptimization(
+ HBasicBlock* continuation) {
+ PadEnvironmentForContinuation(current_block(), continuation);
+ Add<HDeoptimize>(Deoptimizer::EAGER);
+ if (no_side_effects_scope_count_ > 0) {
+ current_block()->GotoNoSimulate(continuation);
+ } else {
+ current_block()->Goto(continuation);
+ }
+}
+
+
+void HGraphBuilder::PadEnvironmentForContinuation(
+ HBasicBlock* from,
+ HBasicBlock* continuation) {
+ if (continuation->last_environment() != NULL) {
+ // When merging from a deopt block to a continuation, resolve differences in
+ // environment by pushing undefined and popping extra values so that the
+ // environments match during the join.
+ int continuation_env_length = continuation->last_environment()->length();
+ while (continuation_env_length != from->last_environment()->length()) {
+ if (continuation_env_length > from->last_environment()->length()) {
+ from->last_environment()->Push(graph()->GetConstantUndefined());
+ } else {
+ from->last_environment()->Pop();
+ }
+ }
+ } else {
+ ASSERT(continuation->predecessors()->length() == 0);
+ }
+}
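// Worked example: if the continuation's environment has length 5 and the
// deopting block's has length 3, two undefined values are pushed; if it has
// length 7 instead, two values are popped. A continuation without an
// environment (no predecessors yet) needs no padding.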
+
+
+HValue* HGraphBuilder::BuildCheckMap(HValue* obj, Handle<Map> map) {
+ HCheckMaps* check = HCheckMaps::New(obj, map, zone(), top_info());
AddInstruction(check);
return check;
}
+HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
+ if (object->type().IsJSObject()) return object;
+ return Add<HWrapReceiver>(object, function);
+}
+
+
HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HValue* elements,
ElementsKind kind,
@@ -1092,10 +1095,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HAdd::New(zone, context, key, graph_->GetConstant1()));
new_length->ClearFlag(HValue::kCanOverflow);
- Representation representation = IsFastElementsKind(kind)
- ? Representation::Smi() : Representation::Tagged();
- AddStore(object, HObjectAccess::ForArrayLength(), new_length,
- representation);
+ AddStore(object, HObjectAccess::ForArrayLength(kind), new_length);
}
length_checker.Else();
@@ -1163,10 +1163,8 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
HInstruction* elements_length = AddLoadFixedArrayLength(elements);
HInstruction* array_length = is_jsarray
- ? AddLoad(object, HObjectAccess::ForArrayLength(),
- NULL, Representation::Smi())
+ ? AddLoad(object, HObjectAccess::ForArrayLength(from_kind), NULL)
: elements_length;
- array_length->set_type(HType::Smi());
BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
array_length, elements_length);
@@ -1208,14 +1206,14 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
if (is_store && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = HCheckMaps::New(
- elements, isolate()->factory()->fixed_array_map(), zone);
+ elements, isolate()->factory()->fixed_array_map(), zone, top_info());
check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
AddInstruction(check_cow_map);
}
HInstruction* length = NULL;
if (is_js_array) {
- length = AddLoad(object, HObjectAccess::ForArrayLength(), mapcheck,
- Representation::Smi());
+ length = AddLoad(object, HObjectAccess::ForArrayLength(elements_kind),
+ mapcheck);
} else {
length = AddLoadFixedArrayLength(elements);
}
@@ -1276,7 +1274,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
length);
} else {
HCheckMaps* check_cow_map = HCheckMaps::New(
- elements, isolate()->factory()->fixed_array_map(), zone);
+ elements, isolate()->factory()->fixed_array_map(),
+ zone, top_info());
check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
AddInstruction(check_cow_map);
}
@@ -1330,10 +1329,7 @@ void HGraphBuilder::BuildInitializeElementsHeader(HValue* elements,
: factory->fixed_array_map();
AddStoreMapConstant(elements, map);
- Representation representation = IsFastElementsKind(kind)
- ? Representation::Smi() : Representation::Tagged();
- AddStore(elements, HObjectAccess::ForFixedArrayLength(), capacity,
- representation);
+ AddStore(elements, HObjectAccess::ForFixedArrayLength(), capacity);
}
@@ -1350,6 +1346,7 @@ HValue* HGraphBuilder::BuildAllocateElementsAndInitializeElementsHeader(
HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
HValue* array_map,
AllocationSiteMode mode,
+ ElementsKind elements_kind,
HValue* allocation_site_payload,
HValue* length_field) {
@@ -1360,7 +1357,7 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
HObjectAccess access = HObjectAccess::ForPropertiesPointer();
AddStore(array, access, empty_fixed_array);
- AddStore(array, HObjectAccess::ForArrayLength(), length_field);
+ AddStore(array, HObjectAccess::ForArrayLength(elements_kind), length_field);
if (mode == TRACK_ALLOCATION_SITE) {
BuildCreateAllocationMemento(array,
@@ -1467,10 +1464,7 @@ HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(HValue* object) {
- HLoadNamedField* instr = AddLoad(object, HObjectAccess::ForFixedArrayLength(),
- NULL, Representation::Smi());
- instr->set_type(HType::Smi());
- return instr;
+ return AddLoad(object, HObjectAccess::ForFixedArrayLength());
}
@@ -1719,7 +1713,7 @@ HInstruction* HGraphBuilder::BuildUnaryMathOp(
input, graph()->GetConstantMinus1());
Representation rep = Representation::FromType(type);
if (type->Is(Type::None())) {
- AddSoftDeoptimize();
+ Add<HDeoptimize>(Deoptimizer::SOFT);
}
if (instr->IsBinaryOperation()) {
HBinaryOperation* binop = HBinaryOperation::cast(instr);
@@ -1730,7 +1724,7 @@ HInstruction* HGraphBuilder::BuildUnaryMathOp(
}
case Token::BIT_NOT:
if (type->Is(Type::None())) {
- AddSoftDeoptimize();
+ Add<HDeoptimize>(Deoptimizer::SOFT);
}
return new(zone()) HBitNot(input);
}
@@ -1841,11 +1835,8 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode(HValue* context) {
// No need for a context lookup if the kind_ matches the initial
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- HInstruction* load =
- builder()->BuildLoadNamedField(constructor_function_,
- access,
- Representation::Tagged());
- return builder()->AddInstruction(load);
+ return builder()->AddInstruction(
+ builder()->BuildLoadNamedField(constructor_function_, access));
}
HInstruction* native_context = builder()->BuildGetNativeContext(context);
@@ -1866,9 +1857,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
return builder()->AddInstruction(
- builder()->BuildLoadNamedField(constructor_function_,
- access,
- Representation::Tagged()));
+ builder()->BuildLoadNamedField(constructor_function_, access));
}
@@ -1882,11 +1871,8 @@ HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
base_size += AllocationMemento::kSize;
}
- if (IsFastDoubleElementsKind(kind_)) {
- base_size += FixedDoubleArray::kHeaderSize;
- } else {
- base_size += FixedArray::kHeaderSize;
- }
+ STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
+ base_size += FixedArray::kHeaderSize;
HInstruction* elements_size_value =
builder()->Add<HConstant>(elements_size());
@@ -1956,6 +1942,7 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
elements_location_ = builder()->BuildJSArrayHeader(new_object,
map,
mode_,
+ kind_,
allocation_site_payload_,
length_field);
@@ -1973,17 +1960,15 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
HStoreNamedField* HGraphBuilder::AddStore(HValue *object,
HObjectAccess access,
- HValue *val,
- Representation representation) {
- return Add<HStoreNamedField>(object, access, val, representation);
+ HValue *val) {
+ return Add<HStoreNamedField>(object, access, val);
}
HLoadNamedField* HGraphBuilder::AddLoad(HValue *object,
HObjectAccess access,
- HValue *typecheck,
- Representation representation) {
- return Add<HLoadNamedField>(object, access, typecheck, representation);
+ HValue *typecheck) {
+ return Add<HLoadNamedField>(object, access, typecheck);
}
@@ -2638,7 +2623,7 @@ void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
}
@@ -2680,7 +2665,7 @@ void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
owner()->AddInstruction(instr);
owner()->Push(instr);
if (instr->HasObservableSideEffects()) {
- owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
}
@@ -2736,7 +2721,7 @@ void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
// this one isn't actually needed (and wouldn't work if it were targeted).
if (instr->HasObservableSideEffects()) {
builder->Push(instr);
- builder->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ builder->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
builder->Pop();
}
BuildBranch(instr);
@@ -2924,7 +2909,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
VisitVariableDeclaration(scope->function());
}
VisitDeclarations(scope->declarations());
- AddSimulate(BailoutId::Declarations());
+ Add<HSimulate>(BailoutId::Declarations());
HValue* context = environment()->LookupContext();
Add<HStackCheck>(context, HStackCheck::kFunctionEntry);
@@ -2933,7 +2918,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
if (HasStackOverflow()) return false;
if (current_block() != NULL) {
- AddReturn(graph()->GetConstantUndefined());
+ Add<HReturn>(graph()->GetConstantUndefined());
set_current_block(NULL);
}
@@ -3031,6 +3016,9 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
if (FLAG_array_bounds_checks_elimination && !FLAG_idefs) {
Run<HBoundsCheckEliminationPhase>();
}
+ if (FLAG_array_bounds_checks_hoisting && !FLAG_idefs) {
+ Run<HBoundsCheckHoistingPhase>();
+ }
if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
@@ -3112,7 +3100,7 @@ void HGraph::RestoreActualValues() {
}
-void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
+void HGraphBuilder::PushAndAdd(HInstruction* instr) {
Push(instr);
AddInstruction(instr);
}
@@ -3223,10 +3211,10 @@ void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (stmt->condition()->ToBooleanIsTrue()) {
- AddSimulate(stmt->ThenId());
+ Add<HSimulate>(stmt->ThenId());
Visit(stmt->then_statement());
} else if (stmt->condition()->ToBooleanIsFalse()) {
- AddSimulate(stmt->ElseId());
+ Add<HSimulate>(stmt->ElseId());
Visit(stmt->else_statement());
} else {
HBasicBlock* cond_true = graph()->CreateBasicBlock();
@@ -3333,7 +3321,7 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
// Not an inlined return, so an actual one.
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* result = environment()->Pop();
- AddReturn(result);
+ Add<HReturn>(result);
} else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
// Return from an inlined construct call. In a test context the return value
// will always evaluate to true, in a value context the return value needs
@@ -3425,7 +3413,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
HValue* context = environment()->LookupContext();
CHECK_ALIVE(VisitForValue(stmt->tag()));
- AddSimulate(stmt->EntryId());
+ Add<HSimulate>(stmt->EntryId());
HValue* tag_value = Pop();
HBasicBlock* first_test_block = current_block();
@@ -3465,7 +3453,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
if (!clause->compare_type()->Is(Type::Smi())) {
- AddSoftDeoptimize();
+ Add<HDeoptimize>(Deoptimizer::SOFT);
}
HCompareNumericAndBranch* compare_ =
@@ -3515,7 +3503,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
normal_block = last_block;
last_block = NULL; // Cleared to indicate we've handled it.
}
- } else if (!curr_test_block->end()->IsDeoptimize()) {
+ } else {
normal_block = curr_test_block->end()->FirstSuccessor();
curr_test_block = curr_test_block->end()->SecondSuccessor();
}
@@ -3569,7 +3557,7 @@ void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
HBasicBlock* loop_entry,
BreakAndContinueInfo* break_info) {
BreakAndContinueScope push(break_info, this);
- AddSimulate(stmt->StackCheckId());
+ Add<HSimulate>(stmt->StackCheckId());
HValue* context = environment()->LookupContext();
HStackCheck* stack_check = Add<HStackCheck>(
context, HStackCheck::kBackwardsBranch);
@@ -3730,7 +3718,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HInstruction* map = Add<HForInPrepareMap>(
environment()->LookupContext(), enumerable);
- AddSimulate(stmt->PrepareId());
+ Add<HSimulate>(stmt->PrepareId());
HInstruction* array = Add<HForInCacheArray>(
enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
@@ -3799,7 +3787,6 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
environment()->LookupContext(),
current_index,
graph()->GetConstant1());
- new_index->AssumeRepresentation(Representation::Integer32());
PushAndAdd(new_index);
body_exit = current_block();
}
@@ -4327,7 +4314,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
AddInstruction(store);
if (store->HasObservableSideEffects()) {
- AddSimulate(key->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(key->id(), REMOVABLE_SIMULATE);
}
} else {
CHECK_ALIVE(VisitForEffect(value));
@@ -4450,7 +4437,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// De-opt if elements kind changed from boilerplate_elements_kind.
Handle<Map> map = Handle<Map>(original_boilerplate_object->map(),
isolate());
- AddInstruction(HCheckMaps::New(literal, map, zone()));
+ AddInstruction(HCheckMaps::New(literal, map, zone(), top_info()));
}
// The array is expected in the bailout environment during computation
@@ -4492,7 +4479,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
break;
}
- AddSimulate(expr->GetIdForElement(i));
+ Add<HSimulate>(expr->GetIdForElement(i));
}
Drop(1); // array literal index
@@ -4525,30 +4512,17 @@ static bool ComputeLoadStoreField(Handle<Map> type,
}
-static Representation ComputeLoadStoreRepresentation(Handle<Map> type,
- LookupResult* lookup) {
- if (lookup->IsField()) {
- return lookup->representation();
- } else {
- Map* transition = lookup->GetTransitionMapFromMap(*type);
- int descriptor = transition->LastAdded();
- PropertyDetails details =
- transition->instance_descriptors()->GetDetails(descriptor);
- return details.representation();
- }
-}
-
-
void HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) {
BuildCheckHeapObject(object);
- AddInstruction(HCheckMaps::New(object, map, zone()));
+ AddInstruction(HCheckMaps::New(object, map, zone(), top_info()));
}
void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map) {
BuildCheckHeapObject(object);
- AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
+ AddInstruction(HCheckMaps::NewWithTransitions(
+ object, map, zone(), top_info()));
}
@@ -4589,33 +4563,33 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
}
HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
- Representation representation = ComputeLoadStoreRepresentation(map, lookup);
bool transition_to_field = lookup->IsTransitionToField(*map);
HStoreNamedField *instr;
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (FLAG_track_double_fields && field_access.representation().IsDouble()) {
+ HObjectAccess heap_number_access =
+ field_access.WithRepresentation(Representation::Tagged());
if (transition_to_field) {
// The store requires a mutable HeapNumber to be allocated.
NoObservableSideEffectsScope no_side_effects(this);
HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
- HInstruction* double_box = Add<HAllocate>(
+ HInstruction* heap_number = Add<HAllocate>(
environment()->LookupContext(), heap_number_size,
HType::HeapNumber(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE);
- AddStoreMapConstant(double_box, isolate()->factory()->heap_number_map());
- AddStore(double_box, HObjectAccess::ForHeapNumberValue(),
- value, Representation::Double());
- instr = new(zone()) HStoreNamedField(object, field_access, double_box);
+ AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map());
+ AddStore(heap_number, HObjectAccess::ForHeapNumberValue(), value);
+ instr = new(zone()) HStoreNamedField(
+ object, heap_number_access, heap_number);
} else {
// Already holds a HeapNumber; load the box and write its value field.
- HInstruction* double_box = AddLoad(object, field_access);
- double_box->set_type(HType::HeapNumber());
- instr = new(zone()) HStoreNamedField(double_box,
- HObjectAccess::ForHeapNumberValue(), value, Representation::Double());
+ HInstruction* heap_number = AddLoad(object, heap_number_access);
+ heap_number->set_type(HType::HeapNumber());
+ instr = new(zone()) HStoreNamedField(heap_number,
+ HObjectAccess::ForHeapNumberValue(), value);
}
} else {
- // This is a non-double store.
- instr = new(zone()) HStoreNamedField(
- object, field_access, value, representation);
+ // This is a normal store.
+ instr = new(zone()) HStoreNamedField(object, field_access, value);
}
if (transition_to_field) {
@@ -4682,20 +4656,18 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
LookupResult lookup(isolate());
int count;
- Representation representation = Representation::None();
HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
for (count = 0; count < types->length(); ++count) {
Handle<Map> map = types->at(count);
if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
- Representation new_representation =
- ComputeLoadStoreRepresentation(map, &lookup);
if (count == 0) {
// First time through the loop; set access and representation.
access = new_access;
- } else if (!representation.IsCompatibleForLoad(new_representation)) {
+ } else if (!access.representation().IsCompatibleForLoad(
+ new_access.representation())) {
// Representations did not match.
break;
} else if (access.offset() != new_access.offset()) {
@@ -4705,14 +4677,15 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
// In-objectness did not match.
break;
}
- representation = representation.generalize(new_representation);
+ access = access.WithRepresentation(
+ access.representation().generalize(new_access.representation()));
}
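  // Worked example (sketch): if map A declares the field with Smi
  // representation and map B with Tagged, the access is generalized to
  // Tagged here, so the single load emitted below is valid for both maps.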
if (count == types->length()) {
// Everything matched; can use monomorphic load.
BuildCheckHeapObject(object);
AddInstruction(HCheckMaps::New(object, types, zone()));
- return BuildLoadNamedField(object, access, representation);
+ return BuildLoadNamedField(object, access);
}
if (count != 0) return NULL;
@@ -4734,14 +4707,14 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
BuildCheckHeapObject(object);
AddInstruction(HCheckMaps::New(object, types, zone()));
+
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddInstruction(new(zone()) HCheckPrototypeMaps(
Handle<JSObject>::cast(prototype), holder, zone(), top_info()));
HValue* holder_value = AddInstruction(new(zone()) HConstant(holder));
return BuildLoadNamedField(holder_value,
- HObjectAccess::ForField(holder_map, &lookup, name),
- ComputeLoadStoreRepresentation(map, &lookup));
+ HObjectAccess::ForField(holder_map, &lookup, name));
}
@@ -4790,8 +4763,7 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
ASSERT(!map->is_observed());
HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
- Representation new_representation =
- ComputeLoadStoreRepresentation(map, &lookup);
+ Representation new_representation = new_access.representation();
if (count == 0) {
// First time through the loop; set access and representation.
@@ -4822,7 +4794,7 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
if (!ast_context()->IsEffect()) Push(result_value);
store->set_position(position);
AddInstruction(store);
- AddSimulate(assignment_id);
+ Add<HSimulate>(assignment_id);
if (!ast_context()->IsEffect()) Drop(1);
ast_context()->ReturnValue(result_value);
return true;
@@ -4881,7 +4853,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+ FinishExitWithHardDeoptimization(join);
} else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, store_value);
instr->set_position(position);
@@ -4898,10 +4870,10 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
// unoptimized code).
if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
- AddSimulate(assignment_id, REMOVABLE_SIMULATE);
+ Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
} else {
Push(result_value);
- AddSimulate(assignment_id, REMOVABLE_SIMULATE);
+ Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
Drop(1);
}
}
@@ -4929,7 +4901,7 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
HValue* value = environment()->ExpressionStackAt(0);
HValue* object = environment()->ExpressionStackAt(1);
- if (expr->IsUninitialized()) AddSoftDeoptimize();
+ if (expr->IsUninitialized()) Add<HDeoptimize>(Deoptimizer::SOFT);
return BuildStoreNamed(expr, expr->id(), expr->position(),
expr->AssignmentId(), prop, object, value, value);
} else {
@@ -4946,7 +4918,7 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
&has_side_effects);
Drop(3);
Push(value);
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
}
@@ -4975,14 +4947,14 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
builder.Then();
builder.Else();
- AddSoftDeoptimize(MUST_EMIT_SOFT_DEOPT);
+ Add<HDeoptimize>(Deoptimizer::EAGER);
builder.End();
}
HInstruction* instr =
Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
instr->set_position(position);
if (instr->HasObservableSideEffects()) {
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
} else {
HValue* context = environment()->LookupContext();
@@ -4992,7 +4964,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
value, function_strict_mode_flag());
instr->set_position(position);
ASSERT(instr->HasObservableSideEffects());
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
}
@@ -5054,7 +5026,7 @@ void HOptimizedGraphBuilder::BuildStoreNamed(Expression* expr,
instr->set_position(position);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(id, REMOVABLE_SIMULATE);
+ Add<HSimulate>(id, REMOVABLE_SIMULATE);
}
if (!ast_context()->IsEffect()) Drop(1);
return ast_context()->ReturnValue(result_value);
@@ -5132,7 +5104,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
mode, Top());
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
@@ -5173,7 +5145,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
PushAndAdd(load);
if (load->HasObservableSideEffects()) {
- AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
}
CHECK_ALIVE(VisitForValue(expr->value()));
@@ -5183,7 +5155,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
}
return BuildStoreNamed(prop, expr->id(), expr->position(),
@@ -5201,7 +5173,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ if (has_side_effects) Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
@@ -5210,7 +5182,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
}
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -5222,7 +5194,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Drop(3);
Push(instr);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
@@ -5344,7 +5316,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
mode, Top());
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
return ast_context()->ReturnValue(Pop());
}
@@ -5378,30 +5350,24 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
HValue* value = environment()->Pop();
HThrow* instr = Add<HThrow>(context, value);
instr->set_position(expr->position());
- AddSimulate(expr->id());
+ Add<HSimulate>(expr->id());
current_block()->FinishExit(new(zone()) HAbnormalExit);
set_current_block(NULL);
}
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(
- HValue* object,
- HObjectAccess access,
- Representation representation) {
- bool load_double = false;
- if (representation.IsDouble()) {
- representation = Representation::Tagged();
- load_double = FLAG_track_double_fields;
- }
- HLoadNamedField* field =
- new(zone()) HLoadNamedField(object, access, NULL, representation);
- if (load_double) {
- AddInstruction(field);
- field->set_type(HType::HeapNumber());
- return new(zone()) HLoadNamedField(field,
- HObjectAccess::ForHeapNumberValue(), NULL, Representation::Double());
+HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
+ HObjectAccess access) {
+ if (FLAG_track_double_fields && access.representation().IsDouble()) {
+    // Load the heap number.
+ HLoadNamedField* heap_number =
+ AddLoad(object, access.WithRepresentation(Representation::Tagged()));
+ heap_number->set_type(HType::HeapNumber());
+    // Load the double value from it.
+ return new(zone()) HLoadNamedField(heap_number,
+ HObjectAccess::ForHeapNumberValue(), NULL);
}
- return field;
+ return new(zone()) HLoadNamedField(object, access, NULL);
}
@@ -5410,7 +5376,7 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
Handle<String> name,
Property* expr) {
if (expr->IsUninitialized()) {
- AddSoftDeoptimize();
+ Add<HDeoptimize>(Deoptimizer::SOFT);
}
HValue* context = environment()->LookupContext();
return new(zone()) HLoadNamedGeneric(context, object, name);
@@ -5441,7 +5407,7 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
if (map->instance_type() == JS_ARRAY_TYPE) {
AddCheckMapsWithTransitions(object, map);
return new(zone()) HLoadNamedField(object,
- HObjectAccess::ForArrayLength());
+ HObjectAccess::ForArrayLength(map->elements_kind()));
}
}
@@ -5450,15 +5416,14 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
if (lookup.IsField()) {
AddCheckMap(object, map);
return BuildLoadNamedField(object,
- HObjectAccess::ForField(map, &lookup, name),
- ComputeLoadStoreRepresentation(map, &lookup));
+ HObjectAccess::ForField(map, &lookup, name));
}
// Handle a load of a constant known function.
- if (lookup.IsConstantFunction()) {
+ if (lookup.IsConstant()) {
AddCheckMap(object, map);
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
- return new(zone()) HConstant(function);
+ Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate());
+ return new(zone()) HConstant(constant);
}
// Handle a load from a known field somewhere in the prototype chain.
@@ -5471,19 +5436,18 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
Add<HCheckPrototypeMaps>(prototype, holder, zone(), top_info());
HValue* holder_value = Add<HConstant>(holder);
return BuildLoadNamedField(holder_value,
- HObjectAccess::ForField(holder_map, &lookup, name),
- ComputeLoadStoreRepresentation(map, &lookup));
+ HObjectAccess::ForField(holder_map, &lookup, name));
}
// Handle a load of a constant function somewhere in the prototype chain.
- if (lookup.IsConstantFunction()) {
+ if (lookup.IsConstant()) {
Handle<JSObject> prototype(JSObject::cast(map->prototype()));
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
Add<HCheckPrototypeMaps>(prototype, holder, zone(), top_info());
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*holder_map));
- return new(zone()) HConstant(function);
+ Handle<Object> constant(lookup.GetConstantFromMap(*holder_map), isolate());
+ return new(zone()) HConstant(constant);
}
// No luck, do a generic load.
@@ -5506,7 +5470,8 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
Handle<Map> map,
bool is_store,
KeyedAccessStoreMode store_mode) {
- HCheckMaps* mapcheck = HCheckMaps::New(object, map, zone(), dependency);
+ HCheckMaps* mapcheck = HCheckMaps::New(
+ object, map, zone(), top_info(), dependency);
AddInstruction(mapcheck);
if (dependency) {
mapcheck->ClearGVNFlag(kDependsOnElementsKind);
@@ -5690,12 +5655,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
AddInstruction(HCheckMaps::New(
elements, isolate()->factory()->fixed_array_map(),
- zone(), mapcompare));
+ zone(), top_info(), mapcompare));
}
- if (map->IsJSArray()) {
- HInstruction* length = AddLoad(object, HObjectAccess::ForArrayLength(),
- mapcompare, Representation::Smi());
- length->set_type(HType::Smi());
+ if (map->instance_type() == JS_ARRAY_TYPE) {
+ HInstruction* length = AddLoad(
+ object, HObjectAccess::ForArrayLength(elements_kind), mapcompare);
checked_key = Add<HBoundsCheck>(key, length);
} else {
HInstruction* length = AddLoadFixedArrayLength(elements);
@@ -5732,7 +5696,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
}
// Deopt if none of the cases matched.
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+ NoObservableSideEffectsScope scope(this);
+ FinishExitWithHardDeoptimization(join);
set_current_block(join);
return is_store ? NULL : Pop();
}
@@ -5768,12 +5733,12 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
} else {
if (is_store) {
if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
- AddSoftDeoptimize();
+ Add<HDeoptimize>(Deoptimizer::SOFT);
}
instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
if (expr->AsProperty()->IsUninitialized()) {
- AddSoftDeoptimize();
+ Add<HDeoptimize>(Deoptimizer::SOFT);
}
instr = BuildLoadKeyedGeneric(obj, key);
}
@@ -5952,10 +5917,10 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
&has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
} else {
Push(load);
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
}
}
@@ -6057,7 +6022,7 @@ bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic(
PreProcessCall(call);
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
}
@@ -6189,7 +6154,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+ // Because the deopt may be the only path in the polymorphic call, make sure
+ // that the environment stack matches the depth on deopt that it otherwise
+ // would have had after a successful call.
+ Drop(argument_count - (ast_context()->IsEffect() ? 0 : 1));
+ FinishExitWithHardDeoptimization(join);
} else {
HValue* context = environment()->LookupContext();
HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
@@ -6447,7 +6416,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
inner_env->BindContext(context);
#endif
- AddSimulate(return_id);
+ Add<HSimulate>(return_id);
current_block()->UpdateEnvironment(inner_env);
HArgumentsObject* arguments_object = NULL;
@@ -6881,7 +6850,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
- HValue* wrapped_receiver = Add<HWrapReceiver>(receiver, function);
+ HValue* wrapped_receiver = BuildWrapReceiver(receiver, function);
HInstruction* result =
new(zone()) HApplyArguments(function,
wrapped_receiver,
@@ -6898,7 +6867,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HArgumentsObject* args = function_state()->entry()->arguments_object();
const ZoneList<HValue*>* arguments_values = args->arguments_values();
int arguments_count = arguments_values->length();
- PushAndAdd(new(zone()) HWrapReceiver(receiver, function));
+ Push(BuildWrapReceiver(receiver, function));
for (int i = 1; i < arguments_count; i++) {
Push(arguments_values->at(i));
}
@@ -7460,8 +7429,8 @@ void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* value = Pop();
Handle<Type> operand_type = expr->expression()->bounds().lower;
+ HValue* value = TruncateToNumber(Pop(), &operand_type);
HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::SUB);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -7469,8 +7438,8 @@ void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* value = Pop();
Handle<Type> operand_type = expr->expression()->bounds().lower;
+ HValue* value = TruncateToNumber(Pop(), &operand_type);
HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::BIT_NOT);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -7625,7 +7594,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
mode, after);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
@@ -7668,7 +7637,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
PushAndAdd(load);
if (load->HasObservableSideEffects()) {
- AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
}
after = BuildIncrement(returns_original_input, expr);
@@ -7691,7 +7660,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ if (has_side_effects) Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
after = BuildIncrement(returns_original_input, expr);
input = environment()->ExpressionStackAt(0);
@@ -7708,7 +7677,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
}
@@ -7800,6 +7769,40 @@ bool CanBeZero(HValue* right) {
}
+HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
+ if (value->IsConstant()) {
+ HConstant* constant = HConstant::cast(value);
+ Maybe<HConstant*> number = constant->CopyToTruncatedNumber(zone());
+ if (number.has_value) {
+ *expected = handle(Type::Number(), isolate());
+ return AddInstruction(number.value);
+ }
+ return value;
+ }
+
+ Handle<Type> expected_type = *expected;
+ Representation rep = Representation::FromType(expected_type);
+ if (!rep.IsTagged()) return value;
+
+ // If our type feedback suggests that we can non-observably truncate to number
+ // we introduce the appropriate check here. This avoids 'value' having a
+ // tagged representation later on.
+ if (expected_type->Is(Type::Oddball())) {
+ // TODO(olivf) The BinaryOpStub only records undefined. It might pay off to
+ // also record booleans and convert them to 0/1 here.
+ IfBuilder if_nan(this);
+ if_nan.If<HCompareObjectEqAndBranch>(value,
+ graph()->GetConstantUndefined());
+ if_nan.Then();
+ if_nan.ElseDeopt();
+ if_nan.End();
+ return Add<HConstant>(OS::nan_value(), Representation::Double());
+ }
+
+ return value;
+}
+
+
HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
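
The TruncateToNumber() helper added in the hunk above folds known constants and, when type feedback reports an oddball operand, guards on undefined and substitutes NaN. A minimal standalone sketch of that guarded path, using hypothetical types rather than V8's (the deopt is modelled as an exception):

#include <cassert>
#include <cmath>
#include <stdexcept>

struct Value { bool is_undefined; double number; };

// Mirrors the oddball branch: only undefined is expected here; anything
// else takes the deopt path (modelled as an exception in this sketch).
double TruncateOddballToNumber(const Value& v) {
  if (!v.is_undefined) throw std::runtime_error("deopt");
  return std::nan("");  // ToNumber(undefined) is NaN
}

int main() {
  assert(std::isnan(TruncateOddballToNumber({true, 0.0})));
  bool deopted = false;
  try {
    TruncateOddballToNumber({false, 3.0});
  } catch (const std::runtime_error&) {
    deopted = true;  // would fall back to the generic path
  }
  assert(deopted);
  return 0;
}
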
@@ -7813,13 +7816,21 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
Representation right_rep = Representation::FromType(right_type);
Representation result_rep = Representation::FromType(result_type);
+ if (expr->op() != Token::ADD ||
+ (left->type().IsNonString() && right->type().IsNonString())) {
+ // For addition we can only truncate the arguments to number if we can
+ // prove that we will not end up in string concatenation mode.
+ left = TruncateToNumber(left, &left_type);
+ right = TruncateToNumber(right, &right_type);
+ }
+
if (left_type->Is(Type::None())) {
- AddSoftDeoptimize();
+ Add<HDeoptimize>(Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous defaults.
left_type = handle(Type::Any(), isolate());
}
if (right_type->Is(Type::None())) {
- AddSoftDeoptimize();
+ Add<HDeoptimize>(Deoptimizer::SOFT);
right_type = handle(Type::Any(), isolate());
}
HInstruction* instr = NULL;
@@ -8169,7 +8180,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
if (combined_type->Is(Type::None())) {
- AddSoftDeoptimize();
+ Add<HDeoptimize>(Deoptimizer::SOFT);
combined_type = left_type = right_type = handle(Type::Any(), isolate());
}
@@ -8445,11 +8456,8 @@ HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
HInstruction* length = Add<HConstant>(length_field);
ASSERT(boilerplate_array->length()->IsSmi());
- Representation representation =
- IsFastElementsKind(boilerplate_array->GetElementsKind())
- ? Representation::Smi() : Representation::Tagged();
- AddStore(object_header, HObjectAccess::ForArrayLength(),
- length, representation);
+ AddStore(object_header, HObjectAccess::ForArrayLength(
+ boilerplate_array->GetElementsKind()), length);
}
return result;
@@ -8515,7 +8523,7 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
AddStore(double_box, HObjectAccess::ForHeapNumberValue(),
- value_instruction, Representation::Double());
+ value_instruction);
value_instruction = double_box;
}
@@ -8672,7 +8680,7 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
HStoreContextSlot* store = Add<HStoreContextSlot>(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
if (store->HasObservableSideEffects()) {
- AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
}
}
break;
@@ -8710,7 +8718,7 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
HStoreContextSlot* store = Add<HStoreContextSlot>(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
if (store->HasObservableSideEffects()) {
- AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
}
break;
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 797b444078..895b9849c9 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -69,6 +69,11 @@ class HBasicBlock: public ZoneObject {
void set_last(HInstruction* instr) { last_ = instr; }
HControlInstruction* end() const { return end_; }
HLoopInformation* loop_information() const { return loop_information_; }
+ HLoopInformation* current_loop() const {
+ return IsLoopHeader() ? loop_information()
+ : (parent_loop_header() != NULL
+ ? parent_loop_header()->loop_information() : NULL);
+ }
const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
bool HasPredecessor() const { return predecessors_.length() > 0; }
const ZoneList<HBasicBlock*>* dominated_blocks() const {
@@ -137,17 +142,15 @@ class HBasicBlock: public ZoneObject {
}
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(BailoutId ast_id,
- RemovableSimulate removable = FIXED_SIMULATE) {
- AddInstruction(CreateSimulate(ast_id, removable));
+ HSimulate* AddSimulate(BailoutId ast_id,
+ RemovableSimulate removable = FIXED_SIMULATE) {
+ HSimulate* instr = CreateSimulate(ast_id, removable);
+ AddInstruction(instr);
+ return instr;
}
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
- void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
- FinishExit(CreateDeoptimize(has_uses));
- }
-
// Add the inlined function exit sequence, adding an HLeaveInlined
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value, FunctionState* state);
@@ -182,11 +185,12 @@ class HBasicBlock: public ZoneObject {
#endif
private:
+ friend class HGraphBuilder;
+
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
- HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
HGraph* graph_;
@@ -272,6 +276,20 @@ class HLoopInformation: public ZoneObject {
stack_check_ = stack_check;
}
+ bool IsNestedInThisLoop(HLoopInformation* other) {
+ while (other != NULL) {
+ if (other == this) {
+ return true;
+ }
+ other = other->parent_loop();
+ }
+ return false;
+ }
+ HLoopInformation* parent_loop() {
+ HBasicBlock* parent_header = loop_header()->parent_loop_header();
+ return parent_header != NULL ? parent_header->loop_information() : NULL;
+ }
+
private:
void AddBlock(HBasicBlock* block);
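
IsNestedInThisLoop() above is a plain walk up the parent_loop() chain. A self-contained sketch of the same walk with hypothetical types:

#include <cassert>

struct Loop { Loop* parent; };

// Is `other` nested inside `self` (including other == self)?
bool IsNestedInThisLoop(const Loop* self, const Loop* other) {
  while (other != nullptr) {
    if (other == self) return true;
    other = other->parent;
  }
  return false;
}

int main() {
  Loop outer{nullptr}, middle{&outer}, inner{&middle};
  assert(IsNestedInThisLoop(&outer, &inner));    // inner is nested in outer
  assert(IsNestedInThisLoop(&middle, &middle));  // a loop is nested in itself
  assert(!IsNestedInThisLoop(&inner, &outer));   // but not the other way round
  return 0;
}
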
@@ -283,6 +301,7 @@ class HLoopInformation: public ZoneObject {
class BoundsCheckTable;
+class InductionVariableBlocksTable;
class HGraph: public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);
@@ -449,6 +468,7 @@ class HGraph: public ZoneObject {
void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
void SetupInformativeDefinitionsInBlock(HBasicBlock* block);
void SetupInformativeDefinitionsRecursively(HBasicBlock* block);
+ void EliminateRedundantBoundsChecksUsingInductionVariables();
Isolate* isolate_;
int next_block_id_;
@@ -1023,11 +1043,6 @@ class HGraphBuilder {
new(zone()) I(p1, p2, p3, p4, p5, p6, p7, p8)));
}
- void AddSimulate(BailoutId id,
- RemovableSimulate removable = FIXED_SIMULATE);
-
- HReturn* AddReturn(HValue* value);
-
void IncrementInNoSideEffectsScope() {
no_side_effects_scope_count_++;
}
@@ -1044,6 +1059,7 @@ class HGraphBuilder {
HValue* BuildCheckHeapObject(HValue* object);
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
+ HValue* BuildWrapReceiver(HValue* object, HValue* function);
// Building common constructs
HValue* BuildCheckForCapacityGrow(HValue* object,
@@ -1078,8 +1094,7 @@ class HGraphBuilder {
HLoadNamedField* AddLoad(
HValue *object,
HObjectAccess access,
- HValue *typecheck = NULL,
- Representation representation = Representation::Tagged());
+ HValue *typecheck = NULL);
HLoadNamedField* BuildLoadNamedField(
HValue* object,
@@ -1104,26 +1119,19 @@ class HGraphBuilder {
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
- HStoreNamedField* AddStore(
- HValue *object,
- HObjectAccess access,
- HValue *val,
- Representation representation = Representation::Tagged());
-
+ HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
+ HStoreNamedField* AddStore(HValue *object, HObjectAccess access, HValue *val);
HStoreNamedField* AddStoreMapConstant(HValue *object, Handle<Map>);
-
HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck = NULL);
-
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin, HValue* context);
- enum SoftDeoptimizeMode {
- MUST_EMIT_SOFT_DEOPT,
- CAN_OMIT_SOFT_DEOPT
- };
+ HValue* TruncateToNumber(HValue* value, Handle<Type>* expected);
+
+ void PushAndAdd(HInstruction* instr);
- void AddSoftDeoptimize(SoftDeoptimizeMode mode = CAN_OMIT_SOFT_DEOPT);
+ void FinishExitWithHardDeoptimization(HBasicBlock* continuation);
class IfBuilder {
public:
@@ -1228,7 +1236,6 @@ class HGraphBuilder {
void ElseDeopt() {
Else();
Deopt();
- End();
}
void Return(HValue* value);
@@ -1241,6 +1248,8 @@ class HGraphBuilder {
HGraphBuilder* builder_;
int position_;
bool finished_ : 1;
+ bool deopt_then_ : 1;
+ bool deopt_else_ : 1;
bool did_then_ : 1;
bool did_else_ : 1;
bool did_and_ : 1;
@@ -1373,6 +1382,7 @@ class HGraphBuilder {
HInnerAllocatedObject* BuildJSArrayHeader(HValue* array,
HValue* array_map,
AllocationSiteMode mode,
+ ElementsKind elements_kind,
HValue* allocation_site_payload,
HValue* length_field);
@@ -1422,12 +1432,68 @@ class HGraphBuilder {
private:
HGraphBuilder();
+
+ void PadEnvironmentForContinuation(HBasicBlock* from,
+ HBasicBlock* continuation);
+
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
int no_side_effects_scope_count_;
};
+
+template<>
+inline HDeoptimize* HGraphBuilder::Add(Deoptimizer::BailoutType type) {
+ if (type == Deoptimizer::SOFT) {
+ isolate()->counters()->soft_deopts_requested()->Increment();
+ if (FLAG_always_opt) return NULL;
+ }
+ if (current_block()->IsDeoptimizing()) return NULL;
+ HDeoptimize* instr = new(zone()) HDeoptimize(type);
+ AddInstruction(instr);
+ if (type == Deoptimizer::SOFT) {
+ isolate()->counters()->soft_deopts_inserted()->Increment();
+ graph()->set_has_soft_deoptimize(true);
+ }
+ current_block()->MarkAsDeoptimizing();
+ return instr;
+}
+
+
+template<>
+inline HSimulate* HGraphBuilder::Add(BailoutId id,
+ RemovableSimulate removable) {
+ HSimulate* instr = current_block()->CreateSimulate(id, removable);
+ AddInstruction(instr);
+ return instr;
+}
+
+
+template<>
+inline HSimulate* HGraphBuilder::Add(BailoutId id) {
+ return Add<HSimulate>(id, FIXED_SIMULATE);
+}
+
+
+template<>
+inline HReturn* HGraphBuilder::Add(HValue* value) {
+ HValue* context = environment()->LookupContext();
+ int num_parameters = graph()->info()->num_parameters();
+ HValue* params = Add<HConstant>(num_parameters);
+ HReturn* return_instruction = new(graph()->zone())
+ HReturn(value, context, params);
+ current_block()->FinishExit(return_instruction);
+ return return_instruction;
+}
+
+
+template<>
+inline HReturn* HGraphBuilder::Add(HConstant* p1) {
+ return Add<HReturn>(static_cast<HValue*>(p1));
+}
+
+
class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
public:
// A class encapsulating (lazily-allocated) break and continue blocks for
@@ -1666,8 +1732,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
// Visit a list of expressions from left to right, each in a value context.
void VisitExpressions(ZoneList<Expression*>* exprs);
- void PushAndAdd(HInstruction* instr);
-
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
@@ -2021,10 +2085,14 @@ class HTracer: public Malloced {
public:
explicit HTracer(int isolate_id)
: trace_(&string_allocator_), indent_(0) {
- OS::SNPrintF(filename_,
- "hydrogen-%d-%d.cfg",
- OS::GetCurrentProcessId(),
- isolate_id);
+ if (FLAG_trace_hydrogen_file == NULL) {
+ OS::SNPrintF(filename_,
+ "hydrogen-%d-%d.cfg",
+ OS::GetCurrentProcessId(),
+ isolate_id);
+ } else {
+ OS::StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length());
+ }
WriteChars(filename_.start(), "", 0, false);
}
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 548cbaace7..5789f49216 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -43,6 +43,16 @@ namespace v8 {
namespace internal {
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -300,27 +310,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
- __ ret(0);
-
- __ bind(&check_heap_number);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
- __ j(not_equal, &call_builtin, Label::kNear);
- __ ret(0);
-
- __ bind(&call_builtin);
- __ pop(ecx); // Pop return address.
- __ push(eax);
- __ push(ecx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in esi.
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 505cd4fc1c..48968064aa 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -114,22 +114,8 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
}
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- DisallowHeapAllocation nha;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // Get the optimized code.
- Code* code = function->code();
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
-
- // The optimized code is going to be patched, so we cannot use it any more.
- function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
-
// We will overwrite the code's relocation info in-place. Relocation info
// is written backward. The relocation info is the payload of a byte
// array. Later on we will slide this to the start of the byte array and
@@ -188,25 +174,6 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
ASSERT(junk_address <= reloc_end_address);
isolate->heap()->CreateFillerObjectAt(junk_address,
reloc_end_address - junk_address);
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
}
@@ -741,6 +708,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 66a7c1c080..8f11acc1be 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -4045,7 +4045,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, ASCII_STRING_TYPE);
+ __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
// Add (separator length times array_length) - separator length
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 2c234d834c..d022a82f4d 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -656,9 +656,18 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
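
ToRepresentation() above returns either the raw integer or the bits of the corresponding Smi, depending on the requested representation. A standalone sketch of the Smi encoding this relies on, assuming the usual 32-bit scheme of one tag bit with tag value zero (so the immediate for Smi(n) is simply 2*n):

#include <cassert>
#include <cstdint>

static const int kSmiTagSize = 1;  // assumption: one tag bit, tag value 0

// What reinterpret_cast<int32_t>(Smi::FromInt(value)) boils down to on a
// 32-bit target under that assumption.
int32_t SmiImmediateFromInt(int32_t value) {
  return value << kSmiTagSize;  // i.e. value * 2
}

int main() {
  assert(SmiImmediateFromInt(0) == 0);
  assert(SmiImmediateFromInt(21) == 42);
  assert(SmiImmediateFromInt(-3) == -6);
  return 0;
}
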
@@ -1003,12 +1012,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
- ASSERT(!info()->IsStub());
- DeoptimizeIf(no_condition, environment, Deoptimizer::SOFT);
-}
-
-
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1625,6 +1628,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ imul(left, left, constant);
}
} else {
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(left);
+ }
__ imul(left, ToOperand(right));
}
@@ -1661,7 +1667,8 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
+ int right_operand = ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
switch (instr->op()) {
case Token::BIT_AND:
__ and_(ToRegister(left), right_operand);
@@ -1772,7 +1779,8 @@ void LCodeGen::DoSubI(LSubI* instr) {
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
- __ sub(ToOperand(left), ToInteger32Immediate(right));
+ __ sub(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
} else {
__ sub(ToRegister(left), ToOperand(right));
}
@@ -1842,11 +1850,7 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
AllowDeferredHandleDereference smi_check;
- if (handle->IsHeapObject()) {
- __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
- } else {
- __ Set(reg, Immediate(handle));
- }
+ __ LoadObject(reg, handle);
}
@@ -1985,7 +1989,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
- int32_t offset = ToInteger32(LConstantOperand::cast(right));
+ int32_t offset = ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
__ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
@@ -1993,7 +1998,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
} else {
if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToInteger32Immediate(right));
+ __ add(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
} else {
__ add(ToRegister(left), ToOperand(right));
}
@@ -2010,17 +2016,18 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Label return_left;
Condition condition = (operation == HMathMinMax::kMathMin)
? less_equal
: greater_equal;
if (right->IsConstantOperand()) {
Operand left_op = ToOperand(left);
- Immediate right_imm = ToInteger32Immediate(right);
- __ cmp(left_op, right_imm);
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
+ instr->hydrogen()->representation());
+ __ cmp(left_op, immediate);
__ j(condition, &return_left, Label::kNear);
- __ mov(left_op, right_imm);
+ __ mov(left_op, immediate);
} else {
Register left_reg = ToRegister(left);
Operand right_op = ToOperand(right);
@@ -2388,19 +2395,11 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
- int32_t const_value = ToInteger32(LConstantOperand::cast(right));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmp(ToOperand(left), Immediate(Smi::FromInt(const_value)));
- } else {
- __ cmp(ToOperand(left), Immediate(const_value));
- }
+ __ cmp(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
} else if (left->IsConstantOperand()) {
- int32_t const_value = ToInteger32(LConstantOperand::cast(left));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmp(ToOperand(right), Immediate(Smi::FromInt(const_value)));
- } else {
- __ cmp(ToOperand(right), Immediate(const_value));
- }
+ __ cmp(ToOperand(right),
+ ToImmediate(left, instr->hydrogen()->representation()));
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
} else {
@@ -2426,14 +2425,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
-
- __ cmp(left, instr->hydrogen()->right());
- EmitBranch(instr, equal);
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -3074,11 +3065,11 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
+void LCodeGen::EmitLoadFieldOrConstant(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
@@ -3094,9 +3085,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
+ } else if (lookup.IsConstant()) {
+ Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+ __ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
@@ -3145,7 +3136,7 @@ static bool CompactEmit(SmallMapList* list,
if (map->HasElementsTransition()) return false;
LookupResult lookup(isolate);
map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstantFunction();
+ return lookup.IsField() || lookup.IsConstant();
}
@@ -3177,16 +3168,14 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
__ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
+ EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
} else {
Label next;
bool compact = all_are_compact ? true :
CompactEmit(instr->hydrogen()->types(), name, i, isolate());
__ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
__ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
+ EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
__ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
__ bind(&next);
}
@@ -3736,38 +3725,30 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
factory()->heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
- Label done;
+ Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
- Label negative;
__ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
// return it. We do not need to patch the stack since |input| and
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
+ __ j(zero, &done);
- __ bind(&negative);
-
- Label allocated, slow;
__ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
-
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
instr, instr->context());
-
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
-
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
@@ -3787,9 +3768,8 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ test(input_reg, Operand(input_reg));
Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
- __ test(input_reg, Operand(input_reg));
+ __ j(not_sign, &is_positive, Label::kNear);
+ __ neg(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
@@ -4454,22 +4434,34 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, check->environment());
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
+ if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;
if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsSmi()) {
- __ cmp(ToOperand(instr->length()),
- Immediate(Smi::FromInt(constant_index)));
- } else {
- __ cmp(ToOperand(instr->length()), Immediate(constant_index));
- }
- DeoptimizeIf(below_equal, instr->environment());
+ Immediate immediate =
+ ToImmediate(LConstantOperand::cast(instr->index()),
+ instr->hydrogen()->length()->representation());
+ __ cmp(ToOperand(instr->length()), immediate);
+ Condition condition =
+ instr->hydrogen()->allow_equality() ? below : below_equal;
+ ApplyCheckIf(condition, instr);
} else {
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
- DeoptimizeIf(above_equal, instr->environment());
+ Condition condition =
+ instr->hydrogen()->allow_equality() ? above : above_equal;
+ ApplyCheckIf(condition, instr);
}
}
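
ApplyCheckIf() above keeps statically removable bounds checks as hard traps when --debug-code is on, instead of dropping them outright. A rough standalone sketch of that behaviour with hypothetical helpers:

#include <cassert>
#include <cstdlib>

const bool kDebugCode = true;  // stands in for FLAG_debug_code

// skip_check == true means the optimizer proved the index is in range.
void ApplyBoundsCheck(bool skip_check, int index, int length, bool* deopt) {
  if (skip_check) {
    // Normally elided entirely; with debug code, trap if the proof was wrong.
    if (kDebugCode && index >= length) std::abort();  // __ int3()
    return;
  }
  if (index >= length) *deopt = true;  // DeoptimizeIf(...)
}

int main() {
  bool deopt = false;
  ApplyBoundsCheck(true, 2, 4, &deopt);   // proven in range: nothing emitted
  ApplyBoundsCheck(false, 7, 4, &deopt);  // dynamic check fails
  assert(deopt);
  return 0;
}
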
@@ -4629,10 +4621,11 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
__ mov(operand, ToRegister(instr->value()));
} else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32(operand_value)) {
- Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
- __ mov(operand, Immediate(smi_value));
+ if (IsSmi(operand_value)) {
+ Immediate immediate = ToImmediate(operand_value, Representation::Smi());
+ __ mov(operand, immediate);
} else {
+ ASSERT(!IsInteger32(operand_value));
Handle<Object> handle_value = ToHandle(operand_value);
__ mov(operand, handle_value);
}
@@ -4795,8 +4788,9 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ push(Immediate(Smi::FromInt(const_index)));
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
+ Representation::Smi());
+ __ push(immediate);
} else {
Register index = ToRegister(instr->index());
__ SmiTag(index);
@@ -5802,6 +5796,7 @@ void LCodeGen::DoCheckMapCommon(Register reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
@@ -5992,6 +5987,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
Register reg = ToRegister(instr->temp());
ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
@@ -5999,11 +5995,9 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), instr);
- }
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(reg, prototypes->at(i));
+ DoCheckMapCommon(reg, maps->at(i), instr);
}
}
@@ -6046,6 +6040,23 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
__ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ mov(temp, (size / kPointerSize) - 1);
+ } else {
+ temp = ToRegister(instr->size());
+ __ shr(temp, kPointerSizeLog2);
+ __ dec(temp);
+ }
+ Label loop;
+ __ bind(&loop);
+ __ mov(FieldOperand(result, temp, times_pointer_size, 0),
+ isolate()->factory()->one_pointer_filler_map());
+ __ dec(temp);
+ __ j(not_zero, &loop);
+ }
}
@@ -6306,11 +6317,15 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- if (instr->hydrogen_value()->IsSoftDeoptimize()) {
- SoftDeoptimize(instr->environment());
- } else {
- DeoptimizeIf(no_condition, instr->environment());
- }
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+ DeoptimizeIf(no_condition, instr->environment(), type);
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index eb75225b99..657453231e 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -109,11 +109,8 @@ class LCodeGen BASE_EMBEDDED {
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
- Immediate ToInteger32Immediate(LOperand* op) const {
- return Immediate(ToInteger32(LConstantOperand::cast(op)));
- }
- Immediate ToSmiImmediate(LOperand* op) const {
- return Immediate(Smi::FromInt(ToInteger32(LConstantOperand::cast(op))));
+ Immediate ToImmediate(LOperand* op, const Representation& r) const {
+ return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
}
double ToDouble(LConstantOperand* op) const;
@@ -283,7 +280,7 @@ class LCodeGen BASE_EMBEDDED {
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void SoftDeoptimize(LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
void AddToTranslation(Translation* translation,
LOperand* op,
@@ -298,7 +295,8 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
X87Register ToX87Register(int index) const;
- int ToInteger32(LConstantOperand* op) const;
+ int ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
@@ -371,11 +369,11 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
+ void EmitLoadFieldOrConstant(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name,
+ LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index e884a9dbce..b5bc18bdc9 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -306,10 +306,10 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsSmi(constant_source)) {
- __ Set(dst, cgen_->ToSmiImmediate(constant_source));
- } else if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Set(dst, cgen_->ToImmediate(constant_source, r));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
@@ -339,10 +339,10 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsSmi(constant_source)) {
- __ Set(dst, cgen_->ToSmiImmediate(constant_source));
- } else if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Set(dst, cgen_->ToImmediate(constant_source, r));
} else {
Register tmp = EnsureTempRegister();
__ LoadObject(tmp, cgen_->ToHandle(constant_source));
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index aebe26b785..f03cd72bec 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -754,11 +754,6 @@ LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -837,8 +832,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsSmiOrTagged());
- ASSERT(right->representation().IsSmiOrTagged());
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left_operand = UseFixed(left, edx);
LOperand* right_operand = UseFixed(right, eax);
@@ -1404,9 +1399,10 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1439,7 +1435,9 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
+ } else if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
@@ -1455,7 +1453,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1521,9 +1519,10 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
- if (instr->representation().IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(left->representation().IsSmiOrInteger32());
+ ASSERT(right->representation().Equals(left->representation()));
+
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
@@ -1572,9 +1571,9 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
@@ -1590,16 +1589,17 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1618,14 +1618,15 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
// Check to see if it would be advantageous to use an lea instruction rather
// than an add. This is the case when no overflow check is needed and there
// are multiple uses of the add's inputs, so using a 3-register add will
// preserve all input values for later uses.
bool use_lea = LAddI::UseLea(instr);
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
HValue* right_candidate = instr->BetterRightOperand();
LOperand* right = use_lea
@@ -1652,9 +1653,10 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
@@ -1741,13 +1743,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- return new(zone()) LCmpConstantEqAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsSmiOrTagged());
LOperand* temp = TempRegister();
@@ -2063,8 +2058,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = TempRegister();
+ LUnallocated* temp = NULL;
+ if (!instr->CanOmitPrototypeChecks()) temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
+ if (instr->CanOmitPrototypeChecks()) return result;
return AssignEnvironment(result);
}
@@ -2081,8 +2078,10 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = NULL;
+ if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
LCheckMaps* result = new(zone()) LCheckMaps(value);
+ if (instr->CanOmitMapChecks()) return result;
return AssignEnvironment(result);
}
@@ -2261,8 +2260,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsSmi());
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index a938ee56ba..85c04685ba 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -66,6 +66,7 @@ class LCodeGen;
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
+ V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
@@ -78,20 +79,23 @@ class LCodeGen;
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpConstantEqAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
V(Context) \
+ V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(DoubleToSmi) \
+ V(Drop) \
V(DummyUse) \
V(ElementsKind) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -99,13 +103,13 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
- V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -118,6 +122,7 @@ class LCodeGen;
V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
@@ -180,16 +185,10 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
V(UnknownOSRValue) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop) \
- V(InnerAllocatedObject)
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -424,6 +423,7 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -857,20 +857,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index ef90c10df0..2ab5a25932 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1248,6 +1248,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2798,7 +2799,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
// Check that both are flat ASCII strings.
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
// Interleave bits from both instance types and compare them in one check.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
and_(scratch1, kFlatAsciiStringMask);
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 2b391e0b33..123506fa62 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -816,11 +816,9 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- if (details.type() == CONSTANT_FUNCTION) {
- Handle<HeapObject> constant(
- HeapObject::cast(descriptors->GetValue(descriptor)));
- __ LoadHeapObject(scratch1, constant);
- __ cmp(value_reg, scratch1);
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
@@ -897,7 +895,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- if (details.type() == CONSTANT_FUNCTION) {
+ if (details.type() == CONSTANT) {
ASSERT(value_reg.is(eax));
__ ret(0);
return;
@@ -1428,9 +1426,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadHeapObject(eax, value);
+ __ LoadObject(eax, value);
__ ret(0);
}
@@ -2727,7 +2725,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
Handle<Code> code = CompileCustomCall(object, holder,
Handle<Cell>::null(),
function, Handle<String>::cast(name),
- Code::CONSTANT_FUNCTION);
+ Code::CONSTANT);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index f0f5c302b3..269754b7d6 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -634,7 +634,7 @@ bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
Handle<Object> object,
Code::ExtraICState* extra_ic_state) {
ASSERT(kind_ == Code::CALL_IC);
- if (lookup->type() != CONSTANT_FUNCTION) return false;
+ if (!lookup->IsConstantFunction()) return false;
JSFunction* function = lookup->GetConstantFunction();
if (!function->shared()->HasBuiltinFunctionId()) return false;
@@ -687,7 +687,8 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
return isolate()->stub_cache()->ComputeCallField(
argc, kind_, extra_state, name, object, holder, index);
}
- case CONSTANT_FUNCTION: {
+ case CONSTANT: {
+ if (!lookup->IsConstantFunction()) return Handle<Code>::null();
// Get the constant function and compute the code stub for this
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
@@ -1312,8 +1313,11 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
return isolate()->stub_cache()->ComputeLoadField(
name, receiver, holder,
lookup->GetFieldIndex(), lookup->representation());
- case CONSTANT_FUNCTION: {
- Handle<JSFunction> constant(lookup->GetConstantFunction());
+ case CONSTANT: {
+ Handle<Object> constant(lookup->GetConstant(), isolate());
+ // TODO(2803): Don't compute a stub for cons strings because they cannot
+ // be embedded into code.
+ if (constant->IsConsString()) return Handle<Code>::null();
return isolate()->stub_cache()->ComputeLoadConstant(
name, receiver, holder, constant);
}
@@ -1522,8 +1526,11 @@ Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
return isolate()->stub_cache()->ComputeKeyedLoadField(
name, receiver, holder,
lookup->GetFieldIndex(), lookup->representation());
- case CONSTANT_FUNCTION: {
- Handle<JSFunction> constant(lookup->GetConstantFunction(), isolate());
+ case CONSTANT: {
+ Handle<Object> constant(lookup->GetConstant(), isolate());
+ // TODO(2803): Don't compute a stub for cons strings because they cannot
+ // be embedded into code.
+ if (constant->IsConsString()) return Handle<Code>::null();
return isolate()->stub_cache()->ComputeKeyedLoadConstant(
name, receiver, holder, constant);
}
@@ -1798,7 +1805,7 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
return isolate()->stub_cache()->ComputeStoreInterceptor(
name, receiver, strict_mode);
- case CONSTANT_FUNCTION:
+ case CONSTANT:
break;
case TRANSITION: {
// Explicitly pass in the receiver map since LookupForWrite may have
@@ -2184,7 +2191,7 @@ Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
// fall through.
}
case NORMAL:
- case CONSTANT_FUNCTION:
+ case CONSTANT:
case CALLBACKS:
case INTERCEPTOR:
// Always rewrite to the generic case so that we do not
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 4adcd69d3f..ccd6f280dc 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -95,7 +95,7 @@ void ThreadLocalTop::InitializeInternal() {
simulator_ = NULL;
#endif
js_entry_sp_ = NULL;
- external_callback_ = NULL;
+ external_callback_scope_ = NULL;
current_vm_state_ = EXTERNAL;
try_catch_handler_address_ = NULL;
context_ = NULL;
@@ -1777,7 +1777,6 @@ Isolate::Isolate()
regexp_stack_(NULL),
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
- context_exit_happened_(false),
initialized_from_snapshot_(false),
cpu_profiler_(NULL),
heap_profiler_(NULL),
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 6e5d5c674a..065277093e 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -65,6 +65,7 @@ class CpuProfiler;
class DeoptimizerData;
class Deserializer;
class EmptyStatement;
+class ExternalCallbackScope;
class ExternalReferenceTable;
class Factory;
class FunctionInfoListener;
@@ -279,7 +280,8 @@ class ThreadLocalTop BASE_EMBEDDED {
#endif // USE_SIMULATOR
Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
- Address external_callback_; // the external callback we're currently in
+ // the external callback we're currently in
+ ExternalCallbackScope* external_callback_scope_;
StateTag current_vm_state_;
// Generated code scratch locations.
@@ -1032,11 +1034,11 @@ class Isolate {
static const int kJSRegexpStaticOffsetsVectorSize = 128;
- Address external_callback() {
- return thread_local_top_.external_callback_;
+ ExternalCallbackScope* external_callback_scope() {
+ return thread_local_top_.external_callback_scope_;
}
- void set_external_callback(Address callback) {
- thread_local_top_.external_callback_ = callback;
+ void set_external_callback_scope(ExternalCallbackScope* scope) {
+ thread_local_top_.external_callback_scope_ = scope;
}
StateTag current_vm_state() {
@@ -1057,13 +1059,6 @@ class Isolate {
thread_local_top_.top_lookup_result_ = top;
}
- bool context_exit_happened() {
- return context_exit_happened_;
- }
- void set_context_exit_happened(bool context_exit_happened) {
- context_exit_happened_ = context_exit_happened;
- }
-
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
@@ -1311,10 +1306,6 @@ class Isolate {
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
- // The garbage collector should be a little more aggressive when it knows
- // that a context was recently exited.
- bool context_exit_happened_;
-
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_;
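The two isolate changes above go together: the raw external_callback_ address becomes a pointer to the current ExternalCallbackScope, and the context_exit_happened_ GC heuristic is dropped. The scope class itself is defined elsewhere in this change set; the sketch below only illustrates the RAII scope-chain pattern the new field supports, using made-up names (IsolateLike, Scope) rather than V8's.

    #include <cstdio>

    // Minimal stand-in for the isolate; "current" plays the role of
    // external_callback_scope_.
    struct IsolateLike {
      struct Scope;
      Scope* current = nullptr;

      // RAII scope: pushes itself on construction and pops on destruction,
      // so a sampler can walk the chain of nested external callbacks.
      struct Scope {
        Scope(IsolateLike* isolate, void* callback)
            : isolate_(isolate),
              callback_(callback),
              previous_(isolate->current) {
          isolate_->current = this;
        }
        ~Scope() { isolate_->current = previous_; }
        IsolateLike* isolate_;
        void* callback_;
        Scope* previous_;
      };
    };

    int main() {
      IsolateLike isolate;
      IsolateLike::Scope outer(&isolate, reinterpret_cast<void*>(0x1));
      IsolateLike::Scope inner(&isolate, reinterpret_cast<void*>(0x2));
      // Walk from the innermost callback outwards.
      for (IsolateLike::Scope* s = isolate.current; s != nullptr;
           s = s->previous_) {
        std::printf("in callback %p\n", s->callback_);
      }
      return 0;
    }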
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 3df8d6cc29..e9c3531e38 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -270,7 +270,7 @@ int StackSlotOffset(int index) {
return -(index + 3) * kPointerSize;
} else {
// Incoming parameter. Skip the return address.
- return -(index - 1) * kPointerSize;
+ return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
}
}
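The incoming-parameter branch of StackSlotOffset now spells out the saved frame pointer and return address via kFPOnStackSize and kPCOnStackSize instead of folding them into the index arithmetic. A minimal sketch of the resulting offsets, assuming a 32-bit target where all three constants are 4 bytes:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      const int kFPOnStackSize = 4;  // saved frame pointer
      const int kPCOnStackSize = 4;  // saved return address
      // Incoming parameters have negative indices; slot -1 is the first
      // parameter above the saved fp/return-address pair.
      for (int index = -1; index >= -3; --index) {
        int offset =
            -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
        std::printf("parameter slot %d -> fp + %d\n", index, offset);
      }
      return 0;
    }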
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index bab2e101bc..b998a26dd7 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -1290,6 +1290,7 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
+ shared_info->DisableOptimization("LiveEdit");
}
if (shared_info->debug_info()->IsDebugInfo()) {
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index d26279bb24..520723e342 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -54,6 +54,14 @@ static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
#undef DECLARE_EVENT
+#define PROFILER_LOG(Call) \
+ do { \
+ CpuProfiler* cpu_profiler = isolate_->cpu_profiler(); \
+ if (cpu_profiler->is_profiling()) { \
+ cpu_profiler->Call; \
+ } \
+ } while (false);
+
// ComputeMarker must only be used when SharedFunctionInfo is known.
static const char* ComputeMarker(Code* code) {
switch (code->kind()) {
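The PROFILER_LOG macro added above forwards a logger event to the CpuProfiler only while profiling is active, which lets the Logger methods further down dispatch to the profiler without repeating the is_profiling() check at every call site. A stripped-down sketch of the same forwarding pattern, with illustrative names rather than V8's:

    #include <cstdio>

    class Profiler {
     public:
      bool is_profiling() const { return profiling_; }
      void StartProfiling() { profiling_ = true; }
      void CodeCreateEvent(const char* name) {
        std::printf("profiler saw: %s\n", name);
      }
     private:
      bool profiling_ = false;
    };

    // Expands the call only when the profiler is running.
    #define PROFILER_LOG(profiler, Call)  \
      do {                                \
        if ((profiler)->is_profiling()) { \
          (profiler)->Call;               \
        }                                 \
      } while (false)

    int main() {
      Profiler profiler;
      PROFILER_LOG(&profiler, CodeCreateEvent("dropped: not profiling"));
      profiler.StartProfiling();
      PROFILER_LOG(&profiler, CodeCreateEvent("recorded"));
      return 0;
    }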
@@ -543,7 +551,7 @@ class JitLogger : public CodeEventLogger {
public:
explicit JitLogger(JitCodeEventHandler code_event_handler);
- void CodeMovedEvent(Address from, Address to);
+ void CodeMoveEvent(Address from, Address to);
void CodeDeleteEvent(Address from);
void AddCodeLinePosInfoEvent(
void* jit_handler_data,
@@ -588,7 +596,7 @@ void JitLogger::LogRecordedBuffer(Code* code,
}
-void JitLogger::CodeMovedEvent(Address from, Address to) {
+void JitLogger::CodeMoveEvent(Address from, Address to) {
Code* from_code = Code::cast(HeapObject::FromAddress(from));
JitCodeEvent event;
@@ -1209,7 +1217,7 @@ void Logger::DeleteEventStatic(const char* name, void* object) {
void Logger::CallbackEventInternal(const char* prefix, Name* name,
Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
msg.Append("%s,%s,-2,",
kLogEventsNames[CODE_CREATION_EVENT],
@@ -1235,19 +1243,19 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
void Logger::CallbackEvent(Name* name, Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
+ PROFILER_LOG(CallbackEvent(name, entry_point));
CallbackEventInternal("", name, entry_point);
}
void Logger::GetterCallbackEvent(Name* name, Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
+ PROFILER_LOG(GetterCallbackEvent(name, entry_point));
CallbackEventInternal("get ", name, entry_point);
}
void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
+ PROFILER_LOG(SetterCallbackEvent(name, entry_point));
CallbackEventInternal("set ", name, entry_point);
}
@@ -1268,8 +1276,9 @@ static void AppendCodeCreateHeader(Log::MessageBuilder* msg,
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
- if (!is_logging_code_events()) return;
+ PROFILER_LOG(CodeCreateEvent(tag, code, comment));
+ if (!is_logging_code_events()) return;
JIT_LOG(CodeCreateEvent(tag, code, comment));
LL_LOG(CodeCreateEvent(tag, code, comment));
CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, comment));
@@ -1286,8 +1295,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
Name* name) {
- if (!is_logging_code_events()) return;
+ PROFILER_LOG(CodeCreateEvent(tag, code, name));
+ if (!is_logging_code_events()) return;
JIT_LOG(CodeCreateEvent(tag, code, name));
LL_LOG(CodeCreateEvent(tag, code, name));
CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, name));
@@ -1312,8 +1322,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* name) {
- if (!is_logging_code_events()) return;
+ PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, name));
+ if (!is_logging_code_events()) return;
JIT_LOG(CodeCreateEvent(tag, code, shared, info, name));
LL_LOG(CodeCreateEvent(tag, code, shared, info, name));
CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, shared, info, name));
@@ -1348,8 +1359,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* source, int line) {
- if (!is_logging_code_events()) return;
+ PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
+ if (!is_logging_code_events()) return;
JIT_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
LL_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
@@ -1378,8 +1390,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
int args_count) {
- if (!is_logging_code_events()) return;
+ PROFILER_LOG(CodeCreateEvent(tag, code, args_count));
+ if (!is_logging_code_events()) return;
JIT_LOG(CodeCreateEvent(tag, code, args_count));
LL_LOG(CodeCreateEvent(tag, code, args_count));
CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, args_count));
@@ -1394,6 +1407,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeMovingGCEvent() {
+ PROFILER_LOG(CodeMovingGCEvent());
+
+ if (!is_logging_code_events()) return;
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
LL_LOG(CodeMovingGCEvent());
OS::SignalCodeMovingGC();
@@ -1401,8 +1417,9 @@ void Logger::CodeMovingGCEvent() {
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
- if (!is_logging_code_events()) return;
+ PROFILER_LOG(RegExpCodeCreateEvent(code, source));
+ if (!is_logging_code_events()) return;
JIT_LOG(RegExpCodeCreateEvent(code, source));
LL_LOG(RegExpCodeCreateEvent(code, source));
CODE_ADDRESS_MAP_LOG(RegExpCodeCreateEvent(code, source));
@@ -1419,8 +1436,10 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
void Logger::CodeMoveEvent(Address from, Address to) {
- JIT_LOG(CodeMovedEvent(from, to));
- if (!log_->IsEnabled()) return;
+ PROFILER_LOG(CodeMoveEvent(from, to));
+
+ if (!is_logging_code_events()) return;
+ JIT_LOG(CodeMoveEvent(from, to));
LL_LOG(CodeMoveEvent(from, to));
CODE_ADDRESS_MAP_LOG(CodeMoveEvent(from, to));
MoveEventInternal(CODE_MOVE_EVENT, from, to);
@@ -1428,12 +1447,14 @@ void Logger::CodeMoveEvent(Address from, Address to) {
void Logger::CodeDeleteEvent(Address from) {
+ PROFILER_LOG(CodeDeleteEvent(from));
+
+ if (!is_logging_code_events()) return;
JIT_LOG(CodeDeleteEvent(from));
- if (!log_->IsEnabled()) return;
LL_LOG(CodeDeleteEvent(from));
CODE_ADDRESS_MAP_LOG(CodeDeleteEvent(from));
- if (!log_->IsEnabled() || !FLAG_log_code) return;
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[CODE_DELETE_EVENT]);
msg.AppendAddress(from);
@@ -1498,6 +1519,9 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
+ PROFILER_LOG(SharedFunctionInfoMoveEvent(from, to));
+
+ if (!is_logging_code_events()) return;
MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
}
@@ -1505,7 +1529,7 @@ void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
void Logger::MoveEventInternal(LogEventsAndTags event,
Address from,
Address to) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 95f673c2a4..59492e1643 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -73,8 +73,8 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
- encountered_weak_collections_(NULL) { }
-
+ encountered_weak_collections_(NULL),
+ code_to_deoptimize_(NULL) { }
#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
@@ -492,7 +492,7 @@ void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
obj = code_iterator.Next()) {
Code* code = Code::cast(obj);
if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
- if (code->marked_for_deoptimization()) continue;
+ if (WillBeDeoptimized(code)) continue;
code->VerifyEmbeddedMapsDependency();
}
}
@@ -945,14 +945,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
}
-class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
- public:
- virtual bool TakeFunction(JSFunction* function) {
- return function->code()->marked_for_deoptimization();
- }
-};
-
-
void MarkCompactCollector::Finish() {
#ifdef DEBUG
ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
@@ -964,8 +956,23 @@ void MarkCompactCollector::Finish() {
// objects (empty string, illegal builtin).
isolate()->stub_cache()->Clear();
- DeoptimizeMarkedCodeFilter filter;
- Deoptimizer::DeoptimizeAllFunctionsWith(isolate(), &filter);
+ if (code_to_deoptimize_ != Smi::FromInt(0)) {
+ // Convert the linked list of Code objects into a ZoneList.
+ Zone zone(isolate());
+ ZoneList<Code*> codes(4, &zone);
+
+ Object* list = code_to_deoptimize_;
+ while (list->IsCode()) {
+ Code* code = Code::cast(list);
+ list = code->code_to_deoptimize_link();
+ codes.Add(code, &zone);
+ // Destroy the link and don't ever try to deoptimize this code again.
+ code->set_code_to_deoptimize_link(Smi::FromInt(0));
+ }
+ code_to_deoptimize_ = Smi::FromInt(0);
+
+ Deoptimizer::DeoptimizeCodeList(isolate(), &codes);
+ }
}
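With DeoptimizeMarkedCodeFilter gone, the collector threads every Code object that must be deoptimized into an intrusive singly linked list through its code_to_deoptimize_link field (which reuses the type-feedback slot, see the objects-inl.h hunk below) and drains that list into a ZoneList here in Finish(). A simplified sketch of the same link-and-drain scheme, using a plain bool and raw pointers where V8 distinguishes the undefined and Smi-zero sentinel values:

    #include <cstdio>
    #include <vector>

    struct Code {
      int id = 0;
      bool linked = false;         // V8 encodes this via the link field's sentinels
      Code* deopt_link = nullptr;
    };

    struct Collector {
      Code* code_to_deoptimize = nullptr;  // head of the intrusive list

      // Mirrors ClearAndDeoptimizeDependentCode: link each code object at most once.
      void MarkForDeoptimization(Code* code) {
        if (code->linked) return;  // WillBeDeoptimized()
        code->linked = true;
        code->deopt_link = code_to_deoptimize;
        code_to_deoptimize = code;
      }

      // Mirrors Finish(): unlink everything into a flat list for the deoptimizer.
      std::vector<Code*> Drain() {
        std::vector<Code*> codes;
        for (Code* c = code_to_deoptimize; c != nullptr;) {
          Code* next = c->deopt_link;
          c->deopt_link = nullptr;  // destroy the link
          codes.push_back(c);
          c = next;
        }
        code_to_deoptimize = nullptr;
        return codes;
      }
    };

    int main() {
      Collector collector;
      Code a, b;
      a.id = 1;
      b.id = 2;
      collector.MarkForDeoptimization(&a);
      collector.MarkForDeoptimization(&b);
      collector.MarkForDeoptimization(&a);  // ignored: already linked
      for (Code* c : collector.Drain()) std::printf("deoptimize code %d\n", c->id);
      return 0;
    }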
@@ -2396,7 +2403,6 @@ void MarkCompactCollector::AfterMarking() {
string_table->ElementsRemoved(v.PointersRemoved());
heap()->external_string_table_.Iterate(&v);
heap()->external_string_table_.CleanUp();
- heap()->error_object_list_.RemoveUnmarked(heap());
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer;
@@ -2611,8 +2617,17 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
// and ClearAndDeoptimizeDependentCode shouldn't be called.
ASSERT(entries->is_code_at(i));
Code* code = entries->code_at(i);
- if (IsMarked(code) && !code->marked_for_deoptimization()) {
- code->set_marked_for_deoptimization(true);
+
+ if (IsMarked(code) && !WillBeDeoptimized(code)) {
+ // Insert the code into the code_to_deoptimize linked list.
+ Object* next = code_to_deoptimize_;
+ if (next != Smi::FromInt(0)) {
+ // Record the slot so that it is updated.
+ Object** slot = code->code_to_deoptimize_link_slot();
+ RecordSlot(slot, slot, next);
+ }
+ code->set_code_to_deoptimize_link(next);
+ code_to_deoptimize_ = code;
}
entries->clear_at(i);
}
@@ -2633,7 +2648,7 @@ void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
Object* obj = entries->object_at(i);
ASSERT(obj->IsCode() || IsMarked(obj));
if (IsMarked(obj) &&
- (!obj->IsCode() || !Code::cast(obj)->marked_for_deoptimization())) {
+ (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
if (new_number_of_entries + group_number_of_entries != i) {
entries->set_object_at(
new_number_of_entries + group_number_of_entries, obj);
@@ -2723,7 +2738,13 @@ void MarkCompactCollector::MigrateObject(Address dst,
int size,
AllocationSpace dest) {
HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
+ // TODO(hpayer): Replace that check with an assert.
+ CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
+ if (dest == OLD_POINTER_SPACE) {
+ // TODO(hpayer): Replace this check with an assert.
+ HeapObject* heap_object = HeapObject::FromAddress(src);
+ CHECK(heap_object->IsExternalString() ||
+ heap_->TargetSpace(heap_object) == heap_->old_pointer_space());
Address src_slot = src;
Address dst_slot = dst;
ASSERT(IsAligned(size, kPointerSize));
@@ -2769,6 +2790,13 @@ void MarkCompactCollector::MigrateObject(Address dst,
Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
} else {
ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
+ // Objects in old data space can just be moved by compaction to a different
+ // page in old data space.
+ // TODO(hpayer): Replace the following check with an assert.
+ CHECK(!heap_->old_data_space()->Contains(src) ||
+ (heap_->old_data_space()->Contains(dst) &&
+ heap_->TargetSpace(HeapObject::FromAddress(src)) ==
+ heap_->old_data_space()));
heap()->MoveBlock(dst, src, size);
}
Memory::Address_at(src) = dst;
@@ -2895,37 +2923,24 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
int object_size) {
- Object* result;
+ // TODO(hpayer): Replace that check with an assert.
+ CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
- if (object_size > Page::kMaxNonCodeHeapObjectSize) {
- MaybeObject* maybe_result =
- heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
- object_size,
- LO_SPACE);
- heap()->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
- } else {
- OldSpace* target_space = heap()->TargetSpace(object);
-
- ASSERT(target_space == heap()->old_pointer_space() ||
- target_space == heap()->old_data_space());
- MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
- object_size,
- target_space->identity());
- heap()->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
- return true;
- }
+ OldSpace* target_space = heap()->TargetSpace(object);
+
+ ASSERT(target_space == heap()->old_pointer_space() ||
+ target_space == heap()->old_data_space());
+ Object* result;
+ MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
+ if (maybe_result->ToObject(&result)) {
+ HeapObject* target = HeapObject::cast(result);
+ MigrateObject(target->address(),
+ object->address(),
+ object_size,
+ target_space->identity());
+ heap()->mark_compact_collector()->tracer()->
+ increment_promoted_objects_size(object_size);
+ return true;
}
return false;
@@ -3271,6 +3286,16 @@ void MarkCompactCollector::InvalidateCode(Code* code) {
}
+// Return true if the given code is deoptimized or will be deoptimized.
+bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
+ // We assume the code_to_deoptimize_link is initialized to undefined.
+ // If it is 0, or refers to another Code object, then this code
+ // is already linked, or was already linked into the list.
+ return code->code_to_deoptimize_link() != heap()->undefined_value()
+ || code->marked_for_deoptimization();
+}
+
+
bool MarkCompactCollector::MarkInvalidatedCode() {
bool code_marked = false;
@@ -3454,8 +3479,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
}
- // Update pointer from the native contexts list.
+ // Update the heads of the native contexts list and the code to deoptimize list.
updating_visitor.VisitPointer(heap_->native_contexts_list_address());
+ updating_visitor.VisitPointer(&code_to_deoptimize_);
heap_->string_table()->Iterate(&updating_visitor);
@@ -3463,9 +3489,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
heap_->UpdateReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
- // Update pointers in the new error object list.
- heap_->error_object_list()->UpdateReferences();
-
if (!FLAG_watch_ic_patching) {
// Update JSFunction pointers from the runtime profiler.
heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 4063bde2d3..3c4dfb688f 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -743,6 +743,7 @@ class MarkCompactCollector {
~MarkCompactCollector();
bool MarkInvalidatedCode();
+ bool WillBeDeoptimized(Code* code);
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);
@@ -946,6 +947,7 @@ class MarkCompactCollector {
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
Object* encountered_weak_collections_;
+ Object* code_to_deoptimize_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 761b311371..8b647dd205 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -1145,24 +1145,29 @@ function captureStackTrace(obj, cons_opt) {
}
var error_string = FormatErrorString(obj);
- // Note that 'obj' and 'this' maybe different when called on objects that
- // have the error object on its prototype chain. The getter replaces itself
- // with a data property as soon as the stack trace has been formatted.
- // The getter must not change the object layout as it may be called after GC.
+ // The holder of this getter ('obj') may not be the receiver ('this').
+ // When this getter is called the first time, we use the context values to
+ // format a stack trace string and turn this accessor pair into a data
+ // property (on the holder).
var getter = function() {
- if (IS_STRING(stack)) return stack;
// Stack is still a raw array waiting to be formatted.
- stack = FormatStackTrace(error_string, GetStackFrames(stack));
- // Release context value.
- error_string = void 0;
- return stack;
+ var result = FormatStackTrace(error_string, GetStackFrames(stack));
+ // Turn this accessor into a data property.
+ %DefineOrRedefineDataProperty(obj, 'stack', result, NONE);
+ // Release context values.
+ stack = error_string = void 0;
+ return result;
};
- %MarkOneShotGetter(getter);
- // The 'stack' property of the receiver is set as data property. If
- // the receiver is the same as holder, this accessor pair is replaced.
+ // Set the 'stack' property on the receiver. If the receiver is the same as
+ // the holder of this setter, the accessor pair is turned into a data property.
var setter = function(v) {
+ // Set data property on the receiver (not necessarily holder).
%DefineOrRedefineDataProperty(this, 'stack', v, NONE);
+ if (this === obj) {
+ // Release context values if holder is the same as the receiver.
+ stack = error_string = void 0;
+ }
};
%DefineOrRedefineAccessorProperty(obj, 'stack', getter, setter, DONT_ENUM);
@@ -1300,38 +1305,36 @@ InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
function SetUpStackOverflowBoilerplate() {
var boilerplate = MakeRangeError('stack_overflow', []);
- // The raw stack trace is stored as hidden property of the copy of this
- // boilerplate error object. Note that the receiver 'this' may not be that
- // error object copy, but can be found on the prototype chain of 'this'.
- // When the stack trace is formatted, this accessor property is replaced by
- // a data property.
var error_string = boilerplate.name + ": " + boilerplate.message;
- // The getter must not change the object layout as it may be called after GC.
- function getter() {
+ // The raw stack trace is stored as a hidden property on the holder of this
+ // getter, which may not be the same as the receiver. Find the holder to
+ // retrieve the raw stack trace and then turn this accessor pair into a
+ // data property.
+ var getter = function() {
var holder = this;
while (!IS_ERROR(holder)) {
holder = %GetPrototype(holder);
- if (holder == null) return MakeSyntaxError('illegal_access', []);
+ if (IS_NULL(holder)) return MakeSyntaxError('illegal_access', []);
}
- var stack = %GetOverflowedStackTrace(holder);
- if (IS_STRING(stack)) return stack;
- if (IS_ARRAY(stack)) {
- var result = FormatStackTrace(error_string, GetStackFrames(stack));
- %SetOverflowedStackTrace(holder, result);
- return result;
- }
- return void 0;
- }
- %MarkOneShotGetter(getter);
+ var stack = %GetAndClearOverflowedStackTrace(holder);
+ // We may not have captured any stack trace.
+ if (IS_UNDEFINED(stack)) return stack;
+
+ var result = FormatStackTrace(error_string, GetStackFrames(stack));
+ // Replace this accessor with a data property.
+ %DefineOrRedefineDataProperty(holder, 'stack', result, NONE);
+ return result;
+ };
- // The 'stack' property of the receiver is set as data property. If
- // the receiver is the same as holder, this accessor pair is replaced.
- function setter(v) {
+ // Set the 'stack' property on the receiver. If the receiver is the same as
+ // the holder of this setter, the accessor pair is turned into a data property.
+ var setter = function(v) {
%DefineOrRedefineDataProperty(this, 'stack', v, NONE);
- // Release the stack trace that is stored as hidden property, if exists.
- %SetOverflowedStackTrace(this, void 0);
- }
+ // Tentatively clear the hidden property. If the receiver is the same as
+ // holder, we release the raw stack trace this way.
+ %GetAndClearOverflowedStackTrace(this);
+ };
%DefineOrRedefineAccessorProperty(
boilerplate, 'stack', getter, setter, DONT_ENUM);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index f984b3a7b7..0e1b224ead 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -39,6 +39,16 @@ namespace v8 {
namespace internal {
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -287,16 +297,6 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
- Register scratch1, Register scratch2,
- Label* not_a_heap_number) {
- __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
- __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
- __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
-}
-
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
Isolate* isolate = masm->isolate();
@@ -321,24 +321,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in a0.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(a0, &check_heap_number);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- __ bind(&check_heap_number);
- EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- __ bind(&call_builtin);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 840462e43f..57d3880ede 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -43,22 +43,8 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- DisallowHeapAllocation nha;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // Get the optimized code.
- Code* code = function->code();
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
-
- // The optimized code is going to be patched, so we cannot use it any more.
- function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
-
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
@@ -89,30 +75,6 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
prev_call_address = call_address;
#endif
}
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
- }
}
@@ -648,6 +610,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
count() * table_entry_size_);
}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 65b4a575f7..5cf1d59e49 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -271,6 +271,7 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
return !is_aborted();
}
@@ -410,11 +411,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Abort("EmitLoadRegister: Unsupported double immediate.");
} else {
ASSERT(r.IsTagged());
- if (literal->IsSmi()) {
- __ li(scratch, Operand(literal));
- } else {
- __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
- }
+ __ LoadObject(scratch, literal);
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
@@ -480,9 +477,18 @@ bool LCodeGen::IsSmi(LConstantOperand* op) const {
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
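When the requested representation is Smi, ToRepresentation returns the tagged bit pattern rather than the raw integer: on this 32-bit port a Smi is the value shifted left by the single tag bit with a zero tag, so the reinterpreted pointer is numerically 2 * value. A small illustration under those assumptions:

    #include <cstdint>
    #include <cstdio>

    // Tagged form of a small integer on a 32-bit heap with a 1-bit, all-zero
    // Smi tag: equivalent to (value << 1).
    int32_t TagAsSmi(int32_t value) { return value * 2; }

    int main() {
      std::printf("Smi(5)  -> 0x%08x\n", static_cast<uint32_t>(TagAsSmi(5)));
      std::printf("Smi(-3) -> 0x%08x\n", static_cast<uint32_t>(TagAsSmi(-3)));
      return 0;
    }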
@@ -504,7 +510,10 @@ Operand LCodeGen::ToOperand(LOperand* op) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
ASSERT(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
@@ -789,14 +798,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::SoftDeoptimize(LEnvironment* environment,
- Register src1,
- const Operand& src2) {
- ASSERT(!info()->IsStub());
- DeoptimizeIf(al, environment, Deoptimizer::SOFT, src1, src2);
-}
-
-
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1378,7 +1379,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (right_op->IsConstantOperand() && !can_overflow) {
// Use optimized code for specific constants.
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+ int32_t constant = ToRepresentation(
+ LConstantOperand::cast(right_op),
+ instr->hydrogen()->right()->representation());
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
@@ -1445,13 +1448,25 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (can_overflow) {
// hi:lo = left * right.
- __ mult(left, right);
- __ mfhi(scratch);
- __ mflo(result);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ mult(result, right);
+ __ mfhi(scratch);
+ __ mflo(result);
+ } else {
+ __ mult(left, right);
+ __ mfhi(scratch);
+ __ mflo(result);
+ }
__ sra(at, result, 31);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
} else {
- __ Mul(result, left, right);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ } else {
+ __ Mul(result, left, right);
+ }
}
if (bailout_on_minus_zero) {
@@ -1635,12 +1650,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- __ li(ToRegister(instr->result()), Operand(value));
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
+ __ LoadObject(ToRegister(instr->result()), value);
}
@@ -1819,7 +1829,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* right = instr->right();
HMathMinMax::Operation operation = instr->hydrogen()->operation();
Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- if (instr->hydrogen()->representation().IsInteger32()) {
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Register left_reg = ToRegister(left);
Operand right_op = (right->IsRegister() || right->IsConstantOperand())
? ToOperand(right)
@@ -2239,13 +2249,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
-
- EmitBranch(instr, eq, left, Operand(instr->hydrogen()->right()));
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Register temp2,
@@ -2900,9 +2903,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
+ } else if (lookup.IsConstant()) {
+ Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+ __ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
@@ -4186,9 +4189,25 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::ApplyCheckIf(Condition cc,
+ LBoundsCheck* check,
+ Register src1,
+ const Operand& src2) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ Branch(&done, NegateCondition(cc), src1, src2);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, check->environment(), src1, src2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->hydrogen()->skip_check()) return;
+ Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
@@ -4197,13 +4216,13 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
} else {
__ li(at, Operand(constant_index));
}
- DeoptimizeIf(hs,
- instr->environment(),
+ ApplyCheckIf(condition,
+ instr,
at,
Operand(ToRegister(instr->length())));
} else {
- DeoptimizeIf(hs,
- instr->environment(),
+ ApplyCheckIf(condition,
+ instr,
ToRegister(instr->index()),
Operand(ToRegister(instr->length())));
}
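ApplyCheckIf deoptimizes on the given condition unless the bounds check was marked skippable by Hydrogen, in which case --debug-code keeps a trap behind the negated condition so an eliminated check that would have failed stops execution instead of being dropped silently. The same control flow in plain C++, with assumed helper names:

    #include <cstdio>
    #include <cstdlib>

    bool FLAG_debug_code = true;

    void Deoptimize() { std::printf("deopt\n"); }

    // index/length stand in for the register operands; skip_check stands in
    // for HBoundsCheck::skip_check().
    void ApplyBoundsCheck(int index, int length, bool skip_check) {
      bool out_of_bounds = static_cast<unsigned>(index) >=
                           static_cast<unsigned>(length);
      if (FLAG_debug_code && skip_check) {
        // The check was eliminated; in debug code, verify it really holds.
        if (out_of_bounds) {
          std::printf("eliminated bounds check failed\n");
          std::abort();
        }
      } else if (out_of_bounds) {
        Deoptimize();
      }
    }

    int main() {
      ApplyBoundsCheck(3, 10, /*skip_check=*/true);    // fine
      ApplyBoundsCheck(12, 10, /*skip_check=*/false);  // deopts
      return 0;
    }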
@@ -5194,6 +5213,7 @@ void LCodeGen::DoCheckMapCommon(Register map_reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
@@ -5262,6 +5282,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+
Register prototype_reg = ToRegister(instr->temp());
Register map_reg = ToRegister(instr->temp2());
@@ -5270,12 +5292,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(prototype_reg, prototypes->at(i));
- __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
- }
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(prototype_reg, prototypes->at(i));
+ __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+ DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
}
}
@@ -5323,6 +5343,25 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
__ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ li(scratch, Operand(size));
+ } else {
+ scratch = ToRegister(instr->size());
+ }
+ __ Subu(scratch, scratch, Operand(kPointerSize));
+ __ Subu(result, result, Operand(kHeapObjectTag));
+ Label loop;
+ __ bind(&loop);
+ __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ Addu(at, result, Operand(scratch));
+ __ sw(scratch2, MemOperand(at));
+ __ Subu(scratch, scratch, Operand(kPointerSize));
+ __ Branch(&loop, ge, scratch, Operand(zero_reg));
+ __ Addu(result, result, Operand(kHeapObjectTag));
+ }
}
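The MustPrefillWithFiller block emits a loop that walks the freshly allocated (still untagged) object from its last word down to its first, storing the one-pointer filler map in every slot so the heap stays iterable until real initialization happens. What the emitted loop computes, approximated in C++ with assumed parameters:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    void PrefillWithFiller(uint32_t* base, int size_in_bytes,
                           uint32_t filler_map) {
      const int kPointerSize = 4;
      // Same order as the emitted loop: start at size - kPointerSize and walk
      // down to offset 0, storing the filler map at each word.
      for (int offset = size_in_bytes - kPointerSize; offset >= 0;
           offset -= kPointerSize) {
        base[offset / kPointerSize] = filler_map;
      }
    }

    int main() {
      std::vector<uint32_t> object(4, 0);
      PrefillWithFiller(object.data(), 16, 0xf1f1f1f1u);
      for (uint32_t word : object) std::printf("0x%08x\n", word);
      return 0;
    }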
@@ -5615,12 +5654,12 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
padding_size -= Assembler::kInstrSize;
}
}
- last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5629,11 +5668,15 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- if (instr->hydrogen_value()->IsSoftDeoptimize()) {
- SoftDeoptimize(instr->environment(), zero_reg, Operand(zero_reg));
- } else {
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
}
+ DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}
@@ -5676,6 +5719,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
StackCheckStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5687,6 +5731,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
EnsureSpaceForLazyDeopt();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 1cba8cf468..a485b67db9 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -114,7 +114,8 @@ class LCodeGen BASE_EMBEDDED {
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
- int ToInteger32(LConstantOperand* op) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
@@ -284,9 +285,10 @@ class LCodeGen BASE_EMBEDDED {
LEnvironment* environment,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void SoftDeoptimize(LEnvironment* environment,
- Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
+ void ApplyCheckIf(Condition cc,
+ LBoundsCheck* check,
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
void AddToTranslation(Translation* translation,
LOperand* op,
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index 9705e1f41a..771b22862e 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -251,10 +251,10 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsSmi(constant_source)) {
- __ li(dst, Operand(cgen_->ToSmi(constant_source)));
- } else if (cgen_->IsInteger32(constant_source)) {
- __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
@@ -265,11 +265,11 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsSmi(constant_source)) {
- __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
- } else if (cgen_->IsInteger32(constant_source)) {
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
__ li(kLithiumScratchReg,
- Operand(cgen_->ToInteger32(constant_source)));
+ Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(kLithiumScratchReg,
cgen_->ToHandle(constant_source));
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index c64533cdfc..b03cea44cb 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -706,11 +706,6 @@ LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -788,8 +783,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsSmiOrTagged());
- ASSERT(right->representation().IsSmiOrTagged());
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, a1);
LOperand* right_operand = UseFixed(right, a0);
LArithmeticT* result =
@@ -1320,17 +1315,17 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
@@ -1352,7 +1347,9 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
+ } else if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LDivI* div = new(zone()) LDivI(dividend, divisor);
@@ -1419,9 +1416,9 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
- if (instr->representation().IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
@@ -1449,7 +1446,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsSmiOrTagged()) {
+ } else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
@@ -1465,9 +1462,9 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left;
LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
@@ -1510,9 +1507,9 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1539,9 +1536,9 @@ LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
@@ -1562,7 +1559,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1571,9 +1568,9 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
@@ -1652,13 +1649,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- return new(zone()) LCmpConstantEqAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -1956,9 +1946,14 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
+ LUnallocated* temp1 = NULL;
+ LOperand* temp2 = NULL;
+ if (!instr->CanOmitPrototypeChecks()) {
+ temp1 = TempRegister();
+ temp2 = TempRegister();
+ }
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+ if (instr->CanOmitPrototypeChecks()) return result;
return AssignEnvironment(result);
}
@@ -1970,8 +1965,10 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = NULL;
+ if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckMaps(value);
+ if (instr->CanOmitMapChecks()) return result;
return AssignEnvironment(result);
}
@@ -2128,8 +2125,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsSmi());
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 83a37c6230..2b55906352 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -40,12 +40,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
@@ -72,6 +66,7 @@ class LCodeGen;
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
+ V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
@@ -79,7 +74,6 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
@@ -89,14 +83,18 @@ class LCodeGen;
V(ConstantS) \
V(ConstantT) \
V(Context) \
+ V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(DoubleToSmi) \
+ V(Drop) \
V(DummyUse) \
V(ElementsKind) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -104,13 +102,13 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
- V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -123,6 +121,7 @@ class LCodeGen;
V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
@@ -185,17 +184,10 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
V(UnknownOSRValue) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop) \
- V(InnerAllocatedObject)
-
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const { return LInstruction::k##type; } \
@@ -431,6 +423,7 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -888,20 +881,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 8a44185ed7..ea08a552be 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -2882,6 +2882,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4968,9 +4969,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register scratch1,
Register scratch2,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
andi(scratch1, first, kFlatAsciiStringMask);
Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
@@ -4982,9 +4984,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch, type, Operand(kFlatAsciiStringMask));
Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}
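Both helpers now build the expected tag explicitly from kStringTag | kOneByteStringTag | kSeqStringTag, so the value compared against only contains bits that kFlatAsciiStringMask actually keeps. The check itself is a single mask-and-compare; a sketch with assumed bit values (the real constants live in the V8 headers):

    #include <cstdio>

    // Assumed bit layout, for illustration only.
    constexpr int kIsNotStringMask          = 0x80;
    constexpr int kStringRepresentationMask = 0x03;
    constexpr int kStringEncodingMask       = 0x04;
    constexpr int kStringTag        = 0x00;
    constexpr int kSeqStringTag     = 0x00;
    constexpr int kOneByteStringTag = 0x04;

    bool IsFlatOneByteString(int instance_type) {
      const int kFlatAsciiStringMask =
          kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
      const int kFlatAsciiStringTag =
          kStringTag | kOneByteStringTag | kSeqStringTag;
      return (instance_type & kFlatAsciiStringMask) == kFlatAsciiStringTag;
    }

    int main() {
      std::printf("%d\n", IsFlatOneByteString(0x04));  // sequential one-byte: 1
      std::printf("%d\n", IsFlatOneByteString(0x05));  // non-sequential rep: 0
      return 0;
    }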
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 89d8e68d5e..c4b1ee57a7 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -470,10 +470,9 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- if (details.type() == CONSTANT_FUNCTION) {
- Handle<HeapObject> constant(
- HeapObject::cast(descriptors->GetValue(descriptor)));
- __ LoadHeapObject(scratch1, constant);
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ LoadObject(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
@@ -532,7 +531,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- if (details.type() == CONSTANT_FUNCTION) {
+ if (details.type() == CONSTANT) {
ASSERT(value_reg.is(a0));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
@@ -1404,9 +1403,9 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadHeapObject(v0, value);
+ __ LoadObject(v0, value);
__ Ret();
}
@@ -2709,7 +2708,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
Handle<Code> code = CompileCustomCall(object, holder,
Handle<Cell>::null(),
function, Handle<String>::cast(name),
- Code::CONSTANT_FUNCTION);
+ Code::CONSTANT);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 28ef24de0f..3b360bb5d7 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -173,7 +173,7 @@ PropertyKind.Indexed = 2;
var PropertyType = {};
PropertyType.Normal = 0;
PropertyType.Field = 1;
-PropertyType.ConstantFunction = 2;
+PropertyType.Constant = 2;
PropertyType.Callbacks = 3;
PropertyType.Handler = 4;
PropertyType.Interceptor = 5;
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index c12a12a6bc..3189d84030 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -2352,8 +2352,8 @@ int DescriptorArray::GetFieldIndex(int descriptor_number) {
}
-JSFunction* DescriptorArray::GetConstantFunction(int descriptor_number) {
- return JSFunction::cast(GetValue(descriptor_number));
+Object* DescriptorArray::GetConstant(int descriptor_number) {
+ return GetValue(descriptor_number);
}
@@ -3648,7 +3648,7 @@ bool Map::CanBeDeprecated() {
details.representation().IsHeapObject()) {
return true;
}
- if (FLAG_track_fields && details.type() == CONSTANT_FUNCTION) {
+ if (FLAG_track_fields && details.type() == CONSTANT) {
return true;
}
}
@@ -3669,6 +3669,12 @@ bool Map::CanOmitPrototypeChecks() {
}
+bool Map::CanOmitMapChecks() {
+ return !HasTransitionArray() && !is_dictionary_map() &&
+ FLAG_omit_map_checks_for_leaf_maps;
+}
+
+
int DependentCode::number_of_entries(DependencyGroup group) {
if (length() == 0) return 0;
return Smi::cast(get(group))->value();
@@ -5233,15 +5239,22 @@ void Code::set_stub_info(int value) {
}
-void Code::set_deoptimizing_functions(Object* value) {
+Object* Code::code_to_deoptimize_link() {
+ // Optimized code should not have type feedback.
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ return READ_FIELD(this, kTypeFeedbackInfoOffset);
+}
+
+
+void Code::set_code_to_deoptimize_link(Object* value) {
ASSERT(kind() == OPTIMIZED_FUNCTION);
WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
}
-Object* Code::deoptimizing_functions() {
+Object** Code::code_to_deoptimize_link_slot() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
- return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+ return HeapObject::RawField(this, kTypeFeedbackInfoOffset);
}
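
The accessors above reuse the kTypeFeedbackInfoOffset slot of optimized Code objects as a link field: code scheduled for deoptimization can be chained into a list through a slot it already owns, with no side table. Below is a minimal standalone sketch of that intrusive-link idea; FakeCode and PushForDeoptimization are simplified stand-ins for illustration, not V8's actual types.

    #include <cassert>
    #include <cstddef>

    // Simplified stand-in for an object that reuses one of its own fields
    // as the "next" pointer of a deoptimization work list.
    struct FakeCode {
      void* type_feedback_slot;   // normally type feedback; reused as the link
      bool marked_for_deopt;
      FakeCode() : type_feedback_slot(NULL), marked_for_deopt(false) {}

      FakeCode* deopt_link() const {
        return static_cast<FakeCode*>(type_feedback_slot);
      }
      void set_deopt_link(FakeCode* next) { type_feedback_slot = next; }
    };

    // Pushes 'code' onto the intrusive list rooted at *head.
    static void PushForDeoptimization(FakeCode** head, FakeCode* code) {
      assert(!code->marked_for_deopt);
      code->marked_for_deopt = true;
      code->set_deopt_link(*head);
      *head = code;
    }
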
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 91b1c2ec43..2327cbae8a 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -294,9 +294,9 @@ void JSObject::PrintProperties(FILE* out) {
PrintF(out, " (field at offset %d)\n", index);
break;
}
- case CONSTANT_FUNCTION:
- descs->GetConstantFunction(i)->ShortPrint(out);
- PrintF(out, " (constant function)\n");
+ case CONSTANT:
+ descs->GetConstant(i)->ShortPrint(out);
+ PrintF(out, " (constant)\n");
break;
case CALLBACKS:
descs->GetCallbacksObject(i)->ShortPrint(out);
@@ -450,8 +450,8 @@ void JSObject::PrintTransitions(FILE* out) {
PrintF(out, " (transition to field)\n");
break;
}
- case CONSTANT_FUNCTION:
- PrintF(out, " (transition to constant function)\n");
+ case CONSTANT:
+ PrintF(out, " (transition to constant)\n");
break;
case CALLBACKS:
PrintF(out, " (transition to callback)\n");
@@ -1262,8 +1262,8 @@ void TransitionArray::PrintTransitions(FILE* out) {
PrintF(out, " (transition to field)\n");
break;
}
- case CONSTANT_FUNCTION:
- PrintF(out, " (transition to constant function)\n");
+ case CONSTANT:
+ PrintF(out, " (transition to constant)\n");
break;
case CALLBACKS:
PrintF(out, " (transition to callback)\n");
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 1967b1324d..d2ffb9c3c8 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -518,7 +518,7 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
}
case NORMAL:
case FIELD:
- case CONSTANT_FUNCTION: {
+ case CONSTANT: {
// Search ALL_CAN_READ accessors in prototype chain.
LookupResult r(GetIsolate());
result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
@@ -579,7 +579,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
case NORMAL:
case FIELD:
- case CONSTANT_FUNCTION: {
+ case CONSTANT: {
if (!continue_search) break;
// Search ALL_CAN_READ accessors in prototype chain.
LookupResult r(GetIsolate());
@@ -874,8 +874,8 @@ MaybeObject* Object::GetProperty(Object* receiver,
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
}
- case CONSTANT_FUNCTION:
- return result->GetConstantFunction();
+ case CONSTANT:
+ return result->GetConstant();
case CALLBACKS:
return result->holder()->GetPropertyWithCallback(
receiver, result->GetCallbackObject(), name);
@@ -1927,12 +1927,12 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
}
-MaybeObject* JSObject::AddConstantFunctionProperty(
+MaybeObject* JSObject::AddConstantProperty(
Name* name,
- JSFunction* function,
+ Object* constant,
PropertyAttributes attributes) {
- // Allocate new instance descriptors with (name, function) added
- ConstantFunctionDescriptor d(name, function, attributes);
+ // Allocate new instance descriptors with (name, constant) added
+ ConstantDescriptor d(name, constant, attributes);
TransitionFlag flag =
// Do not add transitions to global objects.
@@ -1948,7 +1948,7 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
set_map(new_map);
- return function;
+ return constant;
}
@@ -2000,7 +2000,8 @@ MaybeObject* JSObject::AddProperty(Name* name,
StrictModeFlag strict_mode,
JSReceiver::StoreFromKeyed store_mode,
ExtensibilityCheck extensibility_check,
- ValueType value_type) {
+ ValueType value_type,
+ StoreMode mode) {
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
@@ -2022,10 +2023,12 @@ MaybeObject* JSObject::AddProperty(Name* name,
// Ensure the descriptor array does not get too big.
if (map_of_this->NumberOfOwnDescriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
+ // TODO(verwaest): Support other constants.
+ // if (mode == ALLOW_AS_CONSTANT &&
+ // !value->IsTheHole() &&
+ // !value->IsConsString()) {
if (value->IsJSFunction()) {
- result = AddConstantFunctionProperty(name,
- JSFunction::cast(value),
- attributes);
+ result = AddConstantProperty(name, value, attributes);
} else {
result = AddFastProperty(
name, value, attributes, store_mode, value_type);
@@ -2095,7 +2098,8 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check) {
+ ExtensibilityCheck extensibility_check,
+ StoreMode mode) {
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
@@ -2112,7 +2116,8 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
if (done) return result_object;
// Add a new real property.
return AddProperty(name, value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED, extensibility_check);
+ MAY_BE_STORE_FROM_KEYED, extensibility_check,
+ OPTIMAL_REPRESENTATION, mode);
}
@@ -2377,9 +2382,9 @@ MaybeObject* JSObject::MigrateToMap(Map* new_map) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
PropertyDetails old_details = old_descriptors->GetDetails(i);
- ASSERT(old_details.type() == CONSTANT_FUNCTION ||
+ ASSERT(old_details.type() == CONSTANT ||
old_details.type() == FIELD);
- Object* value = old_details.type() == CONSTANT_FUNCTION
+ Object* value = old_details.type() == CONSTANT
? old_descriptors->GetValue(i)
: RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
if (FLAG_track_double_fields &&
@@ -2994,7 +2999,7 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
switch (result.type()) {
case NORMAL:
case FIELD:
- case CONSTANT_FUNCTION:
+ case CONSTANT:
*done = result.IsReadOnly();
break;
case INTERCEPTOR: {
@@ -3867,13 +3872,13 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
result = *value;
break;
}
- case CONSTANT_FUNCTION:
- // Only replace the function if necessary.
- if (*value == lookup->GetConstantFunction()) return *value;
+ case CONSTANT:
+ // Only replace the constant if necessary.
+ if (*value == lookup->GetConstant()) return *value;
// Preserve the attributes of this existing property.
attributes = lookup->GetAttributes();
- result =
- lookup->holder()->ConvertDescriptorToField(*name, *value, attributes);
+ result = lookup->holder()->ConvertDescriptorToField(
+ *name, *value, attributes);
break;
case CALLBACKS: {
Object* callback_object = lookup->GetCallbackObject();
@@ -3919,14 +3924,14 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
result = lookup->holder()->ConvertDescriptorToField(
*name, *value, attributes);
} else {
- ASSERT(details.type() == CONSTANT_FUNCTION);
+ ASSERT(details.type() == CONSTANT);
- Object* constant_function = descriptors->GetValue(descriptor);
- if (constant_function == *value) {
+ Object* constant = descriptors->GetValue(descriptor);
+ if (constant == *value) {
// If the same constant function is being added we can simply
// transition to the target map.
lookup->holder()->set_map(transition_map);
- result = constant_function;
+ result = constant;
} else {
// Otherwise, replace with a map transition to a new map with a FIELD,
// even if the value is a constant function.
@@ -3977,11 +3982,12 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- ValueType value_type) {
+ ValueType value_type,
+ StoreMode mode) {
CALL_HEAP_FUNCTION(
object->GetIsolate(),
object->SetLocalPropertyIgnoreAttributes(
- *key, *value, attributes, value_type),
+ *key, *value, attributes, value_type, mode),
Object);
}
@@ -3990,7 +3996,8 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
Name* name_raw,
Object* value_raw,
PropertyAttributes attributes,
- ValueType value_type) {
+ ValueType value_type,
+ StoreMode mode) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
@@ -4017,7 +4024,8 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
name_raw,
value_raw,
attributes,
- value_type);
+ value_type,
+ mode);
}
// Check for accessor in prototype chain removed here in clone.
@@ -4025,7 +4033,7 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
// Neither properties nor transitions found.
return AddProperty(
name_raw, value_raw, attributes, kNonStrictMode,
- MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK, value_type);
+ MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK, value_type, mode);
}
// From this point on everything needs to be handlified.
@@ -4075,9 +4083,9 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
result = *value;
break;
}
- case CONSTANT_FUNCTION:
+ case CONSTANT:
// Only replace the function if necessary.
- if (*value != lookup.GetConstantFunction()) {
+ if (*value != lookup.GetConstant()) {
// Preserve the attributes of this existing property.
attributes = lookup.GetAttributes();
result = self->ConvertDescriptorToField(*name, *value, attributes);
@@ -4122,7 +4130,7 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
} else if (details.type() == CALLBACKS) {
result = self->ConvertDescriptorToField(*name, *value, attributes);
} else {
- ASSERT(details.type() == CONSTANT_FUNCTION);
+ ASSERT(details.type() == CONSTANT);
// Replace transition to CONSTANT FUNCTION with a map transition to a
// new map with a FIELD, even if the value is a function.
@@ -4264,7 +4272,7 @@ PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
switch (lookup->type()) {
case NORMAL: // fall through
case FIELD:
- case CONSTANT_FUNCTION:
+ case CONSTANT:
case CALLBACKS:
return lookup->GetAttributes();
case HANDLER: {
@@ -4504,10 +4512,10 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
- case CONSTANT_FUNCTION: {
+ case CONSTANT: {
PropertyDetails d = PropertyDetails(
details.attributes(), NORMAL, i + 1);
- Object* value = descs->GetConstantFunction(i);
+ Object* value = descs->GetConstant(i);
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
@@ -4949,7 +4957,8 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
hashtable,
DONT_ENUM,
kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK);
+ OMIT_EXTENSIBILITY_CHECK,
+ FORCE_FIELD);
if (store_result->IsFailure()) return store_result;
return hashtable;
}
@@ -4981,7 +4990,8 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
value,
DONT_ENUM,
kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK);
+ OMIT_EXTENSIBILITY_CHECK,
+ FORCE_FIELD);
if (store_result->IsFailure()) return store_result;
return this;
}
@@ -6457,8 +6467,8 @@ Object* JSObject::SlowReverseLookup(Object* value) {
} else if (property == value) {
return descs->GetKey(i);
}
- } else if (descs->GetType(i) == CONSTANT_FUNCTION) {
- if (descs->GetConstantFunction(i) == value) {
+ } else if (descs->GetType(i) == CONSTANT) {
+ if (descs->GetConstant(i) == value) {
return descs->GetKey(i);
}
}
@@ -7800,8 +7810,8 @@ MaybeObject* DescriptorArray::Merge(int verbatim,
PropertyDetails other_details = other->GetDetails(descriptor);
if (details.type() == FIELD || other_details.type() == FIELD ||
- (details.type() == CONSTANT_FUNCTION &&
- other_details.type() == CONSTANT_FUNCTION &&
+ (details.type() == CONSTANT &&
+ other_details.type() == CONSTANT &&
GetValue(descriptor) != other->GetValue(descriptor))) {
Representation representation =
details.representation().generalize(other_details.representation());
@@ -7850,8 +7860,8 @@ bool DescriptorArray::IsMoreGeneralThan(int verbatim,
if (!other_details.representation().fits_into(details.representation())) {
return false;
}
- if (details.type() == CONSTANT_FUNCTION) {
- if (other_details.type() != CONSTANT_FUNCTION) return false;
+ if (details.type() == CONSTANT) {
+ if (other_details.type() != CONSTANT) return false;
if (GetValue(descriptor) != other->GetValue(descriptor)) return false;
}
}
@@ -9224,6 +9234,7 @@ void JSFunction::MarkForLazyRecompilation() {
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
+ ASSERT(!shared()->is_generator());
set_code_no_write_barrier(
GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
// No write barrier required, since the builtin is part of the root set.
@@ -9234,10 +9245,8 @@ void JSFunction::MarkForParallelRecompilation() {
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- if (!FLAG_parallel_recompilation) {
- JSFunction::MarkForLazyRecompilation();
- return;
- }
+ ASSERT(!shared()->is_generator());
+ ASSERT(FLAG_parallel_recompilation);
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Marking ");
PrintName();
@@ -10688,7 +10697,7 @@ const char* Code::StubType2String(StubType type) {
switch (type) {
case NORMAL: return "NORMAL";
case FIELD: return "FIELD";
- case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
+ case CONSTANT: return "CONSTANT";
case CALLBACKS: return "CALLBACKS";
case INTERCEPTOR: return "INTERCEPTOR";
case MAP_TRANSITION: return "MAP_TRANSITION";
@@ -11348,14 +11357,6 @@ bool DependentCode::Contains(DependencyGroup group, Code* code) {
}
-class DeoptimizeDependentCodeFilter : public OptimizedFunctionFilter {
- public:
- virtual bool TakeFunction(JSFunction* function) {
- return function->code()->marked_for_deoptimization();
- }
-};
-
-
void DependentCode::DeoptimizeDependentCodeGroup(
Isolate* isolate,
DependentCode::DependencyGroup group) {
@@ -11365,10 +11366,14 @@ void DependentCode::DeoptimizeDependentCodeGroup(
int end = starts.at(group + 1);
int code_entries = starts.number_of_entries();
if (start == end) return;
+
+ // Collect all the code to deoptimize.
+ Zone zone(isolate);
+ ZoneList<Code*> codes(end - start, &zone);
for (int i = start; i < end; i++) {
if (is_code_at(i)) {
Code* code = code_at(i);
- code->set_marked_for_deoptimization(true);
+ if (!code->marked_for_deoptimization()) codes.Add(code, &zone);
} else {
CompilationInfo* info = compilation_info_at(i);
info->AbortDueToDependencyChange();
@@ -11384,8 +11389,7 @@ void DependentCode::DeoptimizeDependentCodeGroup(
clear_at(i);
}
set_number_of_entries(group, 0);
- DeoptimizeDependentCodeFilter filter;
- Deoptimizer::DeoptimizeAllFunctionsWith(isolate, &filter);
+ Deoptimizer::DeoptimizeCodeList(isolate, &codes);
}
@@ -15269,9 +15273,7 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor(
PropertyType type = details.type();
if (value->IsJSFunction()) {
- ConstantFunctionDescriptor d(key,
- JSFunction::cast(value),
- details.attributes());
+ ConstantDescriptor d(key, value, details.attributes());
descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == NORMAL) {
if (current_offset < inobject_props) {
@@ -15918,10 +15920,11 @@ Type* PropertyCell::UpdateType(Handle<PropertyCell> cell,
Handle<Object> value) {
Isolate* isolate = cell->GetIsolate();
Handle<Type> old_type(cell->type(), isolate);
- Handle<Type> new_type((value->IsSmi() || value->IsJSFunction() ||
- value->IsUndefined())
- ? Type::Constant(value, isolate)
- : Type::Any(), isolate);
+  // TODO(2803): Do not track ConsStrings as constants because they cannot
+  // be embedded into code.
+ Handle<Type> new_type(value->IsConsString() || value->IsTheHole()
+ ? Type::Any()
+ : Type::Constant(value, isolate), isolate);
if (new_type->Is(old_type)) {
return *old_type;
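
DependentCode::DeoptimizeDependentCodeGroup above no longer marks code and then sweeps every optimized function through a filter; it collects the affected code objects into a zone-allocated list and hands the whole batch to Deoptimizer::DeoptimizeCodeList. A rough sketch of that collect-then-process shape, using std::vector in place of V8's ZoneList:

    #include <cstddef>
    #include <vector>

    struct CodeEntry { bool marked_for_deoptimization; };

    // Collect every not-yet-marked entry into a batch, clear the dependency
    // group, and return the batch for a single bulk deoptimization pass.
    std::vector<CodeEntry*> CollectCodeToDeoptimize(std::vector<CodeEntry*>* group) {
      std::vector<CodeEntry*> codes;
      for (std::size_t i = 0; i < group->size(); ++i) {
        if (!(*group)[i]->marked_for_deoptimization) codes.push_back((*group)[i]);
      }
      group->clear();
      return codes;  // the caller hands this list to the bulk deoptimizer
    }
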
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index f197b238ff..36611ee077 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -357,7 +357,6 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(ODDBALL_TYPE) \
V(CELL_TYPE) \
V(PROPERTY_CELL_TYPE) \
- V(BOX_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
V(FOREIGN_TYPE) \
@@ -395,6 +394,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(POLYMORPHIC_CODE_CACHE_TYPE) \
V(TYPE_FEEDBACK_INFO_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
+ V(BOX_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
@@ -699,7 +699,6 @@ enum InstanceType {
ODDBALL_TYPE,
CELL_TYPE,
PROPERTY_CELL_TYPE,
- BOX_TYPE,
// "Data", objects that cannot contain non-map-word pointers to heap
// objects.
@@ -738,6 +737,7 @@ enum InstanceType {
POLYMORPHIC_CODE_CACHE_TYPE,
TYPE_FEEDBACK_INFO_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
+ BOX_TYPE,
// The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
// is defined. However as include/v8.h contain some of the instance type
// constants always having them avoids them getting different numbers
@@ -1644,6 +1644,12 @@ class JSReceiver: public HeapObject {
CERTAINLY_NOT_STORE_FROM_KEYED
};
+ // Indicates whether a value can be loaded as a constant.
+ enum StoreMode {
+ ALLOW_AS_CONSTANT,
+ FORCE_FIELD
+ };
+
// Internal properties (e.g. the hidden properties dictionary) might
// be added even though the receiver is non-extensible.
enum ExtensibilityCheck {
@@ -1871,14 +1877,16 @@ class JSObject: public JSReceiver {
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check);
+ ExtensibilityCheck extensibility_check,
+ StoreMode mode = ALLOW_AS_CONSTANT);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- ValueType value_type = OPTIMAL_REPRESENTATION);
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT);
static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
@@ -1906,7 +1914,8 @@ class JSObject: public JSReceiver {
Name* key,
Object* value,
PropertyAttributes attributes,
- ValueType value_type = OPTIMAL_REPRESENTATION);
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT);
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
@@ -2205,9 +2214,9 @@ class JSObject: public JSReceiver {
// normal property is added instead, with a map transition.
// This avoids the creation of many maps with the same constant
// function, all orphaned.
- MUST_USE_RESULT MaybeObject* AddConstantFunctionProperty(
+ MUST_USE_RESULT MaybeObject* AddConstantProperty(
Name* name,
- JSFunction* function,
+ Object* constant,
PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* ReplaceSlowProperty(
@@ -2272,7 +2281,8 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
- ValueType value_type = OPTIMAL_REPRESENTATION);
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2863,7 +2873,7 @@ class DescriptorArray: public FixedArray {
inline PropertyDetails GetDetails(int descriptor_number);
inline PropertyType GetType(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
- inline JSFunction* GetConstantFunction(int descriptor_number);
+ inline Object* GetConstant(int descriptor_number);
inline Object* GetCallbacksObject(int descriptor_number);
inline AccessorDescriptor* GetCallbacks(int descriptor_number);
@@ -4518,7 +4528,7 @@ class Code: public HeapObject {
enum StubType {
NORMAL,
FIELD,
- CONSTANT_FUNCTION,
+ CONSTANT,
CALLBACKS,
INTERCEPTOR,
MAP_TRANSITION,
@@ -4561,7 +4571,7 @@ class Code: public HeapObject {
// [type_feedback_info]: Struct containing type feedback information for
// unoptimized code. Optimized code can temporarily store the head of
- // the list of the dependent optimized functions during deoptimization.
+ // the list of code to be deoptimized during mark-compact GC.
// STUBs can use this slot to store arbitrary information as a Smi.
// Will contain either a TypeFeedbackInfo object, or JSFunction object,
// or undefined, or a Smi.
@@ -4569,8 +4579,11 @@ class Code: public HeapObject {
inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
inline int stub_info();
inline void set_stub_info(int info);
- inline Object* deoptimizing_functions();
- inline void set_deoptimizing_functions(Object* value);
+
+  // Used during GC to link a list of code objects to deoptimize.
+ inline Object* code_to_deoptimize_link();
+ inline void set_code_to_deoptimize_link(Object* value);
+ inline Object** code_to_deoptimize_link_slot();
// [gc_metadata]: Field used to hold GC related metadata. The contents of this
// field does not have to be traced during garbage collection since
@@ -5626,6 +5639,7 @@ class Map: public HeapObject {
inline void NotifyLeafMapLayoutChange();
inline bool CanOmitPrototypeChecks();
+ inline bool CanOmitMapChecks();
void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
CompilationInfo* info);
@@ -6761,18 +6775,6 @@ class JSFunction: public JSObject {
// Retrieve the native context from a function's literal array.
static Context* NativeContextFromLiterals(FixedArray* literals);
-#ifdef DEBUG
- bool FunctionsInFunctionListShareSameCode() {
- Object* current = this;
- while (!current->IsUndefined()) {
- JSFunction* function = JSFunction::cast(current);
- current = function->next_function_link();
- if (function->code() != this->code()) return false;
- }
- return true;
- }
-#endif
-
bool PassesHydrogenFilter();
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
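
The StoreMode flag introduced above lets callers decide whether a newly added value may be recorded as a CONSTANT descriptor, which optimized code can embed directly, or must be forced into a mutable FIELD, as the hidden-properties hash table is via FORCE_FIELD since it is rewritten in place. A hedged sketch of that decision with simplified enums (not V8's real descriptor machinery):

    enum StoreMode { ALLOW_AS_CONSTANT, FORCE_FIELD };
    enum DescriptorKind { CONSTANT_DESC, FIELD_DESC };

    // Values expected to be rewritten later (for example an internal hash
    // table) are stored as fields even when a constant descriptor would be
    // possible, so a later update does not have to generalize the map.
    DescriptorKind ChooseDescriptor(bool value_is_function, StoreMode mode) {
      if (mode == ALLOW_AS_CONSTANT && value_is_function) return CONSTANT_DESC;
      return FIELD_DESC;
    }
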
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index 21ef237107..11d60c33d2 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -60,12 +60,23 @@ void OptimizingCompilerThread::Run() {
OS::Sleep(FLAG_parallel_recompilation_delay);
}
- if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
- if (FLAG_trace_parallel_recompilation) {
- time_spent_total_ = OS::Ticks() - epoch;
- }
- return;
+ switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
+ case CONTINUE:
+ break;
+ case STOP:
+ if (FLAG_trace_parallel_recompilation) {
+ time_spent_total_ = OS::Ticks() - epoch;
+ }
+ stop_semaphore_->Signal();
+ return;
+ case FLUSH:
+ // Reset input queue semaphore.
+ delete input_queue_semaphore_;
+ input_queue_semaphore_ = OS::CreateSemaphore(0);
+ // Signal for main thread to start flushing.
+ stop_semaphore_->Signal();
+ // Return to start of consumer loop.
+ continue;
}
int64_t compiling_start = 0;
@@ -102,9 +113,41 @@ void OptimizingCompilerThread::CompileNext() {
}
+void OptimizingCompilerThread::FlushQueue(
+ UnboundQueue<OptimizingCompiler*>* queue,
+ bool restore_function_code) {
+ ASSERT(!IsOptimizerThread());
+ OptimizingCompiler* optimizing_compiler;
+ // The optimizing compiler is allocated in the CompilationInfo's zone.
+ while (queue->Dequeue(&optimizing_compiler)) {
+ CompilationInfo* info = optimizing_compiler->info();
+ if (restore_function_code) {
+ Handle<JSFunction> function = info->closure();
+ function->ReplaceCode(function->shared()->code());
+ }
+ delete info;
+ }
+}
+
+
+void OptimizingCompilerThread::Flush() {
+ ASSERT(!IsOptimizerThread());
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+ input_queue_semaphore_->Signal();
+
+ FlushQueue(&input_queue_, true);
+ NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+
+ stop_semaphore_->Wait();
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
+
+ FlushQueue(&output_queue_, true);
+}
+
+
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
input_queue_semaphore_->Signal();
stop_semaphore_->Wait();
@@ -114,14 +157,8 @@ void OptimizingCompilerThread::Stop() {
while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
InstallOptimizedFunctions();
} else {
- OptimizingCompiler* optimizing_compiler;
- // The optimizing compiler is allocated in the CompilationInfo's zone.
- while (input_queue_.Dequeue(&optimizing_compiler)) {
- delete optimizing_compiler->info();
- }
- while (output_queue_.Dequeue(&optimizing_compiler)) {
- delete optimizing_compiler->info();
- }
+ FlushQueue(&input_queue_, false);
+ FlushQueue(&output_queue_, false);
}
if (FLAG_trace_parallel_recompilation) {
diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h
index 275ceb40b7..5a87a975e5 100644
--- a/deps/v8/src/optimizing-compiler-thread.h
+++ b/deps/v8/src/optimizing-compiler-thread.h
@@ -54,13 +54,13 @@ class OptimizingCompilerThread : public Thread {
install_mutex_(OS::CreateMutex()),
time_spent_compiling_(0),
time_spent_total_(0) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
}
void Run();
void Stop();
- void CompileNext();
+ void Flush();
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
@@ -92,6 +92,12 @@ class OptimizingCompilerThread : public Thread {
}
private:
+ enum StopFlag { CONTINUE, STOP, FLUSH };
+
+ void FlushQueue(UnboundQueue<OptimizingCompiler*>* queue,
+ bool restore_function_code);
+ void CompileNext();
+
#ifdef DEBUG
int thread_id_;
Mutex* thread_id_mutex_;
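
Taken together, the optimizing-compiler-thread changes replace the boolean stop flag with a three-state StopFlag so the main thread can flush the background queue without shutting the thread down. The sketch below is a simplified standalone model of that CONTINUE/STOP/FLUSH handshake using standard C++ threading primitives instead of V8's platform layer; names such as WorkerLoop and RequestFlush are illustrative only.

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // Minimal counting semaphore so the sketch can mirror the structure above.
    class Semaphore {
     public:
      Semaphore() : count_(0) {}
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_;
    };

    enum StopFlag { CONTINUE, STOP, FLUSH };

    std::atomic<int> stop_flag(CONTINUE);
    Semaphore input_sem;   // signalled once per queued compilation job
    Semaphore stop_sem;    // worker-to-main-thread acknowledgement

    // Worker consumer loop (stands in for OptimizingCompilerThread::Run()).
    void WorkerLoop() {
      for (;;) {
        input_sem.Wait();  // block until there is work or a control request
        switch (stop_flag.load()) {
          case STOP:
            stop_sem.Signal();  // acknowledge and exit the thread
            return;
          case FLUSH:
            // The real code also resets the input-queue semaphore here so
            // stale wakeups from already-queued jobs are dropped.
            stop_sem.Signal();  // main thread now drains the input queue
            continue;           // back to the top of the consumer loop
          case CONTINUE:
          default:
            break;
        }
        // CompileNext() would dequeue and compile one job here.
      }
    }

    // Main-thread flush request (stands in for OptimizingCompilerThread::Flush()).
    void RequestFlush() {
      stop_flag.store(FLUSH);
      input_sem.Signal();         // wake the worker even if its queue is empty
      stop_sem.Wait();            // worker has acknowledged the flush
      stop_flag.store(CONTINUE);  // let the worker resume normal operation
      // Queued-but-uncompiled jobs are discarded here and their functions get
      // their previous (unoptimized) code back.
    }
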
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 51321c7b33..4c7b017592 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -51,35 +51,10 @@
namespace v8 {
namespace internal {
-// 0 is never a valid thread id
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
static Mutex* limit_mutex = NULL;
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Nothing special about Cygwin.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -126,11 +101,6 @@ bool OS::IsOutsideAllocatedSpace(void* address) {
}
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -147,48 +117,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::ProtectCode(void* address, const size_t size) {
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-
-void OS::Guard(void* address, const size_t size) {
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
void OS::DumpBacktrace() {
// Currently unsupported.
}
@@ -470,110 +398,6 @@ bool VirtualMemory::HasLazyCommits() {
}
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->NotifyStartedAndRun();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-static inline Thread::LocalStorageKey PthreadKeyToLocalKey(
- pthread_key_t pthread_key) {
- // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
- // because pthread_key_t is a pointer type on Cygwin. This will probably not
- // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
- return static_cast<Thread::LocalStorageKey>(ptr_key);
-}
-
-
-static inline pthread_key_t LocalKeyToPthreadKey(
- Thread::LocalStorageKey local_key) {
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = static_cast<intptr_t>(local_key);
- return reinterpret_cast<pthread_key_t>(ptr_key);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return PthreadKeyToLocalKey(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
class CygwinSemaphore : public Semaphore {
public:
explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index c771cd3be0..e0917fa567 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -62,40 +62,10 @@
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on FreeBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- // Correct as on OS X
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
-}
-
static Mutex* limit_mutex = NULL;
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // FreeBSD runs on anything.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // 16 byte alignment on FreeBSD
- return 16;
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -139,11 +109,6 @@ bool OS::IsOutsideAllocatedSpace(void* address) {
}
-size_t OS::AllocateAlignment() {
- return getpagesize();
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
@@ -161,40 +126,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::Free(void* buf, const size_t length) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(buf, length);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
- asm("bkpt 0");
-#else
- asm("int $3");
-#endif
-}
-
-
void OS::DumpBacktrace() {
POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
}
@@ -441,90 +372,6 @@ bool VirtualMemory::HasLazyCommits() {
}
-class Thread::PlatformData : public Malloced {
- public:
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData),
- stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->NotifyStartedAndRun();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
class FreeBSDSemaphore : public Semaphore {
public:
explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 613d2434b9..5c252bbf88 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -75,29 +75,10 @@
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on Linux since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
static Mutex* limit_mutex = NULL;
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Linux runs on anything.
-}
-
-
#ifdef __arm__
static bool CPUInfoContainsString(const char * search_string) {
const char* file_name = "/proc/cpuinfo";
@@ -327,20 +308,6 @@ bool OS::MipsCpuHasFeature(CpuFeature feature) {
#endif // def __mips__
-int OS::ActivationFrameAlignment() {
-#if V8_TARGET_ARCH_ARM
- // On EABI ARM targets this is required for fp correctness in the
- // runtime system.
- return 8;
-#elif V8_TARGET_ARCH_MIPS
- return 8;
-#endif
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -384,11 +351,6 @@ bool OS::IsOutsideAllocatedSpace(void* address) {
}
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -407,49 +369,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- if (FLAG_break_on_abort) {
- DebugBreak();
- }
- abort();
-}
-
-
-void OS::DebugBreak() {
-// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
-// which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__))
- asm("bkpt 0");
-#elif defined(__mips__)
- asm("break");
-#elif defined(__native_client__)
- asm("hlt");
-#else
- asm("int $3");
-#endif
-}
-
-
void OS::DumpBacktrace() {
// backtrace is a glibc extension.
#if defined(__GLIBC__) && !defined(__UCLIBC__)
@@ -764,101 +683,6 @@ bool VirtualMemory::HasLazyCommits() {
}
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
-#ifdef PR_SET_NAME
- prctl(PR_SET_NAME,
- reinterpret_cast<unsigned long>(thread->name()), // NOLINT
- 0, 0, 0);
-#endif
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->NotifyStartedAndRun();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
-#if defined(__native_client__)
- // use default stack size.
-#else
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
-#endif
- int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- CHECK_EQ(0, result);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
class LinuxSemaphore : public Semaphore {
public:
explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 097691be07..6135cd1374 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -78,29 +78,10 @@ extern "C" {
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on MacOSX since a pthread_t is
-// a pointer.
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- // Correct Mac OS X Leopard 'ceil' behavior.
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
-}
-
static Mutex* limit_mutex = NULL;
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, i.e., not all addresses in
@@ -126,11 +107,6 @@ bool OS::IsOutsideAllocatedSpace(void* address) {
}
-size_t OS::AllocateAlignment() {
- return getpagesize();
-}
-
-
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255 This helps identify V8-allocated regions in memory analysis
@@ -160,35 +136,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- usleep(1000 * milliseconds);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
void OS::DumpBacktrace() {
// If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
if (backtrace == NULL) return;
@@ -284,21 +231,6 @@ void OS::SignalCodeMovingGC() {
}
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- // MacOSX requires all these to install so we can assume they are present.
- // These constants are defined by the CPUid instructions.
- const uint64_t one = 1;
- return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
-}
-
-
-int OS::ActivationFrameAlignment() {
- // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
- // Function Call Guide".
- return 16;
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -460,177 +392,6 @@ bool VirtualMemory::HasLazyCommits() {
}
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData),
- stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void SetThreadName(const char* name) {
- // pthread_setname_np is only available in 10.6 or later, so test
- // for it at runtime.
- int (*dynamic_pthread_setname_np)(const char*);
- *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
- dlsym(RTLD_DEFAULT, "pthread_setname_np");
- if (!dynamic_pthread_setname_np)
- return;
-
- // Mac OS X does not expose the length limit of the name, so hardcode it.
- static const int kMaxNameLength = 63;
- USE(kMaxNameLength);
- ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
- dynamic_pthread_setname_np(name);
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- SetThreadName(thread->name());
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->NotifyStartedAndRun();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-#ifdef V8_FAST_TLS_SUPPORTED
-
-static Atomic32 tls_base_offset_initialized = 0;
-intptr_t kMacTlsBaseOffset = 0;
-
-// It's safe to do the initialization more that once, but it has to be
-// done at least once.
-static void InitializeTlsBaseOffset() {
- const size_t kBufferSize = 128;
- char buffer[kBufferSize];
- size_t buffer_size = kBufferSize;
- int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
- if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
- }
- // The buffer now contains a string of the form XX.YY.ZZ, where
- // XX is the major kernel version component.
- // Make sure the buffer is 0-terminated.
- buffer[kBufferSize - 1] = '\0';
- char* period_pos = strchr(buffer, '.');
- *period_pos = '\0';
- int kernel_version_major =
- static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
- // The constants below are taken from pthreads.s from the XNU kernel
- // sources archive at www.opensource.apple.com.
- if (kernel_version_major < 11) {
- // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
- // same offsets.
-#if V8_HOST_ARCH_IA32
- kMacTlsBaseOffset = 0x48;
-#else
- kMacTlsBaseOffset = 0x60;
-#endif
- } else {
- // 11.x.x (Lion) changed the offset.
- kMacTlsBaseOffset = 0;
- }
-
- Release_Store(&tls_base_offset_initialized, 1);
-}
-
-
-static void CheckFastTls(Thread::LocalStorageKey key) {
- void* expected = reinterpret_cast<void*>(0x1234CAFE);
- Thread::SetThreadLocal(key, expected);
- void* actual = Thread::GetExistingThreadLocal(key);
- if (expected != actual) {
- V8_Fatal(__FILE__, __LINE__,
- "V8 failed to initialize fast TLS on current kernel");
- }
- Thread::SetThreadLocal(key, NULL);
-}
-
-#endif // V8_FAST_TLS_SUPPORTED
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-#ifdef V8_FAST_TLS_SUPPORTED
- bool check_fast_tls = false;
- if (tls_base_offset_initialized == 0) {
- check_fast_tls = true;
- InitializeTlsBaseOffset();
- }
-#endif
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
-#ifdef V8_FAST_TLS_SUPPORTED
- // If we just initialized fast TLS support, make sure it works.
- if (check_fast_tls) CheckFastTls(typed_key);
-#endif
- return typed_key;
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
class MacOSSemaphore : public Semaphore {
public:
explicit MacOSSemaphore(int count) {
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index a40df48d81..e59160109f 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -60,63 +60,10 @@
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
static Mutex* limit_mutex = NULL;
-static void* GetRandomMmapAddr() {
- Isolate* isolate = Isolate::UncheckedCurrent();
- // Note that the current isolate isn't set up in a call path via
- // CpuFeatures::Probe. We don't care about randomization in this case because
- // the code page is immediately freed.
- if (isolate != NULL) {
-#if V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc).
- raw_addr &= 0x3ffff000;
- raw_addr += 0x20000000;
-#endif
- return reinterpret_cast<void*>(raw_addr);
- }
- return NULL;
-}
-
-
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0;
-}
-
-
-int OS::ActivationFrameAlignment() {
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -160,17 +107,12 @@ bool OS::IsOutsideAllocatedSpace(void* address) {
}
-size_t OS::AllocateAlignment() {
- return sysconf(_SC_PAGESIZE);
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = GetRandomMmapAddr();
+ void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
LOG(i::Isolate::Current(),
@@ -183,36 +125,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- unsigned int ms = static_cast<unsigned int>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
void OS::DumpBacktrace() {
// Currently unsupported.
}
@@ -395,7 +307,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(GetRandomMmapAddr(),
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
@@ -467,7 +379,7 @@ bool VirtualMemory::Guard(void* address) {
void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(GetRandomMmapAddr(),
+ void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
@@ -517,96 +429,6 @@ bool VirtualMemory::HasLazyCommits() {
}
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) {}
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
-#ifdef PR_SET_NAME
- prctl(PR_SET_NAME,
- reinterpret_cast<unsigned long>(thread->name()), // NOLINT
- 0, 0, 0);
-#endif
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->NotifyStartedAndRun();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
- }
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
class OpenBSDSemaphore : public Semaphore {
public:
explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 3aa8587cf4..13b819bd1e 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -31,7 +31,11 @@
#include "platform-posix.h"
+#include <dlfcn.h>
#include <pthread.h>
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#include <pthread_np.h> // for pthread_set_name_np
+#endif
#include <sched.h> // for sched_yield
#include <unistd.h>
#include <errno.h>
@@ -43,6 +47,13 @@
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
+#if defined(__linux__)
+#include <sys/prctl.h> // for prctl
+#endif
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/sysctl.h> // for sysctl
+#endif
#include <arpa/inet.h>
#include <netinet/in.h>
@@ -63,6 +74,21 @@
namespace v8 {
namespace internal {
+// 0 is never a valid thread id.
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+#if defined(__APPLE__)
+ // Mac OS X requires all these to install so we can assume they are present.
+ // These constants are defined by the CPUid instructions.
+ const uint64_t one = 1;
+ return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
+#else
+ return 0; // Nothing special about the other systems.
+#endif
+}
+
// Maximum size of the virtual memory. 0 means there is no artificial
// limit.
@@ -75,16 +101,44 @@ intptr_t OS::MaxVirtualMemory() {
}
+int OS::ActivationFrameAlignment() {
+#if V8_TARGET_ARCH_ARM
+ // On EABI ARM targets this is required for fp correctness in the
+ // runtime system.
+ return 8;
+#elif V8_TARGET_ARCH_MIPS
+ return 8;
+#else
+ // Otherwise we just assume 16 byte alignment, i.e.:
+ // - With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16 byte alignment such as movdqa on x86.
+ // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
+ // see "Mac OS X ABI Function Call Guide"
+ return 16;
+#endif
+}
+
+
intptr_t OS::CommitPageSize() {
static intptr_t page_size = getpagesize();
return page_size;
}
-#ifndef __CYGWIN__
+void OS::Free(void* address, const size_t size) {
+ // TODO(1240712): munmap has a return value which is ignored here.
+ int result = munmap(address, size);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
// Get rid of writable permission on code allocations.
void OS::ProtectCode(void* address, const size_t size) {
-#if defined(__native_client__)
+#if defined(__CYGWIN__)
+ DWORD old_protect;
+ VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+#elif defined(__native_client__)
// The Native Client port of V8 uses an interpreter, so
// code pages don't need PROT_EXEC.
mprotect(address, size, PROT_READ);
@@ -96,9 +150,13 @@ void OS::ProtectCode(void* address, const size_t size) {
// Create guard pages.
void OS::Guard(void* address, const size_t size) {
+#if defined(__CYGWIN__)
+ DWORD oldprotect;
+ VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
+#else
mprotect(address, size, PROT_NONE);
+#endif
}
-#endif // __CYGWIN__
void* OS::GetRandomMmapAddr() {
@@ -150,9 +208,59 @@ void* OS::GetRandomMmapAddr() {
}
+size_t OS::AllocateAlignment() {
+ return getpagesize();
+}
+
+
+void OS::Sleep(int milliseconds) {
+ useconds_t ms = static_cast<useconds_t>(milliseconds);
+ usleep(1000 * ms);
+}
+
+
+int OS::NumberOfCores() {
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+
+void OS::Abort() {
+ // Redirect to std abort to signal abnormal program termination.
+ if (FLAG_break_on_abort) {
+ DebugBreak();
+ }
+ abort();
+}
+
+
+void OS::DebugBreak() {
+#if V8_HOST_ARCH_ARM
+ asm("bkpt 0");
+#elif V8_HOST_ARCH_MIPS
+ asm("break");
+#elif V8_HOST_ARCH_IA32
+#if defined(__native_client__)
+ asm("hlt");
+#else
+ asm("int $3");
+#endif // __native_client__
+#elif V8_HOST_ARCH_X64
+ asm("int $3");
+#else
+#error Unsupported host architecture.
+#endif
+}
+
+
// ----------------------------------------------------------------------------
// Math functions
+double ceiling(double x) {
+ // Correct buggy 'ceil' on some systems (e.g. FreeBSD, OS X 10.5).
+ return (-1.0 < x && x < 0.0) ? -0.0 : ceil(x);
+}
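
A quick illustration of why the wrapper exists (assuming the affected libm
builds return +0.0 from ceil(x) for -1 < x < 0 instead of the required -0.0):

  // ceiling() preserves the sign of zero:
  //   ceiling(-0.25) == -0.0   (a buggy ceil() may yield +0.0 here)
  //   ceiling( 0.25) ==  1.0   (all other inputs fall through to ceil())
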
+
+
double modulo(double x, double y) {
return fmod(x, y);
}
@@ -174,7 +282,7 @@ UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-#undef MATH_FUNCTION
+#undef UNARY_MATH_FUNCTION
void lazily_initialize_fast_exp() {
@@ -386,7 +494,7 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
#endif
-void POSIXPostSetUp() {
+void OS::PostSetUp() {
#if V8_TARGET_ARCH_IA32
OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
if (generated_memmove != NULL) {
@@ -425,8 +533,226 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
// POSIX thread support.
//
+class Thread::PlatformData : public Malloced {
+ public:
+ PlatformData() : thread_(kNoThread) {}
+ pthread_t thread_; // Thread handle for pthread.
+};
+
+Thread::Thread(const Options& options)
+ : data_(new PlatformData),
+ stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
+ set_name(options.name());
+}
+
+
+Thread::~Thread() {
+ delete data_;
+}
+
+
+static void SetThreadName(const char* name) {
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+ pthread_set_name_np(pthread_self(), name);
+#elif defined(__NetBSD__)
+ STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
+ pthread_setname_np(pthread_self(), "%s", name);
+#elif defined(__APPLE__)
+ // pthread_setname_np is only available in 10.6 or later, so test
+ // for it at runtime.
+ int (*dynamic_pthread_setname_np)(const char*);
+ *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
+ dlsym(RTLD_DEFAULT, "pthread_setname_np");
+ if (dynamic_pthread_setname_np == NULL)
+ return;
+
+ // Mac OS X does not expose the length limit of the name, so hardcode it.
+ static const int kMaxNameLength = 63;
+ STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
+ dynamic_pthread_setname_np(name);
+#elif defined(PR_SET_NAME)
+ prctl(PR_SET_NAME,
+ reinterpret_cast<unsigned long>(name), // NOLINT
+ 0, 0, 0);
+#endif
+}
+
+
+static void* ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ // This is also initialized by the first argument to pthread_create() but we
+ // don't know which thread will run first (the original thread or the new
+ // one) so we initialize it here too.
+ thread->data()->thread_ = pthread_self();
+ SetThreadName(thread->name());
+ ASSERT(thread->data()->thread_ != kNoThread);
+ thread->NotifyStartedAndRun();
+ return NULL;
+}
+
+
+void Thread::set_name(const char* name) {
+ strncpy(name_, name, sizeof(name_));
+ name_[sizeof(name_) - 1] = '\0';
+}
+
+
+void Thread::Start() {
+ int result;
+ pthread_attr_t attr;
+ memset(&attr, 0, sizeof(attr));
+ result = pthread_attr_init(&attr);
+ ASSERT_EQ(0, result);
+ // Native client uses default stack size.
+#if !defined(__native_client__)
+ if (stack_size_ > 0) {
+ result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ ASSERT_EQ(0, result);
+ }
+#endif
+ result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ ASSERT_EQ(0, result);
+ result = pthread_attr_destroy(&attr);
+ ASSERT_EQ(0, result);
+ ASSERT(data_->thread_ != kNoThread);
+ USE(result);
+}
+
+
+void Thread::Join() {
+ pthread_join(data_->thread_, NULL);
+}
+
+
void Thread::YieldCPU() {
- sched_yield();
+ int result = sched_yield();
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
+#if defined(__CYGWIN__)
+ // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
+ // because pthread_key_t is a pointer type on Cygwin. This will probably not
+ // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
+ return static_cast<Thread::LocalStorageKey>(ptr_key);
+#else
+ return static_cast<Thread::LocalStorageKey>(pthread_key);
+#endif
+}
+
+
+static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
+#if defined(__CYGWIN__)
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = static_cast<intptr_t>(local_key);
+ return reinterpret_cast<pthread_key_t>(ptr_key);
+#else
+ return static_cast<pthread_key_t>(local_key);
+#endif
+}
+
+
+#ifdef V8_FAST_TLS_SUPPORTED
+
+static Atomic32 tls_base_offset_initialized = 0;
+intptr_t kMacTlsBaseOffset = 0;
+
+// It's safe to do the initialization more than once, but it has to be
+// done at least once.
+static void InitializeTlsBaseOffset() {
+ const size_t kBufferSize = 128;
+ char buffer[kBufferSize];
+ size_t buffer_size = kBufferSize;
+ int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
+ if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+ V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ }
+ // The buffer now contains a string of the form XX.YY.ZZ, where
+ // XX is the major kernel version component.
+ // Make sure the buffer is 0-terminated.
+ buffer[kBufferSize - 1] = '\0';
+ char* period_pos = strchr(buffer, '.');
+ *period_pos = '\0';
+ int kernel_version_major =
+ static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
+ // The constants below are taken from pthreads.s from the XNU kernel
+ // sources archive at www.opensource.apple.com.
+ if (kernel_version_major < 11) {
+ // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
+ // same offsets.
+#if V8_HOST_ARCH_IA32
+ kMacTlsBaseOffset = 0x48;
+#else
+ kMacTlsBaseOffset = 0x60;
+#endif
+ } else {
+ // 11.x.x (Lion) changed the offset.
+ kMacTlsBaseOffset = 0;
+ }
+
+ Release_Store(&tls_base_offset_initialized, 1);
+}
+
+
+static void CheckFastTls(Thread::LocalStorageKey key) {
+ void* expected = reinterpret_cast<void*>(0x1234CAFE);
+ Thread::SetThreadLocal(key, expected);
+ void* actual = Thread::GetExistingThreadLocal(key);
+ if (expected != actual) {
+ V8_Fatal(__FILE__, __LINE__,
+ "V8 failed to initialize fast TLS on current kernel");
+ }
+ Thread::SetThreadLocal(key, NULL);
+}
+
+#endif // V8_FAST_TLS_SUPPORTED
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+#ifdef V8_FAST_TLS_SUPPORTED
+ bool check_fast_tls = false;
+ if (tls_base_offset_initialized == 0) {
+ check_fast_tls = true;
+ InitializeTlsBaseOffset();
+ }
+#endif
+ pthread_key_t key;
+ int result = pthread_key_create(&key, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ LocalStorageKey local_key = PthreadKeyToLocalKey(key);
+#ifdef V8_FAST_TLS_SUPPORTED
+ // If we just initialized fast TLS support, make sure it works.
+ if (check_fast_tls) CheckFastTls(local_key);
+#endif
+ return local_key;
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ int result = pthread_key_delete(pthread_key);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ int result = pthread_setspecific(pthread_key, value);
+ ASSERT_EQ(0, result);
+ USE(result);
}
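
A minimal sketch of how the key-conversion helpers above are expected to
round-trip (illustrative only; the slot variable is a placeholder):

  int slot_value = 0;
  Thread::LocalStorageKey key = Thread::CreateThreadLocalKey();
  Thread::SetThreadLocal(key, &slot_value);    // LocalKeyToPthreadKey + setspecific
  ASSERT(Thread::GetThreadLocal(key) == &slot_value);
  Thread::DeleteThreadLocalKey(key);           // pthread_key_delete on the raw key
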
diff --git a/deps/v8/src/platform-posix.h b/deps/v8/src/platform-posix.h
index bcc2b7e74e..6b73387cd7 100644
--- a/deps/v8/src/platform-posix.h
+++ b/deps/v8/src/platform-posix.h
@@ -38,9 +38,6 @@
namespace v8 {
namespace internal {
-// Used by platform implementation files during OS::PostSetUp().
-void POSIXPostSetUp();
-
// Used by platform implementation files during OS::DumpBacktrace()
// and OS::StackWalk().
template<int (*backtrace)(void**, int),
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 3c4df665f0..b1d88af293 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -81,35 +81,9 @@ namespace v8 {
namespace internal {
-// 0 is never a valid thread id on Solaris since the main thread is 1 and
-// subsequent have their ids incremented from there
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
- return ceil(x);
-}
-
-
static Mutex* limit_mutex = NULL;
-void OS::PostSetUp() {
- POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Solaris runs on a lot of things.
-}
-
-
-int OS::ActivationFrameAlignment() {
- // GCC generates code that requires 16 byte alignment such as movdqa.
- return Max(STACK_ALIGN, 16);
-}
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -150,11 +124,6 @@ bool OS::IsOutsideAllocatedSpace(void* address) {
}
-size_t OS::AllocateAlignment() {
- return static_cast<size_t>(getpagesize());
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -172,36 +141,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
- useconds_t ms = static_cast<useconds_t>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
- asm("int $3");
-}
-
-
void OS::DumpBacktrace() {
// Currently unsupported.
}
@@ -454,90 +393,6 @@ bool VirtualMemory::HasLazyCommits() {
}
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() : thread_(kNoThread) { }
-
- pthread_t thread_; // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->NotifyStartedAndRun();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- pthread_attr_t attr;
- if (stack_size_ > 0) {
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- }
- pthread_create(&data_->thread_, NULL, ThreadEntry, this);
- ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- USE(result);
- ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- int result = pthread_key_delete(pthread_key);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
- pthread_setspecific(pthread_key, value);
-}
-
-
class SolarisSemaphore : public Semaphore {
public:
explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
diff --git a/deps/v8/src/platform-tls-mac.h b/deps/v8/src/platform-tls-mac.h
deleted file mode 100644
index d1c5907191..0000000000
--- a/deps/v8/src/platform-tls-mac.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_TLS_MAC_H_
-#define V8_PLATFORM_TLS_MAC_H_
-
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-extern intptr_t kMacTlsBaseOffset;
-
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- intptr_t result;
-#if V8_HOST_ARCH_IA32
- asm("movl %%gs:(%1,%2,4), %0;"
- :"=r"(result) // Output must be a writable register.
- :"r"(kMacTlsBaseOffset), "r"(index));
-#else
- asm("movq %%gs:(%1,%2,8), %0;"
- :"=r"(result)
- :"r"(kMacTlsBaseOffset), "r"(index));
-#endif
- return result;
-}
-
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_TLS_MAC_H_
diff --git a/deps/v8/src/platform-tls-win32.h b/deps/v8/src/platform-tls-win32.h
deleted file mode 100644
index 4056e8cc67..0000000000
--- a/deps/v8/src/platform-tls-win32.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_TLS_WIN32_H_
-#define V8_PLATFORM_TLS_WIN32_H_
-
-#include "checks.h"
-#include "globals.h"
-#include "win32-headers.h"
-
-namespace v8 {
-namespace internal {
-
-#if defined(_WIN32) && !defined(_WIN64)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- const intptr_t kTibInlineTlsOffset = 0xE10;
- const intptr_t kTibExtraTlsOffset = 0xF94;
- const intptr_t kMaxInlineSlots = 64;
- const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
- ASSERT(0 <= index && index < kMaxSlots);
- if (index < kMaxInlineSlots) {
- return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
- kPointerSize * index));
- }
- intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
- ASSERT(extra != 0);
- return *reinterpret_cast<intptr_t*>(extra +
- kPointerSize * (index - kMaxInlineSlots));
-}
-
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_TLS_WIN32_H_
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 211be39d00..8b27c19a65 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -69,6 +69,7 @@ int signbit(double x);
// Microsoft Visual C++ specific stuff.
#ifdef _MSC_VER
+#include "win32-headers.h"
#include "win32-math.h"
int strncasecmp(const char* s1, const char* s2, int n);
@@ -90,7 +91,6 @@ inline int lrint(double flt) {
return intgr;
}
-
#endif // _MSC_VER
#ifndef __CYGWIN__
@@ -101,7 +101,6 @@ int random();
#endif // WIN32
#include "lazy-instance.h"
-#include "platform-tls.h"
#include "utils.h"
#include "v8globals.h"
@@ -129,6 +128,60 @@ void lazily_initialize_fast_exp();
class Socket;
// ----------------------------------------------------------------------------
+// Fast TLS support
+
+#ifndef V8_NO_FAST_TLS
+
+#if defined(_MSC_VER) && V8_HOST_ARCH_IA32
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+ const intptr_t kTibInlineTlsOffset = 0xE10;
+ const intptr_t kTibExtraTlsOffset = 0xF94;
+ const intptr_t kMaxInlineSlots = 64;
+ const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
+ ASSERT(0 <= index && index < kMaxSlots);
+ if (index < kMaxInlineSlots) {
+ return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
+ kPointerSize * index));
+ }
+ intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
+ ASSERT(extra != 0);
+ return *reinterpret_cast<intptr_t*>(extra +
+ kPointerSize * (index - kMaxInlineSlots));
+}
+
+#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+extern intptr_t kMacTlsBaseOffset;
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+ intptr_t result;
+#if V8_HOST_ARCH_IA32
+ asm("movl %%gs:(%1,%2,4), %0;"
+ :"=r"(result) // Output must be a writable register.
+ :"r"(kMacTlsBaseOffset), "r"(index));
+#else
+ asm("movq %%gs:(%1,%2,8), %0;"
+ :"=r"(result)
+ :"r"(kMacTlsBaseOffset), "r"(index));
+#endif
+ return result;
+}
+
+#endif
+
+#endif // V8_NO_FAST_TLS
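
The point of the inline-asm fast path above is to read the pthread TLS slot
array directly instead of calling pthread_getspecific(). A small sketch of the
invariant it must satisfy (this is what CheckFastTls() in platform-posix.cc
verifies at key-creation time; the value is an arbitrary marker):

  Thread::LocalStorageKey key = Thread::CreateThreadLocalKey();
  Thread::SetThreadLocal(key, reinterpret_cast<void*>(0x1234CAFE));
  // Fast segment-relative load and the portable pthread path must agree.
  ASSERT(Thread::GetExistingThreadLocal(key) == Thread::GetThreadLocal(key));
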
+
+
+// ----------------------------------------------------------------------------
// OS
//
// This class has static methods for the different platform specific
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index cc86724437..8428303afe 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -655,7 +655,8 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
CodeEntry** entry = entries.start();
memset(entry, 0, entries.length() * sizeof(*entry));
if (sample.pc != NULL) {
- if (sample.has_external_callback) {
+ if (sample.has_external_callback && sample.state == EXTERNAL &&
+ sample.top_frame_type == StackFrame::EXIT) {
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself.
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index ac365634cd..6b62ddb18e 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -67,7 +67,7 @@ enum PropertyType {
NORMAL = 0,
// Only in fast mode.
FIELD = 1,
- CONSTANT_FUNCTION = 2,
+ CONSTANT = 2,
CALLBACKS = 3,
// Only in lookup results, not in descriptors.
HANDLER = 4,
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 80a06cb7f5..83a6a365b8 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -55,10 +55,10 @@ void LookupResult::Print(FILE* out) {
PrintF(out, " -type = normal\n");
PrintF(out, " -entry = %d", GetDictionaryEntry());
break;
- case CONSTANT_FUNCTION:
- PrintF(out, " -type = constant function\n");
- PrintF(out, " -function:\n");
- GetConstantFunction()->Print(out);
+ case CONSTANT:
+ PrintF(out, " -type = constant\n");
+ PrintF(out, " -value:\n");
+ GetConstant()->Print(out);
PrintF(out, "\n");
break;
case FIELD:
@@ -85,7 +85,7 @@ void LookupResult::Print(FILE* out) {
GetTransitionMap()->Print(out);
PrintF(out, "\n");
return;
- case CONSTANT_FUNCTION:
+ case CONSTANT:
PrintF(out, " -type = constant property transition\n");
PrintF(out, " -map:\n");
GetTransitionMap()->Print(out);
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 5213ee6077..d109de91d1 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -106,13 +106,13 @@ class FieldDescriptor: public Descriptor {
};
-class ConstantFunctionDescriptor: public Descriptor {
+class ConstantDescriptor: public Descriptor {
public:
- ConstantFunctionDescriptor(Name* key,
- JSFunction* function,
- PropertyAttributes attributes)
- : Descriptor(key, function, attributes, CONSTANT_FUNCTION,
- Representation::HeapObject()) {}
+ ConstantDescriptor(Name* key,
+ Object* value,
+ PropertyAttributes attributes)
+ : Descriptor(key, value, attributes, CONSTANT,
+ value->OptimalRepresentation()) {}
};
@@ -303,9 +303,13 @@ class LookupResult BASE_EMBEDDED {
return details_.type() == NORMAL;
}
+ bool IsConstant() {
+ ASSERT(!(details_.type() == CONSTANT && !IsFound()));
+ return details_.type() == CONSTANT;
+ }
+
bool IsConstantFunction() {
- ASSERT(!(details_.type() == CONSTANT_FUNCTION && !IsFound()));
- return details_.type() == CONSTANT_FUNCTION;
+ return IsConstant() && GetValue()->IsJSFunction();
}
bool IsDontDelete() { return details_.IsDontDelete(); }
@@ -324,7 +328,7 @@ class LookupResult BASE_EMBEDDED {
switch (type()) {
case FIELD:
case NORMAL:
- case CONSTANT_FUNCTION:
+ case CONSTANT:
return true;
case CALLBACKS: {
Object* callback = GetCallbackObject();
@@ -355,8 +359,8 @@ class LookupResult BASE_EMBEDDED {
}
return value;
}
- case CONSTANT_FUNCTION:
- return GetConstantFunction();
+ case CONSTANT:
+ return GetConstant();
case CALLBACKS:
case HANDLER:
case INTERCEPTOR:
@@ -392,9 +396,8 @@ class LookupResult BASE_EMBEDDED {
return IsTransition() && GetTransitionDetails(map).type() == FIELD;
}
- bool IsTransitionToConstantFunction(Map* map) {
- return IsTransition() &&
- GetTransitionDetails(map).type() == CONSTANT_FUNCTION;
+ bool IsTransitionToConstant(Map* map) {
+ return IsTransition() && GetTransitionDetails(map).type() == CONSTANT;
}
Map* GetTransitionMap() {
@@ -434,13 +437,22 @@ class LookupResult BASE_EMBEDDED {
}
JSFunction* GetConstantFunction() {
- ASSERT(type() == CONSTANT_FUNCTION);
+ ASSERT(type() == CONSTANT);
return JSFunction::cast(GetValue());
}
+ Object* GetConstantFromMap(Map* map) {
+ ASSERT(type() == CONSTANT);
+ return GetValueFromMap(map);
+ }
+
JSFunction* GetConstantFunctionFromMap(Map* map) {
- ASSERT(type() == CONSTANT_FUNCTION);
- return JSFunction::cast(GetValueFromMap(map));
+ return JSFunction::cast(GetConstantFromMap(map));
+ }
+
+ Object* GetConstant() {
+ ASSERT(type() == CONSTANT);
+ return GetValue();
}
Object* GetCallbackObject() {
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index c36d453d03..40fae3aa05 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -265,6 +265,9 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
}
Handle<Object> result;
uint32_t element_index = 0;
+ JSReceiver::StoreMode mode = value->IsJSObject()
+ ? JSReceiver::FORCE_FIELD
+ : JSReceiver::ALLOW_AS_CONSTANT;
if (key->IsInternalizedString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
@@ -274,7 +277,8 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
result = JSObject::SetLocalPropertyIgnoreAttributes(
- boilerplate, name, value, NONE);
+ boilerplate, name, value, NONE,
+ Object::OPTIMAL_REPRESENTATION, mode);
}
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
@@ -290,7 +294,8 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<String> name =
isolate->factory()->NewStringFromAscii(CStrVector(str));
result = JSObject::SetLocalPropertyIgnoreAttributes(
- boilerplate, name, value, NONE);
+ boilerplate, name, value, NONE,
+ Object::OPTIMAL_REPRESENTATION, mode);
}
// If setting the property on the boilerplate throws an
// exception, the exception is converted to an empty handle in
@@ -2203,8 +2208,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
}
} else {
// Ignore re-initialization of constants that have already been
- // assigned a function value.
- ASSERT(lookup.IsReadOnly() && lookup.IsConstantFunction());
+ // assigned a constant value.
+ ASSERT(lookup.IsReadOnly() && lookup.IsConstant());
}
// Use the set value as the result of the operation.
@@ -2943,6 +2948,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
JavaScriptFrame* frame = stack_iterator.frame();
ASSERT_EQ(frame->function(), generator_object->function());
+ ASSERT(frame->function()->is_compiled());
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
@@ -4969,8 +4975,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
return lookup.holder()->FastPropertyAt(
lookup.representation(),
lookup.GetFieldIndex().field_index());
- case CONSTANT_FUNCTION:
- return lookup.GetConstantFunction();
+ case CONSTANT:
+ return lookup.GetConstant();
case CALLBACKS:
case HANDLER:
case INTERCEPTOR:
@@ -8460,8 +8466,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (FLAG_parallel_recompilation && sync_with_compiler_thread) {
- while (function->IsMarkedForParallelRecompilation() ||
- function->IsInRecompileQueue() ||
+ while (function->IsInRecompileQueue() ||
function->IsMarkedForInstallingRecompiledCode()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
@@ -10554,8 +10559,8 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
}
return value;
}
- case CONSTANT_FUNCTION:
- return result->GetConstantFunction();
+ case CONSTANT:
+ return result->GetConstant();
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
if (structure->IsForeign() || structure->IsAccessorInfo()) {
@@ -13345,20 +13350,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
#endif // ENABLE_DEBUGGER_SUPPORT
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
- SealHandleScope shs(isolate);
- v8::V8::ResumeProfiler();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
- SealHandleScope shs(isolate);
- v8::V8::PauseProfiler();
- return isolate->heap()->undefined_value();
-}
-
-
// Finds the script object from the script data. NOTE: This operation uses
// heap traversal to find the function generated for the source position
// for the requested break point. For lazily compiled functions several heap
@@ -13429,50 +13420,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
}
-// Mark a function to recognize when called after GC to format the stack trace.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MarkOneShotGetter) {
- HandleScope scope(isolate);
- ASSERT_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- Handle<String> key = isolate->factory()->hidden_stack_trace_string();
- JSObject::SetHiddenProperty(fun, key, key);
- return *fun;
-}
-
-
-// Retrieve the stack trace. This could be the raw stack trace collected
-// on stack overflow or the already formatted stack trace string.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOverflowedStackTrace) {
+// Retrieve the stack trace. This is the raw stack trace that has yet to
+// be formatted. Since we only need this once, clear it afterwards.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAndClearOverflowedStackTrace) {
HandleScope scope(isolate);
ASSERT_EQ(args.length(), 1);
CONVERT_ARG_CHECKED(JSObject, error_object, 0);
String* key = isolate->heap()->hidden_stack_trace_string();
Object* result = error_object->GetHiddenProperty(key);
- if (result->IsTheHole()) result = isolate->heap()->undefined_value();
- RUNTIME_ASSERT(result->IsJSArray() ||
- result->IsString() ||
- result->IsUndefined());
+ if (result->IsTheHole()) return isolate->heap()->undefined_value();
+ RUNTIME_ASSERT(result->IsJSArray() || result->IsUndefined());
+ error_object->DeleteHiddenProperty(key);
return result;
}
-// Set or clear the stack trace attached to an stack overflow error object.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetOverflowedStackTrace) {
- HandleScope scope(isolate);
- ASSERT_EQ(args.length(), 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, value, 1);
- Handle<String> key = isolate->factory()->hidden_stack_trace_string();
- if (value->IsUndefined()) {
- error_object->DeleteHiddenProperty(*key);
- } else {
- RUNTIME_ASSERT(value->IsString());
- JSObject::SetHiddenProperty(error_object, key, value);
- }
- return *error_object;
-}
-
-
// Returns V8 version as a string.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
SealHandleScope shs(isolate);
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index a8c10d92d5..400145f892 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -244,9 +244,7 @@ namespace internal {
F(FunctionIsBuiltin, 1, 1) \
F(GetScript, 1, 1) \
F(CollectStackTrace, 3, 1) \
- F(MarkOneShotGetter, 1, 1) \
- F(GetOverflowedStackTrace, 1, 1) \
- F(SetOverflowedStackTrace, 2, 1) \
+ F(GetAndClearOverflowedStackTrace, 1, 1) \
F(GetV8Version, 0, 1) \
\
F(ClassOf, 1, 1) \
@@ -467,10 +465,7 @@ namespace internal {
F(TransitionElementsKind, 2, 1) \
F(TransitionElementsSmiToDouble, 1, 1) \
F(TransitionElementsDoubleToObject, 1, 1) \
- F(HaveSameMap, 2, 1) \
- /* profiler */ \
- F(ProfilerResume, 0, 1) \
- F(ProfilerPause, 0, 1)
+ F(HaveSameMap, 2, 1)
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index 222b3182e8..d72ed1acdb 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -69,6 +69,7 @@
#include "platform.h"
#include "simulator.h"
#include "v8threads.h"
+#include "vm-state-inl.h"
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
@@ -621,9 +622,13 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
return;
}
- const Address callback = isolate->external_callback();
- if (callback != NULL) {
- external_callback = callback;
+ ExternalCallbackScope* scope = isolate->external_callback_scope();
+ Address handler = Isolate::handler(isolate->thread_local_top());
+ // If there is a handler on top of the external callback scope then
+ // we have already entered JavaScript again and the external callback
+ // is not the top function.
+ if (scope && scope->scope_address() < handler) {
+ external_callback = scope->callback();
has_external_callback = true;
} else {
// Sample potential return address value for frameless invocation of
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index dda55919c4..b47452e421 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -781,7 +781,10 @@ class Page : public MemoryChunk {
// Object area size in bytes.
static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
- // Maximum object size that fits in a page.
+ // Maximum object size that fits in a page. Objects larger than that size
+ // are allocated in large object space and are never moved in memory. This
+ // also applies to new space allocation, since objects are never migrated
+ // from new space to large object space.
static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
// Page size mask.
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 436cd46ce1..73c7a0a208 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -335,10 +335,10 @@ Handle<Code> StubCache::ComputeLoadViaGetter(Handle<Name> name,
Handle<Code> StubCache::ComputeLoadConstant(Handle<Name> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<JSFunction> value) {
+ Handle<Object> value) {
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
Handle<Code> handler = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::CONSTANT_FUNCTION);
+ name, receiver, stub_holder, Code::LOAD_IC, Code::CONSTANT);
if (!handler.is_null()) return handler;
LoadStubCompiler compiler(isolate_);
@@ -417,11 +417,11 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<Name> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<JSFunction> value) {
+ Handle<Object> value) {
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
Handle<Code> handler = FindLoadHandler(
name, receiver, stub_holder, Code::KEYED_LOAD_IC,
- Code::CONSTANT_FUNCTION);
+ Code::CONSTANT);
if (!handler.is_null()) return handler;
KeyedLoadStubCompiler compiler(isolate_);
@@ -563,16 +563,15 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
Code::STORE_IC, Code::NORMAL, stub.GetExtraICState());
if (!code.is_null()) return code;
- if (is_constant) return stub.GetCode(isolate_);
-
// Replace the placeholder cell and global object map with the actual global
// cell and receiver map.
- Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
Handle<Map> meta_map(isolate_->heap()->meta_map());
Handle<Object> receiver_map(receiver->map(), isolate_);
code = stub.GetCodeCopyFromTemplate(isolate_);
code->ReplaceNthObject(1, *meta_map, *receiver_map);
+ Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
code->ReplaceNthObject(1, *cell_map, *cell);
+
JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
@@ -699,7 +698,7 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
}
Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::CONSTANT_FUNCTION, argc, cache_holder);
+ kind, extra_state, Code::CONSTANT, argc, cache_holder);
Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -1620,14 +1619,14 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadConstant(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
- Handle<JSFunction> value) {
+ Handle<Object> value) {
Label success;
HandlerFrontend(object, receiver(), holder, name, &success);
__ bind(&success);
GenerateLoadConstant(value);
// Return the generated code.
- return GetCode(kind(), Code::CONSTANT_FUNCTION, name);
+ return GetCode(kind(), Code::CONSTANT, name);
}
@@ -2131,14 +2130,14 @@ Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
if (function->shared()->name()->IsString()) {
function_name = Handle<String>(String::cast(function->shared()->name()));
}
- return GetCode(Code::CONSTANT_FUNCTION, function_name);
+ return GetCode(Code::CONSTANT, function_name);
}
CallOptimization::CallOptimization(LookupResult* lookup) {
if (lookup->IsFound() &&
lookup->IsCacheable() &&
- lookup->type() == CONSTANT_FUNCTION) {
+ lookup->IsConstantFunction()) {
// We only optimize constant function calls.
Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
} else {
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 73a1a8a76f..bd0678ed3a 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -144,7 +144,7 @@ class StubCache {
Handle<Code> ComputeLoadConstant(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<JSFunction> value);
+ Handle<Object> value);
Handle<Code> ComputeLoadInterceptor(Handle<Name> name,
Handle<JSObject> object,
@@ -176,7 +176,7 @@ class StubCache {
Handle<Code> ComputeKeyedLoadConstant(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<JSFunction> value);
+ Handle<Object> value);
Handle<Code> ComputeKeyedLoadInterceptor(Handle<Name> name,
Handle<JSObject> object,
@@ -708,7 +708,7 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
- Handle<JSFunction> value);
+ Handle<Object> value);
Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
@@ -741,7 +741,7 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<JSObject> holder,
PropertyIndex field,
Representation representation);
- void GenerateLoadConstant(Handle<JSFunction> value);
+ void GenerateLoadConstant(Handle<Object> value);
void GenerateLoadCallback(Register reg,
Handle<ExecutableAccessorInfo> callback);
void GenerateLoadInterceptor(Register holder_reg,
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 04d7f7639b..992adaa77c 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -225,8 +225,17 @@ class Token {
return op == EQ || op == EQ_STRICT;
}
+ static bool IsInequalityOp(Value op) {
+ return op == NE || op == NE_STRICT;
+ }
+
+ static bool IsArithmeticCompareOp(Value op) {
+ return IsOrderedRelationalCompareOp(op) ||
+ IsEqualityOp(op) || IsInequalityOp(op);
+ }
+
static Value NegateCompareOp(Value op) {
- ASSERT(IsCompareOp(op));
+ ASSERT(IsArithmeticCompareOp(op));
switch (op) {
case EQ: return NE;
case NE: return EQ;
@@ -243,7 +252,7 @@ class Token {
}
static Value ReverseCompareOp(Value op) {
- ASSERT(IsCompareOp(op));
+ ASSERT(IsArithmeticCompareOp(op));
switch (op) {
case EQ: return EQ;
case NE: return NE;
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index b905a74716..769df07e4f 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -709,8 +709,7 @@ void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
Representation Representation::FromType(TypeInfo info) {
if (info.IsUninitialized()) return Representation::None();
- // TODO(verwaest): Return Smi rather than Integer32.
- if (info.IsSmi()) return Representation::Integer32();
+ if (info.IsSmi()) return Representation::Smi();
if (info.IsInteger32()) return Representation::Integer32();
if (info.IsDouble()) return Representation::Double();
if (info.IsNumber()) return Representation::Double();
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 601012d58e..d5357b4f2c 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -237,7 +237,7 @@ function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
function DataViewGetBuffer() {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.buffer', this]);
}
return %DataViewGetBuffer(this);
@@ -245,7 +245,7 @@ function DataViewGetBuffer() {
function DataViewGetByteOffset() {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.byteOffset', this]);
}
return %DataViewGetByteOffset(this);
@@ -253,7 +253,7 @@ function DataViewGetByteOffset() {
function DataViewGetByteLength() {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.byteLength', this]);
}
return %DataViewGetByteLength(this);
@@ -265,7 +265,7 @@ function ToPositiveDataViewOffset(offset) {
function DataViewGetInt8(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.getInt8', this]);
}
if (%_ArgumentsLength() < 1) {
@@ -278,10 +278,10 @@ function DataViewGetInt8(offset, little_endian) {
function DataViewSetInt8(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.setInt8', this]);
}
- if (%_ArgumentsLength() < 1) {
+ if (%_ArgumentsLength() < 2) {
throw MakeTypeError('invalid_argument');
}
%DataViewSetInt8(this,
@@ -292,7 +292,7 @@ function DataViewSetInt8(offset, value, little_endian) {
function DataViewGetUint8(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.getUint8', this]);
}
if (%_ArgumentsLength() < 1) {
@@ -305,10 +305,10 @@ function DataViewGetUint8(offset, little_endian) {
function DataViewSetUint8(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.setUint8', this]);
}
- if (%_ArgumentsLength() < 1) {
+ if (%_ArgumentsLength() < 2) {
throw MakeTypeError('invalid_argument');
}
%DataViewSetUint8(this,
@@ -319,7 +319,7 @@ function DataViewSetUint8(offset, value, little_endian) {
function DataViewGetInt16(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.getInt16', this]);
}
if (%_ArgumentsLength() < 1) {
@@ -332,10 +332,10 @@ function DataViewGetInt16(offset, little_endian) {
function DataViewSetInt16(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.setInt16', this]);
}
- if (%_ArgumentsLength() < 1) {
+ if (%_ArgumentsLength() < 2) {
throw MakeTypeError('invalid_argument');
}
%DataViewSetInt16(this,
@@ -346,7 +346,7 @@ function DataViewSetInt16(offset, value, little_endian) {
function DataViewGetUint16(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.getUint16', this]);
}
if (%_ArgumentsLength() < 1) {
@@ -359,10 +359,10 @@ function DataViewGetUint16(offset, little_endian) {
function DataViewSetUint16(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.setUint16', this]);
}
- if (%_ArgumentsLength() < 1) {
+ if (%_ArgumentsLength() < 2) {
throw MakeTypeError('invalid_argument');
}
%DataViewSetUint16(this,
@@ -373,7 +373,7 @@ function DataViewSetUint16(offset, value, little_endian) {
function DataViewGetInt32(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.getInt32', this]);
}
if (%_ArgumentsLength() < 1) {
@@ -386,7 +386,7 @@ function DataViewGetInt32(offset, little_endian) {
function DataViewSetInt32(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.setInt32', this]);
}
if (%_ArgumentsLength() < 2) {
@@ -400,7 +400,7 @@ function DataViewSetInt32(offset, value, little_endian) {
function DataViewGetUint32(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.getUint32', this]);
}
if (%_ArgumentsLength() < 1) {
@@ -413,10 +413,10 @@ function DataViewGetUint32(offset, little_endian) {
function DataViewSetUint32(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.setUint32', this]);
}
- if (%_ArgumentsLength() < 1) {
+ if (%_ArgumentsLength() < 2) {
throw MakeTypeError('invalid_argument');
}
%DataViewSetUint32(this,
@@ -427,7 +427,7 @@ function DataViewSetUint32(offset, value, little_endian) {
function DataViewGetFloat32(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.getFloat32', this]);
}
if (%_ArgumentsLength() < 1) {
@@ -440,10 +440,10 @@ function DataViewGetFloat32(offset, little_endian) {
function DataViewSetFloat32(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.setFloat32', this]);
}
- if (%_ArgumentsLength() < 1) {
+ if (%_ArgumentsLength() < 2) {
throw MakeTypeError('invalid_argument');
}
%DataViewSetFloat32(this,
@@ -454,7 +454,7 @@ function DataViewSetFloat32(offset, value, little_endian) {
function DataViewGetFloat64(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.getFloat64', this]);
}
if (%_ArgumentsLength() < 1) {
@@ -467,10 +467,10 @@ function DataViewGetFloat64(offset, little_endian) {
function DataViewSetFloat64(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_reciever',
+ throw MakeTypeError('incompatible_method_receiver',
['DataView.setFloat64', this]);
}
- if (%_ArgumentsLength() < 1) {
+ if (%_ArgumentsLength() < 2) {
throw MakeTypeError('invalid_argument');
}
%DataViewSetFloat64(this,
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 8bf91293fb..70ddccd6a7 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -206,6 +206,7 @@ int Type::LubBitset() {
case DECLARED_ACCESSOR_INFO_TYPE:
case EXECUTABLE_ACCESSOR_INFO_TYPE:
case ACCESSOR_PAIR_TYPE:
+ case FIXED_ARRAY_TYPE:
return kInternal;
default:
UNREACHABLE();
@@ -482,6 +483,7 @@ Type* Type::Optional(Handle<Type> type) {
Representation Representation::FromType(Handle<Type> type) {
if (type->Is(Type::None())) return Representation::None();
+ if (type->Is(Type::Smi())) return Representation::Smi();
if (type->Is(Type::Signed32())) return Representation::Integer32();
if (type->Is(Type::Number())) return Representation::Double();
return Representation::Tagged();
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index dfe1e20c32..ff2247cba1 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -238,6 +238,8 @@ namespace internal {
SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
+ SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \
+ SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \
SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \
SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 1f1ff7add7..bae1a70f8b 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 20
-#define BUILD_NUMBER 7
+#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index 862c17e16e..658773e6d6 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -29,7 +29,8 @@
#define V8_VM_STATE_INL_H_
#include "vm-state.h"
-#include "runtime-profiler.h"
+#include "log.h"
+#include "simulator.h"
namespace v8 {
namespace internal {
@@ -80,12 +81,26 @@ VMState<Tag>::~VMState() {
ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
- : isolate_(isolate), previous_callback_(isolate->external_callback()) {
- isolate_->set_external_callback(callback);
+ : isolate_(isolate),
+ callback_(callback),
+ previous_scope_(isolate->external_callback_scope()) {
+#ifdef USE_SIMULATOR
+ int32_t sp = Simulator::current(isolate)->get_register(Simulator::sp);
+ scope_address_ = reinterpret_cast<Address>(static_cast<intptr_t>(sp));
+#endif
+ isolate_->set_external_callback_scope(this);
}
ExternalCallbackScope::~ExternalCallbackScope() {
- isolate_->set_external_callback(previous_callback_);
+ isolate_->set_external_callback_scope(previous_scope_);
+}
+
+Address ExternalCallbackScope::scope_address() {
+#ifdef USE_SIMULATOR
+ return scope_address_;
+#else
+ return reinterpret_cast<Address>(this);
+#endif
}
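
A small sketch of the intended nesting (illustrative; OuterCallback and
InnerCallback are placeholder functions):

  {
    ExternalCallbackScope outer(isolate, FUNCTION_ADDR(OuterCallback));
    // isolate->external_callback_scope() now points at `outer`.
    {
      ExternalCallbackScope inner(isolate, FUNCTION_ADDR(InnerCallback));
      // The scopes form a stack: inner.previous() == &outer, and the sampler
      // compares inner.scope_address() against the current try-handler.
    }
    // The destructor restored `outer` as the current scope.
  }
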
diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h
index 765b570159..f592bb92ca 100644
--- a/deps/v8/src/vm-state.h
+++ b/deps/v8/src/vm-state.h
@@ -50,9 +50,18 @@ class ExternalCallbackScope BASE_EMBEDDED {
public:
inline ExternalCallbackScope(Isolate* isolate, Address callback);
inline ~ExternalCallbackScope();
+ Address callback() { return callback_; }
+ Address* callback_address() { return &callback_; }
+ ExternalCallbackScope* previous() { return previous_scope_; }
+ inline Address scope_address();
+
private:
Isolate* isolate_;
- Address previous_callback_;
+ Address callback_;
+ ExternalCallbackScope* previous_scope_;
+#ifdef USE_SIMULATOR
+ Address scope_address_;
+#endif
};
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index b9af527eea..ae9aeee812 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -51,6 +51,16 @@ void Assembler::emitl(uint32_t x) {
}
+void Assembler::emitp(void* x, RelocInfo::Mode rmode) {
+ uintptr_t value = reinterpret_cast<uintptr_t>(x);
+ Memory::uintptr_at(pc_) = value;
+ if (!RelocInfo::IsNone(rmode)) {
+ RecordRelocInfo(rmode, value);
+ }
+ pc_ += sizeof(uintptr_t);
+}
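
On x64, where uintptr_t is 8 bytes, the new helper is behaviorally equivalent
to the old pattern at its call sites,

  emitq(reinterpret_cast<uintptr_t>(x), rmode);

but it takes the pointer directly, so relocation info is recorded against the
pointer-sized value without repeating the cast at every call site (see the
assembler-x64.cc hunks below).
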
+
+
void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
Memory::uint64_at(pc_) = x;
if (!RelocInfo::IsNone(rmode)) {
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index aaab839121..f5939c3b7e 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -1377,7 +1377,7 @@ void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
emit(0x48); // REX.W
emit(0xA1);
- emitq(reinterpret_cast<uintptr_t>(value), mode);
+ emitp(value, mode);
}
@@ -1529,7 +1529,7 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value), rmode);
+ emitp(value, rmode);
}
@@ -1606,7 +1606,7 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
ASSERT(!HEAP->InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
+ emitp(value.location(), mode);
}
}
@@ -1998,7 +1998,7 @@ void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
emit(0x48); // REX.W
emit(0xA3);
- emitq(reinterpret_cast<uintptr_t>(dst), mode);
+ emitp(dst, mode);
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index aec50c9110..07afc129dc 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -1444,6 +1444,7 @@ class Assembler : public AssemblerBase {
void emit(byte x) { *pc_++ = x; }
inline void emitl(uint32_t x);
+ inline void emitp(void* x, RelocInfo::Mode rmode);
inline void emitq(uint64_t x, RelocInfo::Mode rmode);
inline void emitw(uint16_t x);
inline void emit_code_target(Handle<Code> target,
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index e090437d55..551a71690e 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -39,6 +39,16 @@ namespace v8 {
namespace internal {
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -296,26 +306,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in rax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
- __ Ret();
-
- __ bind(&check_heap_number);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_builtin, Label::kNear);
- __ Ret();
-
- __ bind(&call_builtin);
- __ pop(rcx); // Pop return address.
- __ push(rax);
- __ push(rcx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in rsi.
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index d7a73d75c9..b45e9663e2 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -46,21 +46,7 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- DisallowHeapAllocation nha;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // Get the optimized code.
- Code* code = function->code();
-
- // The optimized code is going to be patched, so we cannot use it any more.
- function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
-
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
@@ -71,7 +57,7 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
// before the safepoint table (space was allocated there when the Code
// object was created, if necessary).
- Address instruction_start = function->code()->instruction_start();
+ Address instruction_start = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
@@ -93,25 +79,6 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
prev_call_address = call_address;
#endif
}
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
- }
}
@@ -610,6 +577,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index c9b808c10c..364c3a1824 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -410,7 +410,7 @@ bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
return constant->Integer32Value();
}
@@ -697,12 +697,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
- ASSERT(!info()->IsStub());
- DeoptimizeIf(no_condition, environment, Deoptimizer::SOFT);
-}
-
-
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1305,9 +1299,19 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ imull(left, left, Immediate(right_value));
}
} else if (right->IsStackSlot()) {
- __ imull(left, ToOperand(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ SmiToInteger32(left, left);
+ __ imul(left, ToOperand(right));
+ } else {
+ __ imull(left, ToOperand(right));
+ }
} else {
- __ imull(left, ToRegister(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ SmiToInteger32(left, left);
+ __ imul(left, ToRegister(right));
+ } else {
+ __ imull(left, ToRegister(right));
+ }
}
if (can_overflow) {
@@ -1364,13 +1368,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
- __ andl(ToRegister(left), ToOperand(right));
+ __ and_(ToRegister(left), ToOperand(right));
break;
case Token::BIT_OR:
- __ orl(ToRegister(left), ToOperand(right));
+ __ or_(ToRegister(left), ToOperand(right));
break;
case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToOperand(right));
+ __ xor_(ToRegister(left), ToOperand(right));
break;
default:
UNREACHABLE();
@@ -1380,13 +1384,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
- __ andl(ToRegister(left), ToRegister(right));
+ __ and_(ToRegister(left), ToRegister(right));
break;
case Token::BIT_OR:
- __ orl(ToRegister(left), ToRegister(right));
+ __ or_(ToRegister(left), ToRegister(right));
break;
case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToRegister(right));
+ __ xor_(ToRegister(left), ToRegister(right));
break;
default:
UNREACHABLE();
@@ -1469,9 +1473,17 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ subl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
- __ subl(ToRegister(left), ToRegister(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ subq(ToRegister(left), ToRegister(right));
+ } else {
+ __ subl(ToRegister(left), ToRegister(right));
+ }
} else {
- __ subl(ToRegister(left), ToOperand(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ subq(ToRegister(left), ToOperand(right));
+ } else {
+ __ subl(ToRegister(left), ToOperand(right));
+ }
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
@@ -1510,12 +1522,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- __ Move(ToRegister(instr->result()), value);
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
+ __ LoadObject(ToRegister(instr->result()), value);
}
@@ -1657,16 +1664,28 @@ void LCodeGen::DoAddI(LAddI* instr) {
MemOperand(ToRegister(left), offset));
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- __ leal(ToRegister(instr->result()), address);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ lea(ToRegister(instr->result()), address);
+ } else {
+ __ leal(ToRegister(instr->result()), address);
+ }
}
} else {
if (right->IsConstantOperand()) {
__ addl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
- __ addl(ToRegister(left), ToRegister(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ addq(ToRegister(left), ToRegister(right));
+ } else {
+ __ addl(ToRegister(left), ToRegister(right));
+ }
} else {
- __ addl(ToRegister(left), ToOperand(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ addq(ToRegister(left), ToOperand(right));
+ } else {
+ __ addl(ToRegister(left), ToOperand(right));
+ }
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr->environment());
@@ -2087,14 +2106,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
-
- __ cmpq(left, Immediate(instr->hydrogen()->right()));
- EmitBranch(instr, equal);
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Label* is_not_object,
Label* is_object) {
@@ -2703,9 +2714,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
+ } else if (lookup.IsConstant()) {
+ Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+ __ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
@@ -2736,7 +2747,7 @@ static bool CompactEmit(SmallMapList* list,
if (map->HasElementsTransition()) return false;
LookupResult lookup(isolate);
map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstantFunction();
+ return lookup.IsField() || lookup.IsConstant();
}
@@ -2886,9 +2897,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
// gets replaced during bound check elimination with the index argument
// to the bounds check, which can be tagged, so that case must be
// handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -2959,9 +2968,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -3001,9 +3008,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that
// case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -3343,38 +3348,29 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr->environment());
- Label done;
+ Label slow, allocated, done;
Register tmp = input_reg.is(rax) ? rcx : rax;
Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
- Label negative;
__ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
// return it. We do not need to patch the stack since |input| and
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ testl(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
-
- __ bind(&negative);
+ __ j(zero, &done);
- Label allocated, slow;
__ AllocateHeapNumber(tmp, tmp2, &slow);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
-
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
// Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) {
- __ movq(tmp, rax);
- }
-
+ if (!tmp.is(rax)) __ movq(tmp, rax);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
@@ -3393,7 +3389,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ testl(input_reg, input_reg);
Label is_positive;
- __ j(not_sign, &is_positive);
+ __ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
@@ -4023,6 +4019,18 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, check->environment());
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->hydrogen()->skip_check()) return;
@@ -4060,7 +4068,9 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ cmpq(length, ToRegister(instr->index()));
}
}
- DeoptimizeIf(below_equal, instr->environment());
+ Condition condition =
+ instr->hydrogen()->allow_equality() ? below : below_equal;
+ ApplyCheckIf(condition, instr);
}
@@ -4074,9 +4084,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
// gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that case
// must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -4138,9 +4146,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
// input gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that case
// must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -4181,9 +4187,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
// input gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that case
// must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
+ if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -4954,6 +4958,7 @@ void LCodeGen::DoCheckMapCommon(Register reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
@@ -5021,6 +5026,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
Register reg = ToRegister(instr->temp());
ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
@@ -5028,11 +5034,9 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), instr);
- }
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(reg, prototypes->at(i));
+ DoCheckMapCommon(reg, maps->at(i), instr);
}
}
@@ -5075,6 +5079,23 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
__ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ movl(temp, Immediate((size / kPointerSize) - 1));
+ } else {
+ temp = ToRegister(instr->size());
+ __ sar(temp, Immediate(kPointerSizeLog2));
+ __ decl(temp);
+ }
+ Label loop;
+ __ bind(&loop);
+ __ Move(FieldOperand(result, temp, times_pointer_size, 0),
+ isolate()->factory()->one_pointer_filler_map());
+ __ decl(temp);
+ __ j(not_zero, &loop);
+ }
}
@@ -5347,11 +5368,15 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- if (instr->hydrogen_value()->IsSoftDeoptimize()) {
- SoftDeoptimize(instr->environment());
- } else {
- DeoptimizeIf(no_condition, instr->environment());
- }
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+ DeoptimizeIf(no_condition, instr->environment(), type);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 0a430964d0..5ad1c40a2b 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -102,7 +102,8 @@ class LCodeGen BASE_EMBEDDED {
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
- int ToInteger32(LConstantOperand* op) const;
+ int ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
bool IsTaggedConstant(LConstantOperand* op) const;
@@ -245,7 +246,7 @@ class LCodeGen BASE_EMBEDDED {
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void SoftDeoptimize(LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 2cec68b097..d6f05c0a2d 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -710,11 +710,6 @@ LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -792,8 +787,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsSmiOrTagged());
- ASSERT(right->representation().IsSmiOrTagged());
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, rdx);
LOperand* right_operand = UseFixed(right, rax);
LArithmeticT* result =
@@ -1324,17 +1319,17 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), rdx);
LOperand* right = UseFixed(instr->right(), rax);
@@ -1357,7 +1352,9 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
+ } else if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
@@ -1373,7 +1370,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, rax));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1437,9 +1434,9 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
- if (instr->representation().IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(left->representation().Equals(instr->representation()));
+ ASSERT(right->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
@@ -1472,7 +1469,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsSmiOrTagged()) {
+ } else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
@@ -1488,9 +1485,9 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstant(instr->BetterRightOperand());
LMulI* mul = new(zone()) LMulI(left, right);
@@ -1502,16 +1499,16 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1523,21 +1520,21 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
// Check to see if it would be advantageous to use an lea instruction rather
// than an add. This is the case when no overflow check is needed and there
// are multiple uses of the add's inputs, so using a 3-register add will
// preserve all input values for later uses.
bool use_lea = LAddI::UseLea(instr);
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
HValue* right_candidate = instr->BetterRightOperand();
LOperand* right = use_lea
@@ -1555,7 +1552,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
return NULL;
@@ -1652,13 +1649,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpConstantEqAndBranch(value);
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
@@ -1949,8 +1939,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = TempRegister();
+ LUnallocated* temp = NULL;
+ if (!instr->CanOmitPrototypeChecks()) temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
+ if (instr->CanOmitPrototypeChecks()) return result;
return AssignEnvironment(result);
}
@@ -1962,8 +1954,10 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = NULL;
+ if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
LCheckMaps* result = new(zone()) LCheckMaps(value);
+ if (instr->CanOmitMapChecks()) return result;
return AssignEnvironment(result);
}
@@ -2124,13 +2118,9 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsSmi());
+ ASSERT(instr->key()->representation().IsInteger32());
ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = instr->key()->representation().IsSmi();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
if (!instr->is_external()) {
@@ -2168,7 +2158,6 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = instr->key()->representation().IsSmi();
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
@@ -2180,8 +2169,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
val = UseTempRegister(instr->value());
- key = clobbers_key ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
+ key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(instr->value()->representation().IsSmiOrTagged());
object = UseTempRegister(instr->elements());
@@ -2190,12 +2178,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
key = UseTempRegister(instr->key());
} else {
val = UseRegisterOrConstantAtStart(instr->value());
-
- if (clobbers_key) {
- key = UseTempRegister(instr->key());
- } else {
- key = UseRegisterOrConstantAtStart(instr->key());
- }
+ key = UseRegisterOrConstantAtStart(instr->key());
}
}
@@ -2215,8 +2198,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = clobbers_key ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(external_pointer, key, val);
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 32ee0b9d31..50e32d8cd6 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -40,12 +40,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
@@ -72,6 +66,7 @@ class LCodeGen;
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
+ V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
@@ -79,7 +74,6 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
@@ -89,15 +83,18 @@ class LCodeGen;
V(ConstantS) \
V(ConstantT) \
V(Context) \
+ V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(DoubleToSmi) \
+ V(Drop) \
V(DummyUse) \
V(ElementsKind) \
- V(MapEnumLength) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -105,13 +102,13 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
- V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -124,6 +121,7 @@ class LCodeGen;
V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
@@ -132,6 +130,7 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
V(MathAbs) \
V(MathCos) \
V(MathExp) \
@@ -184,16 +183,10 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
V(UnknownOSRValue) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop) \
- V(InnerAllocatedObject)
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -433,6 +426,7 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -837,20 +831,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index b3e15905aa..13d7ddaa68 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -2253,7 +2253,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
@@ -2299,7 +2300,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
@@ -3836,6 +3838,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index a903ea11d6..542018fddd 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -794,11 +794,9 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- if (details.type() == CONSTANT_FUNCTION) {
- Handle<HeapObject> constant(
- HeapObject::cast(descriptors->GetValue(descriptor)));
- __ LoadHeapObject(scratch1, constant);
- __ cmpq(value_reg, scratch1);
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
@@ -857,7 +855,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- if (details.type() == CONSTANT_FUNCTION) {
+ if (details.type() == CONSTANT) {
ASSERT(value_reg.is(rax));
__ ret(0);
return;
@@ -1357,9 +1355,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadHeapObject(rax, value);
+ __ LoadObject(rax, value);
__ ret(0);
}
@@ -2500,7 +2498,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
Handle<Code> code = CompileCustomCall(object, holder,
Handle<PropertyCell>::null(),
function, Handle<String>::cast(name),
- Code::CONSTANT_FUNCTION);
+ Code::CONSTANT);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}