author     Fedor Indutny <fedor@indutny.com>  2014-10-10 14:49:02 +0400
committer  Fedor Indutny <fedor@indutny.com>  2014-10-10 14:49:02 +0400
commit     6bcea4ff932144a5fd02affefd45164fbf471e67 (patch)
tree       a8e078c679b12f0daebe10ed254239cb0d79e146 /deps/v8/src/arm64
parent     4fae2356d105e394115188a814097c4a95ae0c5d (diff)
deps: update v8 to 3.29.93.1
Diffstat (limited to 'deps/v8/src/arm64')
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h              2
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc                28
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h                 31
-rw-r--r--  deps/v8/src/arm64/builtins-arm64.cc                  7
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc              893
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.h               218
-rw-r--r--  deps/v8/src/arm64/codegen-arm64.cc                   8
-rw-r--r--  deps/v8/src/arm64/codegen-arm64.h                    2
-rw-r--r--  deps/v8/src/arm64/debug-arm64.cc                    16
-rw-r--r--  deps/v8/src/arm64/decoder-arm64.cc                   4
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc               2
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.cc                    4
-rw-r--r--  deps/v8/src/arm64/full-codegen-arm64.cc            380
-rw-r--r--  deps/v8/src/arm64/ic-arm64.cc                     1287
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.cc              4
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.h               6
-rw-r--r--  deps/v8/src/arm64/instrument-arm64.cc                2
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc   368
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.h     26
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.cc                 130
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.h                  512
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.cc         649
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.h           81
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64-inl.h        9
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc         275
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h           99
-rw-r--r--  deps/v8/src/arm64/regexp-macro-assembler-arm64.cc   26
-rw-r--r--  deps/v8/src/arm64/regexp-macro-assembler-arm64.h     2
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc               651
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h                171
-rw-r--r--  deps/v8/src/arm64/stub-cache-arm64.cc             1155
31 files changed, 2392 insertions(+), 4656 deletions(-)
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 3b24197ebf..5e1bed1e8a 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -457,7 +457,7 @@ MemOperand::MemOperand()
}
-MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
+MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
: base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
DCHECK(base.Is64Bits() && !base.IsZero());
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 7f86e14a77..c1213e9693 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -33,6 +33,7 @@
#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64-inl.h"
+#include "src/base/bits.h"
#include "src/base/cpu.h"
namespace v8 {
@@ -227,7 +228,7 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
- for (unsigned i = 0; i < ARRAY_SIZE(regs); i++) {
+ for (unsigned i = 0; i < arraysize(regs); i++) {
if (regs[i].IsRegister()) {
number_of_valid_regs++;
unique_regs |= regs[i].Bit();
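
The ARRAY_SIZE -> arraysize rename seen here (and throughout this diff) swaps the classic sizeof division macro for a type-safe variant. A minimal sketch of the idiom, with an illustrative helper name rather than v8's exact internals:

    #include <cstddef>

    // Function template that only matches real arrays; its return type
    // encodes the element count N, so sizeof() of it recovers N.
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];

    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    // int regs[8];
    // size_t n = arraysize(regs);  // 8
    // int* p = regs;
    // arraysize(p);                // compile error, not a silent bug

Unlike sizeof(a) / sizeof(a[0]), this fails to compile when handed a pointer instead of an array, which is the usual way the old macro went wrong.
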
@@ -601,7 +602,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && IsPowerOf2(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
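
The DCHECK change swaps IsPowerOf2 for base::bits::IsPowerOfTwo32. The power-of-two requirement matters because the loop's pc_offset() & (m - 1) test is only a valid modulus for power-of-two m. A standalone sketch of both checks (helper names are illustrative):

    #include <cstdint>

    // A power of two has exactly one bit set, so clearing the lowest
    // set bit (x & (x - 1)) must leave zero.
    bool IsPowerOfTwo32(uint32_t x) {
      return x != 0 && (x & (x - 1)) == 0;
    }

    // For power-of-two m, (offset & (m - 1)) == offset % m, which is
    // what Align() relies on while padding with nops.
    bool IsAligned(uint32_t offset, uint32_t m) {
      return (offset & (m - 1)) == 0;
    }
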
@@ -2208,6 +2209,17 @@ void Assembler::brk(int code) {
}
+void Assembler::EmitStringData(const char* string) {
+ size_t len = strlen(string) + 1;
+ DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+ EmitData(string, len);
+ // Pad with NULL characters until pc_ is aligned.
+ const char pad[] = {'\0', '\0', '\0', '\0'};
+ STATIC_ASSERT(sizeof(pad) == kInstructionSize);
+ EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
+}
+
+
void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
// Don't generate simulator specific code if we are building a snapshot, which
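
EmitStringData moves out of the header (see the assembler-arm64.h hunk below) and now computes its padding from pc_offset() rather than from the raw pc_ pointer. A sketch of that arithmetic, assuming the 4-byte kInstructionSize used on arm64:

    #include <cstdint>

    const int64_t kInstructionSize = 4;

    // Round value up to the next multiple of a power-of-two alignment.
    int64_t RoundUp(int64_t value, int64_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // NUL padding needed after the string data: e.g. a pc offset of 10
    // needs 2 bytes to reach the next instruction boundary at 12.
    int64_t PaddingFor(int64_t pc_offset) {
      return RoundUp(pc_offset, kInstructionSize) - pc_offset;
    }
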
@@ -2443,7 +2455,7 @@ void Assembler::LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base());
- ptrdiff_t offset = addr.offset();
+ int64_t offset = addr.offset();
if (addr.IsImmediateOffset()) {
LSDataSize size = CalcLSDataSize(op);
@@ -2492,18 +2504,18 @@ void Assembler::LoadStore(const CPURegister& rt,
}
-bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
+bool Assembler::IsImmLSUnscaled(int64_t offset) {
return is_int9(offset);
}
-bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
+bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
bool offset_is_size_multiple = (((offset >> size) << size) == offset);
return offset_is_size_multiple && is_uint12(offset >> size);
}
-bool Assembler::IsImmLSPair(ptrdiff_t offset, LSDataSize size) {
+bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
bool offset_is_size_multiple = (((offset >> size) << size) == offset);
return offset_is_size_multiple && is_int7(offset >> size);
}
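
The ptrdiff_t -> int64_t change running through this file pins MemOperand offsets to a fixed 64-bit signed type on every host; these predicates then ask whether an offset fits the ARM64 load/store immediate fields. A standalone sketch with hand-rolled range helpers (v8 defines its own is_int9/is_uint12):

    #include <cstdint>

    static bool is_int9(int64_t v) { return v >= -256 && v <= 255; }
    static bool is_uint12(int64_t v) { return v >= 0 && v <= 4095; }

    // Unscaled form: 9-bit signed byte offset.
    bool IsImmLSUnscaled(int64_t offset) { return is_int9(offset); }

    // Scaled form: the offset must be a multiple of the access size
    // (size is log2 of the byte width) and fit 12 unsigned bits after
    // scaling, e.g. up to 4095 * 8 = 32760 for an 8-byte load.
    bool IsImmLSScaled(int64_t offset, unsigned size) {
      bool multiple = ((offset >> size) << size) == offset;
      return multiple && is_uint12(offset >> size);
    }
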
@@ -2664,7 +2676,7 @@ bool Assembler::IsImmLogical(uint64_t value,
int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
// Ensure that the index to the multipliers array is within bounds.
DCHECK((multiplier_idx >= 0) &&
- (static_cast<size_t>(multiplier_idx) < ARRAY_SIZE(multipliers)));
+ (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
uint64_t multiplier = multipliers[multiplier_idx];
uint64_t candidate = (b - a) * multiplier;
@@ -3091,7 +3103,7 @@ void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
}
-void PatchingAssembler::PatchAdrFar(ptrdiff_t target_offset) {
+void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
// The code at the current instruction should be:
// adr rd, 0
// nop (adr_far)
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 1bafce8454..9b1c5e6746 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -276,6 +276,11 @@ struct FPRegister : public CPURegister {
(kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+ // TODO(turbofan): Proper float32 support.
+ static int NumAllocatableAliasedRegisters() {
+ return NumAllocatableRegisters();
+ }
+
// Return true if the register is one that crankshaft can allocate.
bool IsAllocatable() const {
return (Bit() & kAllocatableFPRegisters) != 0;
@@ -699,7 +704,7 @@ class MemOperand {
public:
inline MemOperand();
inline explicit MemOperand(Register base,
- ptrdiff_t offset = 0,
+ int64_t offset = 0,
AddrMode addrmode = Offset);
inline explicit MemOperand(Register base,
Register regoffset,
@@ -715,7 +720,7 @@ class MemOperand {
const Register& base() const { return base_; }
const Register& regoffset() const { return regoffset_; }
- ptrdiff_t offset() const { return offset_; }
+ int64_t offset() const { return offset_; }
AddrMode addrmode() const { return addrmode_; }
Shift shift() const { return shift_; }
Extend extend() const { return extend_; }
@@ -742,7 +747,7 @@ class MemOperand {
private:
Register base_;
Register regoffset_;
- ptrdiff_t offset_;
+ int64_t offset_;
AddrMode addrmode_;
Shift shift_;
Extend extend_;
@@ -1733,16 +1738,7 @@ class Assembler : public AssemblerBase {
// Copy a string into the instruction stream, including the terminating NULL
// character. The instruction pointer (pc_) is then aligned correctly for
// subsequent instructions.
- void EmitStringData(const char * string) {
- size_t len = strlen(string) + 1;
- DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
- EmitData(string, len);
- // Pad with NULL characters until pc_ is aligned.
- const char pad[] = {'\0', '\0', '\0', '\0'};
- STATIC_ASSERT(sizeof(pad) == kInstructionSize);
- byte* next_pc = AlignUp(pc_, kInstructionSize);
- EmitData(&pad, next_pc - pc_);
- }
+ void EmitStringData(const char* string);
// Pseudo-instructions ------------------------------------------------------
@@ -1859,6 +1855,9 @@ class Assembler : public AssemblerBase {
inline static Instr ImmBarrierType(int imm2);
inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
+ static bool IsImmLSUnscaled(int64_t offset);
+ static bool IsImmLSScaled(int64_t offset, LSDataSize size);
+
// Move immediates encoding.
inline static Instr ImmMoveWide(uint64_t imm);
inline static Instr ShiftMoveWide(int64_t shift);
@@ -1942,12 +1941,10 @@ class Assembler : public AssemblerBase {
void LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op);
- static bool IsImmLSUnscaled(ptrdiff_t offset);
- static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
- static bool IsImmLSPair(ptrdiff_t offset, LSDataSize size);
+ static bool IsImmLSPair(int64_t offset, LSDataSize size);
void Logical(const Register& rd,
const Register& rn,
@@ -2292,7 +2289,7 @@ class PatchingAssembler : public Assembler {
// See definition of PatchAdrFar() for details.
static const int kAdrFarPatchableNNops = 2;
static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
- void PatchAdrFar(ptrdiff_t target_offset);
+ void PatchAdrFar(int64_t target_offset);
};
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 2e0aed77a5..e9ad8f165d 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -10,8 +10,7 @@
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -781,8 +780,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 3ef118aae9..35b60f7a6f 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -8,159 +8,20 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x2: function info
- Register registers[] = { cp, x2 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x1: function
- Register registers[] = { cp, x1 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value
- Register registers[] = { cp, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value
- Register registers[] = { cp, x0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x3: array literals array
- // x2: array literal index
- // x1: constant elements
- Register registers[] = { cp, x3, x2, x1 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi(),
- Representation::Tagged() };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
- representations);
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x3: object literals array
- // x2: object literal index
- // x1: constant properties
- // x0: object literal flags
- Register registers[] = { cp, x3, x2, x1, x0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x2: feedback vector
- // x3: call feedback slot
- Register registers[] = { cp, x2, x3 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallFunctionStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // x1 function the function to call
- Register registers[] = {cp, x1};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallConstructStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // x0 : number of arguments
- // x1 : the function to call
- // x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
- // TODO(turbofan): So far we don't gather type feedback and hence skip the
- // slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {cp, x0, x1, x2};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x2: length
- // x1: index (of last match)
- // x0: string
- Register registers[] = { cp, x2, x1, x0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value (js_array)
- // x1: to_map
- Register registers[] = { cp, x0, x1 };
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(entry));
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value to compare
- Register registers[] = { cp, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(CompareNilIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-
-const Register InterfaceDescriptor::ContextRegister() { return cp; }
-
static void InitializeArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
// cp: context
// x1: function
@@ -170,247 +31,90 @@ static void InitializeArrayConstructorDescriptor(
Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, x1, x2 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, x1, x2, x0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
static void InitializeInternalArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // cp: context
- // x1: constructor function
- // x0: number of arguments to the constructor function
Address deopt_handler = Runtime::FunctionForId(
Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, x1 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, x1, x0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
-}
-
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value
- Register registers[] = { cp, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(ToBooleanIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x1: left operand
- // x0: right operand
- Register registers[] = { cp, x1, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x2: allocation site
- // x1: left operand
- // x0: right operand
- Register registers[] = { cp, x2, x1, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x1: left operand
- // x0: right operand
- Register registers[] = { cp, x1, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kStringAdd)->entry);
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- static PlatformInterfaceDescriptor noInlineDescriptor =
- PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- Register registers[] = { cp, // context
- x1, // JSFunction
- x0, // actual number of arguments
- x2, // expected number of arguments
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &default_descriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- Register registers[] = { cp, // context
- x2, // key
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &noInlineDescriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- Register registers[] = { cp, // context
- x2, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &noInlineDescriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- Register registers[] = { cp, // context
- x0, // receiver
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &default_descriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- Register registers[] = { cp, // context
- x0, // callee
- x4, // call_data
- x2, // holder
- x1, // api_function_address
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &default_descriptor);
- }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
#define __ ACCESS_MASM(masm)
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
- int param_count = descriptor->GetEnvironmentParameterCount();
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) ||
- x0.Is(descriptor->GetEnvironmentParameterRegister(param_count - 1)));
+ x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) {
- queue.Queue(descriptor->GetEnvironmentParameterRegister(i));
+ queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
}
queue.PushQueued();
- ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, param_count);
}
@@ -519,30 +223,30 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
if ((cond == lt) || (cond == gt)) {
__ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
ge);
+ } else if (cond == eq) {
+ __ JumpIfHeapNumber(right, &heap_number);
} else {
Register right_type = scratch;
__ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
&heap_number);
// Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
- __ B(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if ((cond == le) || (cond == ge)) {
- __ Cmp(right_type, ODDBALL_TYPE);
- __ B(ne, &return_equal);
- __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ Mov(result, GREATER);
- } else {
- // undefined >= undefined should fail.
- __ Mov(result, LESS);
- }
- __ Ret();
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if ((cond == le) || (cond == ge)) {
+ __ Cmp(right_type, ODDBALL_TYPE);
+ __ B(ne, &return_equal);
+ __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ Mov(result, GREATER);
+ } else {
+ // undefined >= undefined should fail.
+ __ Mov(result, LESS);
}
+ __ Ret();
}
}
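
This hunk mostly re-indents the logic (the cond == eq case now takes an early JumpIfHeapNumber), but the sentinel trick it preserves deserves spelling out: for identical operands the stub returns EQUAL, except that ES5 11.8.5 makes ordered comparisons involving undefined false, so it returns GREATER for <= and LESS for >= to force the caller's condition check to fail. A sketch of the convention:

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };
    enum Cond { kEq, kLe, kGe };

    // Result for "x <cond> x" when both sides are the same value; the
    // caller tests the result against 0 with the same condition.
    CompareResult IdenticalResult(Cond cond, bool is_undefined) {
      if (!is_undefined || cond == kEq) return EQUAL;  // undefined == undefined is true
      return (cond == kLe) ? GREATER   // makes "result <= 0" fail
                           : LESS;     // makes "result >= 0" fail
    }
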
@@ -646,10 +350,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register right,
FPRegister left_d,
FPRegister right_d,
- Register scratch,
Label* slow,
bool strict) {
- DCHECK(!AreAliased(left, right, scratch));
DCHECK(!AreAliased(left_d, right_d));
DCHECK((left.is(x0) && right.is(x1)) ||
(right.is(x0) && left.is(x1)));
@@ -663,8 +365,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// If right is not a number and left is a smi, then strict equality cannot
// succeed. Return non-equal.
Label is_heap_number;
- __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
- &is_heap_number);
+ __ JumpIfHeapNumber(right, &is_heap_number);
// Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
if (!right.is(result)) {
__ Mov(result, NOT_EQUAL);
@@ -674,7 +375,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
} else {
// Smi compared non-strictly with a non-smi, non-heap-number. Call the
// runtime.
- __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ __ JumpIfNotHeapNumber(right, slow);
}
// Left is the smi. Right is a heap number. Load right value into right_d, and
@@ -689,8 +390,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// If left is not a number and right is a smi then strict equality cannot
// succeed. Return non-equal.
Label is_heap_number;
- __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
- &is_heap_number);
+ __ JumpIfHeapNumber(left, &is_heap_number);
// Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
if (!left.is(result)) {
__ Mov(result, NOT_EQUAL);
@@ -700,7 +400,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
} else {
// Smi compared non-strictly with a non-smi, non-heap-number. Call the
// runtime.
- __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ __ JumpIfNotHeapNumber(left, slow);
}
// Right is the smi. Left is a heap number. Load left value into left_d, and
@@ -767,18 +467,15 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+ CompareICState::State expected,
Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(input, fail);
}
// We could be strict about internalized/non-internalized here, but as long as
// hydrogen doesn't care, the stub doesn't have to care either.
@@ -786,15 +483,15 @@ static void ICCompareStub_CheckInputType(MacroAssembler* masm,
}
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Register lhs = x1;
Register rhs = x0;
Register result = x0;
Condition cond = GetCondition();
Label miss;
- ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
+ CompareICStub_CheckInputType(masm, lhs, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, right(), &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
@@ -827,7 +524,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// rhs_d, left into lhs_d.
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
- EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
+ EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
__ Bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in rhs_d and
@@ -901,20 +598,20 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
&flat_string_check, &slow);
}
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
__ Bind(&flat_string_check);
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
- x15, &slow);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
+ x15, &slow);
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
x11);
if (cond == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
- x10, x11, x12);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
+ x12);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
- x10, x11, x12, x13);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
+ x12, x13);
}
// Never fall through to here.
@@ -964,7 +661,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
saved_fp_regs.Remove(*(masm->FPTmpList()));
__ PushCPURegList(saved_regs);
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ PushCPURegList(saved_fp_regs);
}
@@ -973,7 +670,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ PopCPURegList(saved_fp_regs);
}
__ PopCPURegList(saved_regs);
@@ -1024,8 +721,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Register result_tagged = x0;
Register base_tagged = x10;
- Register exponent_tagged = x11;
- Register exponent_integer = x12;
+ Register exponent_tagged = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent_tagged.is(x11));
+ Register exponent_integer = MathPowIntegerDescriptor::exponent();
+ DCHECK(exponent_integer.is(x12));
Register scratch1 = x14;
Register scratch0 = x15;
Register saved_lr = x19;
@@ -1044,7 +743,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label done;
// Unpack the inputs.
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi;
Label unpack_exponent;
@@ -1068,20 +767,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// exponent_tagged is a heap number, so load its double value.
__ Ldr(exponent_double,
FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
__ JumpIfSmi(exponent_tagged, &exponent_is_smi);
__ Ldr(exponent_double,
FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
}
// Handle double (heap number) exponents.
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
// Detect integer exponents stored as doubles and handle those in the
// integer fast-path.
__ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
scratch0_double, &exponent_is_integer);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
FPRegister half_double = d3;
FPRegister minus_half_double = d4;
// Detect square root case. Crankshaft detects constant +/-0.5 at compile
@@ -1232,7 +931,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Fcmp(result_double, 0.0);
__ B(&done, ne);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Bail out to runtime code.
__ Bind(&call_runtime);
// Put the arguments back on the stack.
@@ -1373,7 +1072,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
// registers.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_, x10, 3);
+ __ EnterExitFrame(save_doubles(), x10, 3);
DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
@@ -1470,7 +1169,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(argc, 2 * kPointerSize);
__ Peek(target, 3 * kPointerSize);
- __ LeaveExitFrame(save_doubles_, x10, true);
+ __ LeaveExitFrame(save_doubles(), x10, true);
DCHECK(jssp.Is(__ StackPointer()));
// Pop or drop the remaining stack slots and return from the stub.
// jssp[24]: Arguments array (of size argc), including receiver.
@@ -1542,7 +1241,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// x4: argv.
// Output:
// x0: result.
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
DCHECK(jssp.Is(__ StackPointer()));
Register code_entry = x0;
@@ -1573,7 +1272,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ Fmov(fp_zero, 0.0);
// Build an entry frame (see layout below).
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
__ Mov(x13, bad_frame_pointer);
__ Mov(x12, Smi::FromInt(marker));
@@ -1658,8 +1357,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// x2: receiver.
// x3: argc.
// x4: argv.
- ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
- : Builtins::kJSEntryTrampoline,
+ ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
+ ? Builtins::kJSConstructEntryTrampoline
+ : Builtins::kJSEntryTrampoline,
isolate());
__ Mov(x10, entry);
@@ -1711,7 +1411,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
@@ -1902,21 +1602,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
-Register InstanceofStub::left() {
- // Object to check (instanceof lhs).
- return x11;
-}
-
-
-Register InstanceofStub::right() {
- // Constructor function (instanceof rhs).
- return x10;
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- Register arg_count = x0;
- Register key = x1;
+ Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
+ Register key = ArgumentsAccessReadDescriptor::index();
+ DCHECK(arg_count.is(x0));
+ DCHECK(key.is(x1));
// The displacement is the offset of the last parameter (if any) relative
// to the frame pointer.
@@ -2265,6 +1955,29 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
}
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+ // Check that the key is an array index, that is Uint32.
+ __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ Bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
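
The fast-path check in the new LoadIndexedInterceptorStub above leans on v8's 64-bit smi layout: bit 0 is the tag (0 for a smi) and bit 63 carries the sign, so a single masked test proves the key is a non-negative smi, i.e. a plausible array index. A sketch of the predicate, with mask values assumed per that layout:

    #include <cstdint>

    const uint64_t kSmiTagMask = 1;            // bit 0 clear => smi
    const uint64_t kSmiSignMask = 1ULL << 63;  // sign of the payload

    // True iff the raw word is a smi and its value is >= 0.
    bool IsValidArrayIndexKey(uint64_t raw_key) {
      return (raw_key & (kSmiTagMask | kSmiSignMask)) == 0;
    }
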
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Stack layout on entry.
// jssp[0]: number of parameters (tagged)
@@ -2416,7 +2129,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// w0 string_type type of subject string
// x2 jsstring_length subject string length
// x3 jsregexp_object JSRegExp object
- // w4 string_encoding ASCII or UC16
+ // w4 string_encoding Latin1 or UC16
// w5 sliced_string_offset if the string is a SlicedString
// offset to the underlying string
// w6 string_representation groups attributes of the string:
@@ -2614,17 +2327,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kStringEncodingMask == 0x04);
// Find the code object based on the assumptions above.
- // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
+ // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
// of kPointerSize to reach the latter.
- DCHECK_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+ DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
- // We will need the encoding later: ASCII = 0x04
- // UC16 = 0x00
+ // We will need the encoding later: Latin1 = 0x04
+ // UC16 = 0x00
__ Ands(string_encoding, string_type, kStringEncodingMask);
__ CzeroX(x10, ne);
__ Add(x10, regexp_data, x10);
- __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
+ __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
// (E) Carry on. String handling is done.
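
The renamed kDataOneByteCodeOffset sits exactly one pointer before kDataUC16CodeOffset (the DCHECK_EQ above pins this), which lets the stub pick the right code object without a branch: keep an extra kPointerSize for UC16, zero it for one-byte. The same selection in plain C++, constants assumed per the comments above:

    #include <cstdint>

    const uint32_t kStringEncodingMask = 0x04;  // set for one-byte strings
    const int64_t kPointerSize = 8;

    // Offset of the code slot to load, relative to the one-byte slot.
    int64_t CodeSlotDelta(uint32_t string_type) {
      bool one_byte = (string_type & kStringEncodingMask) != 0;
      return one_byte ? 0 : kPointerSize;  // UC16 slot is adjacent
    }
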
@@ -2667,13 +2380,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
// Handle UC16 encoding, two bytes make one character.
- // string_encoding: if ASCII: 0x04
- // if UC16: 0x00
+ // string_encoding: if Latin1: 0x04
+ // if UC16: 0x00
STATIC_ASSERT(kStringEncodingMask == 0x04);
__ Ubfx(string_encoding, string_encoding, 2, 1);
__ Eor(string_encoding, string_encoding, 1);
- // string_encoding: if ASCII: 0
- // if UC16: 1
+ // string_encoding: if Latin1: 0
+ // if UC16: 1
// Convert string positions from characters to bytes.
// Previous index is in x1.
@@ -2936,9 +2649,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;
- DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
- DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state.
@@ -3143,7 +2856,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
void CallFunctionStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallFunctionStub::Generate");
- CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
@@ -3258,7 +2971,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+ GenerateMiss(masm);
// The slow case, we need this no matter what to complete a call after a miss.
CallFunctionNoFeedback(masm,
@@ -3278,7 +2991,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
Register function = x1;
@@ -3297,7 +3010,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ B(ne, &extra_checks_or_miss);
__ bind(&have_js_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
// Compute the receiver in sloppy mode.
@@ -3317,7 +3030,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
EmitSlowCase(masm, argc, function, type, &non_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
__ bind(&wrap);
EmitWrapCase(masm, argc, &cont);
}
@@ -3342,7 +3055,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// We are here because tracing is on or we are going monomorphic.
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Miss);
+ GenerateMiss(masm);
// the slow case
__ bind(&slow_start);
@@ -3356,11 +3069,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("CallICStub[Miss]");
// Get the receiver of the function from the stack; 1 ~ return address.
- __ Peek(x4, (state_.arg_count() + 1) * kPointerSize);
+ __ Peek(x4, (arg_count() + 1) * kPointerSize);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -3369,6 +3082,9 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
__ Push(x4, x1, x2, x3);
// Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
ExternalReference miss = ExternalReference(IC_Utility(id),
masm->isolate());
__ CallExternalReference(miss, 4);
@@ -3418,11 +3134,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- result_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(index_, index_not_number_);
call_helper.BeforeCall(masm);
// Save object_ on the stack and pass index_ as argument for runtime call.
__ Push(object_, index_);
@@ -3469,7 +3181,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ B(hi, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
+ // At this point code register contains smi tagged one-byte char code.
__ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
__ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
@@ -3494,10 +3206,10 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
// Inputs are in x0 (lhs) and x1 (rhs).
- DCHECK(state_ == CompareIC::SMI);
- ASM_LOCATION("ICCompareStub[Smis]");
+ DCHECK(state() == CompareICState::SMI);
+ ASM_LOCATION("CompareICStub[Smis]");
Label miss;
// Bail out (to 'miss') unless both x0 and x1 are smis.
__ JumpIfEitherNotSmi(x0, x1, &miss);
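
Once both inputs are known to be smis, they can be compared as raw tagged words: tagging is a left shift, which preserves signed ordering, so no untagging is needed before the subtract. A sketch under the 64-bit layout, with a kSmiShift of 32 assumed for arm64:

    #include <cstdint>

    // Tagged smi: 32-bit value in the high half, tag bit 0 clear.
    int64_t SmiTag(int32_t value) {
      return (int64_t)((uint64_t)(int64_t)value << 32);  // kSmiShift == 32
    }

    // Signed comparison on the tagged words orders the same as on the
    // untagged values, since the value occupies the high bits.
    int SmiCompare(int64_t lhs, int64_t rhs) {
      return lhs < rhs ? -1 : (lhs > rhs ? 1 : 0);
    }
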
@@ -3517,9 +3229,9 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::NUMBER);
- ASM_LOCATION("ICCompareStub[HeapNumbers]");
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
+ ASM_LOCATION("CompareICStub[HeapNumbers]");
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss, handle_lhs, values_in_d_regs;
@@ -3531,10 +3243,10 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(lhs, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(rhs, &miss);
}
@@ -3543,15 +3255,13 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
// Load rhs if it's a heap number.
__ JumpIfSmi(rhs, &handle_lhs);
- __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
__ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Load lhs if it's a heap number.
__ Bind(&handle_lhs);
__ JumpIfSmi(lhs, &values_in_d_regs);
- __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
__ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
__ Bind(&values_in_d_regs);
@@ -3563,20 +3273,20 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ Ret();
__ Bind(&unordered);
- ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ Bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
__ JumpIfSmi(lhs, &unordered);
- __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
+ __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
__ B(&unordered);
}
__ Bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
}
@@ -3585,9 +3295,9 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
- ASM_LOCATION("ICCompareStub[InternalizedStrings]");
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+ ASM_LOCATION("CompareICStub[InternalizedStrings]");
Label miss;
Register result = x0;
@@ -3623,9 +3333,9 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::UNIQUE_NAME);
- ASM_LOCATION("ICCompareStub[UniqueNames]");
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
+ ASM_LOCATION("CompareICStub[UniqueNames]");
DCHECK(GetCondition() == eq);
Label miss;
@@ -3648,8 +3358,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// To avoid a miss, each instance type should be either SYMBOL_TYPE or it
// should have kInternalizedTag set.
- __ JumpIfNotUniqueName(lhs_instance_type, &miss);
- __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+ __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
+ __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
// Unique names are compared by identity.
STATIC_ASSERT(EQUAL == 0);
@@ -3662,13 +3372,13 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::STRING);
- ASM_LOCATION("ICCompareStub[Strings]");
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
+ ASM_LOCATION("CompareICStub[Strings]");
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
Register result = x0;
Register rhs = x0;
@@ -3715,18 +3425,18 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ Bind(&not_internalized_strings);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- lhs_type, rhs_type, x12, x13, &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
+ x13, &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one-byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, lhs, rhs, x10, x11, x12);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
+ x12);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, lhs, rhs, x10, x11, x12, x13);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
+ x12, x13);
}
// Handle more complex cases in runtime.
@@ -3743,9 +3453,9 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::OBJECT);
- ASM_LOCATION("ICCompareStub[Objects]");
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
+ ASM_LOCATION("CompareICStub[Objects]");
Label miss;
@@ -3767,8 +3477,8 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- ASM_LOCATION("ICCompareStub[KnownObjects]");
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+ ASM_LOCATION("CompareICStub[KnownObjects]");
Label miss;
@@ -3797,10 +3507,10 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
// This method handles the case where a compare stub had the wrong
// implementation. It calls a miss handler, which re-writes the stub. All other
-// ICCompareStub::Generate* methods should fall back into this one if their
+// CompareICStub::Generate* methods should fall back into this one if their
// operands were not the expected types.
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("ICCompareStub[Miss]");
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("CompareICStub[Miss]");
Register stub_entry = x11;
{
@@ -3814,7 +3524,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Preserve some caller-saved registers.
__ Push(x1, x0, lr);
// Push the arguments.
- __ Mov(op, Smi::FromInt(op_));
+ __ Mov(op, Smi::FromInt(this->op()));
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
@@ -3831,67 +3541,6 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- DCHECK(!AreAliased(hash, character));
-
- // hash = character + (character << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ Add(hash, character, Operand::UntagSmi(hash));
-
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
-
- // hash += hash << 10;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
- // hash ^= hash >> 6;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- DCHECK(!AreAliased(hash, character));
-
- // hash += character;
- __ Add(hash, hash, character);
-
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
-
- // hash += hash << 10;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
- // hash ^= hash >> 6;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
- Register scratch_w = scratch.W();
- DCHECK(!AreAliased(hash_w, scratch_w));
-
- // hash += hash << 3;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
- // hash ^= hash >> 11;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
- // hash += hash << 15;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
-
- __ Ands(hash_w, hash_w, String::kHashBitMask);
-
- // if (hash == 0) hash = 27;
- __ Mov(scratch_w, StringHasher::kZeroHash);
- __ Csel(hash_w, scratch_w, hash_w, eq);
-}
-
-
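
The three deleted helpers emitted, in assembly, the one-at-a-time string hash whose steps are still visible in the removed comments. The same computation rendered in plain C++, with the final mask width an assumption (String::kHashBitMask is v8-internal) and 27 being the kZeroHash fallback shown above:

    #include <cstddef>
    #include <cstdint>

    uint32_t StringHash(const uint8_t* chars, size_t len, uint32_t seed) {
      uint32_t hash = seed;
      for (size_t i = 0; i < len; ++i) {  // GenerateHashInit/AddCharacter
        hash += chars[i];
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      hash += hash << 3;                  // GenerateHashGetHash finalization
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= (1u << 30) - 1;             // assumed kHashBitMask width
      return hash == 0 ? 27 : hash;       // StringHasher::kZeroHash
    }
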
void SubStringStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("SubStringStub::Generate");
Label runtime;
@@ -4034,8 +3683,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
- __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
- &runtime);
+ __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
+ &runtime);
__ B(&set_slice_header);
__ Bind(&two_byte_slice);
@@ -4085,12 +3734,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ Bind(&allocate_result);
- // Sequential ASCII string. Allocate the result.
+ // Sequential one-byte string. Allocate the result.
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
+ // Allocate and copy the resulting one-byte string.
+ __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
// Locate first character of substring to copy.
__ Add(substring_char0, unpacked_char0, from);
@@ -4143,12 +3792,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
+void StringHelper::GenerateFlatOneByteStringEquals(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
Register result = x0;
Register left_length = scratch1;
@@ -4176,8 +3822,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters. Falls through if all characters are equal.
__ Bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
- scratch3, &strings_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
+ scratch3, &strings_not_equal);
// Characters in strings are equal.
__ Mov(result, Smi::FromInt(EQUAL));
@@ -4185,13 +3831,9 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4) {
DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
Label result_not_equal, compare_lengths;
@@ -4206,9 +3848,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ Cbz(min_length, &compare_lengths);
// Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4,
- &result_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ scratch4, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ Bind(&compare_lengths);
@@ -4230,14 +3871,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal) {
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Label* chars_not_equal) {
DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
// Change index to run from -length to -1 by adding length to string
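
The compare loop being renamed here relies on a classic trick, spelled out by the comment above: bias both string pointers by `length` and let the index run from -length to 0, so the loop needs no separate end-of-string comparison. A stand-alone C++ sketch of that control flow:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Sketch of GenerateOneByteCharsCompareLoop's indexing: point both
    // pointers past the end, then run a negative index up to zero.
    bool OneByteCharsEqual(const uint8_t* left, const uint8_t* right,
                           ptrdiff_t length) {
      const uint8_t* left_end = left + length;
      const uint8_t* right_end = right + length;
      for (ptrdiff_t index = -length; index != 0; index++) {
        if (left_end[index] != right_end[index]) return false;  // chars_not_equal
      }
      return true;  // falls through: all characters equal
    }

    int main() {
      const uint8_t a[] = "abc";
      const uint8_t b[] = "abd";
      assert(OneByteCharsEqual(a, a, 3));
      assert(!OneByteCharsEqual(a, b, 3));
      return 0;
    }
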
@@ -4285,13 +3921,14 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ Bind(&not_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfEitherIsNotSequentialOneByteStrings(left, right, x12, x13, &runtime);
- // Compare flat ASCII strings natively. Remove arguments from stack first,
+ // Compare flat one-byte strings natively. Remove arguments from stack first,
// as this function will generate a return.
__ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
- GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, x12, x13,
+ x14, x15);
__ Bind(&runtime);
@@ -4328,7 +3965,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
@@ -4338,16 +3975,14 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// but we need to save them before using them.
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- Register value = regs_.scratch0();
- __ Ldr(value, MemOperand(regs_.address()));
- __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
+ Register val = regs_.scratch0();
+ __ Ldr(val, MemOperand(regs_.address()));
+ __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
- __ CheckPageFlagSet(regs_.object(),
- value,
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
@@ -4357,11 +3992,9 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
InformIncrementalMarker(masm);
regs_.Restore(masm); // Restore the extra scratch registers we used.
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
__ Bind(&dont_need_remembered_set);
}
@@ -4375,7 +4008,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
Register address =
x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
DCHECK(!address.Is(regs_.object()));
@@ -4391,7 +4024,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
isolate());
__ CallCFunction(function, 3, 0);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
@@ -4418,25 +4051,22 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm); // Restore the extra scratch registers we used.
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ Bind(&on_black);
// Get the value from the slot.
- Register value = regs_.scratch0();
- __ Ldr(value, MemOperand(regs_.address()));
+ Register val = regs_.scratch0();
+ __ Ldr(val, MemOperand(regs_.address()));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
- __ CheckPageFlagClear(value,
- regs_.scratch1(),
+ __ CheckPageFlagClear(val, regs_.scratch1(),
MemoryChunk::kEvacuationCandidateMask,
&ensure_not_white);
@@ -4451,7 +4081,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.address(), regs_.object());
- __ EnsureNotWhite(value,
+ __ EnsureNotWhite(val,
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
@@ -4461,11 +4091,9 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm); // Restore the extra scratch registers we used.
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
@@ -4493,12 +4121,10 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ adr(xzr, &skip_to_incremental_compacting);
}
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
}
__ Ret();
@@ -4586,7 +4212,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ Ldr(x1, MemOperand(fp, parameter_count_offset));
- if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Add(x1, x1, 1);
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
@@ -4596,6 +4222,20 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
// The entry hook is a "BumpSystemStackPointer" instruction (sub),
// followed by a "Push lr" instruction, followed by a call.
@@ -4836,7 +4476,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ Ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ Bind(&good);
}
@@ -4919,11 +4559,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ Cmp(entry_key, key);
__ B(eq, &in_dictionary);
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
__ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
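
Each unrolled probe above compares the probed key against the lookup key and, for negative lookups, only gives up early when the colliding entry is a unique name. A rough C++ sketch of the probing itself; the triangular probe offsets (i*(i+1)/2) and the 20-probe unroll count are assumptions about the surrounding stub, not shown in this hunk:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Open-addressed probe sequence as assumed for NameDictionaryLookupStub.
    // Capacity must be a power of two; an empty slot is a definite miss.
    bool InDictionary(const std::vector<std::string>& entries, uint32_t hash,
                      const std::string& key) {
      const uint32_t mask = static_cast<uint32_t>(entries.size()) - 1;
      const int kTotalProbes = 20;  // assumed unroll count of the stub
      for (int i = 0; i < kTotalProbes; i++) {
        uint32_t index = (hash + (i * i + i) / 2) & mask;  // triangular offsets
        if (entries[index] == key) return true;    // __ B(eq, &in_dictionary);
        if (entries[index].empty()) return false;  // vacancy: not present
      }
      return false;  // maybe_in_dictionary in the stub; conservative here
    }

    int main() {
      std::vector<std::string> table(8);
      table[5] = "foo";  // first probe for hash 5: (5 + 0) & 7
      table[6] = "bar";  // second probe: (5 + 1) & 7
      assert(InDictionary(table, 5, "bar"));
      assert(!InDictionary(table, 5, "baz"));
      return 0;
    }
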
@@ -4931,7 +4571,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup, probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
+ if (mode() == POSITIVE_LOOKUP) {
__ Mov(result, 0);
__ Ret();
}
@@ -5111,7 +4751,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Register argc = x0;
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label zero_case, n_case;
__ Cbz(argc, &zero_case);
__ Cmp(argc, 1);
@@ -5128,11 +4768,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
// N arguments.
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
@@ -5143,7 +4783,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("ArrayConstructorStub::Generate");
// ----------- S t a t e -------------
- // -- x0 : argc (only if argument_count_ == ANY)
+ // -- x0 : argc (only if argument_count() == ANY)
// -- x1 : constructor
// -- x2 : AllocationSite or undefined
// -- sp[0] : return address
@@ -5295,9 +4935,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = x1;
Register context = cp;
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
typedef FunctionCallbackArguments FCA;
@@ -5387,7 +5027,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// -- x2 : api_function_address
// -----------------------------------
- Register api_function_address = x2;
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(x2));
__ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
__ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index 75a945299f..03dab5bac2 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -5,8 +5,6 @@
#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_
-#include "src/ic-inl.h"
-
namespace v8 {
namespace internal {
@@ -14,42 +12,25 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
- : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() const { return StoreBufferOverflow; }
- int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
class StringHelper : public AllStatic {
public:
- // TODO(all): These don't seem to be used any more. Delete them.
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
+  // Compares two flat one-byte strings and returns the result in x0.
+ static void GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4);
+
+  // Compares two flat one-byte strings for equality and returns the
+  // result in x0.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
private:
+ static void GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Label* chars_not_equal);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@@ -60,12 +41,12 @@ class StoreRegistersStateStub: public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
static Register to_be_pushed_lr() { return ip0; }
+
static void GenerateAheadOfTime(Isolate* isolate);
- private:
- Major MajorKey() const { return StoreRegistersState; }
- int MinorKey() const { return 0; }
- void Generate(MacroAssembler* masm);
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};
@@ -75,11 +56,10 @@ class RestoreRegistersStateStub: public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
- private:
- Major MajorKey() const { return RestoreRegistersState; }
- int MinorKey() const { return 0; }
- void Generate(MacroAssembler* masm);
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};
@@ -95,16 +75,22 @@ class RecordWriteStub: public PlatformCodeStub {
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
- object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ DCHECK(object.Is64Bits());
+ DCHECK(value.Is64Bits());
+ DCHECK(address.Is64Bits());
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
@@ -176,6 +162,8 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(GetMode(stub) == mode);
}
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
private:
// This is a helper class to manage the registers associated with the stub.
// The 'object' and 'address' registers must be preserved.
@@ -282,49 +270,43 @@ class RecordWriteStub: public PlatformCodeStub {
friend class RecordWriteStub;
};
- // A list of stub variants which are pregenerated.
- // The variants are stored in the same format as the minor key, so
- // MinorKeyFor() can be used to populate and check this list.
- static const int kAheadOfTime[];
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
-
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
+ virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+ virtual void Generate(MacroAssembler* masm) OVERRIDE;
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- Major MajorKey() const { return RecordWrite; }
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
- int MinorKey() const {
- return MinorKeyFor(object_, value_, address_, remembered_set_action_,
- save_fp_regs_mode_);
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
}
- static int MinorKeyFor(Register object,
- Register value,
- Register address,
- RememberedSetAction action,
- SaveFPRegsMode fp_mode) {
- DCHECK(object.Is64Bits());
- DCHECK(value.Is64Bits());
- DCHECK(address.Is64Bits());
- return ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(action) |
- SaveFPRegsModeBits::encode(fp_mode);
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
}
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 5> {};
@@ -333,11 +315,6 @@ class RecordWriteStub: public PlatformCodeStub {
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
};
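
With this change the stub's whole configuration round-trips through `minor_key_`: three 5-bit register codes plus two mode bits, encoded and decoded by the `BitField` classes above. A minimal stand-in for that template (not V8's actual implementation) to show the packing:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for v8::internal::BitField<T, shift, size>.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

    // Mirrors the register-code layout in RecordWriteStub (5 bits each).
    typedef BitField<int, 0, 5> ObjectBits;
    typedef BitField<int, 5, 5> ValueBits;
    typedef BitField<int, 10, 5> AddressBits;

    int main() {
      uint32_t key = ObjectBits::encode(1) | ValueBits::encode(2) |
                     AddressBits::encode(3);
      assert(ObjectBits::decode(key) == 1);
      assert(ValueBits::decode(key) == 2);
      assert(AddressBits::decode(key) == 3);
      return 0;
    }
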
@@ -348,14 +325,13 @@ class RecordWriteStub: public PlatformCodeStub {
class DirectCEntryStub: public PlatformCodeStub {
public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
private:
- Major MajorKey() const { return DirectCEntry; }
- int MinorKey() const { return 0; }
-
bool NeedsImmovableCode() { return true; }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
@@ -364,9 +340,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
+ : PlatformCodeStub(isolate) {
+ minor_key_ = LookupModeBits::encode(mode);
+ }
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
@@ -398,78 +374,14 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() const { return NameDictionaryLookup; }
-
- int MinorKey() const { return LookupModeBits::encode(mode_); }
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
- LookupMode mode_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
- Major MajorKey() const { return SubString; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
- // Compares two flat ASCII strings and returns result in x0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compare two flat ASCII strings for equality and returns result
- // in x0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- virtual Major MajorKey() const { return StringCompare; }
- virtual int MinorKey() const { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-
-class PlatformInterfaceDescriptor {
- public:
- explicit PlatformInterfaceDescriptor(
- TargetAddressStorageMode storage_mode)
- : storage_mode_(storage_mode) { }
-
- TargetAddressStorageMode storage_mode() { return storage_mode_; }
-
- private:
- TargetAddressStorageMode storage_mode_;
-};
-
-
} } // namespace v8::internal
#endif // V8_ARM64_CODE_STUBS_ARM64_H_
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index 16b6d3b188..91eaba7957 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -485,15 +485,15 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ B(ne, call_runtime);
__ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
- Label ascii, done;
+ Label one_byte, done;
__ Bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
- __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
+ __ TestAndBranchIfAnySet(result, kStringEncodingMask, &one_byte);
// Two-byte string.
__ Ldrh(result, MemOperand(string, index, SXTW, 1));
__ B(&done);
- __ Bind(&ascii);
- // Ascii string.
+ __ Bind(&one_byte);
+ // One-byte string.
__ Ldrb(result, MemOperand(string, index, SXTW));
__ Bind(&done);
}
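
The renamed label marks a two-way dispatch on the string's encoding bit: two-byte strings load a half-word with the index scaled by two (`Ldrh ..., SXTW, 1`), one-byte strings a single byte. Roughly, in C++, with `one_byte` standing in for the kStringEncodingMask test:

    #include <cassert>
    #include <cstdint>

    // Sketch of StringCharLoadGenerator's final loads.
    uint16_t LoadChar(const void* chars, int32_t index, bool one_byte) {
      if (one_byte) {
        // __ Ldrb(result, MemOperand(string, index, SXTW));
        return static_cast<const uint8_t*>(chars)[index];
      }
      // __ Ldrh(result, MemOperand(string, index, SXTW, 1));  // scaled by 2
      return static_cast<const uint16_t*>(chars)[index];
    }

    int main() {
      const uint8_t one_byte[] = {'a', 'b'};
      const uint16_t two_byte[] = {0x0061, 0x4e2d};
      assert(LoadChar(one_byte, 1, true) == 'b');
      assert(LoadChar(two_byte, 1, false) == 0x4e2d);
      return 0;
    }
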
diff --git a/deps/v8/src/arm64/codegen-arm64.h b/deps/v8/src/arm64/codegen-arm64.h
index 9ef148cc40..2f01c510de 100644
--- a/deps/v8/src/arm64/codegen-arm64.h
+++ b/deps/v8/src/arm64/codegen-arm64.h
@@ -6,7 +6,7 @@
#define V8_ARM64_CODEGEN_ARM64_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/debug-arm64.cc b/deps/v8/src/arm64/debug-arm64.cc
index 746d9a8b47..f57d5b5ab2 100644
--- a/deps/v8/src/arm64/debug-arm64.cc
+++ b/deps/v8/src/arm64/debug-arm64.cc
@@ -236,17 +236,17 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm64.cc).
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
@@ -260,9 +260,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm64.cc).
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
diff --git a/deps/v8/src/arm64/decoder-arm64.cc b/deps/v8/src/arm64/decoder-arm64.cc
index cf7dc34c58..5cca85ea28 100644
--- a/deps/v8/src/arm64/decoder-arm64.cc
+++ b/deps/v8/src/arm64/decoder-arm64.cc
@@ -17,13 +17,13 @@ namespace internal {
void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
- visitors_.push_front(new_visitor);
+ visitors_.push_back(new_visitor);
}
void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
- visitors_.push_back(new_visitor);
+ visitors_.push_front(new_visitor);
}
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index d40468029f..d67dc8fcd9 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -89,7 +89,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index b8b1d5d256..ac7cb37322 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -1517,7 +1517,9 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
case LDR_w_lit:
case LDR_x_lit:
case LDR_s_lit:
- case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
+ case LDR_d_lit:
+ AppendToOutput("(addr 0x%016" PRIxPTR ")", instr->LiteralAddress());
+ break;
default: UNREACHABLE();
}
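
The old `%p` conversion prints pointers in an implementation-defined format, so disassembly output differed across C libraries; the new format pins it to 16 zero-padded hex digits. A minimal illustration of the difference:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      int object = 0;
      void* addr = &object;
      // Old form: width and "0x" prefix depend on the C library.
      printf("(addr %p)\n", addr);
      // New form: always a 0x prefix and 16 hex digits, as in the diff.
      printf("(addr 0x%016" PRIxPTR ")\n", reinterpret_cast<uintptr_t>(addr));
      return 0;
    }
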
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/arm64/full-codegen-arm64.cc
index 2e63814bd3..9d544825e4 100644
--- a/deps/v8/src/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/arm64/full-codegen-arm64.cc
@@ -6,15 +6,16 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
#include "src/full-codegen.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
@@ -1051,7 +1052,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1178,7 +1180,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Bind(&fixed_array);
__ LoadObject(x1, FeedbackVector());
- __ Mov(x10, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
__ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
@@ -1319,9 +1321,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(),
- info->strict_mode(),
- info->is_generator());
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
__ Mov(x2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1341,6 +1341,26 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cmnt(masm_, "[ SuperReference ");
+
+  __ Ldr(LoadDescriptor::ReceiverRegister(),
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(home_object_symbol));
+
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+ __ Mov(x10, Operand(isolate()->factory()->undefined_value()));
+  __ Cmp(x0, x10);
+  Label done;
+  __ B(ne, &done);
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ Bind(&done);
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1384,10 +1404,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ Bind(&fast);
}
- __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadIC::NameRegister(), Operand(proxy->var()->name()));
+ __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
}
@@ -1469,10 +1489,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "Global variable");
- __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadIC::NameRegister(), Operand(var->name()));
+ __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
}
CallLoadIC(CONTEXTUAL);
@@ -1682,9 +1702,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- DCHECK(StoreIC::ValueRegister().is(x0));
- __ Mov(StoreIC::NameRegister(), Operand(key->value()));
- __ Peek(StoreIC::ReceiverRegister(), 0);
+ DCHECK(StoreDescriptor::ValueRegister().is(x0));
+ __ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+ __ Peek(StoreDescriptor::ReceiverRegister(), 0);
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1844,13 +1864,19 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ enum LhsKind {
+ VARIABLE,
+ NAMED_PROPERTY,
+ KEYED_PROPERTY,
+ NAMED_SUPER_PROPERTY
+ };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
+ : NAMED_PROPERTY)
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1862,17 +1888,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ Peek(LoadIC::ReceiverRegister(), 0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 0);
} else {
VisitForStackValue(property->obj());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ const Register scratch = x10;
+ __ Peek(scratch, kPointerSize);
+ __ Push(scratch, result_register());
+ }
+ break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize);
- __ Peek(LoadIC::NameRegister(), 0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
+ __ Peek(LoadDescriptor::NameRegister(), 0);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1893,6 +1929,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1939,6 +1979,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyAssignment(expr);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
@@ -1949,9 +1992,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ Mov(LoadIC::NameRegister(), Operand(key->value()));
+ DCHECK(!prop->IsSuperAccess());
+
+ __ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(prop->PropertyFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -1960,12 +2005,24 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object.
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ __ Push(key->value());
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
  // Call keyed load IC. It has arguments key and receiver in
  // LoadDescriptor::NameRegister() and LoadDescriptor::ReceiverRegister().
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(prop->PropertyFeedbackSlot()));
CallIC(ic);
} else {
@@ -1993,10 +2050,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(x10, &both_smis);
__ Bind(&stub_call);
- BinaryOpICStub stub(isolate(), op, mode);
+
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
{
Assembler::BlockPoolsScope scope(masm_);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
__ B(&done);
@@ -2019,16 +2077,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Ubfx(right, right, kSmiShift, 5);
__ Lsl(result, left, right);
break;
- case Token::SHR: {
- Label right_not_zero;
- __ Cbnz(right, &right_not_zero);
- __ Tbnz(left, kXSignBit, &stub_call);
- __ Bind(&right_not_zero);
+ case Token::SHR:
+ // If `left >>> right` >= 0x80000000, the result is not representable in a
+ // signed 32-bit smi.
__ Ubfx(right, right, kSmiShift, 5);
- __ Lsr(result, left, right);
- __ Bic(result, result, kSmiShiftMask);
+ __ Lsr(x10, left, right);
+ __ Tbnz(x10, kXSignBit, &stub_call);
+ __ Bic(result, x10, kSmiShiftMask);
break;
- }
case Token::ADD:
__ Adds(x10, left, right);
__ B(vs, &stub_call);
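
The reworked SHR case above drops the old special-casing of a zero shift count: it now always shifts first and bails to the stub when the sign bit of the 32-bit result is set, since `>>>` produces an unsigned value and anything at or above 0x80000000 cannot be retagged as a non-negative smi. The same fast path in plain C++; the 5-bit shift mask mirrors the Ubfx:

    #include <cstdint>
    #include <cstdio>

    // Smi fast path for JS '>>>' (Token::SHR). Returns false when the
    // result must go to the generic stub.
    bool ShiftRightLogicalSmi(int32_t left, int32_t right, int32_t* result) {
      uint32_t shifted = static_cast<uint32_t>(left) >> (right & 0x1f);
      if (shifted > static_cast<uint32_t>(INT32_MAX)) {
        return false;  // __ Tbnz(x10, kXSignBit, &stub_call);
      }
      *result = static_cast<int32_t>(shifted);
      return true;
    }

    int main() {
      int32_t r;
      printf("%d\n", ShiftRightLogicalSmi(-1, 1, &r) ? r : -1);  // 2147483647
      printf("%d\n", ShiftRightLogicalSmi(-1, 0, &r));           // 0: stub call
      return 0;
    }
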
@@ -2079,11 +2135,11 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ Pop(x1);
- BinaryOpICStub stub(isolate(), op, mode);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{
Assembler::BlockPoolsScope scope(masm_);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
context()->Plug(x0);
@@ -2116,9 +2172,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
// TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
// this copy.
- __ Mov(StoreIC::ReceiverRegister(), x0);
- __ Pop(StoreIC::ValueRegister()); // Restore value.
- __ Mov(StoreIC::NameRegister(),
+ __ Mov(StoreDescriptor::ReceiverRegister(), x0);
+ __ Pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
CallStoreIC();
break;
@@ -2127,11 +2183,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Push(x0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Mov(KeyedStoreIC::NameRegister(), x0);
- __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::ValueRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Mov(StoreDescriptor::NameRegister(), x0);
+ __ Pop(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::ValueRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
@@ -2158,8 +2214,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ Mov(StoreIC::NameRegister(), Operand(var->name()));
- __ Ldr(StoreIC::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+ __ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
@@ -2231,8 +2287,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ Mov(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value()));
- __ Pop(StoreIC::ReceiverRegister());
+ __ Mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2240,6 +2297,24 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
+ // Assignment to named property of super.
+ // x0 : value
+ // stack : receiver ('this'), home_object
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ Push(x0);
+ __ Push(key->value());
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+ context()->Plug(x0);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
// Assignment to a property, using a keyed store IC.
@@ -2247,12 +2322,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
// TODO(all): Could we pass this in registers rather than on the stack?
- __ Pop(KeyedStoreIC::NameRegister(), KeyedStoreIC::ReceiverRegister());
- DCHECK(KeyedStoreIC::ValueRegister().is(x0));
+ __ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(x0));
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2265,16 +2338,23 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadIC::ReceiverRegister(), x0);
- EmitNamedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), x0);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ EmitNamedSuperPropertyLoad(expr);
+ }
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(x0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ Move(LoadIC::NameRegister(), x0);
- __ Pop(LoadIC::ReceiverRegister());
+ __ Move(LoadDescriptor::NameRegister(), x0);
+ __ Pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
context()->Plug(x0);
}
@@ -2294,12 +2374,11 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallIC::CallType call_type = callee->IsVariableProxy()
- ? CallIC::FUNCTION
- : CallIC::METHOD;
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallIC::FUNCTION) {
+ if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2310,7 +2389,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ Peek(LoadIC::ReceiverRegister(), 0);
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
@@ -2322,6 +2402,45 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+
+ // Load the function from the receiver.
+ const Register scratch = x10;
+ SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ Push(x0);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(x0);
+ __ Peek(scratch, kPointerSize);
+ __ Push(x0, scratch);
+ __ Push(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ Poke(x0, kPointerSize);
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
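
The stack comments in EmitSuperCallWithLoadIC above describe a five-slot setup that Runtime::kLoadFromSuper consumes from the top, after which the lone `Poke` overwrites the leftover home_object with the returned method so the frame looks like an ordinary named call. A toy model of that shuffle; Push/Peek/Poke mimic the macro-assembler helpers, with slot 1 corresponding to kPointerSize:

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy JS stack: back() is the top; Peek/Poke take a slot offset from it.
    std::vector<std::string> stack;
    void Push(const std::string& v) { stack.push_back(v); }
    std::string Peek(size_t slot) { return stack[stack.size() - 1 - slot]; }
    void Poke(const std::string& v, size_t slot) {
      stack[stack.size() - 1 - slot] = v;
    }

    int main() {
      Push("home_object");
      Push("this");
      Push("this");         // __ Peek(scratch, kPointerSize); __ Push(x0, scratch);
      Push("home_object");  //   re-push receiver and home_object for the runtime
      Push("key");
      // Runtime::kLoadFromSuper pops the top three slots and returns the
      // looked-up method (kept in x0 in the real code).
      stack.resize(stack.size() - 3);
      Poke("target_function", 1);  // __ Poke(x0, kPointerSize): kill home_object
      assert(Peek(0) == "this");             // receiver on top
      assert(Peek(1) == "target_function");  // callee underneath, ready for EmitCall
      return 0;
    }
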
@@ -2332,8 +2451,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ Peek(LoadIC::ReceiverRegister(), 0);
- __ Move(LoadIC::NameRegister(), x0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 0);
+ __ Move(LoadDescriptor::NameRegister(), x0);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2341,11 +2460,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Pop(x10);
__ Push(x0, x10);
- EmitCall(expr, CallIC::METHOD);
+ EmitCall(expr, CallICState::METHOD);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2494,15 +2613,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithLoadIC(expr);
+ bool is_named_call = property->key()->IsPropertyName();
+    // super.x() is handled in EmitSuperCallWithLoadIC.
+ if (property->IsSuperAccess() && is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithLoadIC(expr, property->key());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
}
-
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -2822,7 +2947,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
// Only a HeapNumber can be -0.0, so return false if we have something else.
- __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(x0, if_false, DO_SMI_CHECK);
// Test the bit pattern.
__ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
@@ -3015,7 +3140,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ Bind(&function);
- __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex);
+ __ LoadRoot(x0, Heap::kFunction_stringRootIndex);
__ B(&done);
// Objects with a non-function constructor have class 'Object'.
@@ -3134,9 +3259,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register value = x2;
Register scratch = x10;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(value, index);
if (FLAG_debug_code) {
@@ -3164,9 +3289,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register value = x2;
Register scratch = x10;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(value, index);
if (FLAG_debug_code) {
@@ -3507,8 +3632,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitFastOneByteArrayJoin");
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3560,7 +3685,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Get the FixedArray containing array's elements.
__ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- // Check that all array elements are sequential ASCII strings, and
+ // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths.
__ Mov(string_length, 0);
__ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
@@ -3575,14 +3700,14 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements_end: Array end.
if (FLAG_debug_code) {
__ Cmp(array_length, 0);
- __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
}
__ Bind(&loop);
__ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
__ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
__ Ldrsw(scratch1,
UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ Adds(string_length, string_length, scratch1);
@@ -3604,11 +3729,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Sum of string lengths (not smi).
// elements: FixedArray of strings.
- // Check that the separator is a flat ASCII string.
+ // Check that the separator is a flat one-byte string.
__ JumpIfSmi(separator, &bailout);
__ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
__ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string.
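
The arithmetic described above is simply: result length = sum of element lengths + separator length * (array length - 1). A tiny check of that formula; overflow handling is left to the stub's bailout in the real code:

    #include <cassert>

    // Result length for a join with a fixed separator: every element
    // contributes its length, plus one separator between each adjacent
    // pair (array_length - 1 of them).
    int JoinedLength(const int* lengths, int array_length,
                     int separator_length) {
      int total = 0;
      for (int i = 0; i < array_length; i++) total += lengths[i];
      return total + separator_length * (array_length - 1);
    }

    int main() {
      const int lengths[] = {3, 1, 4};            // e.g. "foo", "a", "barx"
      assert(JoinedLength(lengths, 3, 2) == 12);  // 3+1+4 plus 2*2
      return 0;
    }
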
@@ -3628,13 +3753,13 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator: Separator string
// string_length: Length of result string (not smi)
// array_length: Length of the array (not smi).
- __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3,
- &bailout);
+ __ AllocateOneByteString(result, string_length, scratch1, scratch2, scratch3,
+ &bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
// character.
- // TODO(all): useless unless AllocateAsciiString trashes the register.
+ // TODO(all): useless unless AllocateOneByteString trashes the register.
__ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
__ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
@@ -3662,7 +3787,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ Bind(&one_char_separator);
- // Replace separator with its ASCII character value.
+ // Replace separator with its one-byte character value.
__ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
  // element is not preceded by a separator.
@@ -3673,7 +3798,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// result_pos: the position to which we are currently copying characters.
// element: Current array element.
// elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
+ // separator: Single separator one-byte char (in lower byte).
// Copy the separator character to the result.
__ Strb(separator, MemOperand(result_pos, 1, PostIndex));
@@ -3749,15 +3874,15 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as the receiver.
__ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(LoadIC::ReceiverRegister(),
+ __ Ldr(LoadDescriptor::ReceiverRegister(),
FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
- __ Push(LoadIC::ReceiverRegister());
+ __ Push(LoadDescriptor::ReceiverRegister());
// Load the function from the receiver.
Handle<String> name = expr->name();
- __ Mov(LoadIC::NameRegister(), Operand(name));
+ __ Mov(LoadDescriptor::NameRegister(), Operand(name));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -3922,6 +4047,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ if (prop->IsSuperAccess()) {
+      // Throw an exception.
+ VisitSuperReference(prop->obj()->AsSuperReference());
+ return;
+ }
}
// Evaluate expression and get value.
@@ -3937,14 +4067,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
- __ Peek(LoadIC::ReceiverRegister(), 0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(prop);
} else {
// KEYED_PROPERTY
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- __ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize);
- __ Peek(LoadIC::NameRegister(), 0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
+ __ Peek(LoadDescriptor::NameRegister(), 0);
EmitKeyedPropertyLoad(prop);
}
}
@@ -4025,8 +4155,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{
Assembler::BlockPoolsScope scope(masm_);
- BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
}
__ Bind(&done);
@@ -4054,9 +4185,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ Mov(StoreIC::NameRegister(),
+ __ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ Pop(StoreIC::ReceiverRegister());
+ __ Pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4069,11 +4200,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ Pop(KeyedStoreIC::NameRegister());
- __ Pop(KeyedStoreIC::ReceiverRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Pop(StoreDescriptor::NameRegister());
+ __ Pop(StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4095,10 +4225,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadIC::NameRegister(), Operand(proxy->name()));
+ __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
}
// Use a regular load, not a contextual load, to avoid a reference
@@ -4271,7 +4401,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4332,12 +4462,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// don't want to spend too much time on it now.
switch (expr->yield_kind()) {
- case Yield::SUSPEND:
+ case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ Push(result_register());
// Fall through.
- case Yield::INITIAL: {
+ case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ B(&suspend);
@@ -4372,7 +4502,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::FINAL: {
+ case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
__ Str(x1, FieldMemOperand(result_register(),
@@ -4384,7 +4514,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING: {
+ case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
@@ -4393,8 +4523,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call, l_loop;
- Register load_receiver = LoadIC::ReceiverRegister();
- Register load_name = LoadIC::NameRegister();
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined.
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
@@ -4454,10 +4584,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Peek(load_receiver, 1 * kPointerSize);
__ Peek(load_name, 2 * kPointerSize);
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
__ Mov(x1, x0);
__ Poke(x1, 2 * kPointerSize);
@@ -4474,7 +4604,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->DoneFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // x0=result.done
@@ -4487,7 +4617,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->ValueFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // x0=result.value
diff --git a/deps/v8/src/arm64/ic-arm64.cc b/deps/v8/src/arm64/ic-arm64.cc
deleted file mode 100644
index e08fcfd884..0000000000
--- a/deps/v8/src/arm64/ic-arm64.cc
+++ /dev/null
@@ -1,1287 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/arm64/assembler-arm64.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/disasm.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-// "type" holds an instance type on entry and is not clobbered.
-// Generated code branches to "global_object" if type is any kind of global
-// JS object.
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
- __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
- __ B(eq, global_object);
-}
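
The Cmp/Ccmp chain above folds three instance-type comparisons into a single branch: each Ccmp re-compares only while no match has been found, and otherwise pins the flags to "equal". A minimal C++ sketch of the predicate it computes, assuming V8's instance-type constants (the helper name is hypothetical):

// True exactly when the B(eq, global_object) branch above is taken.
static bool IsGlobalInstanceType(int type) {
  return type == JS_GLOBAL_OBJECT_TYPE ||
         type == JS_BUILTINS_OBJECT_TYPE ||
         type == JS_GLOBAL_PROXY_TYPE;
}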
-
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done.
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done.
-// The scratch registers need to be different from elements, name and result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- DCHECK(!AreAliased(elements, name, scratch1, scratch2));
- DCHECK(!AreAliased(result, scratch1, scratch2));
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry check that the value is a normal property.
- __ Bind(&done);
-
- static const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
- __ B(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ Ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
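
For orientation, the probe-and-check above walks NameDictionary entries laid out as (key, value, details) triples, which is where kDetailsOffset and the '+ 1 * kPointerSize' value slot come from. A hedged sketch of the post-probe decision, with the entry reduced to a plain array (illustrative, not a V8 API):

// entry[0] = key, entry[1] = value, entry[2] = details (a tagged smi).
// Only "normal" properties load on this fast path: any set type bit in
// the details word sends us to the miss label instead.
static bool DictionaryLoadSketch(intptr_t* entry, intptr_t type_field_mask,
                                 intptr_t* value_out) {
  if ((entry[2] & type_field_mask) != 0) return false;  // not NORMAL: miss
  *value_out = entry[1];
  return true;
}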
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done.
-// value: The value to store (never clobbered).
-//
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
- Register scratch2) {
- DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ Bind(&done);
-
- static const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- static const int kTypeAndReadOnlyMask =
- PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY);
- __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
- __ Tst(scratch1, kTypeAndReadOnlyMask);
- __ B(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- static const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
- __ Str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ Mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object and return the map of the
-// receiver in 'map_scratch' if the receiver is not a SMI.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map_scratch,
- Register scratch,
- int interceptor_bit,
- Label* slow) {
- DCHECK(!AreAliased(map_scratch, scratch));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
- __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
- __ Tbnz(scratch, interceptor_bit, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object, we enter the
-// runtime system to make sure that indexing into string objects works
- // as intended.
- STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- __ Cmp(scratch, JS_OBJECT_TYPE);
- __ B(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-//
-// receiver - holds the receiver on entry.
-// Unchanged unless 'result' is the same register.
-//
-// key - holds the smi key on entry.
-// Unchanged unless 'result' is the same register.
-//
-// elements - holds the elements of the receiver on exit.
-//
-// elements_map - holds the elements map on exit if the not_fast_array branch is
-// taken. Otherwise, this is used as a scratch register.
-//
-// result - holds the result on exit if the load succeeded.
-// Allowed to be the same as 'receiver' or 'key'.
-// Unchanged on bailout so 'receiver' and 'key' can be safely
-// used by further computation.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register elements_map,
- Register scratch2,
- Register result,
- Label* not_fast_array,
- Label* slow) {
- DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));
-
- // Check for fast array.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
- not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
-
- // The elements_map register is only used for the not_fast_array path, which
- // was handled above. From this point onward it is a scratch register.
- Register scratch1 = elements_map;
-
- // Check that the key (index) is within bounds.
- __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(key, scratch1);
- __ B(hs, slow);
-
- // Fast case: Do the load.
- __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(scratch2, key);
- __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
-
- // In case the loaded value is the_hole, we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
-
- // Move the value to the result register.
- // 'result' can alias with 'receiver' or 'key' but these two must be
- // preserved if we jump to 'slow'.
- __ Mov(result, scratch2);
-}
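
The fast-element load above boils down to a bounds check and a hole check; note that the 'hs' (unsigned higher-or-same) comparison rejects negative keys for free. A plain-C++ sketch under the same contract (illustrative only):

// Returns true and sets *result on success; returns false when the caller
// must take the slow path: the key is out of bounds, or the slot holds
// the_hole and the prototype chain has to be consulted.
static bool FastArrayLoadSketch(void** elements, uintptr_t length,
                                uintptr_t key, void* the_hole,
                                void** result) {
  if (key >= length) return false;      // unsigned: catches negatives too
  void* value = elements[key];
  if (value == the_hole) return false;  // hole: defer to the slow path
  *result = value;
  return true;
}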
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-// The map of the key is returned in 'map_scratch'.
-// If the jump to 'index_string' is taken, the hash of the key is left
-// in 'hash_scratch'.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map_scratch,
- Register hash_scratch,
- Label* index_string,
- Label* not_unique) {
- DCHECK(!AreAliased(key, map_scratch, hash_scratch));
-
- // Is the key a name?
- Label unique;
- __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
- not_unique, hi);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ B(eq, &unique);
-
- // Is the string an array index with cached numeric value?
- __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ TestAndBranchIfAllClear(hash_scratch,
- Name::kContainsCachedArrayIndexMask,
- index_string);
-
- // Is the string internalized? We know it's a string, so a single bit test is
- // enough.
- __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
-
- __ Bind(&unique);
- // Fall through if the key is a unique name.
-}
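
The classification above leans on two bit tricks: a string whose hash field caches an array index can be handled like a smi key, and internalized-ness is a single instance-type bit. A hedged sketch of the decision tree (the enum and parameter names are illustrative; the masks mirror V8's):

enum KeyKind { UNIQUE_NAME, CACHED_ARRAY_INDEX, NOT_UNIQUE };

static KeyKind ClassifyKey(unsigned instance_type, unsigned hash_field,
                           unsigned last_unique_name_type,
                           unsigned cached_index_mask,
                           unsigned not_internalized_mask) {
  if (instance_type > last_unique_name_type) return NOT_UNIQUE;
  if (instance_type == last_unique_name_type) return UNIQUE_NAME;  // symbol
  if ((hash_field & cached_index_mask) == 0) {
    return CACHED_ARRAY_INDEX;  // numeric index cached in the hash field
  }
  if ((instance_type & not_internalized_mask) != 0) return NOT_UNIQUE;
  return UNIQUE_NAME;  // internalized string
}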
-
-
-// Neither 'object' nor 'key' is modified by this function.
-//
-// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
-// left with the object's elements map. Otherwise, it is used as a scratch
-// register.
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register map,
- Register scratch1,
- Register scratch2,
- Label* unmapped_case,
- Label* slow_case) {
- DCHECK(!AreAliased(object, key, map, scratch1, scratch2));
-
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
- slow_case, lt);
-
- // Check that the key is a positive smi.
- __ JumpIfNotSmi(key, slow_case);
- __ Tbnz(key, kXSignBit, slow_case);
-
- // Load the elements object and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup.
- __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ Sub(scratch1, scratch1, Smi::FromInt(2));
- __ Cmp(key, scratch1);
- __ B(hs, unmapped_case);
-
- // Load element index and check whether it is the hole.
- static const int offset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ Add(scratch1, map, offset);
- __ SmiUntag(scratch2, key);
- __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
- __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
-
- // Load value from context and return it.
- __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
- __ SmiUntag(scratch1);
- __ Lsl(scratch1, scratch1, kPointerSizeLog2);
- __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
- // The base of the result (scratch2) is passed to RecordWrite in
- // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
- return MemOperand(scratch2, scratch1);
-}
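
For reference, the sloppy-arguments elements array walked above (and by the unmapped variant below) carries a two-slot header, which is where the 'length - 2' bound comes from. A hedged sketch with the tagged FixedArrays reduced to raw arrays (smi tagging and header offsets elided):

// parameter_map[0] = context, parameter_map[1] = backing store,
// parameter_map[2 + i] = the_hole (unmapped) or an index into the context.
static intptr_t SloppyArgumentsLoadSketch(intptr_t* parameter_map,
                                          intptr_t length, intptr_t key,
                                          intptr_t the_hole,
                                          intptr_t* context,
                                          intptr_t* backing_store) {
  if (key < length - 2 && parameter_map[2 + key] != the_hole) {
    return context[parameter_map[2 + key]];  // mapped: aliases a context slot
  }
  return backing_store[key];  // unmapped: ordinary element load
}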
-
-
-// The 'parameter_map' register must be loaded with the parameter map of the
-// arguments object and is overwritten.
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- DCHECK(!AreAliased(key, parameter_map, scratch));
-
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(
- backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ Cmp(key, scratch);
- __ B(hs, slow_case);
-
- __ Add(backing_store,
- backing_store,
- FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(scratch, key);
- return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(x1));
- DCHECK(name.is(x2));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, x3, x4, x5, x6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = x0;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
- Label slow;
-
- __ Ldr(dictionary,
- FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), x0, x3, x4);
- __ Ret();
-
- // Dictionary load failed, go slow (but don't miss).
- __ Bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
- ASM_LOCATION("LoadIC::GenerateMiss");
-
- __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
-
- // Perform tail call to the entry.
- __ Push(ReceiverRegister(), NameRegister());
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
- __ Push(ReceiverRegister(), NameRegister());
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is in lr.
- Register result = x0;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
-
- Label miss, unmapped;
-
- Register map_scratch = x0;
- MemOperand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
- __ Ldr(result, mapped_location);
- __ Ret();
-
- __ Bind(&unmapped);
- // Parameter map is left in map_scratch when a jump on unmapped is done.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
- __ Ldr(result, unmapped_location);
- __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
- __ Ret();
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
- Label slow, notin;
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
- DCHECK(value.is(x0));
-
- Register map = x3;
-
- // These registers are used by GenerateMappedArgumentsLookup to build a
- // MemOperand. They are live for as long as the MemOperand is live.
- Register mapped1 = x4;
- Register mapped2 = x5;
-
- MemOperand mapped =
- GenerateMappedArgumentsLookup(masm, receiver, key, map,
- mapped1, mapped2,
- &notin, &slow);
- Operand mapped_offset = mapped.OffsetAsOperand();
- __ Str(value, mapped);
- __ Add(x10, mapped.base(), mapped_offset);
- __ Mov(x11, value);
- __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
-
- __ Bind(&notin);
-
- // These registers are used by GenerateUnmappedArgumentsLookup to build a
- // MemOperand. They are live for as long as the MemOperand is live.
- Register unmapped1 = map; // This is assumed to alias 'map'.
- Register unmapped2 = x4;
- MemOperand unmapped =
- GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
- Operand unmapped_offset = unmapped.OffsetAsOperand();
- __ Str(value, unmapped);
- __ Add(x10, unmapped.base(), unmapped_offset);
- __ Mov(x11, value);
- __ RecordWrite(unmapped.base(), x10, x11,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ Bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
-
- __ Push(ReceiverRegister(), NameRegister());
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return x1; }
-const Register LoadIC::NameRegister() { return x2; }
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return x0;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return x3;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return x1; }
-const Register StoreIC::NameRegister() { return x2; }
-const Register StoreIC::ValueRegister() { return x0; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return x3;
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
- __ Push(ReceiverRegister(), NameRegister());
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
- Register key,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label *slow) {
- DCHECK(!AreAliased(
- key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
-
- Isolate* isolate = masm->isolate();
- Label check_number_dictionary;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
-
- GenerateFastArrayLoad(
- masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
- __ IncrementCounter(
- isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
- __ Ret();
-
- __ Bind(&check_number_dictionary);
- __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
-
- // Check whether we have a number dictionary.
- __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
-
- __ LoadFromNumberDictionary(
- slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
- __ Ret();
-}
-
-static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
- Register key,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label *slow) {
- DCHECK(!AreAliased(
- key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
-
- Isolate* isolate = masm->isolate();
- Label probe_dictionary, property_array_property;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
-
- // If the receiver is a fast-case object, check the keyed lookup cache.
- // Otherwise probe the dictionary.
- __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
-
- // We keep the map of the receiver in scratch1.
- Register receiver_map = scratch1;
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
- __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(scratch2, scratch2, mask);
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for a match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ Mov(scratch3, cache_keys);
- __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- // Load the map and make scratch3 point to the next entry.
- __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
- __ Cmp(receiver_map, scratch4);
- __ B(ne, &try_next_entry);
- __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name.
- __ Cmp(key, scratch4);
- __ B(eq, &hit_on_nth_entry[i]);
- __ Bind(&try_next_entry);
- }
-
- // Last entry.
- __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
- __ Cmp(receiver_map, scratch4);
- __ B(ne, slow);
- __ Ldr(scratch4, MemOperand(scratch3));
- __ Cmp(key, scratch4);
- __ B(ne, slow);
-
- // Get field offset.
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ Bind(&hit_on_nth_entry[i]);
- __ Mov(scratch3, cache_field_offsets);
- if (i != 0) {
- __ Add(scratch2, scratch2, i);
- }
- __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
- __ Ldrb(scratch5,
- FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
- __ Subs(scratch4, scratch4, scratch5);
- __ B(ge, &property_array_property);
- if (i != 0) {
- __ B(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ Bind(&load_in_object_property);
- __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
- __ Add(scratch5, scratch5, scratch4); // Index from start of object.
- __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
- __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, scratch1, scratch2);
- __ Ret();
-
- // Load property array property.
- __ Bind(&property_array_property);
- __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, scratch1, scratch2);
- __ Ret();
-
- // Do a quick inline probe of the receiver's dictionary, if it exists.
- __ Bind(&probe_dictionary);
- __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
- // Load the property.
- GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1, scratch1, scratch2);
- __ Ret();
-}
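
The probe above mixes 32 bits of the receiver's map pointer with the name's hash field to choose a bucket; the 'LSL, kPointerSizeLog2 + 1' scaling reflects that each cache entry is a two-pointer (map, name) pair. A minimal sketch of the index computation, assuming V8's shift and mask constants:

// Bucket index into the keyed lookup cache; each bucket holds
// kEntriesPerBucket (map, name) pairs, hence the two-pointer stride.
static int KeyedLookupCacheIndexSketch(uintptr_t receiver_map,
                                       uint32_t name_hash_field,
                                       int map_hash_shift,
                                       int name_hash_shift, int mask) {
  uintptr_t h = (receiver_map >> map_hash_shift) ^
                (name_hash_field >> name_hash_shift);
  return static_cast<int>(h & mask);
}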
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name;
-
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
- DCHECK(key.is(x2));
- DCHECK(receiver.is(x1));
-
- __ JumpIfNotSmi(key, &check_name);
- __ Bind(&index_smi);
- // Now the key is known to be a smi. This point is also jumped to from
- // below, where a numeric string is converted to a smi.
- GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
-
- // Slow case.
- __ Bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_generic_slow(), 1, x4, x3);
- GenerateRuntimeGetProperty(masm);
-
- __ Bind(&check_name);
- GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
-
- GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
-
- __ Bind(&index_name);
- __ IndexFromHash(x3, key);
- // Now jump to the place where smi keys are handled.
- __ B(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // Return address is in lr.
- Label miss;
-
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
- Register result = x0;
- Register scratch = x3;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch1 = x3;
- Register scratch2 = x4;
- DCHECK(!AreAliased(scratch1, scratch2, receiver, key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
-
- // Get the map of the receiver.
- Register map = scratch1;
- __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset));
- DCHECK(kSlowCaseBitFieldMask ==
- ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
- __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow);
- __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
-
- __ Bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateMiss");
-
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateSlow");
-
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
-
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // Push strict_mode for runtime call.
- __ Mov(x10, Smi::FromInt(strict_mode));
- __ Push(x10);
-
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
- DCHECK(!AreAliased(
- value, key, receiver, receiver_map, elements_map, elements, x10, x11));
-
- Label transition_smi_elements;
- Label transition_double_elements;
- Label fast_double_without_map_check;
- Label non_double_value;
- Label finish_store;
-
- __ Bind(fast_object);
- if (check_map == kCheckMap) {
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ B(ne, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because there
- // may be a callback on the element.
- Label holecheck_passed;
- __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Ldr(x11, MemOperand(x10));
- __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
- __ bind(&holecheck_passed);
-
- // Smi stores don't require further checks.
- __ JumpIfSmi(value, &finish_store);
-
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
-
- __ Bind(&finish_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Add(x10, key, Smi::FromInt(1));
- __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
-
- Register address = x11;
- __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Str(value, MemOperand(address));
-
- Label dont_record_write;
- __ JumpIfSmi(value, &dont_record_write);
-
- // Update write barrier for the elements array address.
- __ Mov(x10, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- x10,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ Bind(&dont_record_write);
- __ Ret();
-
-
- __ Bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so, go to
- // the runtime.
- __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Ldr(x11, MemOperand(x10));
- __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-
- __ Bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- elements,
- x10,
- d0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Add(x10, key, Smi::FromInt(1));
- __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
-
- __ Bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- x10,
- x11,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&fast_double_without_map_check);
-
- __ Bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- x10,
- x11,
- slow);
-
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, receiver_map, mode, slow);
-
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&finish_store);
-
- __ Bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
- // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- x10,
- x11,
- slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&finish_store);
-}
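
The three transition blocks above implement a small one-way lattice over fast elements kinds; a store can only generalize the array, never narrow it. A hedged summary in code form (a local stand-in enum, not V8's ElementsKind):

enum Kind { FAST_SMI, FAST_DOUBLE, FAST_OBJECT };  // ordered by generality

static Kind KindAfterStore(Kind kind, bool value_is_smi,
                           bool value_is_heap_number) {
  if (value_is_smi) return kind;  // a smi fits in every fast kind
  if (kind == FAST_SMI) {
    return value_is_heap_number ? FAST_DOUBLE : FAST_OBJECT;
  }
  if (kind == FAST_DOUBLE && !value_is_heap_number) {
    return FAST_OBJECT;  // a non-number object lands in a double array
  }
  return kind;  // already general enough: no transition
}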
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictMode strict_mode) {
- ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
- Label slow;
- Label array;
- Label fast_object;
- Label extra;
- Label fast_object_grow;
- Label fast_double_grow;
- Label fast_double;
-
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
- DCHECK(value.is(x0));
-
- Register receiver_map = x3;
- Register elements = x4;
- Register elements_map = x5;
-
- __ JumpIfNotSmi(key, &slow);
- __ JumpIfSmi(receiver, &slow);
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
- __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(
- x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
-
- // Check if the object is a JS array or not.
- Register instance_type = x10;
- __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
- __ B(eq, &array);
- // Check that the object is some kind of JSObject.
- __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
- __ B(lt, &slow);
-
- // Object case: Check key against length in the elements array.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(hi, &fast_object);
-
-
- __ Bind(&slow);
- // Slow case, handle jump to runtime.
- // Live values:
- // x0: value
- // x1: key
- // x2: receiver
- GenerateRuntimeSetProperty(masm, strict_mode);
-
-
- __ Bind(&extra);
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
-
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(ls, &slow);
-
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ B(eq, &fast_object_grow);
- __ Cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ B(eq, &fast_double_grow);
- __ B(&slow);
-
-
- __ Bind(&array);
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is, the length is always a smi.
-
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(eq, &extra); // We can handle the case where we are appending 1 element.
- __ B(lo, &slow);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, x3, x4, x5, x6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // Tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label miss;
- Register value = ValueRegister();
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register dictionary = x3;
- DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));
-
- __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ Bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
-
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ Mov(x10, Smi::FromInt(strict_mode));
- __ Push(x10);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, name and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return al;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address info_address =
- Assembler::return_address_from_call_start(address);
-
- InstructionSequence* patch_info = InstructionSequence::At(info_address);
- return patch_info->IsInlineData();
-}
-
-
-// Activate a SMI fast-path by patching the instructions generated by
-// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
-// JumpPatchSite::EmitPatchInfo().
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- // The patch information is encoded in the instruction stream using
- // instructions which have no side effects, so we can safely execute them.
- // The patch information is encoded directly after the call to the helper
- // function which is requesting this patch operation.
- Address info_address =
- Assembler::return_address_from_call_start(address);
- InlineSmiCheckInfo info(info_address);
-
- // Check and decode the patch information instruction.
- if (!info.HasSmiCheck()) {
- return;
- }
-
- if (FLAG_trace_ic) {
- PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
- address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
- }
-
- // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
- // and JumpPatchSite::EmitJumpIfSmi().
- // Changing
- // tb(n)z xzr, #0, <target>
- // to
- // tb(!n)z test_reg, #0, <target>
- Instruction* to_patch = info.SmiCheck();
- PatchingAssembler patcher(to_patch, 1);
- DCHECK(to_patch->IsTestBranch());
- DCHECK(to_patch->ImmTestBranchBit5() == 0);
- DCHECK(to_patch->ImmTestBranchBit40() == 0);
-
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
-
- int branch_imm = to_patch->ImmTestBranch();
- Register smi_reg;
- if (check == ENABLE_INLINED_SMI_CHECK) {
- DCHECK(to_patch->Rt() == xzr.code());
- smi_reg = info.SmiRegister();
- } else {
- DCHECK(check == DISABLE_INLINED_SMI_CHECK);
- DCHECK(to_patch->Rt() != xzr.code());
- smi_reg = xzr;
- }
-
- if (to_patch->Mask(TestBranchMask) == TBZ) {
- // This is JumpIfNotSmi(smi_reg, branch_imm).
- patcher.tbnz(smi_reg, 0, branch_imm);
- } else {
- DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
- // This is JumpIfSmi(smi_reg, branch_imm).
- patcher.tbz(smi_reg, 0, branch_imm);
- }
-}
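
The patch is sound because of the tagging scheme asserted above (kSmiTag == 0, kSmiTagMask == 1): smi-ness is a single low bit, so one test-bit branch decides it, and with xzr as the tested register that bit is always clear, making the unpatched branch statically decided. A sketch of the predicate:

// Bit 0 is the tag: clear for a smi, set for a heap-object pointer.
static bool IsSmiSketch(uintptr_t tagged_value) {
  return (tagged_value & 1) == 0;
}
// JumpIfSmi(reg, target)    ~ tbz  reg, #0, target  (branch if bit 0 clear)
// JumpIfNotSmi(reg, target) ~ tbnz reg, #0, target  (branch if bit 0 set)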
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index a6ca6affae..71094baa87 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -182,8 +182,8 @@ LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
}
-ptrdiff_t Instruction::ImmPCOffset() {
- ptrdiff_t offset;
+int64_t Instruction::ImmPCOffset() {
+ int64_t offset;
if (IsPCRelAddressing()) {
// PC-relative addressing. Only ADR is supported.
offset = ImmPCRel();
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index bd4e753779..374e2464c3 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -338,7 +338,7 @@ class Instruction {
// Find the PC offset encoded in this instruction. 'this' may be a branch or
// a PC-relative addressing instruction.
// The offset returned is unscaled.
- ptrdiff_t ImmPCOffset();
+ int64_t ImmPCOffset();
// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
@@ -352,9 +352,9 @@ class Instruction {
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
- uint8_t* LiteralAddress() {
+ uintptr_t LiteralAddress() {
int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
- return reinterpret_cast<uint8_t*>(this) + offset;
+ return reinterpret_cast<uintptr_t>(this) + offset;
}
enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 59982d975b..da505ff294 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -107,7 +107,7 @@ Instrument::Instrument(const char* datafile, uint64_t sample_period)
}
}
- static const int num_counters = ARRAY_SIZE(kCounterList);
+ static const int num_counters = arraysize(kCounterList);
// Dump an instrumentation description comment at the top of the file.
fprintf(output_stream_, "# counters=%d\n", num_counters);
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
new file mode 100644
index 0000000000..690c8c28ee
--- /dev/null
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -0,0 +1,368 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return x1; }
+const Register LoadDescriptor::NameRegister() { return x2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return x0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return x3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return x1; }
+const Register StoreDescriptor::NameRegister() { return x2; }
+const Register StoreDescriptor::ValueRegister() { return x0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
+
+
+const Register InstanceofDescriptor::left() {
+ // Object to check (instanceof lhs).
+ return x11;
+}
+
+
+const Register InstanceofDescriptor::right() {
+ // Constructor function (instanceof rhs).
+ return x10;
+}
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return x1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return x0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return x2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return x11; }
+
+
+const Register MathPowIntegerDescriptor::exponent() { return x12; }
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: function info
+ Register registers[] = {cp, x2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: function
+ Register registers[] = {cp, x1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value
+ Register registers[] = {cp, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value
+ Register registers[] = {cp, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x3: array literals array
+ // x2: array literal index
+ // x1: constant elements
+ Register registers[] = {cp, x3, x2, x1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x3: object literals array
+ // x2: object literal index
+ // x1: constant properties
+ // x0: object literal flags
+ Register registers[] = {cp, x3, x2, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: feedback vector
+ // x3: call feedback slot
+ Register registers[] = {cp, x2, x3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, x3, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // x1: the function to call
+ Register registers[] = {cp, x1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, x1, x3};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // x0 : number of arguments
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {cp, x0, x1, x2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: length
+ // x1: index (of last match)
+ // x0: string
+ Register registers[] = {cp, x2, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value (js_array)
+ // x1: to_map
+ Register registers[] = {cp, x0, x1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: function
+ // x2: allocation site with elements kind
+ // x0: number of arguments to the constructor function
+ Register registers[] = {cp, x1, x2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // The stack parameter count covers the constructor pointer and a single
+ // argument.
+ Register registers[] = {cp, x1, x2, x0};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: constructor function
+ // x0: number of arguments to the constructor function
+ Register registers[] = {cp, x1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // The stack parameter count covers the constructor pointer and a single
+ // argument.
+ Register registers[] = {cp, x1, x0};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value to compare
+ Register registers[] = {cp, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value
+ Register registers[] = {cp, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: left operand
+ // x0: right operand
+ Register registers[] = {cp, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: allocation site
+ // x1: left operand
+ // x0: right operand
+ Register registers[] = {cp, x2, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: left operand
+ // x0: right operand
+ Register registers[] = {cp, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor noInlineDescriptor =
+ PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x2, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &noInlineDescriptor);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor noInlineDescriptor =
+ PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x2, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &noInlineDescriptor);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x0, // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x1, // JSFunction
+ x0, // actual number of arguments
+ x2, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x0, // callee
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
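Each Initialize() above follows the same pattern: pin the call convention's parameters to fixed ARM64 registers, with a NULL representations array meaning every parameter is tagged. A minimal standalone sketch of that registration/lookup shape (the Register and arraysize stand-ins here are illustrative, not V8's declarations):

#include <cassert>
#include <cstddef>

struct Register { int code; };  // stand-in for V8's Register

template <typename T, size_t N>
constexpr size_t arraysize(T (&)[N]) { return N; }  // same idea as V8's macro

struct CallInterfaceDescriptorData {
  const Register* registers_ = nullptr;
  size_t count_ = 0;
  // Representations omitted: passing NULL above means all-tagged.
  void Initialize(size_t count, const Register* regs) {
    count_ = count;
    registers_ = regs;
  }
  Register GetParameterRegister(size_t i) const {
    assert(i < count_);
    return registers_[i];
  }
};

int main() {
  // Mirrors BinaryOpDescriptor: cp (context), x1 (left), x0 (right).
  static const Register cp{26}, x1{1}, x0{0};  // register codes illustrative
  static const Register regs[] = {cp, x1, x0};
  CallInterfaceDescriptorData data;
  data.Initialize(arraysize(regs), regs);
  assert(data.GetParameterRegister(2).code == 0);  // right operand lives in x0
  return 0;
}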
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.h b/deps/v8/src/arm64/interface-descriptors-arm64.h
new file mode 100644
index 0000000000..76def88326
--- /dev/null
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.h
@@ -0,0 +1,26 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
+#define V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformInterfaceDescriptor {
+ public:
+ explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) {}
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
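The only platform payload carried here is storage_mode(); in the .cc above, the keyed/named load descriptors pass NEVER_INLINE_TARGET_ADDRESS while the call descriptors pass CAN_INLINE_TARGET_ADDRESS. A hedged sketch of a consumer branching on that flag (the enum values are copied from the diff; MayEmbedTargetInline is a hypothetical helper, not V8 API):

enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

// Hypothetical consumer: decide whether a call target may be embedded
// directly in the instruction stream or must stay patchable out-of-line.
bool MayEmbedTargetInline(TargetAddressStorageMode mode) {
  return mode == CAN_INLINE_TARGET_ADDRESS;
}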
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index 7bb66dbd70..502b046927 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -354,12 +354,6 @@ const char* LArithmeticT::Mnemonic() const {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -1036,14 +1030,14 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
- const InterfaceDescriptor* descriptor = instr->descriptor();
+ CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op = UseFixed(instr->OperandAt(i),
- descriptor->GetParameterRegister(i - 1));
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
ops.Add(op, zone());
}
@@ -1252,7 +1246,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
DCHECK(input_rep.IsSmiOrTagged());
return AssignEnvironment(
DefineAsRegister(new(zone()) LClampTToUint8(reg,
- TempRegister(),
TempDoubleRegister())));
}
}
@@ -1475,6 +1468,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
}
+ inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
@@ -1561,6 +1555,19 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
+}
+
+
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
// The function is required (by MacroAssembler::InvokeFunction) to be in x1.
@@ -1663,11 +1670,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(),
- LoadIC::ReceiverRegister());
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -1725,11 +1732,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -1747,10 +1755,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -1934,12 +1943,12 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
int32_t constant_abs = Abs(constant);
if (!end_range_constant &&
- (small_constant ||
- (IsPowerOf2(constant_abs)) ||
- (!can_overflow && (IsPowerOf2(constant_abs + 1) ||
- IsPowerOf2(constant_abs - 1))))) {
+ (small_constant || (base::bits::IsPowerOfTwo32(constant_abs)) ||
+ (!can_overflow && (base::bits::IsPowerOfTwo32(constant_abs + 1) ||
+ base::bits::IsPowerOfTwo32(constant_abs - 1))))) {
LConstantOperand* right = UseConstant(most_const);
- bool need_register = IsPowerOf2(constant_abs) && !small_constant;
+ bool need_register =
+ base::bits::IsPowerOfTwo32(constant_abs) && !small_constant;
LOperand* left = need_register ? UseRegister(least_const)
: UseRegisterAtStart(least_const);
LInstruction* result =
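Apart from renaming IsPowerOf2 to base::bits::IsPowerOfTwo32, the selection logic above is unchanged: multiplications by a constant c stay cheap when |c|, |c|+1, or |c|-1 is a power of two, presumably so the backend can lower them to shift/add or shift/subtract sequences. A standalone restatement of the arithmetic being exploited (not V8 code):

#include <cassert>
#include <cstdint>

// Equivalent to the classic base::bits::IsPowerOfTwo32 test.
inline bool IsPowerOfTwo32(uint32_t value) {
  return value != 0 && (value & (value - 1)) == 0;
}

// x * 8 == x << 3          (|c| is a power of two)
// x * 5 == x + (x << 2)    (|c| - 1 is a power of two)
// x * 7 == (x << 3) - x    (|c| + 1 is a power of two)
uint32_t MulBy5(uint32_t x) { return x + (x << 2); }
uint32_t MulBy7(uint32_t x) { return (x << 3) - x; }

int main() {
  assert(IsPowerOfTwo32(8) && !IsPowerOfTwo32(6));
  assert(MulBy5(12) == 60 && MulBy7(9) == 63);
  return 0;
}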
@@ -1985,10 +1994,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2001,11 +2010,14 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Representation exponent_type = instr->right()->representation();
DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), d0);
- LOperand* right = exponent_type.IsInteger32()
- ? UseFixed(instr->right(), x12)
- : exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), d1)
- : UseFixed(instr->right(), x11);
+ LOperand* right;
+ if (exponent_type.IsInteger32()) {
+ right = UseFixed(instr->right(), MathPowIntegerDescriptor::exponent());
+ } else if (exponent_type.IsDouble()) {
+ right = UseFixedDouble(instr->right(), d1);
+ } else {
+ right = UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+ }
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d0),
instr,
@@ -2203,8 +2215,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
return DoArithmeticT(op, instr);
}
- DCHECK(instr->representation().IsInteger32() ||
- instr->representation().IsSmi());
+ DCHECK(instr->representation().IsSmiOrInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
@@ -2215,42 +2226,30 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
LOperand* left = instr->representation().IsSmi()
? UseRegister(instr->left())
: UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- LOperand* temp = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- right = UseConstant(right_value);
- constant_value = JSShiftAmountFromHConstant(right_value);
- } else {
- right = UseRegisterAtStart(right_value);
- if (op == Token::ROR) {
- temp = TempRegister();
- }
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and the
- // result cannot be truncated to int32.
- bool does_deopt = false;
- if ((op == Token::SHR) && (constant_value == 0)) {
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+
+ // The only shift that can deoptimize is `left >>> 0`, where left is negative.
+ // In these cases, the result is a uint32 that is too large for an int32.
+ bool right_can_be_zero = !instr->right()->IsConstant() ||
+ (JSShiftAmountFromHConstant(instr->right()) == 0);
+ bool can_deopt = false;
+ if ((op == Token::SHR) && right_can_be_zero) {
if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ can_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ can_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
LInstruction* result;
if (instr->representation().IsInteger32()) {
- result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ result = DefineAsRegister(new (zone()) LShiftI(op, left, right, can_deopt));
} else {
DCHECK(instr->representation().IsSmi());
- result = DefineAsRegister(
- new(zone()) LShiftS(op, left, right, temp, does_deopt));
+ result = DefineAsRegister(new (zone()) LShiftS(op, left, right, can_deopt));
}
- return does_deopt ? AssignEnvironment(result) : result;
+ return can_deopt ? AssignEnvironment(result) : result;
}
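The rewritten comment pins down the single deopting case: `left >>> 0` with a negative left produces a uint32 above INT32_MAX, which cannot be re-tagged as an int32 unless the uses truncate. A standalone illustration of that overflow (plain C++, not V8 code):

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  int32_t left = -1;
  // JavaScript's `left >>> 0` reinterprets the int32 bits as a uint32.
  uint32_t shifted = static_cast<uint32_t>(left) >> 0;
  assert(shifted == 4294967295u);  // 2^32 - 1
  // No int32 can hold this value, hence the deopt unless the result is
  // known to be uint32 or every use truncates back to int32.
  assert(shifted > static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
  return 0;
}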
@@ -2379,10 +2378,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(),
- KeyedStoreIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
- LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged());
@@ -2424,8 +2423,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister());
- LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr);
@@ -2682,7 +2682,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else {
spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
+ Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
}
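The removed Abort() recorded the bailout reason and marked the build ABORTED; the OSR spill-slot overflow now calls Retry() instead. A sketch of the distinction, assuming Retry() records the reason as a non-fatal retry request rather than a hard bailout (only Abort()'s body is quoted in the hunks above; the Retry() body here is an assumption about this tree):

enum Status { BUILDING, ABORTED };

struct CompilationInfoSketch {        // hypothetical stand-in
  int reason = 0;
  bool fatal = false;
  void set_bailout_reason(int r) { reason = r; fatal = true; }
  void RetryOptimization(int r) { reason = r; fatal = false; }
};

struct ChunkBuilderSketch {
  CompilationInfoSketch info;
  Status status = BUILDING;
  void Abort(int reason) {            // the body removed in this commit
    info.set_bailout_reason(reason);
    status = ABORTED;
  }
  void Retry(int reason) {            // assumed shape of the replacement
    info.RetryOptimization(reason);   // non-fatal: optimization is retried
    status = ABORTED;                 // this particular build still stops
  }
};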
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
index 21a5f74141..6ead3fe8ca 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -165,6 +165,7 @@ class LCodeGen;
V(SubI) \
V(SubS) \
V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -178,11 +179,11 @@ class LCodeGen;
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
- virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
- virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
+ virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
@@ -294,7 +295,7 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ virtual bool HasResult() const FINAL OVERRIDE {
return (R != 0) && (result() != NULL);
}
void set_result(LOperand* operand) { results_[0] = operand; }
@@ -316,17 +317,38 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+ virtual int TempCount() FINAL OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
};
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ explicit LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
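The template arguments <0, 3, 0> follow the LTemplateInstruction<R, I, T> contract made explicit by the iterator support above: R results (0 or 1, per the STATIC_ASSERT), I inputs, T temps. A compile-time restatement of that shape (a sketch, not the V8 class):

struct LOperand;

template <int R, int I, int T>
struct TemplateInstructionSketch {
  static_assert(R == 0 || R == 1, "at most one result");
  // Size arrays to at least 1 so the zero-count cases stay legal C++.
  LOperand* results_[R == 0 ? 1 : R];
  LOperand* inputs_[I == 0 ? 1 : I];
  LOperand* temps_[T == 0 ? 1 : T];
  bool HasResult() const { return R != 0; }
  int InputCount() const { return I; }
  int TempCount() const { return T; }
};

// LTailCallThroughMegamorphicCache: no result, three inputs
// (context, receiver, name), no temps.
using TailCallShape = TemplateInstructionSketch<0, 3, 0>;

int main() {
  TailCallShape shape;
  return (shape.InputCount() == 3 && !shape.HasResult()) ? 0 : 1;
}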
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -338,7 +360,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsControl() const FINAL OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -388,8 +410,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const V8_OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsGap() const OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -425,11 +447,11 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@@ -437,7 +459,7 @@ class LInstructionGap V8_FINAL : public LGap {
};
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -450,14 +472,14 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
public:
LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -466,14 +488,14 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ virtual bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
@@ -482,7 +504,7 @@ class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -498,17 +520,17 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -524,18 +546,18 @@ class LLabel V8_FINAL : public LGap {
};
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments,
LOperand* length,
@@ -551,11 +573,11 @@ class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddE FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddE(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -570,7 +592,7 @@ class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right)
: shift_(NO_SHIFT), shift_amount_(0) {
@@ -599,7 +621,7 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddS(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -614,7 +636,7 @@ class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 3> {
public:
LAllocate(LOperand* context,
LOperand* size,
@@ -639,7 +661,7 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 3> {
};
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -660,7 +682,7 @@ class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LArgumentsElements(LOperand* temp) {
temps_[0] = temp;
@@ -673,7 +695,7 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
};
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -685,7 +707,7 @@ class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op,
LOperand* left,
@@ -699,18 +721,18 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -727,18 +749,18 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticT;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -753,7 +775,7 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right)
: shift_(NO_SHIFT), shift_amount_(0) {
@@ -784,7 +806,7 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitS(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -801,7 +823,7 @@ class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LBranch FINAL : public LControlInstruction<1, 2> {
public:
explicit LBranch(LOperand* value, LOperand *temp1, LOperand *temp2) {
inputs_[0] = value;
@@ -816,11 +838,11 @@ class LBranch V8_FINAL : public LControlInstruction<1, 2> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
@@ -831,13 +853,13 @@ class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -854,7 +876,7 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -867,13 +889,13 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -886,13 +908,13 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -903,7 +925,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -913,7 +935,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -926,7 +948,7 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -941,7 +963,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
inputs_[0] = value;
@@ -956,7 +978,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -969,7 +991,7 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -981,7 +1003,7 @@ class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
@@ -994,7 +1016,7 @@ class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -1006,7 +1028,7 @@ class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -1018,23 +1040,21 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp1, LOperand* temp2) {
+ LClampTToUint8(LOperand* unclamped, LOperand* temp1) {
inputs_[0] = unclamped;
temps_[0] = temp1;
- temps_[1] = temp2;
}
LOperand* unclamped() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
@@ -1047,7 +1067,7 @@ class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
@@ -1061,7 +1081,7 @@ class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -1077,11 +1097,11 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpHoleAndBranchD FINAL : public LControlInstruction<1, 1> {
public:
explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
inputs_[0] = object;
@@ -1096,7 +1116,7 @@ class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranchT FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranchT(LOperand* object) {
inputs_[0] = object;
@@ -1109,7 +1129,7 @@ class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
};
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1126,7 +1146,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1141,7 +1161,7 @@ class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1160,7 +1180,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1176,7 +1196,7 @@ class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1195,11 +1215,11 @@ class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1208,7 +1228,7 @@ class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1219,7 +1239,7 @@ class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1228,7 +1248,7 @@ class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1237,7 +1257,7 @@ class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1248,14 +1268,14 @@ class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1272,13 +1292,13 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1291,15 +1311,15 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -1317,7 +1337,7 @@ class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -1337,7 +1357,7 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -1354,7 +1374,7 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToIntOrSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToIntOrSmi(LOperand* value) {
inputs_[0] = value;
@@ -1369,7 +1389,7 @@ class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -1385,7 +1405,7 @@ class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -1399,7 +1419,7 @@ class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1412,7 +1432,7 @@ class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
: public LControlInstruction<1, 1> {
public:
LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
@@ -1427,11 +1447,11 @@ class LHasCachedArrayIndexAndBranch V8_FINAL
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1445,11 +1465,11 @@ class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
@@ -1459,13 +1479,13 @@ class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1481,7 +1501,7 @@ class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1500,7 +1520,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) V8_OVERRIDE {
+ LEnvironment* env) OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1509,7 +1529,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1521,42 +1541,41 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
public:
- LCallWithDescriptor(const InterfaceDescriptor* descriptor,
- const ZoneList<LOperand*>& operands,
- Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
- const InterfaceDescriptor* descriptor() { return descriptor_; }
+ CallInterfaceDescriptor descriptor() { return descriptor_; }
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
- const InterfaceDescriptor* descriptor_;
+ CallInterfaceDescriptor descriptor_;
ZoneList<LOperand*> inputs_;
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1569,13 +1588,13 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 2> {
public:
LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
temps_[0] = temp1;
@@ -1590,7 +1609,7 @@ class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 2> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -1605,11 +1624,11 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1622,11 +1641,11 @@ class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1637,11 +1656,11 @@ class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1655,11 +1674,11 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1672,11 +1691,11 @@ class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
int slot_index() const { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1689,7 +1708,7 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -1702,7 +1721,7 @@ class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
inputs_[0] = function;
@@ -1717,14 +1736,14 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
LOperand* vector) {
@@ -1770,7 +1789,7 @@ class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
uint32_t base_offset() const {
return this->hydrogen()->base_offset();
}
- void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ void PrintDataTo(StringStream* stream) OVERRIDE {
this->elements()->PrintTo(stream);
stream->Add("[");
this->key()->PrintTo(stream);
@@ -1824,7 +1843,7 @@ class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* vector) {
@@ -1844,7 +1863,7 @@ class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
inputs_[0] = context;
@@ -1863,7 +1882,7 @@ class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1872,7 +1891,7 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1894,13 +1913,13 @@ class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
LOperand* value() { return this->inputs_[0]; }
BuiltinFunctionId op() const { return this->hydrogen()->op(); }
- void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
-class LMathAbs V8_FINAL : public LUnaryMathOperation<0> {
+class LMathAbs FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
@@ -1930,7 +1949,7 @@ class LMathAbsTagged: public LTemplateInstruction<1, 2, 3> {
};
-class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
+class LMathExp FINAL : public LUnaryMathOperation<4> {
public:
LMathExp(LOperand* value,
LOperand* double_temp1,
@@ -1955,7 +1974,7 @@ class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
// Math.floor with a double result.
-class LMathFloorD V8_FINAL : public LUnaryMathOperation<0> {
+class LMathFloorD FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathFloorD(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
@@ -1963,14 +1982,14 @@ class LMathFloorD V8_FINAL : public LUnaryMathOperation<0> {
// Math.floor with an integer result.
-class LMathFloorI V8_FINAL : public LUnaryMathOperation<0> {
+class LMathFloorI FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathFloorI(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
};
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -1989,7 +2008,7 @@ class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -2009,7 +2028,7 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -2026,21 +2045,21 @@ class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LMathLog V8_FINAL : public LUnaryMathOperation<0> {
+class LMathLog FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
};
-class LMathClz32 V8_FINAL : public LUnaryMathOperation<0> {
+class LMathClz32 FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathClz32(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2055,7 +2074,7 @@ class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
+class LMathPowHalf FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
@@ -2063,7 +2082,7 @@ class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
// Math.round with a double result.
-class LMathRoundD V8_FINAL : public LUnaryMathOperation<0> {
+class LMathRoundD FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathRoundD(LOperand* value)
: LUnaryMathOperation<0>(value) {
@@ -2074,7 +2093,7 @@ class LMathRoundD V8_FINAL : public LUnaryMathOperation<0> {
// Math.round with an integer result.
-class LMathRoundI V8_FINAL : public LUnaryMathOperation<1> {
+class LMathRoundI FINAL : public LUnaryMathOperation<1> {
public:
LMathRoundI(LOperand* value, LOperand* temp1)
: LUnaryMathOperation<1>(value) {
@@ -2087,7 +2106,7 @@ class LMathRoundI V8_FINAL : public LUnaryMathOperation<1> {
};
-class LMathFround V8_FINAL : public LUnaryMathOperation<0> {
+class LMathFround FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathFround(LOperand* value) : LUnaryMathOperation<0>(value) {}
@@ -2095,14 +2114,14 @@ class LMathFround V8_FINAL : public LUnaryMathOperation<0> {
};
-class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
+class LMathSqrt FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
};
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -2120,7 +2139,7 @@ class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -2140,7 +2159,7 @@ class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LModI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2155,7 +2174,7 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulConstIS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulConstIS(LOperand* left, LConstantOperand* right) {
inputs_[0] = left;
@@ -2170,7 +2189,7 @@ class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2185,7 +2204,7 @@ class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulS(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2200,7 +2219,7 @@ class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2217,7 +2236,7 @@ class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
public:
explicit LNumberTagU(LOperand* value,
LOperand* temp1,
@@ -2235,7 +2254,7 @@ class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2251,14 +2270,14 @@ class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2273,7 +2292,7 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LPreparePushArguments V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LPreparePushArguments FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LPreparePushArguments(int argc) : argc_(argc) {}
@@ -2286,7 +2305,7 @@ class LPreparePushArguments V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LPushArguments V8_FINAL : public LTemplateResultInstruction<0> {
+class LPushArguments FINAL : public LTemplateResultInstruction<0> {
public:
explicit LPushArguments(Zone* zone,
int capacity = kRecommendedMaxPushedArgs)
@@ -2312,15 +2331,15 @@ class LPushArguments V8_FINAL : public LTemplateResultInstruction<0> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2333,7 +2352,7 @@ class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
@@ -2356,7 +2375,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LSeqStringGetChar(LOperand* string,
LOperand* index,
@@ -2375,7 +2394,7 @@ class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 1> {
public:
LSeqStringSetChar(LOperand* context,
LOperand* string,
@@ -2400,7 +2419,7 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
};
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2413,7 +2432,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2430,7 +2449,7 @@ class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2480,7 +2499,7 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
}
uint32_t base_offset() const { return this->hydrogen()->base_offset(); }
- void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ void PrintDataTo(StringStream* stream) OVERRIDE {
this->elements()->PrintTo(stream);
stream->Add("[");
this->key()->PrintTo(stream);
@@ -2503,7 +2522,7 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
};
-class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
+class LStoreKeyedExternal FINAL : public LStoreKeyed<1> {
public:
LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
LOperand* temp) :
@@ -2517,7 +2536,7 @@ class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
};
-class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
+class LStoreKeyedFixed FINAL : public LStoreKeyed<1> {
public:
LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
LOperand* temp) :
@@ -2531,7 +2550,7 @@ class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
};
-class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
+class LStoreKeyedFixedDouble FINAL : public LStoreKeyed<1> {
public:
LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
LOperand* temp) :
@@ -2546,7 +2565,7 @@ class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
@@ -2566,13 +2585,13 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LStoreNamedField(LOperand* object, LOperand* value,
LOperand* temp0, LOperand* temp1) {
@@ -2590,7 +2609,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Representation representation() const {
return hydrogen()->field_representation();
@@ -2598,7 +2617,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
};
-class LStoreNamedGeneric V8_FINAL: public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL: public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2613,14 +2632,14 @@ class LStoreNamedGeneric V8_FINAL: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2638,7 +2657,7 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2655,7 +2674,7 @@ class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2670,7 +2689,7 @@ class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2688,12 +2707,12 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2712,7 +2731,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -2733,19 +2752,17 @@ class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LShiftS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LShiftS(Token::Value op, LOperand* left, LOperand* right, LOperand* temp,
- bool can_deopt) : op_(op), can_deopt_(can_deopt) {
+ LShiftS(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
}
Token::Value op() const { return op_; }
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
bool can_deopt() const { return can_deopt_; }
DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
@@ -2756,7 +2773,7 @@ class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 1> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 1> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object,
LOperand* temp) {
@@ -2769,14 +2786,14 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 1> {
LOperand* code_object() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
};
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -2793,11 +2810,11 @@ class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 2> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2814,7 +2831,7 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
};
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right)
: shift_(NO_SHIFT), shift_amount_(0) {
@@ -2858,14 +2875,14 @@ class LSubS: public LTemplateInstruction<1, 2, 0> {
};
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2878,7 +2895,7 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2899,7 +2916,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@@ -2910,7 +2927,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
};
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 2> {
public:
LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
inputs_[0] = object;
@@ -2926,7 +2943,7 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
};
-class LTruncateDoubleToIntOrSmi V8_FINAL
+class LTruncateDoubleToIntOrSmi FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
@@ -2943,7 +2960,7 @@ class LTruncateDoubleToIntOrSmi V8_FINAL
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2957,7 +2974,7 @@ class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 2> {
public:
LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2974,11 +2991,11 @@ class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
Handle<String> type_literal() const { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2990,7 +3007,7 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
inputs_[0] = value;
@@ -3006,7 +3023,7 @@ class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -3049,7 +3066,7 @@ class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
};
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -3065,7 +3082,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -3075,17 +3092,13 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(graph->zone()),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
+ : LChunkBuilderBase(info, graph),
current_instruction_(NULL),
current_block_(NULL),
- allocator_(allocator) { }
+ allocator_(allocator) {}
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -3108,27 +3121,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
static bool HasMagicNumberForDivision(int32_t divisor);
private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- HGraph* graph() const { return graph_; }
- Isolate* isolate() const { return info_->isolate(); }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- int argument_count() const { return argument_count_; }
- CompilationInfo* info() const { return info_; }
- Heap* heap() const { return isolate()->heap(); }
-
- void Abort(BailoutReason reason);
-
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -3252,10 +3244,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
LAllocator* allocator_;
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index 53a1cfac42..b9b67d9bbd 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -6,15 +6,18 @@
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -590,11 +593,8 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
}
@@ -824,28 +824,23 @@ bool LCodeGen::GenerateDeferredCode() {
}
-bool LCodeGen::GenerateDeoptJumpTable() {
+bool LCodeGen::GenerateJumpTable() {
Label needs_frame, restore_caller_doubles, call_deopt_entry;
- if (deopt_jump_table_.length() > 0) {
+ if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
- Address base = deopt_jump_table_[0]->address;
+ Address base = jump_table_[0]->address;
UseScratchRegisterScope temps(masm());
Register entry_offset = temps.AcquireX();
- int length = deopt_jump_table_.length();
+ int length = jump_table_.length();
for (int i = 0; i < length; i++) {
- __ Bind(&deopt_jump_table_[i]->label);
+ Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
+ __ Bind(&table_entry->label);
- Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
- Address entry = deopt_jump_table_[i]->address;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load the base
@@ -856,7 +851,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
// branch.
bool last_entry = (i + 1) == length;
- if (deopt_jump_table_[i]->needs_frame) {
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
if (!needs_frame.is_bound()) {
// This variant of deopt can only be used with stubs. Since we don't
@@ -938,7 +933,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -994,9 +989,9 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
void LCodeGen::DeoptimizeBranch(
- LEnvironment* environment,
- BranchType branch_type, Register reg, int bit,
- Deoptimizer::BailoutType* override_bailout_type) {
+ LInstruction* instr, const char* detail, BranchType branch_type,
+ Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
@@ -1045,102 +1040,108 @@ void LCodeGen::DeoptimizeBranch(
__ Bind(&dont_trap);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
frame_is_built_ && !info()->saves_caller_doubles()) {
+ DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry* table_entry =
+ new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
+ !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last()->address != entry) ||
- (deopt_jump_table_.last()->bailout_type != bailout_type) ||
- (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
- Deoptimizer::JumpTableEntry* table_entry =
- new(zone()) Deoptimizer::JumpTableEntry(entry,
- bailout_type,
- !frame_is_built_);
- deopt_jump_table_.Add(table_entry, zone());
+ if (jump_table_.is_empty() ||
+ !table_entry->IsEquivalentTo(*jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
}
- __ B(&deopt_jump_table_.last()->label,
- branch_type, reg, bit);
+ __ B(&jump_table_.last()->label, branch_type, reg, bit);
}
}
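// [editor's note] Minimal standalone sketch of the entry-reuse pattern
// above; SketchJumpTableEntry is a simplified stand-in, not the real
// Deoptimizer::JumpTableEntry declaration.
#include <vector>

struct SketchJumpTableEntry {
  const void* address;
  int bailout_type;
  bool needs_frame;
  bool IsEquivalentTo(const SketchJumpTableEntry& other) const {
    // The pending label is deliberately ignored: only the target matters.
    return address == other.address &&
           bailout_type == other.bailout_type &&
           needs_frame == other.needs_frame;
  }
};

void AddDeopt(std::vector<SketchJumpTableEntry>* table,
              const SketchJumpTableEntry& entry) {
  // Consecutive deopts to the same target share one table entry, so the
  // emitted branch simply reuses the last entry's label.
  if (table->empty() || !entry.IsEquivalentTo(table->back())) {
    table->push_back(entry);
  }
}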
-void LCodeGen::Deoptimize(LEnvironment* environment,
+void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
Deoptimizer::BailoutType* override_bailout_type) {
- DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
+ DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
}
-void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
- DeoptimizeBranch(environment, static_cast<BranchType>(cond));
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
}
-void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_zero, rt);
+void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_zero, rt);
}
-void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_not_zero, rt);
+void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_not_zero, rt);
}
-void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
+ const char* detail) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
- DeoptimizeIfBitSet(rt, sign_bit, environment);
+ DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
}
-void LCodeGen::DeoptimizeIfSmi(Register rt,
- LEnvironment* environment) {
- DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
+void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
}
-void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
- DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
}
-void LCodeGen::DeoptimizeIfRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(eq, environment);
+ DeoptimizeIf(eq, instr, detail);
}
-void LCodeGen::DeoptimizeIfNotRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(ne, environment);
+ DeoptimizeIf(ne, instr, detail);
}
-void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
+ const char* detail) {
__ TestForMinusZero(input);
- DeoptimizeIf(vs, environment);
+ DeoptimizeIf(vs, instr, detail);
}
-void LCodeGen::DeoptimizeIfBitSet(Register rt,
- int bit,
- LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_bit_set, rt, bit);
+void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
+ __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr, "not heap number");
}
-void LCodeGen::DeoptimizeIfBitClear(Register rt,
- int bit,
- LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
+void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
+}
+
+
+void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
}
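// [editor's note] The helpers above all funnel into DeoptimizeBranch, now
// threading the LInstruction* plus a human-readable detail string so each
// deopt site can emit a reason comment. A simplified stand-in sketch:
#include <cstdio>

enum SketchBranchType { kAlways, kRegZero, kRegNotZero, kBitSet, kBitClear };

void SketchDeoptimizeBranch(const char* mnemonic, const char* detail,
                            SketchBranchType type) {
  // In the real code this builds a Deoptimizer::Reason from the hydrogen
  // position, the instruction mnemonic and the detail, then emits the
  // branch or jump-table entry.
  std::printf(";;; deoptimize %s: %s (branch kind %d)\n", mnemonic, detail,
              static_cast<int>(type));
}

void SketchDeoptimizeIfZero(const char* mnemonic, const char* detail) {
  SketchDeoptimizeBranch(mnemonic, detail, kRegZero);  // thin wrapper
}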
@@ -1226,17 +1227,7 @@ Operand LCodeGen::ToOperand(LOperand* op) {
}
-Operand LCodeGen::ToOperand32I(LOperand* op) {
- return ToOperand32(op, SIGNED_INT32);
-}
-
-
-Operand LCodeGen::ToOperand32U(LOperand* op) {
- return ToOperand32(op, UNSIGNED_INT32);
-}
-
-
-Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
+Operand LCodeGen::ToOperand32(LOperand* op) {
DCHECK(op != NULL);
if (op->IsRegister()) {
return Operand(ToRegister32(op));
@@ -1245,10 +1236,7 @@ Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- DCHECK(constant->HasInteger32Value());
- return (signedness == SIGNED_INT32)
- ? Operand(constant->Integer32Value())
- : Operand(static_cast<uint32_t>(constant->Integer32Value()));
+ return Operand(constant->Integer32Value());
} else {
// Other constants not implemented.
Abort(kToOperand32UnsupportedImmediate);
@@ -1260,7 +1248,7 @@ Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
}
-static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
+static int64_t ArgumentsOffsetWithoutFrame(int index) {
DCHECK(index < 0);
return -(index + 1) * kPointerSize;
}
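// [editor's note] Worked example for the mapping above, assuming the
// arm64 value kPointerSize == 8 (an assumption of this note):
//   index == -1  =>  -(-1 + 1) * 8 == 0    (first argument, at sp)
//   index == -2  =>  -(-2 + 1) * 8 == 8    (second argument)
//   index == -3  =>  -(-3 + 1) * 8 == 16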
@@ -1314,12 +1302,10 @@ Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
}
-template<class LI>
-Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info,
- IntegerSignedness signedness) {
+template <class LI>
+Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
if (shift_info->shift() == NO_SHIFT) {
- return (signedness == SIGNED_INT32) ? ToOperand32I(right)
- : ToOperand32U(right);
+ return ToOperand32(right);
} else {
return Operand(
ToRegister32(right),
@@ -1396,11 +1382,11 @@ void LCodeGen::EmitBranchGeneric(InstrType instr,
EmitGoto(left_block);
} else if (left_block == next_block) {
branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- branch.Emit(chunk_->GetAssemblyLabel(left_block));
} else {
branch.Emit(chunk_->GetAssemblyLabel(left_block));
- __ B(chunk_->GetAssemblyLabel(right_block));
+ if (right_block != next_block) {
+ __ B(chunk_->GetAssemblyLabel(right_block));
+ }
}
}
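// [editor's note] Control-flow sketch of the rewritten emission strategy
// above; ints stand in for block ids and printf for the real emitters:
#include <cstdio>

void SketchEmitBranch(int left_block, int right_block, int next_block) {
  if (right_block == left_block) {
    std::printf("goto B%d\n", left_block);    // both targets identical
  } else if (left_block == next_block) {
    std::printf("b.inv B%d\n", right_block);  // true case falls through
  } else {
    std::printf("b B%d\n", left_block);
    if (right_block != next_block) {
      // The old code special-cased right_block == next_block up front;
      // folding it here emits the same instructions with one less branch
      // of compiler logic.
      std::printf("jmp B%d\n", right_block);
    }
  }
}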
@@ -1501,7 +1487,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
} else {
Register length = ToRegister32(instr->length());
- Operand index = ToOperand32I(instr->index());
+ Operand index = ToOperand32(instr->index());
__ Sub(result.W(), length, index);
__ Add(result.W(), result.W(), 1);
__ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
@@ -1525,11 +1511,11 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
Register result = ToRegister32(instr->result());
Register left = ToRegister32(instr->left());
- Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+ Operand right = ToShiftedRightOperand32(instr->right(), instr);
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
@@ -1543,7 +1529,7 @@ void LCodeGen::DoAddS(LAddS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
@@ -1669,7 +1655,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr, "too many arguments");
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -1796,15 +1782,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(x0));
DCHECK(ToRegister(instr->result()).is(x0));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister32(instr->result());
Register left = ToRegister32(instr->left());
- Operand right = ToShiftedRightOperand32U(instr->right(), instr);
+ Operand right = ToShiftedRightOperand32(instr->right(), instr);
switch (instr->op()) {
case Token::BIT_AND: __ And(result, left, right); break;
@@ -1838,19 +1825,19 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
if (instr->index()->IsConstantOperand()) {
- Operand index = ToOperand32I(instr->index());
+ Operand index = ToOperand32(instr->index());
Register length = ToRegister32(instr->length());
__ Cmp(length, index);
cond = CommuteCondition(cond);
} else {
Register index = ToRegister32(instr->index());
- Operand length = ToOperand32I(instr->length());
+ Operand length = ToOperand32(instr->length());
__ Cmp(index, length);
}
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr->environment());
+ DeoptimizeIf(cond, instr, "out of bounds");
}
}
@@ -1929,7 +1916,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr->environment());
+ DeoptimizeIfSmi(value, instr, "Smi");
}
Register map = NoReg;
@@ -1990,7 +1977,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr->environment());
+ Deoptimize(instr, "unexpected object");
}
}
}
@@ -2052,6 +2039,34 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+ DCHECK(receiver.is(x1));
+ DCHECK(name.is(x2));
+
+ Register scratch = x3;
+ Register extra = x4;
+ Register extra2 = x5;
+ Register extra3 = x6;
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+ LoadIC::GenerateMiss(masm());
+}
+
+
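// [editor's note] Hedged sketch of the probe-then-miss protocol above;
// the map/handler table is an invented stand-in for the stub cache, which
// really probes with hand-written assembly via GenerateProbe:
#include <cstdint>
#include <unordered_map>

using SketchHandler = void (*)();

void SketchTailCallThroughCache(
    const std::unordered_map<uint64_t, SketchHandler>& cache,
    uint64_t receiver_map, uint64_t name_hash, SketchHandler miss) {
  auto it = cache.find(receiver_map ^ name_hash);  // simplified probe hash
  if (it != cache.end()) {
    it->second();  // on a hit the probe tail-calls the cached handler
    return;
  }
  miss();  // otherwise fall through to the generic miss, as above
}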
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -2147,7 +2162,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr->environment());
+ DeoptimizeIfSmi(temp, instr, "instance migration failed");
}
@@ -2202,7 +2217,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ Bind(&success);
@@ -2211,7 +2226,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
}
}
@@ -2219,7 +2234,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr->environment());
+ DeoptimizeIfNotSmi(value, instr, "not a Smi");
}
@@ -2237,27 +2252,29 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong instance type");
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr->environment());
+ DeoptimizeIf(lo, instr, "wrong instance type");
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr, "wrong instance type");
}
} else {
uint8_t mask;
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
+ if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
- DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
+ DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
+ "wrong instance type");
} else {
- DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
+ DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
+ "wrong instance type");
}
} else {
if (tag == 0) {
@@ -2266,7 +2283,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong instance type");
}
}
}
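// [editor's note] The mask specialisation above in plain C++: when the
// instance-type mask has a single bit set, the check reduces to one bit
// test (TBZ/TBNZ on arm64); otherwise it needs an AND plus a compare.
#include <cstdint>

static bool SketchIsPowerOfTwo32(uint32_t x) {
  return x != 0 && (x & (x - 1)) == 0;
}

bool SketchCheckMaskAndTag(uint8_t type, uint8_t mask, uint8_t tag) {
  if (SketchIsPowerOfTwo32(mask)) {
    // tag is either 0 (deopt if the bit is set) or mask (deopt if clear),
    // so a single-bit test decides the check.
    return (type & mask) == tag;
  }
  // General case, matching the And + Cmp sequence emitted above.
  return static_cast<uint8_t>(type & mask) == tag;
}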
@@ -2289,7 +2306,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
Register input = ToRegister(instr->unclamped());
Register result = ToRegister32(instr->result());
- Register scratch = ToRegister(instr->temp1());
Label done;
// Both smi and heap number cases are handled.
@@ -2303,19 +2319,18 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for heap number.
Label is_heap_number;
- __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
+ __ JumpIfHeapNumber(input, &is_heap_number);
  // Check for undefined. Undefined is converted to zero for clamping conversion.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+ "not a heap number/undefined");
__ Mov(result, 0);
__ B(&done);
// Heap number case.
__ Bind(&is_heap_number);
DoubleRegister dbl_scratch = double_scratch();
- DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
__ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
@@ -2358,7 +2373,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
__ JumpIfSmi(input, false_label);
Register map = scratch2;
- if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2383,7 +2398,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
__ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
- if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
+ if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ JumpIfNotObjectType(
scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
} else {
@@ -2451,8 +2466,7 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
instr->TrueLabel(chunk()));
} else {
Register value = ToRegister(instr->value());
- __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
- instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
__ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
}
@@ -2486,16 +2500,12 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
} else {
if (instr->hydrogen_value()->representation().IsInteger32()) {
if (right->IsConstantOperand()) {
- EmitCompareAndBranch(instr,
- cond,
- ToRegister32(left),
- ToOperand32I(right));
+ EmitCompareAndBranch(instr, cond, ToRegister32(left),
+ ToOperand32(right));
} else {
// Commute the operands and the condition.
- EmitCompareAndBranch(instr,
- CommuteCondition(cond),
- ToRegister32(right),
- ToOperand32I(left));
+ EmitCompareAndBranch(instr, CommuteCondition(cond),
+ ToRegister32(right), ToOperand32(left));
}
} else {
DCHECK(instr->hydrogen_value()->representation().IsSmi());
@@ -2538,7 +2548,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->left()).Is(x1));
DCHECK(ToRegister(instr->right()).Is(x0));
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Signal that we don't inline smi code before this stub.
InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -2618,7 +2628,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "value mismatch");
}
@@ -2642,9 +2652,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
- DeoptimizeIfSmi(object, instr->environment());
+ DeoptimizeIfSmi(object, instr, "Smi");
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a date object");
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2680,8 +2690,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- Deoptimize(instr->environment(), &type);
+ Deoptimize(instr, instr->hydrogen()->reason(), &type);
}
@@ -2689,27 +2698,27 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister32(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister32(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr->environment());
+ DeoptimizeIfZero(dividend, instr, "division by zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision");
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
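// [editor's note] Numeric sketch of the exactness test above: for
// divisor == +/-2^k the division is exact iff the low k bits of the
// dividend are clear, and -(divisor + 1) computes |divisor| - 1 without
// overflowing even for divisor == kMinInt.
#include <cstdint>

bool SketchDivisionIsExact(int32_t dividend, int32_t divisor) {
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  return (dividend & mask) == 0;  // e.g. (13 & 3) != 0, so 13/4 deopts
}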
@@ -2737,14 +2746,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr->environment());
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr->environment());
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2756,7 +2765,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr->environment());
+ DeoptimizeIfNotZero(temp, instr, "lost precision");
}
}
@@ -2779,7 +2788,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr->environment());
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
// Check for (0 / -x) as that will produce negative zero.
@@ -2791,7 +2800,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Check for (kMinInt / -1).
@@ -2803,13 +2812,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "overflow");
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr->environment());
+ DeoptimizeIfNotZero(remainder, instr, "lost precision");
}
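// [editor's note] C model of the flag dance above: Cmp(dividend, 1)
// overflows (sets V) only for dividend == kMinInt, and Ccmp then compares
// divisor with -1 only when V was set, otherwise forcing the flags to
// NoFlag so the following "eq" deopt is not taken.
#include <cstdint>
#include <limits>

bool SketchIsMinIntOverMinusOne(int32_t dividend, int32_t divisor) {
  bool v_set = (dividend == std::numeric_limits<int32_t>::min());
  if (!v_set) return false;  // Ccmp writes NoFlag, so "eq" fails
  return divisor == -1;      // Ccmp performed: "eq" iff divisor == -1
}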
@@ -2818,11 +2827,11 @@ void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr->environment());
+ DeoptimizeIfMinusZero(input, instr, "minus zero");
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->tag_result()) {
__ SmiTag(result.X());
@@ -2854,9 +2863,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ Mov(x2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -2884,7 +2892,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr->environment());
+ DeoptimizeIfZero(result, instr, "no cache");
__ Bind(&done);
}
@@ -2897,18 +2905,17 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(object.Is(x0));
- DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined");
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(object, null_value);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "null");
- DeoptimizeIfSmi(object, instr->environment());
+ DeoptimizeIfSmi(object, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
+ DeoptimizeIf(le, instr, "not a JavaScript object");
Label use_cache, call_runtime;
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
@@ -2922,7 +2929,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment());
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map");
__ Bind(&use_cache);
}
@@ -3017,7 +3024,7 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
if (instr->offset()->IsConstantOperand()) {
- __ Add(result, base, ToOperand32I(instr->offset()));
+ __ Add(result, base, ToOperand32(instr->offset()));
} else {
__ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
}
@@ -3315,8 +3322,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ Ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3337,8 +3343,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@@ -3358,28 +3363,35 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(
- result, Heap::kTheHoleValueRootIndex, instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ Mov(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
+ Smi::FromInt(instr->hydrogen()->slot()));
+}
+
+
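// [editor's note] The template above exists only because each generic
// load carries its own hydrogen() accessor type; the emitted register
// setup is identical for all three call sites. Simplified stand-in, with
// hypothetical accessors in place of feedback_vector()/slot():
template <class LInstr>
void SketchEmitVectorLoadIC(LInstr* instr, int* vector_out, int* slot_out) {
  // LLoadGlobalGeneric, LLoadNamedGeneric and LLoadKeyedGeneric each
  // instantiate this once, replacing three copies of the same block.
  *vector_out = instr->feedback_vector_id();
  *slot_out = instr->slot_id();
}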
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).Is(x0));
- __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
+ __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(x0));
- __ Mov(LoadIC::SlotRegister(),
- Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3482,7 +3494,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "negative value");
}
break;
case FLOAT32_ELEMENTS:
@@ -3579,7 +3591,7 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
__ Ldr(scratch, mem_op);
__ Cmn(scratch, 1);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "hole");
}
}
@@ -3617,10 +3629,9 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr->environment());
+ DeoptimizeIfNotSmi(result, instr, "not a Smi");
} else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
}
@@ -3628,19 +3639,13 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(x0));
- __ Mov(LoadIC::SlotRegister(),
- Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3690,19 +3695,13 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
// LoadIC expects name and receiver in registers.
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(x0));
- __ Mov(LoadIC::SlotRegister(),
- Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@@ -3734,7 +3733,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
}
@@ -3762,9 +3761,7 @@ void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
Label runtime_allocation;
// Deoptimize if the input is not a HeapNumber.
- __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotHeapNumber(input, instr);
// If the argument is positive, we can return it as-is, without any need to
// allocate a new HeapNumber for the result. We have to do this in integer
@@ -3888,7 +3885,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr->environment());
+ DeoptimizeIfMinusZero(input, instr, "minus zero");
}
__ Fcvtms(result, input);
@@ -3898,7 +3895,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
}
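
Two failure modes share the single DeoptimizeIf(ne) above: a result that does not equal the sign extension of its own low word did not fit in 32 bits, and Fccmp(input, input) yields ne exactly when the input is NaN. As plain C++, a sketch:

    #include <cmath>
    #include <cstdint>

    bool FloorNeedsDeopt(double input, int64_t result) {
      bool fits_int32 = result == static_cast<int32_t>(result);  // Cmp result, SXTW
      bool is_nan = std::isnan(input);                           // Fccmp input, input
      return !fits_int32 || is_nan;                              // DeoptimizeIf(ne, ...)
    }
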
@@ -3924,13 +3921,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
return;
}
@@ -3953,14 +3950,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr->environment());
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr->environment());
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -4003,14 +4000,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr->environment());
+ DeoptimizeIfZero(divisor, instr, "division by zero");
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "overflow");
}
// Check for (0 / -x) that will produce negative zero.
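
The overflow test above is a conditional-compare idiom: Cmp(dividend, 1) sets V only when dividend - 1 overflows, which happens for dividend == kMinInt alone, and the Ccmp then compares divisor against -1 only in that case. Equivalent C++, as a sketch:

    #include <cstdint>
    #include <limits>

    bool SdivOverflows(int32_t dividend, int32_t divisor) {
      bool v = dividend == std::numeric_limits<int32_t>::min();  // Cmp dividend, #1
      return v && divisor == -1;  // Ccmp divisor, #-1, NoFlag, vs; DeoptimizeIf(eq)
    }
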
@@ -4020,7 +4017,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
Label done;
@@ -4081,11 +4078,14 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+ Register integer_exponent = MathPowIntegerDescriptor::exponent();
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(d1));
DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(x11));
- DCHECK(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
+ ToRegister(instr->right()).is(tagged_exponent));
+ DCHECK(!exponent_type.IsInteger32() ||
+ ToRegister(instr->right()).is(integer_exponent));
DCHECK(ToDoubleRegister(instr->left()).is(d0));
DCHECK(ToDoubleRegister(instr->result()).is(d0));
@@ -4094,18 +4094,15 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(x11, &no_deopt);
- __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
__ Bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
// Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
// supports large integer exponents.
- Register exponent = ToRegister(instr->right());
- __ Sxtw(exponent, exponent);
+ __ Sxtw(integer_exponent, integer_exponent);
MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
@@ -4179,18 +4176,18 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr, "overflow");
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr->environment());
+ DeoptimizeIfNegative(result, instr, "minus zero");
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "NaN");
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@@ -4220,7 +4217,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
if (instr->hydrogen()->representation().IsInteger32()) {
Register result = ToRegister32(instr->result());
Register left = ToRegister32(instr->left());
- Operand right = ToOperand32I(instr->right());
+ Operand right = ToOperand32(instr->right());
__ Cmp(left, right);
__ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
@@ -4268,7 +4265,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ B(&done);
}
@@ -4287,7 +4284,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr->environment());
+ Deoptimize(instr, "division by zero");
return;
}
@@ -4301,7 +4298,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr->environment());
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@@ -4316,12 +4313,12 @@ void LCodeGen::DoModI(LModI* instr) {
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr->environment());
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr->environment());
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
}
__ Bind(&done);
}
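
ARM64 has no integer remainder instruction, so the modulo above is rebuilt from the quotient. A standalone sketch (the generated code deoptimizes on a zero divisor before this point, so the division is safe):

    #include <cstdint>

    int32_t ModI(int32_t dividend, int32_t divisor) {
      int32_t quotient = dividend / divisor;  // Sdiv
      return dividend - quotient * divisor;   // Msub result, quotient, divisor, dividend
    }
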
@@ -4344,10 +4341,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr->environment());
+ DeoptimizeIfZero(left, instr, "minus zero");
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr->environment());
+ DeoptimizeIfNegative(left, instr, "minus zero");
}
}
@@ -4357,7 +4354,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, left);
}
@@ -4373,7 +4370,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, left);
}
@@ -4384,7 +4381,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// can be done efficiently with shifted operands.
int32_t right_abs = Abs(right);
- if (IsPowerOf2(right_abs)) {
+ if (base::bits::IsPowerOfTwo32(right_abs)) {
int right_log2 = WhichPowerOf2(right_abs);
if (can_overflow) {
@@ -4392,7 +4389,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr, "overflow");
}
if (right >= 0) {
@@ -4402,7 +4399,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
@@ -4417,10 +4414,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
DCHECK(!can_overflow);
if (right >= 0) {
- if (IsPowerOf2(right - 1)) {
+ if (base::bits::IsPowerOfTwo32(right - 1)) {
// result = left + left << log2(right - 1)
__ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
- } else if (IsPowerOf2(right + 1)) {
+ } else if (base::bits::IsPowerOfTwo32(right + 1)) {
// result = -left + left << log2(right + 1)
__ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
__ Neg(result, result);
@@ -4428,10 +4425,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
UNREACHABLE();
}
} else {
- if (IsPowerOf2(-right + 1)) {
+ if (base::bits::IsPowerOfTwo32(-right + 1)) {
// result = left - left << log2(-right + 1)
__ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
- } else if (IsPowerOf2(-right - 1)) {
+ } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
// result = -left - left << log2(-right - 1)
__ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
__ Neg(result, result);
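
Each branch above reduces a multiply by a constant near a power of two to a shift plus one add, subtract, or negate. A compact sketch of the identities, with the overflow checks omitted:

    #include <cstdint>

    int32_t MulNearPowerOfTwo(int32_t left, int32_t right, int k) {
      if (right == (1 << k)) return left << k;                // Lsl
      if (right == (1 << k) + 1) return left + (left << k);   // Add .. LSL
      if (right == (1 << k) - 1) return (left << k) - left;   // Sub .. LSL, then Neg
      if (right == -(1 << k) + 1) return left - (left << k);  // Sub .. LSL
      return left * right;  // no reduction applies
    }
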
@@ -4460,13 +4457,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr, "minus zero");
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "overflow");
} else {
__ Mul(result, left, right);
}
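
The can_overflow path above compares the full 64-bit product with the sign extension of its own low word; the two differ exactly when the product does not fit in 32 bits. A sketch:

    #include <cstdint>

    bool MulOverflows32(int32_t left, int32_t right) {
      int64_t product = static_cast<int64_t>(left) * right;  // Smull
      return product != static_cast<int32_t>(product);       // Cmp x, Operand(w, SXTW)
    }
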
@@ -4490,7 +4487,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr, "minus zero");
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4498,7 +4495,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "overflow");
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
@@ -4665,26 +4662,23 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Label convert_undefined;
// Heap number map check.
- __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
if (can_convert_undefined_to_nan) {
- __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
- &convert_undefined);
+ __ JumpIfNotHeapNumber(input, &convert_undefined);
} else {
- DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotHeapNumber(input, instr);
}
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr->environment());
+ DeoptimizeIfMinusZero(result, instr, "minus zero");
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+ "not a heap number/undefined");
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4877,7 +4871,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr->environment());
+ DeoptimizeIfNegative(input.W(), instr, "overflow");
}
__ SmiTag(output, input);
}
@@ -4889,7 +4883,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr->environment());
+ DeoptimizeIfNotSmi(input, instr, "not a Smi");
}
__ Bind(&untag);
@@ -4910,13 +4904,12 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SAR: __ Asr(result, left, right); break;
case Token::SHL: __ Lsl(result, left, right); break;
case Token::SHR:
+ __ Lsr(result, left, right);
if (instr->can_deopt()) {
- Label right_not_zero;
- __ Cbnz(right, &right_not_zero);
- DeoptimizeIfNegative(left, instr->environment());
- __ Bind(&right_not_zero);
+ // If `left >>> right` >= 0x80000000, the result is not representable
+ // in a signed 32-bit smi.
+ DeoptimizeIfNegative(result, instr, "negative value");
}
- __ Lsr(result, left, right);
break;
default: UNREACHABLE();
}
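
The SHR rewrite drops the old Cbnz guard around a zero shift count: for counts 1..31 a logical shift always clears bit 31, so the result can only look negative when the count was 0 and the input already had its top bit set. One sign test on the result is therefore equivalent; a sketch:

    #include <cstdint>

    bool ShrNeedsDeopt(uint32_t left, uint32_t count) {
      uint32_t result = left >> (count & 31);   // Lsr
      return static_cast<int32_t>(result) < 0;  // DeoptimizeIfNegative(result)
    }
    // Agrees with the old test (count % 32 == 0 && (int32_t)left < 0) for all inputs.
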
@@ -4925,7 +4918,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr->environment());
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
@@ -4946,40 +4939,40 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
- // Only ROR by register needs a temp.
- DCHECK(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
- (instr->temp() == NULL));
-
if (right_op->IsRegister()) {
Register right = ToRegister(instr->right());
+
+ // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
+ // Since we're handling smis in X registers, we have to extract these bits
+ // explicitly.
+ __ Ubfx(result, right, kSmiShift, 5);
+
switch (instr->op()) {
case Token::ROR: {
- Register temp = ToRegister(instr->temp());
- __ Ubfx(temp, right, kSmiShift, 5);
- __ SmiUntag(result, left);
- __ Ror(result.W(), result.W(), temp.W());
+ // This is the only case that needs a scratch register. To keep things
+ // simple for the other cases, borrow a MacroAssembler scratch register.
+ UseScratchRegisterScope temps(masm());
+ Register temp = temps.AcquireW();
+ __ SmiUntag(temp, left);
+ __ Ror(result.W(), temp.W(), result.W());
__ SmiTag(result);
break;
}
case Token::SAR:
- __ Ubfx(result, right, kSmiShift, 5);
__ Asr(result, left, result);
__ Bic(result, result, kSmiShiftMask);
break;
case Token::SHL:
- __ Ubfx(result, right, kSmiShift, 5);
__ Lsl(result, left, result);
break;
case Token::SHR:
- if (instr->can_deopt()) {
- Label right_not_zero;
- __ Cbnz(right, &right_not_zero);
- DeoptimizeIfNegative(left, instr->environment());
- __ Bind(&right_not_zero);
- }
- __ Ubfx(result, right, kSmiShift, 5);
__ Lsr(result, left, result);
__ Bic(result, result, kSmiShiftMask);
+ if (instr->can_deopt()) {
+ // If `left >>> right` >= 0x80000000, the result is not representable
+ // in a signed 32-bit smi.
+ DeoptimizeIfNegative(result, instr, "negative value");
+ }
break;
default: UNREACHABLE();
}
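
Hoisting the Ubfx works because arm64 keeps smi payloads in the upper word (kSmiShift == 32) and JavaScript shifts consume only the low five bits of the count, so bits [32, 37) of the tagged right operand are the entire shift amount. A sketch:

    #include <cstdint>

    uint32_t JsShiftCountFromSmi(uint64_t tagged_right) {
      return (tagged_right >> 32) & 0x1f;  // Ubfx(result, right, kSmiShift, 5)
    }
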
@@ -4988,7 +4981,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr->environment());
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left);
} else {
@@ -5117,8 +5110,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
@@ -5156,8 +5148,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
Register payload = ToRegister(instr->temp2());
__ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(
- payload, Heap::kTheHoleValueRootIndex, instr->environment());
+ DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
}
// Store the value.
@@ -5334,13 +5325,12 @@ void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = instr->strict_mode() == STRICT
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5441,10 +5431,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ Mov(StoreIC::NameRegister(), Operand(instr->name()));
+ __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5557,7 +5547,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -5571,11 +5561,11 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
Register result = ToRegister32(instr->result());
Register left = ToRegister32(instr->left());
- Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+ Operand right = ToShiftedRightOperand32(instr->right(), instr);
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
@@ -5589,7 +5579,7 @@ void LCodeGen::DoSubS(LSubS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
@@ -5606,15 +5596,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
Label done;
- // Load heap object map.
- __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
-
if (instr->truncating()) {
Register output = ToRegister(instr->result());
Label check_bools;
// If it's not a heap number, jump to undefined check.
- __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
+ __ JumpIfNotHeapNumber(input, &check_bools);
// A heap number: load value and convert to int32 using truncating function.
__ TruncateHeapNumberToI(output, input);
@@ -5632,28 +5619,25 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// Output contains zero, undefined is converted to zero for truncating
// conversions.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+ "not a heap number/undefined/true/false");
} else {
Register output = ToRegister32(instr->result());
-
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
- // Deoptimized if it's not a heap number.
- DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotHeapNumber(input, instr);
// A heap number: load value and convert to int32 using non-truncating
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr->environment());
+ DeoptimizeIfNegative(scratch1, instr, "minus zero");
}
}
__ Bind(&done);
@@ -5794,7 +5778,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "memento found");
__ Bind(&no_memento_found);
}
@@ -5824,13 +5808,22 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Factory* factory = isolate()->factory();
if (String::Equals(type_name, factory->number_string())) {
- DCHECK(instr->temp1() != NULL);
- Register map = ToRegister(instr->temp1());
-
__ JumpIfSmi(value, true_label);
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, eq);
+
+ int true_block = instr->TrueDestination(chunk_);
+ int false_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
+
+ if (true_block == false_block) {
+ EmitGoto(true_block);
+ } else if (true_block == next_block) {
+ __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
+ if (false_block != next_block) {
+ __ B(chunk_->GetAssemblyLabel(false_block));
+ }
+ }
} else if (String::Equals(type_name, factory->string_string())) {
DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
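
Open-coding the heap-number branch lets the typeof test exploit block layout: no branch at all when both destinations coincide, one inverted branch when the true block falls through, and an unconditional jump only when the false block cannot. A sketch of the selection logic, with the emitted branches shown as strings:

    #include <cstdio>

    void EmitHeapNumberBranch(int true_block, int false_block, int next_block) {
      if (true_block == false_block) {
        std::printf("B L%d\n", true_block);  // EmitGoto (falls through if next)
      } else if (true_block == next_block) {
        std::printf("JumpIfNotHeapNumber -> L%d\n", false_block);  // fall through
      } else {
        std::printf("JumpIfHeapNumber -> L%d\n", true_block);
        if (false_block != next_block) std::printf("B L%d\n", false_block);
      }
    }
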
@@ -5910,7 +5903,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong map");
}
@@ -5944,10 +5937,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr->environment());
+ DeoptimizeIfSmi(receiver, instr, "Smi");
__ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, &copy_receiver);
- Deoptimize(instr->environment());
+ Deoptimize(instr, "not a JavaScript object");
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
@@ -5977,7 +5970,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@@ -5990,10 +5983,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/arm64/lithium-codegen-arm64.h
index bb06f483af..a73bb8caaf 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.h
@@ -27,7 +27,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -83,31 +83,17 @@ class LCodeGen: public LCodeGenBase {
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
// Support for converting LOperands to assembler types.
- // LOperand must be a register.
Register ToRegister(LOperand* op) const;
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
- Operand ToOperand32I(LOperand* op);
- Operand ToOperand32U(LOperand* op);
+ Operand ToOperand32(LOperand* op);
enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
MemOperand ToMemOperand(LOperand* op,
StackMode stack_mode = kCanUseStackPointer) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
- template<class LI>
- Operand ToShiftedRightOperand32I(LOperand* right,
- LI* shift_info) {
- return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32);
- }
- template<class LI>
- Operand ToShiftedRightOperand32U(LOperand* right,
- LI* shift_info) {
- return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32);
- }
- template<class LI>
- Operand ToShiftedRightOperand32(LOperand* right,
- LI* shift_info,
- IntegerSignedness signedness);
+ template <class LI>
+ Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);
int JSShiftAmountFromLConstant(LOperand* constant) {
return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
@@ -158,8 +144,6 @@ class LCodeGen: public LCodeGenBase {
Register object,
Register index);
- Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
-
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void DoGap(LGap* instr);
@@ -212,6 +196,9 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -226,27 +213,31 @@ class LCodeGen: public LCodeGenBase {
Register temp,
LOperand* index,
String::Encoding encoding);
- void DeoptimizeBranch(
- LEnvironment* environment,
- BranchType branch_type, Register reg = NoReg, int bit = -1,
- Deoptimizer::BailoutType* override_bailout_type = NULL);
- void Deoptimize(LEnvironment* environment,
+ void DeoptimizeBranch(LInstruction* instr, const char* detail,
+ BranchType branch_type, Register reg = NoReg,
+ int bit = -1,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void Deoptimize(LInstruction* instr, const char* detail,
Deoptimizer::BailoutType* override_bailout_type = NULL);
- void DeoptimizeIf(Condition cond, LEnvironment* environment);
- void DeoptimizeIfZero(Register rt, LEnvironment* environment);
- void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
- void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
- void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
- void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
- void DeoptimizeIfRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment);
- void DeoptimizeIfNotRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment);
- void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
- void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
- void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
+ void DeoptimizeIf(Condition cond, LInstruction* instr, const char* detail);
+ void DeoptimizeIfZero(Register rt, LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
+ const char* detail);
+ void DeoptimizeIfNegative(Register rt, LInstruction* instr,
+ const char* detail);
+ void DeoptimizeIfSmi(Register rt, LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotSmi(Register rt, LInstruction* instr, const char* detail);
+ void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
+ void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
+ const char* detail);
+ void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
+ const char* detail);
+ void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
+ const char* detail);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
@@ -286,10 +277,10 @@ class LCodeGen: public LCodeGenBase {
void RestoreCallerDoubles();
// Code generation steps. Returns true if code generation should continue.
- void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
@@ -338,7 +329,7 @@ class LCodeGen: public LCodeGenBase {
Register function_reg = NoReg);
// Support for recording safepoint and position information.
- void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordAndWritePosition(int position) OVERRIDE;
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@@ -351,10 +342,10 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
- void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index f7c724842a..23767e48b3 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -13,6 +13,7 @@
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/instrument-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
+#include "src/base/bits.h"
namespace v8 {
@@ -1520,7 +1521,7 @@ void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
- DCHECK(IsPowerOf2(unit_size));
+ DCHECK(base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
@@ -1538,7 +1539,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
- DCHECK(unit_size == 0 || IsPowerOf2(unit_size));
+ DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
@@ -1578,7 +1579,7 @@ void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
- DCHECK(IsPowerOf2(unit_size));
+ DCHECK(base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
@@ -1599,7 +1600,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
- DCHECK(unit_size == 0 || IsPowerOf2(unit_size));
+ DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 658497b9f7..3d6709777f 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -6,12 +6,14 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -1656,12 +1658,6 @@ void MacroAssembler::ThrowUncatchable(Register value,
}
-void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
- DCHECK(smi.Is64Bits());
- Abs(smi, smi, slow);
-}
-
-
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2059,7 +2055,7 @@ void MacroAssembler::CallCFunction(Register function,
int sp_alignment = ActivationFrameAlignment();
// The ABI mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
- DCHECK(IsPowerOf2(sp_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
// The current stack pointer is a callee saved register, and is preserved
// across the call.
@@ -2251,58 +2247,38 @@ int MacroAssembler::CallSize(Handle<Code> code,
}
+void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
+ SmiCheckType smi_check_type) {
+ Label on_not_heap_number;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(object, &on_not_heap_number);
+ }
-
-void MacroAssembler::JumpForHeapNumber(Register object,
- Register heap_number_map,
- Label* on_heap_number,
- Label* on_not_heap_number) {
- DCHECK(on_heap_number || on_not_heap_number);
AssertNotSmi(object);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
-
- // Load the HeapNumber map if it is not passed.
- if (heap_number_map.Is(NoReg)) {
- heap_number_map = temps.AcquireX();
- LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- } else {
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- }
-
- DCHECK(!AreAliased(temp, heap_number_map));
-
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- Cmp(temp, heap_number_map);
+ JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
- if (on_heap_number) {
- B(eq, on_heap_number);
- }
- if (on_not_heap_number) {
- B(ne, on_not_heap_number);
- }
-}
-
-
-void MacroAssembler::JumpIfHeapNumber(Register object,
- Label* on_heap_number,
- Register heap_number_map) {
- JumpForHeapNumber(object,
- heap_number_map,
- on_heap_number,
- NULL);
+ Bind(&on_not_heap_number);
}
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Label* on_not_heap_number,
- Register heap_number_map) {
- JumpForHeapNumber(object,
- heap_number_map,
- NULL,
- on_not_heap_number);
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(object, on_not_heap_number);
+ }
+
+ AssertNotSmi(object);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
}
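
The rewritten pair folds the map load and root comparison together and makes the smi handling explicit through SmiCheckType. A behavioural sketch, assuming the arm64 smi tag (low bit clear):

    #include <cstdint>

    enum SmiCheckType { DONT_DO_SMI_CHECK, DO_SMI_CHECK };

    // Returns true when control would reach on_not_heap_number.
    bool NotHeapNumberTaken(uint64_t value, bool map_is_heap_number,
                            SmiCheckType smi_check) {
      if (smi_check == DO_SMI_CHECK && (value & 1) == 0) return true;  // JumpIfSmi
      // With DONT_DO_SMI_CHECK the caller guarantees a non-smi (AssertNotSmi).
      return !map_is_heap_number;  // JumpIfNotRoot on the object's map
    }
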
@@ -2336,8 +2312,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
Label load_result_from_cache;
JumpIfSmi(object, &is_smi);
- CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
- DONT_DO_SMI_CHECK);
+ JumpIfNotHeapNumber(object, not_found);
STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
@@ -2699,14 +2674,9 @@ void MacroAssembler::FillFields(Register dst,
}
-void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure,
- SmiCheckType smi_check) {
-
+void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
+ Label* failure, SmiCheckType smi_check) {
if (smi_check == DO_SMI_CHECK) {
JumpIfEitherSmi(first, second, failure);
} else if (emit_debug_code()) {
@@ -2721,73 +2691,64 @@ void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
Bind(&not_smi);
}
- // Test that both first and second are sequential ASCII strings.
+ // Test that both first and second are sequential one-byte strings.
Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
+ JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
DCHECK(!AreAliased(scratch1, second));
DCHECK(!AreAliased(scratch1, scratch2));
- static const int kFlatAsciiStringMask =
+ static const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- And(scratch1, first, kFlatAsciiStringMask);
- And(scratch2, second, kFlatAsciiStringMask);
- Cmp(scratch1, kFlatAsciiStringTag);
- Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
+ And(scratch1, first, kFlatOneByteStringMask);
+ And(scratch2, second, kFlatOneByteStringMask);
+ Cmp(scratch1, kFlatOneByteStringTag);
+ Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure);
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, kFlatAsciiStringMask);
- Cmp(scratch, kFlatAsciiStringTag);
+ And(scratch, type, kFlatOneByteStringMask);
+ Cmp(scratch, kFlatOneByteStringTag);
B(ne, failure);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
DCHECK(!AreAliased(first, second, scratch1, scratch2));
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch1, first, kFlatAsciiStringMask);
- And(scratch2, second, kFlatAsciiStringMask);
- Cmp(scratch1, kFlatAsciiStringTag);
- Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ And(scratch1, first, kFlatOneByteStringMask);
+ And(scratch2, second, kFlatOneByteStringMask);
+ Cmp(scratch1, kFlatOneByteStringTag);
+ Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure);
}
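
Both string checks above fuse two equality tests into one branch: the Ccmp performs the second compare only when the first left eq set, and otherwise forces the flags to NoFlag, so ne means "at least one operand failed". A sketch, with the mask and tag values assumed from contemporary V8 headers:

    #include <cstdint>

    bool EitherNotSeqOneByte(uint32_t type1, uint32_t type2) {
      const uint32_t kMask = 0x87;  // assumed: kIsNotStringMask |
                                    // kStringEncodingMask | kStringRepresentationMask
      const uint32_t kTag = 0x04;   // ONE_BYTE_STRING_TYPE (seq, one-byte, string)
      bool first_ok = (type1 & kMask) == kTag;             // And + Cmp
      bool both_ok = first_ok && (type2 & kMask) == kTag;  // Ccmp ..., NoFlag, eq
      return !both_ok;                                     // B(ne, failure)
    }
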
-void MacroAssembler::JumpIfNotUniqueName(Register type,
- Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
+ Label* not_unique_name) {
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
// continue
@@ -3013,12 +2974,22 @@ void MacroAssembler::TryConvertDoubleToInt64(Register result,
void MacroAssembler::TruncateDoubleToI(Register result,
DoubleRegister double_input) {
Label done;
- DCHECK(jssp.Is(StackPointer()));
// Try to convert the double to an int64. If successful, the bottom 32 bits
// contain our truncated int32 result.
TryConvertDoubleToInt64(result, double_input, &done);
+ const Register old_stack_pointer = StackPointer();
+ if (csp.Is(old_stack_pointer)) {
+ // This currently only happens during the compiler unit tests. If it arises
+ // during regular code generation, the DoubleToI stub should be updated to
+ // cope with csp and take an extra parameter indicating which stack pointer
+ // it should use.
+ Push(jssp, xzr); // Push xzr to maintain csp's required 16-byte alignment.
+ Mov(jssp, csp);
+ SetStackPointer(jssp);
+ }
+
// If we fell through then inline version didn't succeed - call stub instead.
Push(lr, double_input);
@@ -3030,8 +3001,15 @@ void MacroAssembler::TruncateDoubleToI(Register result,
true); // skip_fastpath
CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
- Drop(1, kDoubleSize); // Drop the double input on the stack.
- Pop(lr);
+ DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
+ Pop(xzr, lr); // xzr to drop the double input on the stack.
+
+ if (csp.Is(old_stack_pointer)) {
+ Mov(csp, jssp);
+ SetStackPointer(csp);
+ AssertStackConsistency();
+ Pop(xzr, jssp);
+ }
Bind(&done);
}
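
The pairing of every push and pop here is deliberate: AAPCS64 requires csp to stay 16-byte aligned whenever it is the active stack pointer, so single 8-byte values travel with xzr as padding, and popping into xzr doubles as a cheap way to discard a slot. A small sketch of the invariant:

    #include <cstdint>

    // csp stays aligned only if every push moves it by a multiple of 16 bytes.
    bool CspStaysAligned(uint64_t csp, int slots_pushed) {
      return ((csp - 8u * slots_pushed) & 15) == 0;
    }
    // For an aligned csp: CspStaysAligned(csp, 1) is false, which is why the
    // code uses Push(jssp, xzr); CspStaysAligned(csp, 2) is true.
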
@@ -3556,12 +3534,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
@@ -3570,7 +3546,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
Bic(scratch1, scratch1, kObjectAlignmentMask);
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(scratch1,
result,
scratch2,
@@ -3579,11 +3555,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT);
// Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3603,11 +3576,10 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
@@ -3615,11 +3587,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3640,20 +3609,17 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3754,9 +3720,16 @@ void MacroAssembler::CompareInstanceType(Register map,
}
-void MacroAssembler::CompareMap(Register obj,
- Register scratch,
- Handle<Map> map) {
+void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register obj_map = temps.AcquireX();
+ Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareRoot(obj_map, index);
+}
+
+
+void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
+ Handle<Map> map) {
Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
CompareMap(scratch, map);
}
@@ -3777,7 +3750,7 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail);
}
- CompareMap(obj, scratch, map);
+ CompareObjectMap(obj, scratch, map);
B(ne, fail);
}
@@ -4017,8 +3990,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
JumpIfSmi(value_reg, &store_num);
// Ensure that the object is a heap number.
- CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
- fail, DONT_DO_SMI_CHECK);
+ JumpIfNotHeapNumber(value_reg, fail);
Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
@@ -4284,8 +4256,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Bind(&store_buffer_overflow);
Push(lr);
- StoreBufferOverflowStub store_buffer_overflow_stub =
- StoreBufferOverflowStub(isolate(), fp_mode);
+ StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
CallStub(&store_buffer_overflow_stub);
Pop(lr);
@@ -4424,8 +4395,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
- Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+ Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
+ Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
}
}
@@ -4444,7 +4415,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- CompareMap(map, temp, isolate()->factory()->meta_map());
+ CompareObjectMap(map, temp, isolate()->factory()->meta_map());
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
@@ -4496,8 +4467,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Mov(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
- Mov(map, Operand(BitCast<int64_t>(kZapValue + 16)));
+ Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
+ Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
}
@@ -4569,8 +4540,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
- Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
+ Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
}
@@ -4775,8 +4746,8 @@ void MacroAssembler::EnsureNotWhite(
Mov(length_scratch, ExternalString::kSize);
TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
@@ -5315,13 +5286,15 @@ void MacroAssembler::TruncatingDiv(Register result,
int32_t divisor) {
DCHECK(!AreAliased(result, dividend));
DCHECK(result.Is32Bits() && dividend.Is32Bits());
- MultiplierAndShift ms(divisor);
- Mov(result, ms.multiplier());
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ Mov(result, mag.multiplier);
Smull(result.X(), dividend, result);
Asr(result.X(), result.X(), 32);
- if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
- if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
- if (ms.shift() > 0) Asr(result, result, ms.shift());
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) Add(result, result, dividend);
+ if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
+ if (mag.shift > 0) Asr(result, result, mag.shift);
Add(result, result, Operand(dividend, LSR, 31));
}
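
TruncatingDiv now takes its magic constants from base::SignedDivisionByConstant, but the emitted shape is unchanged: multiply by a precomputed multiplier, keep the high word, correct for the multiplier's sign, shift, then add the dividend's sign bit to round toward zero. A standalone sketch for divisor == 7 (magic pair taken from Hacker's Delight; assumes arithmetic right shift on signed types, as on GCC/Clang):

    #include <cstdint>

    int32_t TruncatingDivBy7(int32_t dividend) {
      const uint32_t kMultiplier = 0x92492493;  // magic for 7; negative as int32
      const int kShift = 2;
      int64_t product =
          static_cast<int64_t>(dividend) * static_cast<int32_t>(kMultiplier);
      int32_t result = static_cast<int32_t>(product >> 32);  // Smull + Asr #32
      result += dividend;  // divisor > 0 and multiplier sign bit set => Add
      result >>= kShift;   // Asr
      result += static_cast<uint32_t>(dividend) >> 31;  // Add ... LSR #31
      return result;       // dividend / 7, rounded toward zero
    }
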
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index aa83c7040f..7a106a18b9 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -7,9 +7,11 @@
#include <vector>
+#include "src/bailout-reason.h"
#include "src/globals.h"
#include "src/arm64/assembler-arm64-inl.h"
+#include "src/base/bits.h"
// Simulator specific helpers.
#if USE_SIMULATOR
@@ -808,7 +810,7 @@ class MacroAssembler : public Assembler {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
- DCHECK(IsPowerOf2(sp_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
}
@@ -909,11 +911,6 @@ class MacroAssembler : public Assembler {
inline void SmiTagAndPush(Register src);
inline void SmiTagAndPush(Register src1, Register src2);
- // Compute the absolute value of 'smi' and leave the result in 'smi'
- // register. If 'smi' is the most negative SMI, the absolute value cannot
- // be represented as a SMI and a jump to 'slow' is done.
- void SmiAbs(const Register& smi, Label* slow);
-
inline void JumpIfSmi(Register value,
Label* smi_label,
Label* not_smi_label = NULL);
@@ -950,16 +947,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
- void JumpForHeapNumber(Register object,
- Register heap_number_map,
- Label* on_heap_number,
- Label* on_not_heap_number = NULL);
- void JumpIfHeapNumber(Register object,
- Label* on_heap_number,
- Register heap_number_map = NoReg);
- void JumpIfNotHeapNumber(Register object,
- Label* on_not_heap_number,
- Register heap_number_map = NoReg);
+ void JumpIfHeapNumber(Register object, Label* on_heap_number,
+ SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
+ void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
+ SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
// Sets the vs flag if the input is -0.0.
void TestForMinusZero(DoubleRegister input);
@@ -1055,41 +1046,30 @@ class MacroAssembler : public Assembler {
// ---- String Utilities ----
- // Jump to label if either object is not a sequential ASCII string.
+ // Jump to label if either object is not a sequential one-byte string.
// Optionally perform a smi check on the objects first.
- void JumpIfEitherIsNotSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure,
- SmiCheckType smi_check = DO_SMI_CHECK);
+ void JumpIfEitherIsNotSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
+ Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);
- // Check if instance type is sequential ASCII string and jump to label if
+ // Check if instance type is sequential one-byte string and jump to label if
// it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
+ void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+ Label* failure);
- // Checks if both instance types are sequential ASCII strings and jumps to
+ // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
- void JumpIfEitherInstanceTypeIsNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
- // Checks if both instance types are sequential ASCII strings and jumps to
+ // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
- void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+ void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
// ---- Calling / Jumping helpers ----
@@ -1369,32 +1349,25 @@ class MacroAssembler : public Assembler {
Register scratch2,
Register scratch3,
Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
@@ -1470,9 +1443,11 @@ class MacroAssembler : public Assembler {
// Compare an object's map with the specified map. Condition flags are set
// with result of map compare.
- void CompareMap(Register obj,
- Register scratch,
- Handle<Map> map);
+ void CompareObjectMap(Register obj, Heap::RootListIndex index);
+
+ // Compare an object's map with the specified map. Condition flags are set
+ // with result of map compare.
+ void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
index 432d9568bd..e9a485d090 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
@@ -260,7 +260,7 @@ void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
}
for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
DCHECK(str[i] <= String::kMaxOneByteCharCode);
} else {
@@ -307,7 +307,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Cmn(capture_length, current_input_offset());
BranchOrBacktrack(gt, on_no_match);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
Label success;
Label fail;
Label loop_check;
@@ -447,7 +447,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
Label loop;
__ Bind(&loop);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
__ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
} else {
@@ -530,7 +530,7 @@ void RegExpMacroAssemblerARM64::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ Mov(x11, Operand(table));
- if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) {
+ if ((mode_ != LATIN1) || (kTableMask != String::kMaxOneByteCharCode)) {
__ And(w10, current_character(), kTableMask);
__ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
} else {
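
CheckBitInTable implements a character-class test as a byte-table lookup: the current character is masked with kTableMask to form an index, and a non-zero table entry means the character belongs to the class. A minimal sketch of the same idea (plain C++; the names and table layout are illustrative, not V8's ByteArray):

    #include <cstdint>
    #include <cstdio>

    const unsigned kTableMask = 127;  // assumption: a 128-entry table

    bool CharInClass(uint16_t c, const uint8_t table[kTableMask + 1]) {
      // Mask first so characters above the table range wrap into it, like
      // the And(w10, current_character(), kTableMask) in the code above.
      return table[c & kTableMask] != 0;
    }

    int main() {
      uint8_t digits[kTableMask + 1] = {};
      for (char c = '0'; c <= '9'; c++) digits[static_cast<unsigned char>(c)] = 1;
      std::printf("%d %d\n", CharInClass('7', digits), CharInClass('x', digits));
      return 0;  // prints "1 0"
    }
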
@@ -548,7 +548,7 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
switch (type) {
case 's':
// Match space-characters
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
// Check for ' ' or 0x00a0.
@@ -611,8 +611,8 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
}
ExternalReference map = ExternalReference::re_word_character_map();
@@ -623,8 +623,8 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
}
case 'W': {
Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ Cmp(current_character(), 'z');
__ B(hi, &done);
}
@@ -1315,7 +1315,7 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInput));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
@@ -1346,8 +1346,8 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+ // If we changed between a Latin1 and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
@@ -1675,7 +1675,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
offset = w10;
}
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
if (characters == 4) {
__ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
} else if (characters == 2) {
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
index a27cff0566..632c513643 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
@@ -265,7 +265,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
MacroAssembler* masm_;
- // Which mode to generate code for (ASCII or UC16).
+ // Which mode to generate code for (LATIN1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index cde93db98e..129252b49b 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -30,30 +30,28 @@ namespace internal {
// Helpers for colors.
-// Depending on your terminal configuration, the colour names may not match the
-// observed colours.
-#define COLOUR(colour_code) "\033[" colour_code "m"
-#define BOLD(colour_code) "1;" colour_code
-#define NORMAL ""
-#define GREY "30"
-#define GREEN "32"
-#define ORANGE "33"
-#define BLUE "34"
-#define PURPLE "35"
-#define INDIGO "36"
-#define WHITE "37"
+#define COLOUR(colour_code) "\033[0;" colour_code "m"
+#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
+#define NORMAL ""
+#define GREY "30"
+#define RED "31"
+#define GREEN "32"
+#define YELLOW "33"
+#define BLUE "34"
+#define MAGENTA "35"
+#define CYAN "36"
+#define WHITE "37"
typedef char const * const TEXT_COLOUR;
TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
-TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR(BOLD(GREY)) : "";
-TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(BOLD(WHITE)) : "";
-TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR(BOLD(BLUE)) : "";
-TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(BOLD(INDIGO)) : "";
-TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
-TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(BOLD(PURPLE)) : "";
-TEXT_COLOUR clr_memory_value = FLAG_log_colour ? COLOUR(BOLD(GREEN)) : "";
-TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR(GREEN) : "";
-TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
-TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(ORANGE) : "";
+TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : "";
+TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : "";
+TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : "";
+TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : "";
+TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : "";
+TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : "";
+TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : "";
+TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
+TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
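
The rewritten colour macros emit standard ANSI SGR escape sequences: COLOUR(CYAN) expands to "\033[0;36m" (normal cyan), COLOUR_BOLD(CYAN) to "\033[1;36m", and COLOUR(NORMAL) to the reset sequence "\033[0;m". A self-contained sketch of the expansion:

    #include <cstdio>

    #define COLOUR(colour_code) "\033[0;" colour_code "m"
    #define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
    #define NORMAL ""
    #define CYAN "36"

    int main() {
      // String literals concatenate at compile time, so each macro use
      // yields a single escape sequence with no runtime formatting cost.
      std::printf("%sbold cyan%s plain again\n",
                  COLOUR_BOLD(CYAN), COLOUR(NORMAL));
      return 0;
    }
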
@@ -337,7 +335,7 @@ uintptr_t Simulator::PopAddress() {
uintptr_t Simulator::StackLimit() const {
// Leave a safety margin of 1024 bytes to prevent overrunning the stack when
// pushing values.
- return reinterpret_cast<uintptr_t>(stack_limit_) + 1024;
+ return stack_limit_ + 1024;
}
@@ -380,11 +378,11 @@ void Simulator::Init(FILE* stream) {
// Allocate and setup the simulator stack.
stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
- stack_ = new byte[stack_size_];
+ stack_ = reinterpret_cast<uintptr_t>(new byte[stack_size_]);
stack_limit_ = stack_ + stack_protection_size_;
- byte* tos = stack_ + stack_size_ - stack_protection_size_;
- // The stack pointer must be 16 bytes aligned.
- set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
+ uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
+ // The stack pointer must be 16-byte aligned.
+ set_sp(tos & ~0xfUL);
stream_ = stream;
print_disasm_ = new PrintDisassembler(stream_);
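
The alignment step in the hunk above relies on 16 being a power of two: masking with ~0xfUL clears the low four bits, rounding the top-of-stack address down to the previous 16-byte boundary, as AArch64 requires when csp is used as a base register. A quick standalone check (address value arbitrary):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t tos = 0x1234567b;                 // arbitrary, unaligned
      uintptr_t aligned = tos & ~uintptr_t{0xf};  // round down to 16 bytes
      std::printf("%#lx -> %#lx\n",
                  static_cast<unsigned long>(tos),
                  static_cast<unsigned long>(aligned));  // 0x1234567b -> 0x12345670
      return 0;
    }
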
@@ -420,7 +418,7 @@ void Simulator::ResetState() {
Simulator::~Simulator() {
- delete[] stack_;
+ delete[] reinterpret_cast<byte*>(stack_);
if (FLAG_log_instruction_stats) {
delete instrument_;
}
@@ -704,7 +702,7 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::PROFILING_GETTER_CALL: {
// void f(Local<String> property, PropertyCallbackInfo& info,
- // AccessorGetterCallback callback)
+ // AccessorNameGetterCallback callback)
TraceSim("Type: PROFILING_GETTER_CALL\n");
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
@@ -734,15 +732,15 @@ void* Simulator::RedirectExternalReference(void* external_function,
const char* Simulator::xreg_names[] = {
-"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
-"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
-"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
-"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
const char* Simulator::wreg_names[] = {
-"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
-"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
-"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
const char* Simulator::sreg_names[] = {
@@ -765,7 +763,12 @@ const char* Simulator::vreg_names[] = {
const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ STATIC_ASSERT(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1));
DCHECK(code < kNumberOfRegisters);
+ // The modulo operator has no effect here, but it silences a broken GCC
+ // warning about out-of-bounds array accesses.
+ code %= kNumberOfRegisters;
+
// If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1;
@@ -775,7 +778,10 @@ const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ STATIC_ASSERT(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1));
DCHECK(code < kNumberOfRegisters);
+ code %= kNumberOfRegisters;
+
// If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1;
@@ -785,20 +791,23 @@ const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::SRegNameForCode(unsigned code) {
+ STATIC_ASSERT(arraysize(Simulator::sreg_names) == kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters);
- return sreg_names[code];
+ return sreg_names[code % kNumberOfFPRegisters];
}
const char* Simulator::DRegNameForCode(unsigned code) {
+ STATIC_ASSERT(arraysize(Simulator::dreg_names) == kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters);
- return dreg_names[code];
+ return dreg_names[code % kNumberOfFPRegisters];
}
const char* Simulator::VRegNameForCode(unsigned code) {
+ STATIC_ASSERT(arraysize(Simulator::vreg_names) == kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters);
- return vreg_names[code];
+ return vreg_names[code % kNumberOfFPRegisters];
}
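
The pattern repeated in each accessor above pairs a STATIC_ASSERT on the table size with an otherwise redundant modulo: the DCHECK documents the contract, while the modulo gives GCC a bounds proof so its -Warray-bounds false positive goes away at essentially no cost. A minimal illustration (not V8 code; names invented):

    #include <cassert>
    #include <cstdio>

    static const char* const kNames[4] = {"r0", "r1", "r2", "r3"};

    const char* NameForCode(unsigned code) {
      assert(code < 4);
      // The modulo is a no-op for valid codes, but lets the compiler prove
      // the index is in range, silencing out-of-bounds warnings.
      return kNames[code % 4];
    }

    int main() {
      std::printf("%s\n", NameForCode(2));  // prints "r2"
      return 0;
    }
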
@@ -855,6 +864,7 @@ T Simulator::AddWithCarry(bool set_flags,
nzcv().SetZ(Z);
nzcv().SetC(C);
nzcv().SetV(V);
+ LogSystemRegister(NZCV);
}
return result;
}
@@ -978,6 +988,7 @@ void Simulator::FPCompare(double val0, double val1) {
} else {
UNREACHABLE();
}
+ LogSystemRegister(NZCV);
}
@@ -1044,118 +1055,206 @@ void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
}
-void Simulator::PrintSystemRegisters(bool print_all) {
- static bool first_run = true;
+void Simulator::PrintSystemRegisters() {
+ PrintSystemRegister(NZCV);
+ PrintSystemRegister(FPCR);
+}
+
+
+void Simulator::PrintRegisters() {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ PrintRegister(i);
+ }
+}
+
- static SimSystemRegister last_nzcv;
- if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
- fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
- clr_flag_name,
- clr_flag_value,
- nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
- clr_normal);
+void Simulator::PrintFPRegisters() {
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ PrintFPRegister(i);
}
- last_nzcv = nzcv();
+}
+
- static SimSystemRegister last_fpcr;
- if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
- static const char * rmode[] = {
- "0b00 (Round to Nearest)",
- "0b01 (Round towards Plus Infinity)",
- "0b10 (Round towards Minus Infinity)",
- "0b11 (Round towards Zero)"
- };
- DCHECK(fpcr().RMode() < ARRAY_SIZE(rmode));
- fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
- clr_flag_name,
- clr_flag_value,
- fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
- clr_normal);
+void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
+ // Don't print writes into xzr.
+ if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
+ return;
}
- last_fpcr = fpcr();
- first_run = false;
+ // The template is "# x<code>:value".
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s\n",
+ clr_reg_name, XRegNameForCode(code, r31mode),
+ clr_reg_value, reg<uint64_t>(code, r31mode), clr_normal);
}
-void Simulator::PrintRegisters(bool print_all_regs) {
- static bool first_run = true;
- static int64_t last_regs[kNumberOfRegisters];
+void Simulator::PrintFPRegister(unsigned code, PrintFPRegisterSizes sizes) {
+ // The template is "# v<code>:bits (d<code>:value, ...)".
- for (unsigned i = 0; i < kNumberOfRegisters; i++) {
- if (print_all_regs || first_run ||
- (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
+ DCHECK(sizes != 0);
+ DCHECK((sizes & kPrintAllFPRegValues) == sizes);
+
+ // Print the raw bits.
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (",
+ clr_fpreg_name, VRegNameForCode(code),
+ clr_fpreg_value, fpreg<uint64_t>(code), clr_normal);
+
+ // Print all requested value interpretations.
+ bool need_separator = false;
+ if (sizes & kPrintDRegValue) {
+ fprintf(stream_, "%s%s%s: %s%g%s",
+ need_separator ? ", " : "",
+ clr_fpreg_name, DRegNameForCode(code),
+ clr_fpreg_value, fpreg<double>(code), clr_normal);
+ need_separator = true;
+ }
+
+ if (sizes & kPrintSRegValue) {
+ fprintf(stream_, "%s%s%s: %s%g%s",
+ need_separator ? ", " : "",
+ clr_fpreg_name, SRegNameForCode(code),
+ clr_fpreg_value, fpreg<float>(code), clr_normal);
+ need_separator = true;
+ }
+
+ // End the value list.
+ fprintf(stream_, ")\n");
+}
+
+
+void Simulator::PrintSystemRegister(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name, clr_flag_value,
+ nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+ clr_normal);
+ break;
+ case FPCR: {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+ DCHECK(fpcr().RMode() < arraysize(rmode));
fprintf(stream_,
- "# %s%4s:%s 0x%016" PRIx64 "%s\n",
- clr_reg_name,
- XRegNameForCode(i, Reg31IsStackPointer),
- clr_reg_value,
- xreg(i, Reg31IsStackPointer),
+ "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name, clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
clr_normal);
+ break;
}
- // Cache the new register value so the next run can detect any changes.
- last_regs[i] = xreg(i, Reg31IsStackPointer);
+ default:
+ UNREACHABLE();
}
- first_run = false;
}
-void Simulator::PrintFPRegisters(bool print_all_regs) {
- static bool first_run = true;
- static uint64_t last_regs[kNumberOfFPRegisters];
+void Simulator::PrintRead(uintptr_t address,
+ size_t size,
+ unsigned reg_code) {
+ USE(size); // Size is unused here.
- // Print as many rows of registers as necessary, keeping each individual
- // register in the same column each time (to make it easy to visually scan
- // for changes).
- for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
- if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
- fprintf(stream_,
- "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
- clr_fpreg_name,
- VRegNameForCode(i),
- clr_fpreg_value,
- dreg_bits(i),
- clr_normal,
- clr_fpreg_name,
- DRegNameForCode(i),
- clr_fpreg_value,
- dreg(i),
- clr_fpreg_name,
- SRegNameForCode(i),
- clr_fpreg_value,
- sreg(i),
- clr_normal);
- }
- // Cache the new register value so the next run can detect any changes.
- last_regs[i] = dreg_bits(i);
+ // The template is "# x<code>:value <- address".
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
+ clr_reg_name, XRegNameForCode(reg_code),
+ clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
+
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintReadFP(uintptr_t address,
+ size_t size,
+ unsigned reg_code) {
+ // The template is "# reg:bits (reg:value) <- address".
+ switch (size) {
+ case kSRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%gf%s)",
+ clr_fpreg_name, VRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
+ clr_fpreg_name, SRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
+ break;
+ case kDRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
+ clr_fpreg_name, VRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
+ clr_fpreg_name, DRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
+ break;
+ default:
+ UNREACHABLE();
}
- first_run = false;
+
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
}
-void Simulator::PrintProcessorState() {
- PrintSystemRegisters();
- PrintRegisters();
- PrintFPRegisters();
+void Simulator::PrintWrite(uintptr_t address,
+ size_t size,
+ unsigned reg_code) {
+ // The template is "# reg:value -> address". To keep the trace tidy and
+ // readable, the value is aligned with the values in the register trace.
+ switch (size) {
+ case kByteSizeInBytes:
+ fprintf(stream_, "# %s%5s<7:0>: %s0x%02" PRIx8 "%s",
+ clr_reg_name, WRegNameForCode(reg_code),
+ clr_reg_value, reg<uint8_t>(reg_code), clr_normal);
+ break;
+ case kHalfWordSizeInBytes:
+ fprintf(stream_, "# %s%5s<15:0>: %s0x%04" PRIx16 "%s",
+ clr_reg_name, WRegNameForCode(reg_code),
+ clr_reg_value, reg<uint16_t>(reg_code), clr_normal);
+ break;
+ case kWRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%08" PRIx32 "%s",
+ clr_reg_name, WRegNameForCode(reg_code),
+ clr_reg_value, reg<uint32_t>(reg_code), clr_normal);
+ break;
+ case kXRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
+ clr_reg_name, XRegNameForCode(reg_code),
+ clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
}
-void Simulator::PrintWrite(uint8_t* address,
- uint64_t value,
- unsigned num_bytes) {
- // The template is "# value -> address". The template is not directly used
- // in the printf since compilers tend to struggle with the parametrized
- // width (%0*).
- const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
- fprintf(stream_,
- format,
- clr_memory_value,
- num_bytes * 2, // The width in hexa characters.
- value,
- clr_normal,
- clr_memory_address,
- address,
- clr_normal);
+void Simulator::PrintWriteFP(uintptr_t address,
+ size_t size,
+ unsigned reg_code) {
+ // The template is "# reg:bits (reg:value) -> address". To keep the trace tidy
+ // and readable, the value is aligned with the values in the register trace.
+ switch (size) {
+ case kSRegSize:
+ fprintf(stream_, "# %s%5s<31:0>: %s0x%08" PRIx32 "%s (%s%s: %s%gf%s)",
+ clr_fpreg_name, VRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<uint32_t>(reg_code), clr_normal,
+ clr_fpreg_name, SRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
+ break;
+ case kDRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
+ clr_fpreg_name, VRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
+ clr_fpreg_name, DRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
}
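
The rewritten trace routines print each value at the width of its access size using the <cinttypes> conversion macros (PRIx8 through PRIx64) rather than the old parametrized "%0*" width, which the removed comment noted some compilers handle poorly. A small sketch of the technique:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      uint8_t b = 0x2a;
      uint32_t w = 0x2a;
      uint64_t x = 0x2a;
      // Each PRIx macro supplies the right length modifier for its type, so
      // the zero-padded width can be spelled directly in the format string.
      std::printf("0x%02" PRIx8 "\n", b);
      std::printf("0x%08" PRIx32 "\n", w);
      std::printf("0x%016" PRIx64 "\n", x);
      return 0;
    }
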
@@ -1384,6 +1483,7 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
nzcv().SetZ(CalcZFlag(result));
nzcv().SetC(0);
nzcv().SetV(0);
+ LogSystemRegister(NZCV);
}
set_reg<T>(instr->Rd(), result, instr->RdMode());
@@ -1424,6 +1524,7 @@ void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
} else {
// If the condition fails, set the status flags to the nzcv immediate.
nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
}
}
@@ -1464,9 +1565,8 @@ void Simulator::LoadStoreHelper(Instruction* instr,
AddrMode addrmode) {
unsigned srcdst = instr->Rt();
unsigned addr_reg = instr->Rn();
- uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
- int num_bytes = 1 << instr->SizeLS();
- uint8_t* stack = NULL;
+ uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
+ uintptr_t stack = 0;
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
@@ -1480,44 +1580,50 @@ void Simulator::LoadStoreHelper(Instruction* instr,
// For store the address post writeback is used to check access below the
// stack.
- stack = reinterpret_cast<uint8_t*>(sp());
+ stack = sp();
}
LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
switch (op) {
- case LDRB_w:
- case LDRH_w:
- case LDR_w:
- case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
- case STRB_w:
- case STRH_w:
- case STR_w:
- case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
- case LDRSB_w: {
- set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead8(address), SXTB));
- break;
- }
- case LDRSB_x: {
- set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead8(address), SXTB));
- break;
- }
- case LDRSH_w: {
- set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead16(address), SXTH));
- break;
- }
- case LDRSH_x: {
- set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead16(address), SXTH));
- break;
+ // Use _no_log variants to suppress the register trace (LOG_REGS,
+ // LOG_FP_REGS). We will print a more detailed log.
+ case LDRB_w: set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address)); break;
+ case LDRH_w: set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address)); break;
+ case LDR_w: set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address)); break;
+ case LDR_x: set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address)); break;
+ case LDRSB_w: set_wreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
+ case LDRSH_w: set_wreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
+ case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
+ case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
+ case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead<int32_t>(address)); break;
+ case LDR_s: set_sreg_no_log(srcdst, MemoryRead<float>(address)); break;
+ case LDR_d: set_dreg_no_log(srcdst, MemoryRead<double>(address)); break;
+
+ case STRB_w: MemoryWrite<uint8_t>(address, wreg(srcdst)); break;
+ case STRH_w: MemoryWrite<uint16_t>(address, wreg(srcdst)); break;
+ case STR_w: MemoryWrite<uint32_t>(address, wreg(srcdst)); break;
+ case STR_x: MemoryWrite<uint64_t>(address, xreg(srcdst)); break;
+ case STR_s: MemoryWrite<float>(address, sreg(srcdst)); break;
+ case STR_d: MemoryWrite<double>(address, dreg(srcdst)); break;
+
+ default: UNIMPLEMENTED();
+ }
+
+ // Print a detailed trace (including the memory address) instead of the basic
+ // register:value trace generated by set_*reg().
+ size_t access_size = 1 << instr->SizeLS();
+ if (instr->IsLoad()) {
+ if ((op == LDR_s) || (op == LDR_d)) {
+ LogReadFP(address, access_size, srcdst);
+ } else {
+ LogRead(address, access_size, srcdst);
}
- case LDRSW_x: {
- set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
- break;
+ } else {
+ if ((op == STR_s) || (op == STR_d)) {
+ LogWriteFP(address, access_size, srcdst);
+ } else {
+ LogWrite(address, access_size, srcdst);
}
- case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
- case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
- case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
- case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
- default: UNIMPLEMENTED();
}
// Handle the writeback for loads after the load to ensure safe pop
@@ -1527,7 +1633,7 @@ void Simulator::LoadStoreHelper(Instruction* instr,
if (instr->IsLoad()) {
// For loads the address pre writeback is used to check access below the
// stack.
- stack = reinterpret_cast<uint8_t*>(sp());
+ stack = sp();
LoadStoreWriteBack(addr_reg, offset, addrmode);
}
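
The load/writeback ordering above is what keeps a simulated pop safe: the value is read while the stack pointer still covers its slot, and only then is the base register written back. A toy model of that ordering (indices stand in for addresses; names invented):

    #include <cinttypes>
    #include <cstdio>

    uint64_t memory[16];
    unsigned sp_index = 8;

    // Pop ordered as in the simulator: load first, then bump sp. Anything
    // interrupting between the two lines still sees sp protecting the slot
    // that was just read.
    uint64_t SafePop() {
      uint64_t value = memory[sp_index];
      sp_index += 1;
      return value;
    }

    int main() {
      memory[8] = 42;
      std::printf("%" PRIu64 "\n", SafePop());  // prints "42"
      return 0;
    }
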
@@ -1563,9 +1669,11 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
unsigned rt = instr->Rt();
unsigned rt2 = instr->Rt2();
unsigned addr_reg = instr->Rn();
- int offset = instr->ImmLSPair() << instr->SizeLSPair();
- uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
- uint8_t* stack = NULL;
+ size_t access_size = 1 << instr->SizeLSPair();
+ int64_t offset = instr->ImmLSPair() * access_size;
+ uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
+ uintptr_t address2 = address + access_size;
+ uintptr_t stack = 0;
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
@@ -1579,7 +1687,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
// For store the address post writeback is used to check access below the
// stack.
- stack = reinterpret_cast<uint8_t*>(sp());
+ stack = sp();
}
LoadStorePairOp op =
@@ -1589,55 +1697,85 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));
switch (op) {
+ // Use _no_log variants to suppress the register trace (LOG_REGS,
+ // LOG_FP_REGS). We will print a more detailed log.
case LDP_w: {
- set_wreg(rt, MemoryRead32(address));
- set_wreg(rt2, MemoryRead32(address + kWRegSize));
+ DCHECK(access_size == kWRegSize);
+ set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
+ set_wreg_no_log(rt2, MemoryRead<uint32_t>(address2));
break;
}
case LDP_s: {
- set_sreg(rt, MemoryReadFP32(address));
- set_sreg(rt2, MemoryReadFP32(address + kSRegSize));
+ DCHECK(access_size == kSRegSize);
+ set_sreg_no_log(rt, MemoryRead<float>(address));
+ set_sreg_no_log(rt2, MemoryRead<float>(address2));
break;
}
case LDP_x: {
- set_xreg(rt, MemoryRead64(address));
- set_xreg(rt2, MemoryRead64(address + kXRegSize));
+ DCHECK(access_size == kXRegSize);
+ set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
+ set_xreg_no_log(rt2, MemoryRead<uint64_t>(address2));
break;
}
case LDP_d: {
- set_dreg(rt, MemoryReadFP64(address));
- set_dreg(rt2, MemoryReadFP64(address + kDRegSize));
+ DCHECK(access_size == kDRegSize);
+ set_dreg_no_log(rt, MemoryRead<double>(address));
+ set_dreg_no_log(rt2, MemoryRead<double>(address2));
break;
}
case LDPSW_x: {
- set_xreg(rt, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
- set_xreg(rt2, ExtendValue<int64_t>(
- MemoryRead32(address + kWRegSize), SXTW));
+ DCHECK(access_size == kWRegSize);
+ set_xreg_no_log(rt, MemoryRead<int32_t>(address));
+ set_xreg_no_log(rt2, MemoryRead<int32_t>(address2));
break;
}
case STP_w: {
- MemoryWrite32(address, wreg(rt));
- MemoryWrite32(address + kWRegSize, wreg(rt2));
+ DCHECK(access_size == kWRegSize);
+ MemoryWrite<uint32_t>(address, wreg(rt));
+ MemoryWrite<uint32_t>(address2, wreg(rt2));
break;
}
case STP_s: {
- MemoryWriteFP32(address, sreg(rt));
- MemoryWriteFP32(address + kSRegSize, sreg(rt2));
+ DCHECK(access_size == kSRegSize);
+ MemoryWrite<float>(address, sreg(rt));
+ MemoryWrite<float>(address2, sreg(rt2));
break;
}
case STP_x: {
- MemoryWrite64(address, xreg(rt));
- MemoryWrite64(address + kXRegSize, xreg(rt2));
+ DCHECK(access_size == kXRegSize);
+ MemoryWrite<uint64_t>(address, xreg(rt));
+ MemoryWrite<uint64_t>(address2, xreg(rt2));
break;
}
case STP_d: {
- MemoryWriteFP64(address, dreg(rt));
- MemoryWriteFP64(address + kDRegSize, dreg(rt2));
+ DCHECK(access_size == kDRegSize);
+ MemoryWrite<double>(address, dreg(rt));
+ MemoryWrite<double>(address2, dreg(rt2));
break;
}
default: UNREACHABLE();
}
+ // Print a detailed trace (including the memory address) instead of the basic
+ // register:value trace generated by set_*reg().
+ if (instr->IsLoad()) {
+ if ((op == LDP_s) || (op == LDP_d)) {
+ LogReadFP(address, access_size, rt);
+ LogReadFP(address2, access_size, rt2);
+ } else {
+ LogRead(address, access_size, rt);
+ LogRead(address2, access_size, rt2);
+ }
+ } else {
+ if ((op == STP_s) || (op == STP_d)) {
+ LogWriteFP(address, access_size, rt);
+ LogWriteFP(address2, access_size, rt2);
+ } else {
+ LogWrite(address, access_size, rt);
+ LogWrite(address2, access_size, rt2);
+ }
+ }
+
// Handle the writeback for loads after the load to ensure safe pop
// operation even when interrupted in the middle of it. The stack pointer
// is only updated after the load so pop(fp) will never break the invariant
@@ -1645,7 +1783,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
if (instr->IsLoad()) {
// For loads the address pre writeback is used to check access below the
// stack.
- stack = reinterpret_cast<uint8_t*>(sp());
+ stack = sp();
LoadStoreWriteBack(addr_reg, offset, addrmode);
}
@@ -1657,24 +1795,37 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
void Simulator::VisitLoadLiteral(Instruction* instr) {
- uint8_t* address = instr->LiteralAddress();
+ uintptr_t address = instr->LiteralAddress();
unsigned rt = instr->Rt();
switch (instr->Mask(LoadLiteralMask)) {
- case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break;
- case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break;
- case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break;
- case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break;
+ // Use _no_log variants to suppress the register trace (LOG_REGS,
+ // LOG_FP_REGS), then print a more detailed log.
+ case LDR_w_lit:
+ set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
+ LogRead(address, kWRegSize, rt);
+ break;
+ case LDR_x_lit:
+ set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
+ LogRead(address, kXRegSize, rt);
+ break;
+ case LDR_s_lit:
+ set_sreg_no_log(rt, MemoryRead<float>(address));
+ LogReadFP(address, kSRegSize, rt);
+ break;
+ case LDR_d_lit:
+ set_dreg_no_log(rt, MemoryRead<double>(address));
+ LogReadFP(address, kDRegSize, rt);
+ break;
default: UNREACHABLE();
}
}
-uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
- int64_t offset,
- AddrMode addrmode) {
+uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset,
+ AddrMode addrmode) {
const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
- int64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
// When the base register is SP the stack pointer is required to be
// quadword aligned prior to the address calculation and write-backs.
@@ -1686,7 +1837,7 @@ uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
address += offset;
}
- return reinterpret_cast<uint8_t*>(address);
+ return address;
}
@@ -1701,88 +1852,21 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
}
-void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
+void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
if ((address >= stack_limit_) && (address < stack)) {
fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
- fprintf(stream_, " sp is here: 0x%16p\n", stack);
- fprintf(stream_, " access was here: 0x%16p\n", address);
- fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_);
+ fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n",
+ static_cast<uint64_t>(stack));
+ fprintf(stream_, " access was here: 0x%016" PRIx64 "\n",
+ static_cast<uint64_t>(address));
+ fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n",
+ static_cast<uint64_t>(stack_limit_));
fprintf(stream_, "\n");
FATAL("ACCESS BELOW STACK POINTER");
}
}
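
CheckMemoryAccess flags any access that lands in the gap between stack_limit_ and the current stack pointer, i.e. below sp but still inside the allocated simulator stack. A compact model of the predicate (values illustrative):

    #include <cstdint>
    #include <cstdio>

    // True when an access falls below sp but above the stack limit, the
    // region the simulator treats as off-limits.
    bool AccessBelowSp(uintptr_t address, uintptr_t sp, uintptr_t stack_limit) {
      return (address >= stack_limit) && (address < sp);
    }

    int main() {
      const uintptr_t limit = 0x1000, sp = 0x2000;
      std::printf("%d %d\n",
                  AccessBelowSp(0x1800, sp, limit),   // 1: below sp, flagged
                  AccessBelowSp(0x2400, sp, limit));  // 0: above sp, fine
      return 0;
    }
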
-uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
- DCHECK(address != NULL);
- DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
- uint64_t read = 0;
- memcpy(&read, address, num_bytes);
- return read;
-}
-
-
-uint8_t Simulator::MemoryRead8(uint8_t* address) {
- return MemoryRead(address, sizeof(uint8_t));
-}
-
-
-uint16_t Simulator::MemoryRead16(uint8_t* address) {
- return MemoryRead(address, sizeof(uint16_t));
-}
-
-
-uint32_t Simulator::MemoryRead32(uint8_t* address) {
- return MemoryRead(address, sizeof(uint32_t));
-}
-
-
-float Simulator::MemoryReadFP32(uint8_t* address) {
- return rawbits_to_float(MemoryRead32(address));
-}
-
-
-uint64_t Simulator::MemoryRead64(uint8_t* address) {
- return MemoryRead(address, sizeof(uint64_t));
-}
-
-
-double Simulator::MemoryReadFP64(uint8_t* address) {
- return rawbits_to_double(MemoryRead64(address));
-}
-
-
-void Simulator::MemoryWrite(uint8_t* address,
- uint64_t value,
- unsigned num_bytes) {
- DCHECK(address != NULL);
- DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
-
- LogWrite(address, value, num_bytes);
- memcpy(address, &value, num_bytes);
-}
-
-
-void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
- MemoryWrite(address, value, sizeof(uint32_t));
-}
-
-
-void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
- MemoryWrite32(address, float_to_rawbits(value));
-}
-
-
-void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
- MemoryWrite(address, value, sizeof(uint64_t));
-}
-
-
-void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
- MemoryWrite64(address, double_to_rawbits(value));
-}
-
-
void Simulator::VisitMoveWideImmediate(Instruction* instr) {
MoveWideImmediateOp mov_op =
static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
@@ -2331,6 +2415,7 @@ void Simulator::VisitFPConditionalCompare(Instruction* instr) {
} else {
// If the condition fails, set the status flags to the nzcv immediate.
nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
}
break;
}
@@ -3113,8 +3198,14 @@ void Simulator::VisitSystem(Instruction* instr) {
}
case MSR: {
switch (instr->ImmSystemRegister()) {
- case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
- case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
+ case NZCV:
+ nzcv().SetRawValue(xreg(instr->Rt()));
+ LogSystemRegister(NZCV);
+ break;
+ case FPCR:
+ fpcr().SetRawValue(xreg(instr->Rt()));
+ LogSystemRegister(FPCR);
+ break;
default: UNIMPLEMENTED();
}
break;
@@ -3325,8 +3416,8 @@ void Simulator::Debug() {
} else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
if (argc == 2) {
if (strcmp(arg1, "all") == 0) {
- PrintRegisters(true);
- PrintFPRegisters(true);
+ PrintRegisters();
+ PrintFPRegisters();
} else {
if (!PrintValue(arg1)) {
PrintF("%s unrecognized\n", arg1);
@@ -3530,7 +3621,7 @@ void Simulator::VisitException(Instruction* instr) {
if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
if (message != NULL) {
PrintF(stream_,
- "%sDebugger hit %d: %s%s%s\n",
+ "# %sDebugger hit %d: %s%s%s\n",
clr_debug_number,
code,
clr_debug_message,
@@ -3538,7 +3629,7 @@ void Simulator::VisitException(Instruction* instr) {
clr_normal);
} else {
PrintF(stream_,
- "%sDebugger hit %d.%s\n",
+ "# %sDebugger hit %d.%s\n",
clr_debug_number,
code,
clr_normal);
@@ -3565,9 +3656,9 @@ void Simulator::VisitException(Instruction* instr) {
// Don't print information that is already being traced.
parameters &= ~log_parameters();
// Print the requested information.
- if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
- if (parameters & LOG_REGS) PrintRegisters(true);
- if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+ if (parameters & LOG_SYS_REGS) PrintSystemRegisters();
+ if (parameters & LOG_REGS) PrintRegisters();
+ if (parameters & LOG_FP_REGS) PrintFPRegisters();
}
// The stop parameters are inlined in the code. Skip them:
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 6b0211816c..108f6f2b54 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -312,7 +312,6 @@ class Simulator : public DecoderVisitor {
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
CheckBreakNext();
Decode(pc_);
- LogProcessorState();
increment_pc();
CheckBreakpoints();
}
@@ -348,16 +347,13 @@ class Simulator : public DecoderVisitor {
return reg<int64_t>(code, r31mode);
}
- // Write 'size' bits of 'value' into an integer register. The value is
- // zero-extended. This behaviour matches AArch64 register writes.
-
- // Like set_reg(), but infer the access size from the template type.
+ // Write 'value' into an integer register. The value is zero-extended. This
+ // behaviour matches AArch64 register writes.
template<typename T>
void set_reg(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
- DCHECK(code < kNumberOfRegisters);
- if (!IsZeroRegister(code, r31mode))
- registers_[code].Set(value);
+ set_reg_no_log(code, value, r31mode);
+ LogRegister(code, r31mode);
}
// Common specialized accessors for the set_reg() template.
@@ -371,6 +367,26 @@ class Simulator : public DecoderVisitor {
set_reg(code, value, r31mode);
}
+ // As above, but don't automatically log the register update.
+ template <typename T>
+ void set_reg_no_log(unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ DCHECK(code < kNumberOfRegisters);
+ if (!IsZeroRegister(code, r31mode)) {
+ registers_[code].Set(value);
+ }
+ }
+
+ void set_wreg_no_log(unsigned code, int32_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg_no_log(code, value, r31mode);
+ }
+
+ void set_xreg_no_log(unsigned code, int64_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg_no_log(code, value, r31mode);
+ }
+
// Commonly-used special cases.
template<typename T>
void set_lr(T value) {
@@ -430,9 +446,13 @@ class Simulator : public DecoderVisitor {
// This behaviour matches AArch64 register writes.
template<typename T>
void set_fpreg(unsigned code, T value) {
- DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
- DCHECK(code < kNumberOfFPRegisters);
- fpregisters_[code].Set(value);
+ set_fpreg_no_log(code, value);
+
+ if (sizeof(value) <= kSRegSize) {
+ LogFPRegister(code, kPrintSRegValue);
+ } else {
+ LogFPRegister(code, kPrintDRegValue);
+ }
}
// Common specialized accessors for the set_fpreg() template.
@@ -452,6 +472,22 @@ class Simulator : public DecoderVisitor {
set_fpreg(code, value);
}
+ // As above, but don't automatically log the register update.
+ template <typename T>
+ void set_fpreg_no_log(unsigned code, T value) {
+ DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
+ DCHECK(code < kNumberOfFPRegisters);
+ fpregisters_[code].Set(value);
+ }
+
+ void set_sreg_no_log(unsigned code, float value) {
+ set_fpreg_no_log(code, value);
+ }
+
+ void set_dreg_no_log(unsigned code, double value) {
+ set_fpreg_no_log(code, value);
+ }
+
SimSystemRegister& nzcv() { return nzcv_; }
SimSystemRegister& fpcr() { return fpcr_; }
@@ -478,27 +514,68 @@ class Simulator : public DecoderVisitor {
// Disassemble instruction at the given address.
void PrintInstructionsAt(Instruction* pc, uint64_t count);
- void PrintSystemRegisters(bool print_all = false);
- void PrintRegisters(bool print_all_regs = false);
- void PrintFPRegisters(bool print_all_regs = false);
- void PrintProcessorState();
- void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ // Print all registers of the specified types.
+ void PrintRegisters();
+ void PrintFPRegisters();
+ void PrintSystemRegisters();
+
+ // Like Print* (above), but respect log_parameters().
void LogSystemRegisters() {
- if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
+ if (log_parameters() & LOG_SYS_REGS) PrintSystemRegisters();
}
void LogRegisters() {
- if (log_parameters_ & LOG_REGS) PrintRegisters();
+ if (log_parameters() & LOG_REGS) PrintRegisters();
}
void LogFPRegisters() {
- if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
+ if (log_parameters() & LOG_FP_REGS) PrintFPRegisters();
+ }
+
+ // Specify relevant register sizes, for PrintFPRegister.
+ //
+ // These values are bit masks; they can be combined in case multiple views of
+ // a machine register are interesting.
+ enum PrintFPRegisterSizes {
+ kPrintDRegValue = 1 << kDRegSize,
+ kPrintSRegValue = 1 << kSRegSize,
+ kPrintAllFPRegValues = kPrintDRegValue | kPrintSRegValue
+ };
+
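
Because kDRegSize and kSRegSize here are byte counts (8 and 4), the two mask values occupy distinct bits (1 << 8 and 1 << 4) and can be OR-ed together to request both views of a register, as kPrintAllFPRegValues does. A sketch of the dispatch, with the size constants assumed rather than taken from V8's headers:

    #include <cstdio>

    enum PrintSizes {
      kPrintD = 1 << 8,   // assumption: kDRegSize == 8 bytes
      kPrintS = 1 << 4,   // assumption: kSRegSize == 4 bytes
      kPrintAll = kPrintD | kPrintS
    };

    void PrintViews(int sizes) {
      if (sizes & kPrintD) std::printf("64-bit view (double)\n");
      if (sizes & kPrintS) std::printf("32-bit view (float)\n");
    }

    int main() {
      PrintViews(kPrintAll);  // both interpretations of one register
      return 0;
    }
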
+ // Print individual register values (after update).
+ void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer);
+ void PrintFPRegister(unsigned code,
+ PrintFPRegisterSizes sizes = kPrintAllFPRegValues);
+ void PrintSystemRegister(SystemRegister id);
+
+ // Like Print* (above), but respect log_parameters().
+ void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) {
+ if (log_parameters() & LOG_REGS) PrintRegister(code, r31mode);
}
- void LogProcessorState() {
- LogSystemRegisters();
- LogRegisters();
- LogFPRegisters();
+ void LogFPRegister(unsigned code,
+ PrintFPRegisterSizes sizes = kPrintAllFPRegValues) {
+ if (log_parameters() & LOG_FP_REGS) PrintFPRegister(code, sizes);
}
- void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
- if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
+ void LogSystemRegister(SystemRegister id) {
+ if (log_parameters() & LOG_SYS_REGS) PrintSystemRegister(id);
+ }
+
+ // Print memory accesses.
+ void PrintRead(uintptr_t address, size_t size, unsigned reg_code);
+ void PrintReadFP(uintptr_t address, size_t size, unsigned reg_code);
+ void PrintWrite(uintptr_t address, size_t size, unsigned reg_code);
+ void PrintWriteFP(uintptr_t address, size_t size, unsigned reg_code);
+
+ // Like Print* (above), but respect log_parameters().
+ void LogRead(uintptr_t address, size_t size, unsigned reg_code) {
+ if (log_parameters() & LOG_REGS) PrintRead(address, size, reg_code);
+ }
+ void LogReadFP(uintptr_t address, size_t size, unsigned reg_code) {
+ if (log_parameters() & LOG_FP_REGS) PrintReadFP(address, size, reg_code);
+ }
+ void LogWrite(uintptr_t address, size_t size, unsigned reg_code) {
+ if (log_parameters() & LOG_WRITE) PrintWrite(address, size, reg_code);
+ }
+ void LogWriteFP(uintptr_t address, size_t size, unsigned reg_code) {
+ if (log_parameters() & LOG_WRITE) PrintWriteFP(address, size, reg_code);
}
int log_parameters() { return log_parameters_; }
@@ -589,28 +666,30 @@ class Simulator : public DecoderVisitor {
int64_t offset,
AddrMode addrmode);
void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
- uint8_t* LoadStoreAddress(unsigned addr_reg,
- int64_t offset,
- AddrMode addrmode);
+ uintptr_t LoadStoreAddress(unsigned addr_reg, int64_t offset,
+ AddrMode addrmode);
void LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode);
- void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
-
- uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
- uint8_t MemoryRead8(uint8_t* address);
- uint16_t MemoryRead16(uint8_t* address);
- uint32_t MemoryRead32(uint8_t* address);
- float MemoryReadFP32(uint8_t* address);
- uint64_t MemoryRead64(uint8_t* address);
- double MemoryReadFP64(uint8_t* address);
+ void CheckMemoryAccess(uintptr_t address, uintptr_t stack);
- void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
- void MemoryWrite32(uint8_t* address, uint32_t value);
- void MemoryWriteFP32(uint8_t* address, float value);
- void MemoryWrite64(uint8_t* address, uint64_t value);
- void MemoryWriteFP64(uint8_t* address, double value);
+ // Memory read helpers.
+ template <typename T, typename A>
+ T MemoryRead(A address) {
+ T value;
+ STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8));
+ memcpy(&value, reinterpret_cast<const void*>(address), sizeof(value));
+ return value;
+ }
+ // Memory write helpers.
+ template <typename T, typename A>
+ void MemoryWrite(A address, T value) {
+ STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8));
+ memcpy(reinterpret_cast<void*>(address), &value, sizeof(value));
+ }
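
The new MemoryRead/MemoryWrite templates go through memcpy instead of dereferencing a cast pointer; that keeps unaligned and type-punned accesses well defined, and compilers lower the fixed-size memcpy to a single load or store. A standalone version of the read side (name invented):

    #include <cinttypes>
    #include <cstdio>
    #include <cstring>

    template <typename T>
    T ReadAs(uintptr_t address) {
      T value;
      // memcpy avoids the strict-aliasing and alignment traps that a
      // *reinterpret_cast<T*>(address) dereference could hit.
      std::memcpy(&value, reinterpret_cast<const void*>(address), sizeof(value));
      return value;
    }

    int main() {
      unsigned char buffer[] = {0x78, 0x56, 0x34, 0x12};
      uint32_t v = ReadAs<uint32_t>(reinterpret_cast<uintptr_t>(buffer));
      std::printf("0x%08" PRIx32 "\n", v);  // 0x12345678 on little-endian hosts
      return 0;
    }
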
template <typename T>
T ShiftOperand(T value,
@@ -763,10 +842,10 @@ class Simulator : public DecoderVisitor {
static const uint32_t kConditionFlagsMask = 0xf0000000;
// Stack
- byte* stack_;
- static const intptr_t stack_protection_size_ = KB;
- intptr_t stack_size_;
- byte* stack_limit_;
+ uintptr_t stack_;
+ static const size_t stack_protection_size_ = KB;
+ size_t stack_size_;
+ uintptr_t stack_limit_;
Decoder<DispatchingDecoderVisitor>* decoder_;
Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;
diff --git a/deps/v8/src/arm64/stub-cache-arm64.cc b/deps/v8/src/arm64/stub-cache-arm64.cc
deleted file mode 100644
index b7d43a4771..0000000000
--- a/deps/v8/src/arm64/stub-cache-arm64.cc
+++ /dev/null
@@ -1,1155 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(!AreAliased(receiver, scratch0, scratch1));
- DCHECK(name->IsUniqueName());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
- __ B(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
- __ B(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
-
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- __ Bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-// Probe primary or secondary table.
-// If the entry is found in the cache, the generated code jump to the first
-// instruction of the stub in the cache.
-// If there is a miss the code fall trough.
-//
-// 'receiver', 'name' and 'offset' registers are preserved on miss.
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- Register offset,
- Register scratch,
- Register scratch2,
- Register scratch3) {
- // Some code below relies on the fact that the Entry struct contains
- // 3 pointers (name, code, map).
- STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
-
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
- uintptr_t value_off_addr =
- reinterpret_cast<uintptr_t>(value_offset.address());
- uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
- Label miss;
-
- DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
-
- // Multiply by 3 because there are 3 fields per entry.
- __ Add(scratch3, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ Mov(scratch, key_offset);
- __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
-
- // Check that the key in the entry matches the name.
- __ Ldr(scratch2, MemOperand(scratch));
- __ Cmp(name, scratch2);
- __ B(ne, &miss);
-
- // Check the map matches.
- __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
- __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Cmp(scratch2, scratch3);
- __ B(ne, &miss);
-
- // Get the code entry from the cache.
- __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
- __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
- __ Cmp(scratch2.W(), flags);
- __ B(ne, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ B(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ B(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
- __ Br(scratch);
-
- // Miss: fall through.
- __ Bind(&miss);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Make sure extra and extra2 registers are valid.
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Compute the hash for primary table.
- __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Add(scratch, scratch, extra);
- __ Eor(scratch, scratch, flags);
- // We shift out the last two bits because they are not part of the hash.
- __ Ubfx(scratch, scratch, kCacheIndexShift,
- CountTrailingZeros(kPrimaryTableSize, 64));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Primary miss: Compute hash for secondary table.
- __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
- __ Add(scratch, scratch, flags >> kCacheIndexShift);
- __ And(scratch, scratch, kSecondaryTableSize - 1);
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ Bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
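
The removed GenerateProbe follows the usual two-level stub-cache scheme: the primary index mixes the name hash, the receiver's map and the code flags, and on a miss a secondary index is derived from the primary so the two tables collide differently for the same key. A rough model of the indexing (the constants and exact mixing are illustrative, not V8's):

    #include <cstdint>
    #include <cstdio>

    const uint32_t kPrimarySize = 2048;    // assumption: power-of-two tables
    const uint32_t kSecondarySize = 512;

    uint32_t PrimaryIndex(uint32_t name_hash, uint32_t map_bits,
                          uint32_t flags) {
      return ((name_hash + map_bits) ^ flags) & (kPrimarySize - 1);
    }

    uint32_t SecondaryIndex(uint32_t primary, uint32_t name_bits,
                            uint32_t flags) {
      return ((primary - name_bits) + flags) & (kSecondarySize - 1);
    }

    int main() {
      uint32_t p = PrimaryIndex(0xdeadbeefu, 0x1234u, 0x42u);
      std::printf("primary=%u secondary=%u\n",
                  p, SecondaryIndex(p, 0xbeefu, 0x42u));
      return 0;
    }
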
-
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
- __ Ldr(scratch, GlobalObjectMemOperand());
- __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ Ldr(scratch, ContextMemOperand(scratch, index));
- __ Cmp(scratch, Operand(function));
- __ B(ne, miss);
-
- // Load its initial map. The global functions all have initial maps.
- __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
- // Load the prototype from the initial map.
- __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- // TryGetFunctionPrototype can't put the result directly in x0 because the
- // three input registers can't alias, and we call this function from
- // LoadIC::GenerateFunctionPrototype, where the receiver is x0. So we
- // explicitly move the result into x0.
- __ Mov(x0, scratch1);
- __ Ret();
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- __ Mov(scratch, Operand(cell));
- __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
- __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
-
- __ Push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ Mov(scratch, Operand(interceptor));
- __ Push(scratch, receiver, holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch,
- bool is_store, int argc, Register* values) {
- DCHECK(!AreAliased(receiver, scratch));
-
- MacroAssembler::PushPopQueue queue(masm);
- queue.Queue(receiver);
- // Write the arguments to the stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!AreAliased(receiver, scratch, arg));
- queue.Queue(arg);
- }
- queue.PushQueued();
-
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiFunctionStub.
- Register callee = x0;
- Register call_data = x4;
- Register holder = x2;
- Register api_function_address = x1;
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Mov(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ LoadObject(holder, api_holder);
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ LoadObject(callee, function);
-
- bool call_data_undefined = false;
- // Put call_data in place.
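- // New-space objects can't be embedded directly in code, so in that case
- // load the data from the CallHandlerInfo at runtime.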
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ LoadObject(call_data, api_call_info);
- __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
- call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
- } else {
- __ LoadObject(call_data, call_data_obj);
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference ref = ExternalReference(
- &fun, ExternalReference::DIRECT_API_CALL, masm->isolate());
- __ Mov(api_function_address, ref);
-
- // Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
- __ TailCallStub(&stub);
-}
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ Bind(label);
- __ Mov(this->name(), Operand(name));
- }
-}
-
-
-// Generate StoreTransition code; the value is passed in the x0 register.
-// When leaving generated code after success, the receiver_reg and storage_reg
-// may be clobbered. Upon branch to miss_label, the receiver and name registers
-// have their original values.
-void NamedStoreHandlerCompiler::GenerateStoreTransition(
- Handle<Map> transition, Handle<Name> name, Register receiver_reg,
- Register storage_reg, Register value_reg, Register scratch1,
- Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
- Label exit;
-
- DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg,
- scratch1, scratch2, scratch3));
-
- // We don't need scratch3.
- scratch3 = NoReg;
-
- int descriptor = transition->LastAdded();
- DescriptorArray* descriptors = transition->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- DCHECK(!representation.IsNone());
-
- if (details.type() == CONSTANT) {
- Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
- __ LoadObject(scratch1, constant);
- __ Cmp(value_reg, scratch1);
- __ B(ne, miss_label);
- } else if (representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_label);
- HeapType* field_type = descriptors->GetFieldType(descriptor);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
- __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
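- // Compare against each map the field type allows: branch to do_store on
- // a match, and fall through to the miss label after the last comparison.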
- while (true) {
- __ CompareMap(scratch1, it.Current());
- it.Advance();
- if (it.Done()) {
- __ B(ne, miss_label);
- break;
- }
- __ B(eq, &do_store);
- }
- __ Bind(&do_store);
- }
- } else if (representation.IsDouble()) {
- UseScratchRegisterScope temps(masm());
- DoubleRegister temp_double = temps.AcquireD();
- __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
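- // The untag above is speculative: if value_reg is not a smi, the result
- // is ignored and temp_double is reloaded from the heap number below.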
-
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
-
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
- __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
- __ Bind(&do_store);
- __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double,
- NoReg, MUTABLE);
- }
-
- // Stub never generated for objects that require access checks.
- DCHECK(!transition->is_access_check_needed());
-
- // Perform map transition for the receiver if necessary.
- if (details.type() == FIELD &&
- Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ Mov(scratch1, Operand(transition));
- __ Push(receiver_reg, scratch1, value_reg);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- isolate()),
- 3, 1);
- return;
- }
-
- // Update the map of the object.
- __ Mov(scratch1, Operand(transition));
- __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
- // Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- if (details.type() == CONSTANT) {
- DCHECK(value_reg.is(x0));
- __ Ret();
- return;
- }
-
- int index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties are not going to change.
- index -= transition->inobject_properties();
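- // A negative index means the property is stored in-object; a non-negative
- // index refers to the out-of-object properties array.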
-
- // TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
- if (index < 0) {
- // Set the property straight into the object.
- int offset = transition->instance_size() + (index * kPointerSize);
- __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ Mov(storage_reg, value_reg);
- }
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array.
- __ Ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ Str(prop_reg, FieldMemOperand(scratch1, offset));
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ Mov(storage_reg, value_reg);
- }
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- }
-
- __ Bind(&exit);
- // Return the value (register x0).
- DCHECK(value_reg.is(x0));
- __ Ret();
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup,
- Register value_reg,
- Label* miss_label) {
- DCHECK(lookup->representation().IsHeapObject());
- __ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
- __ Ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- __ CompareMap(scratch1(), it.Current());
- it.Advance();
- if (it.Done()) {
- __ B(ne, miss_label);
- break;
- }
- __ B(eq, &do_store);
- }
- __ Bind(&do_store);
-
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- GenerateTailCall(masm(), stub.GetCode());
-}
-
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss,
- PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
-
- // object_reg and holder_reg registers can alias.
- DCHECK(!AreAliased(object_reg, scratch1, scratch2));
- DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
- }
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps for fast and global
- // objects, or do a negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
- DCHECK(current.is_null() ||
- (current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound));
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map =
- heap()->InNewSpace(*prototype) || depth == 1;
- Register map_reg = scratch1;
- __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- UseScratchRegisterScope temps(masm());
- __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (load_prototype_from_map) {
- __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- __ Mov(reg, Operand(prototype));
- }
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- current_map = handle(current->map());
- }
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Check the holder map.
- if (depth != 0 || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
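- // Emit the miss handler out of line so the fast path falls straight
- // through on success.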
- Label success;
- __ B(&success);
-
- __ Bind(miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- __ Bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ B(&success);
-
- GenerateRestoreName(miss, name);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- __ Bind(&success);
- }
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ LoadObject(x0, value);
- __ Ret();
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
- // Build the ExecutableAccessorInfo::args_ list on the stack and push the
- // property name below the exit frame to make the GC aware of them and
- // store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-
- __ Push(receiver());
-
- if (heap()->InNewSpace(callback->data())) {
- __ Mov(scratch3(), Operand(callback));
- __ Ldr(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
- } else {
- __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
- }
- __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
- __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
-
- Register args_addr = scratch2();
- __ Add(args_addr, __ StackPointer(), kPointerSize);
-
- // Stack at this point:
- // sp[40] callback data
- // sp[32] undefined
- // sp[24] undefined
- // sp[16] isolate
- // args_addr -> sp[8] reg
- // sp[0] name
-
- // Abi for CallApiGetter.
- Register getter_address_reg = x2;
-
- // Set up the call.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ Mov(getter_address_reg, ref);
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
- LookupResult* lookup,
- Handle<Name> name) {
- DCHECK(!AreAliased(receiver(), this->name(),
- scratch1(), scratch2(), scratch3()));
- DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
-
- // So far the most popular follow-ups for interceptor loads are FIELD and
- // CALLBACKS, so we inline only those; other cases may be added later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- compile_followup_inline =
- callback->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback,
- type());
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++
- // code; the FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check = *holder() != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke an interceptor. Note: map checks from the receiver to the
- // interceptor's holder have been compiled before (see the caller of
- // this method).
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if the interceptor provided a value for the property. If so,
- // return immediately.
- Label interceptor_failed;
- __ JumpIfRoot(x0,
- Heap::kNoInterceptorResultSentinelRootIndex,
- &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ Bind(&interceptor_failed);
- if (must_preserve_receiver_reg) {
- __ Pop(this->name(), holder_reg, receiver());
- } else {
- __ Pop(this->name(), holder_reg);
- }
- // Leave the internal frame.
- }
- GenerateLoadPostInterceptor(holder_reg, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
- isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
- }
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
- ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
- Register holder_reg = Frontend(receiver(), name);
-
- // Stub never generated for non-global objects that require access checks.
- DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
-
- // receiver() and holder_reg can alias.
- DCHECK(!AreAliased(receiver(), scratch1(), scratch2(), value()));
- DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
- __ Mov(scratch1(), Operand(callback));
- __ Mov(scratch2(), Operand(name));
- __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ Push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ Pop(x0);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- Label miss;
-
- ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor");
-
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-// TODO(all): The so-called scratch registers are significant in some cases.
-// For example, PropertyAccessCompiler::keyed_store_calling_convention()[3]
-// (x3) is actually used for KeyedStoreCompiler::transition_map(). We should
-// verify which registers are actually scratch registers, and which are
-// important. For now, we use the same assignments as ARM to remain on the
-// safe side.
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, x3, x0, x4, x5 };
- return registers;
-}
-
-
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, value, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(x3.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, x3, x4, x5 };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
- Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
- Label miss;
- FrontendHeader(receiver(), name, &miss);
-
- // Get the value from the cell.
- Register result = StoreIC::ValueRegister();
- __ Mov(result, Operand(cell));
- __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
-
- // Check for a deleted property if the property can actually be deleted.
- if (is_configurable) {
- __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
- }
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
- __ Ret();
-
- FrontendFooter(name, &miss);
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // If we are compiling an IC for dictionary loads or stores, just check
- // whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
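- // A smi receiver can only be handled here if one of the types is Number;
- // otherwise it jumps straight to the miss handler.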
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register.
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
- __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Label try_next;
- __ Cmp(map_reg, Operand(map));
- __ B(ne, &try_next);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ Bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
- __ Bind(&try_next);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
-
- ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic");
-
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
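- // Dispatch on the receiver map: each handled map jumps to its handler
- // stub, and unmatched maps fall through to the next comparison.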
- for (int i = 0; i < receiver_count; i++) {
- __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
-
- Label skip;
- __ B(&skip, ne);
- if (!transitioned_maps->at(i).is_null()) {
- // This argument is used by the handler stub. For example, see
- // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
- __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
- }
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ Bind(&skip);
- }
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, miss;
-
- Register result = x0;
- Register key = LoadIC::NameRegister();
- Register receiver = LoadIC::ReceiverRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
-
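- // Only smi keys reach the dictionary probe below; anything else misses.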
- __ JumpIfNotSmi(key, &miss);
- __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, x4, key, result, x7, x3, x5, x6);
- __ Ret();
-
- __ Bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x4, x3);
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- // Miss case, call the runtime.
- __ Bind(&miss);
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM64