Diffstat (limited to 'deps/v8/src/compiler/arm64/code-generator-arm64.cc')
-rw-r--r-- deps/v8/src/compiler/arm64/code-generator-arm64.cc | 374
 1 file changed, 267 insertions(+), 107 deletions(-)
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 41d4c5458d..e025236059 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -24,6 +24,18 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
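+ // A Float32 value lives in the S view (low 32 bits) of its allocated
+ // D register; a Float64 value uses the D register directly.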
+ DoubleRegister InputFloat32Register(int index) {
+ return InputDoubleRegister(index).S();
+ }
+
+ DoubleRegister InputFloat64Register(int index) {
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
+
+ DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
+
Register InputRegister32(int index) {
return ToRegister(instr_->InputAt(index)).W();
}
@@ -106,9 +118,8 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(no_reg);
}
- MemOperand MemoryOperand() {
- int index = 0;
- return MemoryOperand(&index);
+ MemOperand MemoryOperand(int first_index = 0) {
+ return MemoryOperand(&first_index);
}
Operand ToOperand(InstructionOperand* op) {
@@ -142,6 +153,9 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return Operand(constant.ToExternalReference());
case Constant::kHeapObject:
return Operand(constant.ToHeapObject());
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(dcarney): RPO immediates on arm64.
+ break;
}
UNREACHABLE();
return Operand(-1);
@@ -160,6 +174,106 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
};
+namespace {
+
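+// Out-of-line failure paths for the checked memory access macros below:
+// an out-of-bounds checked load yields quiet NaN (for floats) or zero
+// (for integers) instead of performing the access.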
+class OutOfLineLoadNaN32 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadNaN64 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadZero FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadZero(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ Mov(result_, 0); }
+
+ private:
+ Register const result_;
+};
+
+} // namespace
+
+
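+// The checked access macros compare the 32-bit offset against the buffer
+// length and branch on "hs" (unsigned >=) to the failure path; in-bounds
+// accesses address the buffer with the zero-extended (UXTW) offset.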
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
+ do { \
+ auto result = i.OutputFloat##width##Register(); \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ __ Cmp(offset, length); \
+ auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
+ __ B(hs, ool->entry()); \
+ __ Ldr(result, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(ool->exit()); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister32(); \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ __ Cmp(offset, length); \
+ auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+ __ B(hs, ool->entry()); \
+ __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(ool->exit()); \
+ } while (0)
+
+
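+// Checked stores need no out-of-line code: an out-of-bounds store is
+// simply skipped by a forward branch over the store instruction.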
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
+ do { \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ auto value = i.InputFloat##width##Register(3); \
+ __ Cmp(offset, length); \
+ Label done; \
+ __ B(hs, &done); \
+ __ Str(value, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(&done); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ auto value = i.InputRegister32(3); \
+ __ Cmp(offset, length); \
+ Label done; \
+ __ B(hs, &done); \
+ __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(&done); \
+ } while (0)
+
+
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
@@ -172,15 +286,6 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
} while (0)
-#define ASSEMBLE_TEST_AND_BRANCH(asm_instr, width) \
- do { \
- bool fallthrough = IsNextInAssemblyOrder(i.InputRpo(3)); \
- __ asm_instr(i.InputRegister##width(0), i.InputInt6(1), \
- GetLabel(i.InputRpo(2))); \
- if (!fallthrough) __ B(GetLabel(i.InputRpo(3))); \
- } while (0)
-
-
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
@@ -216,7 +321,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchJmp:
- __ B(GetLabel(i.InputRpo(0)));
+ AssembleArchJump(i.InputRpo(0));
break;
case kArchNop:
// don't emit code for nops.
@@ -418,6 +523,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Mov32:
__ Mov(i.OutputRegister32(), i.InputRegister32(0));
break;
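+ // Sign-extend the low byte (Sxtb) or halfword (Sxth) of the input
+ // into the full 32-bit result register.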
+ case kArm64Sxtb32:
+ __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
+ break;
+ case kArm64Sxth32:
+ __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
+ break;
case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
@@ -429,17 +540,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
i.InputInt8(2));
break;
- case kArm64Tbz:
- ASSEMBLE_TEST_AND_BRANCH(Tbz, 64);
- break;
- case kArm64Tbz32:
- ASSEMBLE_TEST_AND_BRANCH(Tbz, 32);
+ case kArm64TestAndBranch32:
+ case kArm64TestAndBranch:
+ // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
break;
- case kArm64Tbnz:
- ASSEMBLE_TEST_AND_BRANCH(Tbnz, 64);
- break;
- case kArm64Tbnz32:
- ASSEMBLE_TEST_AND_BRANCH(Tbnz, 32);
+ case kArm64CompareAndBranch32:
+ // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
case kArm64Claim: {
int words = MiscField::decode(instr->opcode());
@@ -597,83 +703,154 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- }
-}
-
-
-// Assemble branches after this instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
- FlagsCondition condition) {
- Arm64OperandConverter i(this, instr);
- Label done;
-
- // Emit a branch. The true and false targets are always the last two inputs
- // to the instruction.
- BasicBlock::RpoNumber tblock =
- i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
- BasicBlock::RpoNumber fblock =
- i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
- bool fallthru = IsNextInAssemblyOrder(fblock);
- Label* tlabel = GetLabel(tblock);
- Label* flabel = fallthru ? &done : GetLabel(fblock);
- switch (condition) {
- case kUnorderedEqual:
- __ B(vs, flabel);
- // Fall through.
- case kEqual:
- __ B(eq, tlabel);
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
break;
- case kUnorderedNotEqual:
- __ B(vs, tlabel);
- // Fall through.
- case kNotEqual:
- __ B(ne, tlabel);
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
break;
- case kSignedLessThan:
- __ B(lt, tlabel);
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
break;
- case kSignedGreaterThanOrEqual:
- __ B(ge, tlabel);
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
break;
- case kSignedLessThanOrEqual:
- __ B(le, tlabel);
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
break;
- case kSignedGreaterThan:
- __ B(gt, tlabel);
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(32);
break;
- case kUnorderedLessThan:
- __ B(vs, flabel);
- // Fall through.
- case kUnsignedLessThan:
- __ B(lo, tlabel);
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(64);
break;
- case kUnorderedGreaterThanOrEqual:
- __ B(vs, tlabel);
- // Fall through.
- case kUnsignedGreaterThanOrEqual:
- __ B(hs, tlabel);
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
break;
- case kUnorderedLessThanOrEqual:
- __ B(vs, flabel);
- // Fall through.
- case kUnsignedLessThanOrEqual:
- __ B(ls, tlabel);
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
break;
- case kUnorderedGreaterThan:
- __ B(vs, tlabel);
- // Fall through.
- case kUnsignedGreaterThan:
- __ B(hi, tlabel);
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(Str);
break;
- case kOverflow:
- __ B(vs, tlabel);
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(32);
break;
- case kNotOverflow:
- __ B(vc, tlabel);
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
}
- if (!fallthru) __ B(flabel); // no fallthru to flabel.
- __ Bind(&done);
+}
+
+
+// Assemble branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Arm64OperandConverter i(this, instr);
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ FlagsCondition condition = branch->condition;
+ ArchOpcode opcode = instr->arch_opcode();
+
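+ // The compare-and-branch and test-and-branch pseudo instructions carry
+ // their operands directly, so they bypass the condition code switch below
+ // and emit a single fused cbz/cbnz or tbz/tbnz.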
+ if (opcode == kArm64CompareAndBranch32) {
+ switch (condition) {
+ case kEqual:
+ __ Cbz(i.InputRegister32(0), tlabel);
+ break;
+ case kNotEqual:
+ __ Cbnz(i.InputRegister32(0), tlabel);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (opcode == kArm64TestAndBranch32) {
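+ // Tbz/Tbnz on a W register take a 5-bit bit index (the X-register form
+ // below takes 6 bits).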
+ switch (condition) {
+ case kEqual:
+ __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
+ break;
+ case kNotEqual:
+ __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (opcode == kArm64TestAndBranch) {
+ switch (condition) {
+ case kEqual:
+ __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
+ break;
+ case kNotEqual:
+ __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ switch (condition) {
+ case kUnorderedEqual:
+ // The "eq" condition will not catch the unordered case.
+ // The jump/fall through to false label will be used if the comparison
+ // was unordered.
+ case kEqual:
+ __ B(eq, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ // Unordered or not equal can be tested with the "ne" condition.
+ // See ARMv8 manual C1.2.3 - Condition Code.
+ case kNotEqual:
+ __ B(ne, tlabel);
+ break;
+ case kSignedLessThan:
+ __ B(lt, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ B(ge, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ B(le, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ B(gt, tlabel);
+ break;
+ case kUnorderedLessThan:
+ // The "lo" condition will not catch the unordered case.
+ // The jump/fall through to false label will be used if the comparison
+ // was unordered.
+ case kUnsignedLessThan:
+ __ B(lo, tlabel);
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ // Unordered, greater than or equal can be tested with the "hs" condition.
+ // See ARMv8 manual C1.2.3 - Condition Code.
+ case kUnsignedGreaterThanOrEqual:
+ __ B(hs, tlabel);
+ break;
+ case kUnorderedLessThanOrEqual:
+ // The "ls" condition will not catch the unordered case.
+ // The jump/fall through to false label will be used if the comparison
+ // was unordered.
+ case kUnsignedLessThanOrEqual:
+ __ B(ls, tlabel);
+ break;
+ case kUnorderedGreaterThan:
+ // Unordered or greater than can be tested with the "hi" condition.
+ // See ARMv8 manual C1.2.3 - Condition Code.
+ case kUnsignedGreaterThan:
+ __ B(hi, tlabel);
+ break;
+ case kOverflow:
+ __ B(vs, tlabel);
+ break;
+ case kNotOverflow:
+ __ B(vc, tlabel);
+ break;
+ }
+ }
+ if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
+}
+
+
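+// Unconditional jumps to the next block in assembly order are elided.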
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}
@@ -757,7 +934,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = vc;
break;
}
- __ bind(&check);
+ __ Bind(&check);
__ Cset(reg, cc);
__ Bind(&done);
}
@@ -792,23 +969,6 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
- Label ok;
- // +2 for return address and saved frame pointer.
- int receiver_slot = info->scope()->num_parameters() + 2;
- __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
- __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
- __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
- __ Bind(&ok);
- }
-
} else {
__ SetStackPointer(jssp);
__ StubPrologue();
@@ -966,8 +1126,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
} else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
UseScratchRegisterScope scope(masm());
- CPURegister temp_0 = scope.AcquireX();
- CPURegister temp_1 = scope.AcquireX();
+ DoubleRegister temp_0 = scope.AcquireD();
+ DoubleRegister temp_1 = scope.AcquireD();
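+ // A D-register Ldr/Str moves the raw 64-bit pattern, so the same swap
+ // sequence serves both integer and double stack slots.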
MemOperand src = g.ToMemOperand(source, masm());
MemOperand dst = g.ToMemOperand(destination, masm());
__ Ldr(temp_0, src);