summary | refs | log | tree | commit | diff
path: root/deps/v8/src/compiler/x87/code-generator-x87.cc
diff options
context:
space:
mode:
author: Michaël Zasso <targos@protonmail.com> 2016-09-06 22:49:51 +0200
committer: Michaël Zasso <targos@protonmail.com> 2016-09-22 09:51:19 +0200
commitec02b811a8a5c999bab4de312be2d732b7d9d50b (patch)
treeca3068017254f238cf413a451c57a803572983a4 /deps/v8/src/compiler/x87/code-generator-x87.cc
parentd2eb7ce0105369a9cad82787cb33a665e9bd00ad (diff)
downloadnode-new-ec02b811a8a5c999bab4de312be2d732b7d9d50b.tar.gz
deps: update V8 to 5.4.500.27
Pick up latest commit from the 5.4-lkgr branch. deps: edit V8 gitignore to allow trace event copy deps: update V8 trace event to 315bf1e2d45be7d53346c31cfcc37424a32c30c8 deps: edit V8 gitignore to allow gtest_prod.h copy deps: update V8 gtest to 6f8a66431cb592dad629028a50b3dd418a408c87 PR-URL: https://github.com/nodejs/node/pull/8317 Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl> Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/compiler/x87/code-generator-x87.cc')
-rw-r--r-- deps/v8/src/compiler/x87/code-generator-x87.cc | 776
1 file changed, 532 insertions(+), 244 deletions(-)
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index da7fdb481b..1064e622eb 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -42,7 +42,7 @@ class X87OperandConverter : public InstructionOperandConverter {
DCHECK(extra == 0);
return Operand(ToRegister(op));
}
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
@@ -53,12 +53,19 @@ class X87OperandConverter : public InstructionOperandConverter {
}
Operand HighOperand(InstructionOperand* op) {
- DCHECK(op->IsDoubleStackSlot());
+ DCHECK(op->IsFPStackSlot());
return ToOperand(op, kPointerSize);
}
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
+ if (constant.type() == Constant::kInt32 &&
+ (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+ return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
+ constant.rmode());
+ }
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
@@ -107,8 +114,8 @@ class X87OperandConverter : public InstructionOperandConverter {
}
case kMode_MRI: {
Register base = InputRegister(NextOffset(offset));
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(base, ctant.ToInt32(), ctant.rmode());
}
case kMode_MR1:
case kMode_MR2:
@@ -127,8 +134,8 @@ class X87OperandConverter : public InstructionOperandConverter {
Register base = InputRegister(NextOffset(offset));
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, index, scale, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_M1:
case kMode_M2:
@@ -145,12 +152,12 @@ class X87OperandConverter : public InstructionOperandConverter {
case kMode_M8I: {
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_M1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(index, scale, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_MI: {
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(Immediate(disp));
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(ctant.ToInt32(), ctant.rmode());
}
case kMode_None:
UNREACHABLE();
@@ -184,21 +191,35 @@ class OutOfLineLoadInteger final : public OutOfLineCode {
Register const result_;
};
+class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat32NaN(CodeGenerator* gen, X87Register result)
+ : OutOfLineCode(gen), result_(result) {}
-class OutOfLineLoadFloat final : public OutOfLineCode {
+ void Generate() final {
+ DCHECK(result_.code() == 0);
+ USE(result_);
+ __ fstp(0);
+ __ push(Immediate(0xffc00000));
+ __ fld_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kFloatSize));
+ }
+
+ private:
+ X87Register const result_;
+};
+
+class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
public:
- OutOfLineLoadFloat(CodeGenerator* gen, X87Register result)
+ OutOfLineLoadFloat64NaN(CodeGenerator* gen, X87Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
DCHECK(result_.code() == 0);
USE(result_);
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
__ fstp(0);
- __ push(Immediate(0xffffffff));
- __ push(Immediate(0x7fffffff));
+ __ push(Immediate(0xfff80000));
+ __ push(Immediate(0x00000000));
__ fld_d(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kDoubleSize));
}
@@ -207,7 +228,6 @@ class OutOfLineLoadFloat final : public OutOfLineCode {
X87Register const result_;
};
-
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
@@ -268,25 +288,23 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- auto offset = i.InputRegister(0); \
- DCHECK(result.code() == 0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
- __ j(above_equal, ool->entry()); \
- __ fstp(0); \
- __ asm_instr(i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
+ do { \
+ auto result = i.OutputDoubleRegister(); \
+ auto offset = i.InputRegister(0); \
+ DCHECK(result.code() == 0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ fstp(0); \
+ __ asm_instr(i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
} while (false)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
@@ -364,31 +382,56 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* Saves the esp into ebx */ \
+ __ push(ebx); \
+ __ mov(ebx, esp); \
+ /* Pass one double as argument on the stack. */ \
+ __ PrepareCallCFunction(4, eax); \
+ __ fstp(0); \
+ /* Load first operand from original stack */ \
+ __ fld_d(MemOperand(ebx, 4 + kDoubleSize)); \
+ /* Put first operand into stack for function call */ \
+ __ fstp_d(Operand(esp, 0 * kDoubleSize)); \
+ /* Load second operand from original stack */ \
+ __ fld_d(MemOperand(ebx, 4)); \
+ /* Put second operand into stack for function call */ \
+ __ fstp_d(Operand(esp, 1 * kDoubleSize)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 4); \
+ /* Restore the ebx */ \
+ __ pop(ebx); \
+ /* Return value is in st(0) on x87. */ \
+ __ lea(esp, Operand(esp, 2 * kDoubleSize)); \
+ } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* Saves the esp into ebx */ \
+ __ push(ebx); \
+ __ mov(ebx, esp); \
+ /* Pass one double as argument on the stack. */ \
+ __ PrepareCallCFunction(2, eax); \
+ __ fstp(0); \
+ /* Load operand from original stack */ \
+ __ fld_d(MemOperand(ebx, 4)); \
+ /* Put operand into stack for function call */ \
+ __ fstp_d(Operand(esp, 0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 2); \
+ /* Restore the ebx */ \
+ __ pop(ebx); \
+ /* Return value is in st(0) on x87. */ \
+ __ lea(esp, Operand(esp, kDoubleSize)); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
}
-// For insert fninit/fld1 instructions after the Prologue
-thread_local bool is_block_0 = false;
-
-void CodeGenerator::AssembleSetupStackPointer() { is_block_0 = true; }
-
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ add(esp, Immediate(sp_slot_delta * kPointerSize));
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ mov(ebp, MemOperand(ebp, 0));
}
@@ -433,19 +476,75 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+ ZoneVector<MoveOperands*> pushes(zone());
+ GetPushCompatibleMoves(instr, flags, &pushes);
+
+ if (!pushes.empty() &&
+ (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+ first_unused_stack_slot)) {
+ X87OperandConverter g(this, instr);
+ for (auto move : pushes) {
+ LocationOperand destination_location(
+ LocationOperand::cast(move->destination()));
+ InstructionOperand source(move->source());
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ destination_location.index());
+ if (source.IsStackSlot()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ push(g.SlotToOperand(source_location.index()));
+ } else if (source.IsRegister()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ push(source_location.GetRegister());
+ } else if (source.IsImmediate()) {
+ __ push(Immediate(ImmediateOperand::cast(source).inline_value()));
+ } else {
+ // Pushes of non-scalar data types is not supported.
+ UNIMPLEMENTED();
+ }
+ frame_access_state()->IncreaseSPDelta(1);
+ move->Eliminate();
+ }
+ }
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
X87OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
- // Workaround for CL #35139 (https://codereview.chromium.org/1775323002)
- if (is_block_0) {
- __ fninit();
- __ fld1();
- is_block_0 = false;
- }
-
switch (arch_opcode) {
case kArchCallCodeObject: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -463,7 +562,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
RecordCallPosition(instr);
bool double_result =
- instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ instr->HasOutput() && instr->Output()->IsFPRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
@@ -484,8 +583,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
@@ -499,6 +596,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(reg);
}
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!HasImmediateInput(instr, 0));
+ Register reg = i.InputRegister(0);
+ __ jmp(reg);
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -516,7 +622,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
RecordCallPosition(instr);
bool double_result =
- instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ instr->HasOutput() && instr->Output()->IsFPRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
@@ -543,14 +649,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
}
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchPrepareCallCFunction: {
@@ -561,7 +666,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -577,7 +682,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ CallCFunction(func, num_parameters);
}
bool double_result =
- instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ instr->HasOutput() && instr->Output()->IsFPRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
@@ -602,6 +707,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
+ case kArchDebugBreak:
+ __ int3();
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -612,7 +728,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
int double_register_param_count = 0;
int x87_layout = 0;
for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsDoubleRegister()) {
+ if (instr->InputAt(i)->IsFPRegister()) {
double_register_param_count++;
}
}
@@ -630,7 +746,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -650,11 +768,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kArchTruncateDoubleToI: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_d(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister());
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
@@ -689,6 +807,84 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Cos:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(cos);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Expm1:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow: {
+ // Keep the x87 FPU stack empty before calling stub code
+ __ fstp(0);
+ // Call the MathStub and put return value in stX_0
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ /* Return value is in st(0) on x87. */
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kIeee754Float64Sin:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(sin);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(tan);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
case kX87Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -900,7 +1096,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ mov(MemOperand(esp, 0), Immediate(lower));
__ mov(MemOperand(esp, kInt32Size), Immediate(upper));
@@ -979,110 +1175,34 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ X87SetFPUCW(0x037F);
break;
}
- case kX87Float32Max: {
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = below;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
- // At least one NaN.
- // Return the second operands if one of the two operands is NaN
- __ j(parity_even, &return_right, Label::kNear);
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- __ fld(0);
- __ fldz();
- __ FCmp();
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
-
- __ fadd(1);
- __ jmp(&return_left, Label::kNear);
-
- __ bind(&return_right);
- __ fxch();
-
- __ bind(&return_left);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- break;
- }
- case kX87Float32Min: {
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = above;
+ case kX87Float32Sqrt: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
- // At least one NaN.
- // Return the second operands if one of the two operands is NaN
- __ j(parity_even, &return_right, Label::kNear);
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- __ fld(0);
- __ fldz();
- __ FCmp();
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- // Push st0 and st1 to stack, then pop them to temp registers and OR them,
- // load it to left.
- __ push(eax);
- __ fld(1);
- __ fld(1);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fstp_s(MemOperand(esp, kPointerSize));
- __ pop(eax);
- __ xor_(MemOperand(esp, 0), eax);
- __ fstp(0);
__ fld_s(MemOperand(esp, 0));
- __ pop(eax); // restore esp
- __ pop(eax); // restore esp
- __ jmp(&return_left, Label::kNear);
-
-
- __ bind(&return_right);
- __ fxch();
-
- __ bind(&return_left);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kFloatSize));
+ __ fsqrt();
+ __ lea(esp, Operand(esp, kFloatSize));
break;
}
- case kX87Float32Sqrt: {
+ case kX87Float32Abs: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
- __ fsqrt();
+ __ fabs();
__ lea(esp, Operand(esp, kFloatSize));
break;
}
- case kX87Float32Abs: {
+ case kX87Float32Neg: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
- __ fabs();
+ __ fchs();
__ lea(esp, Operand(esp, kFloatSize));
break;
}
@@ -1092,10 +1212,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Set the correct round mode in x87 control register
__ X87SetRC((mode << 10));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1180,9 +1300,44 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
+ case kX87Float32Max: {
+ Label compare_swap, done_compare;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+
+ auto ool =
+ new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(below, &done_compare, Label::kNear);
+ __ j(above, &compare_swap, Label::kNear);
+ __ push(eax);
+ __ lea(esp, Operand(esp, -kFloatSize));
+ __ fld(1);
+ __ fstp_s(Operand(esp, 0));
+ __ mov(eax, MemOperand(esp, 0));
+ __ and_(eax, Immediate(0x80000000));
+ __ lea(esp, Operand(esp, kFloatSize));
+ __ pop(eax);
+ __ j(zero, &done_compare, Label::kNear);
+
+ __ bind(&compare_swap);
+ __ bind(ool->exit());
+ __ fxch(1);
+
+ __ bind(&done_compare);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
case kX87Float64Max: {
- Label check_zero, return_left, return_right;
- Condition condition = below;
+ Label compare_swap, done_compare;
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1192,29 +1347,69 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fld(1);
__ fld(1);
__ FCmp();
- __ j(parity_even, &return_right,
- Label::kNear); // At least one NaN, Return right.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
- __ bind(&check_zero);
- __ fld(0);
- __ fldz();
- __ FCmp();
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ auto ool =
+ new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(below, &done_compare, Label::kNear);
+ __ j(above, &compare_swap, Label::kNear);
+ __ push(eax);
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fld(1);
+ __ fstp_d(Operand(esp, 0));
+ __ mov(eax, MemOperand(esp, 4));
+ __ and_(eax, Immediate(0x80000000));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ __ pop(eax);
+ __ j(zero, &done_compare, Label::kNear);
- __ bind(&return_right);
- __ fxch();
+ __ bind(&compare_swap);
+ __ bind(ool->exit());
+ __ fxch(1);
- __ bind(&return_left);
+ __ bind(&done_compare);
__ fstp(0);
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
+ case kX87Float32Min: {
+ Label compare_swap, done_compare;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+
+ auto ool =
+ new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(above, &done_compare, Label::kNear);
+ __ j(below, &compare_swap, Label::kNear);
+ __ push(eax);
+ __ lea(esp, Operand(esp, -kFloatSize));
+ __ fld(0);
+ __ fstp_s(Operand(esp, 0));
+ __ mov(eax, MemOperand(esp, 0));
+ __ and_(eax, Immediate(0x80000000));
+ __ lea(esp, Operand(esp, kFloatSize));
+ __ pop(eax);
+ __ j(zero, &done_compare, Label::kNear);
+
+ __ bind(&compare_swap);
+ __ bind(ool->exit());
+ __ fxch(1);
+
+ __ bind(&done_compare);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
case kX87Float64Min: {
- Label check_zero, return_left, return_right;
- Condition condition = above;
+ Label compare_swap, done_compare;
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1224,22 +1419,27 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fld(1);
__ fld(1);
__ FCmp();
- __ j(parity_even, &return_right,
- Label::kNear); // At least one NaN, return right value.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
- __ bind(&check_zero);
+ auto ool =
+ new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(above, &done_compare, Label::kNear);
+ __ j(below, &compare_swap, Label::kNear);
+ __ push(eax);
+ __ lea(esp, Operand(esp, -kDoubleSize));
__ fld(0);
- __ fldz();
- __ FCmp();
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ __ fstp_d(Operand(esp, 0));
+ __ mov(eax, MemOperand(esp, 4));
+ __ and_(eax, Immediate(0x80000000));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ __ pop(eax);
+ __ j(zero, &done_compare, Label::kNear);
- __ bind(&return_right);
- __ fxch();
+ __ bind(&compare_swap);
+ __ bind(ool->exit());
+ __ fxch(1);
- __ bind(&return_left);
+ __ bind(&done_compare);
__ fstp(0);
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
@@ -1254,6 +1454,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(esp, Operand(esp, kDoubleSize));
break;
}
+ case kX87Float64Neg: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fchs();
+ __ lea(esp, Operand(esp, kDoubleSize));
+ break;
+ }
case kX87Int32ToFloat32: {
InstructionOperand* input = instr->InputAt(0);
DCHECK(input->IsRegister() || input->IsStackSlot());
@@ -1333,13 +1543,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kX87Float32ToFloat64: {
InstructionOperand* input = instr->InputAt(0);
- if (input->IsDoubleRegister()) {
+ if (input->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_s(MemOperand(esp, 0));
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1357,54 +1567,58 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float32ToInt32: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_s(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float32ToUint32: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_s(i.InputOperand(0));
}
Label success;
__ TruncateX87TOSToI(i.OutputRegister(0));
__ test(i.OutputRegister(0), i.OutputRegister(0));
__ j(positive, &success);
+ // Need to reserve the input float32 data.
+ __ fld(0);
__ push(Immediate(INT32_MIN));
__ fild_s(Operand(esp, 0));
__ lea(esp, Operand(esp, kPointerSize));
__ faddp();
__ TruncateX87TOSToI(i.OutputRegister(0));
__ or_(i.OutputRegister(0), Immediate(0x80000000));
+ // Only keep input float32 data in x87 stack when return.
+ __ fstp(0);
__ bind(&success);
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ToInt32: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_d(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ToFloat32: {
InstructionOperand* input = instr->InputAt(0);
- if (input->IsDoubleRegister()) {
+ if (input->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_s(MemOperand(esp, 0));
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1419,7 +1633,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kX87Float64ToUint32: {
__ push_imm32(-2147483648);
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_d(i.InputOperand(0));
}
__ fild_s(Operand(esp, 0));
@@ -1429,13 +1643,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(esp, Immediate(kInt32Size));
__ add(i.OutputRegister(), Immediate(0x80000000));
__ fstp(0);
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ExtractHighWord32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
@@ -1443,13 +1657,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
}
break;
}
case kX87Float64ExtractLowWord32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ mov(i.OutputRegister(), MemOperand(esp, 0));
@@ -1457,7 +1671,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
__ mov(i.OutputRegister(), i.InputOperand(0));
}
break;
@@ -1496,10 +1710,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Set the correct round mode in x87 control register
__ X87SetRC((mode << 10));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1517,6 +1731,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
+ case kX87Float64SilenceNaN: {
+ Label end, return_qnan;
+ __ fstp(0);
+ __ push(ebx);
+ // Load half word of HoleNaN(SNaN) into ebx
+ __ mov(ebx, MemOperand(esp, 2 * kInt32Size));
+ __ cmp(ebx, Immediate(kHoleNanUpper32));
+ // Check input is HoleNaN(SNaN)?
+ __ j(equal, &return_qnan, Label::kNear);
+ // If input isn't HoleNaN(SNaN), just load it and return
+ __ fld_d(MemOperand(esp, 1 * kInt32Size));
+ __ jmp(&end);
+ __ bind(&return_qnan);
+ // If input is HoleNaN(SNaN), Return QNaN
+ __ push(Immediate(0xffffffff));
+ __ push(Immediate(0xfff7ffff));
+ __ fld_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ __ bind(&end);
+ __ pop(ebx);
+ // Clear stack.
+ __ lea(esp, Operand(esp, 1 * kDoubleSize));
+ break;
+ }
case kX87Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1652,30 +1890,32 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kFloatSize));
__ fst_s(Operand(esp, 0));
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else {
DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(Operand(esp, 0));
- }
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- } else if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ }
+ } else if (instr->InputAt(0)->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kFloatSize));
__ fld_s(i.InputOperand(0));
__ fstp_s(MemOperand(esp, 0));
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else {
DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fld_d(i.InputOperand(0));
__ fstp_d(MemOperand(esp, 0));
- }
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ }
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
@@ -1693,12 +1933,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kX87Xchgb: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_b(i.InputRegister(index), operand);
+ break;
+ }
+ case kX87Xchgw: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_w(i.InputRegister(index), operand);
+ break;
+ }
+ case kX87Xchgl: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg(i.InputRegister(index), operand);
+ break;
+ }
case kX87PushFloat32:
__ lea(esp, Operand(esp, -kFloatSize));
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ fld_s(i.InputOperand(0));
__ fstp_s(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ fst_s(MemOperand(esp, 0));
} else {
UNREACHABLE();
@@ -1706,10 +1964,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kX87PushFloat64:
__ lea(esp, Operand(esp, -kDoubleSize));
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ fld_d(i.InputOperand(0));
__ fstp_d(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ fst_d(MemOperand(esp, 0));
} else {
UNREACHABLE();
@@ -1731,10 +1989,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s, OutOfLineLoadFloat32NaN);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d, OutOfLineLoadFloat64NaN);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
@@ -1761,7 +2019,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
+ UNREACHABLE(); // Won't be generated by instruction selector.
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1837,7 +2106,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
int double_register_param_count = 0;
int x87_layout = 0;
for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsDoubleRegister()) {
+ if (instr->InputAt(i)->IsFPRegister()) {
double_register_param_count++;
}
}
@@ -1971,12 +2240,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ jmp(Operand::JumpTable(input, times_4, table));
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
@@ -2107,8 +2380,25 @@ void CodeGenerator::AssembleDeoptimizerCall(
// | RET | args | caller frame |
// ^ esp ^ ebp
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ DCHECK(!info()->is_osr());
+ int pushed = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ ++pushed;
+ }
+ frame->AllocateSavedCalleeRegisterSlots(pushed);
+ }
+
+ // Initialize FPU state.
+ __ fninit();
+ __ fld1();
+}
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -2120,7 +2410,9 @@ void CodeGenerator::AssemblePrologue() {
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -2131,7 +2423,7 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
// Initailize FPU state.
__ fninit();
@@ -2139,8 +2431,8 @@ void CodeGenerator::AssemblePrologue() {
}
const RegList saves = descriptor->CalleeSavedRegisters();
- if (stack_shrink_slots > 0) {
- __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ sub(esp, Immediate(shrink_slots * kPointerSize));
}
if (saves != 0) { // Save callee-saved registers.
@@ -2151,7 +2443,6 @@ void CodeGenerator::AssemblePrologue() {
__ push(Register::from_code(i));
++pushed;
}
- frame()->AllocateSavedCalleeRegisterSlots(pushed);
}
}
@@ -2263,7 +2554,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kInt32Size));
__ mov(MemOperand(esp, 0), Immediate(src));
// always only push one value into the x87 stack.
@@ -2271,7 +2562,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kInt32Size));
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ Move(dst, Immediate(src));
}
@@ -2280,7 +2571,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ mov(MemOperand(esp, 0), Immediate(lower));
__ mov(MemOperand(esp, kInt32Size), Immediate(upper));
@@ -2289,15 +2580,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ fld_d(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst0 = g.ToOperand(destination);
Operand dst1 = g.HighOperand(destination);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
}
- } else if (source->IsDoubleRegister()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPRegister()) {
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.representation()) {
@@ -2310,11 +2601,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
default:
UNREACHABLE();
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
Operand src = g.ToOperand(source);
auto allocated = AllocatedOperand::cast(*source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
// always only push one value into the x87 stack.
__ fstp(0);
switch (allocated.representation()) {
@@ -2373,9 +2664,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
frame_access_state()->IncreaseSPDelta(-1);
Operand src2 = g.ToOperand(source);
__ pop(src2);
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ } else if (source->IsFPRegister() && destination->IsFPRegister()) {
UNREACHABLE();
- } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.representation()) {
case MachineRepresentation::kFloat32:
@@ -2391,7 +2682,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
default:
UNREACHABLE();
}
- } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.representation()) {
case MachineRepresentation::kFloat32:
@@ -2423,9 +2714,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;