Diffstat (limited to 'deps/v8/src/compiler/backend/s390/code-generator-s390.cc')
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 219
1 file changed, 130 insertions(+), 89 deletions(-)
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 27b2a5a853..510616a82f 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -22,7 +22,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
#define kScratchReg ip
@@ -99,6 +99,9 @@ class S390OperandConverter final : public InstructionOperandConverter {
*first_index += 3;
return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
InputInt32(index + 2));
+ case kMode_Root:
+ *first_index += 1;
+ return MemOperand(kRootRegister, InputInt32(index));
}
UNREACHABLE();
}
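
A note on the new kMode_Root case: every addressing mode consumes a fixed number of instruction inputs and advances *first_index past them, and root mode folds isolate-root accesses into a single base-plus-displacement operand off kRootRegister. A minimal sketch of that operand-consumption contract, with simplified types and hypothetical names:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct SimpleMemOperand { int base_reg; int32_t displacement; };

    // Sketch: a mode reads its inputs starting at *first_index and must
    // advance the index past everything it consumed, so later operands of
    // the same instruction decode correctly. kMode_Root uses one input.
    SimpleMemOperand RootMemOperand(const std::vector<int32_t>& inputs,
                                    size_t* first_index, int root_reg) {
      size_t index = *first_index;
      *first_index += 1;
      return {root_reg, inputs[index]};
    }
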
@@ -209,12 +212,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
void Generate() final {
if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(value_, value_);
+ __ DecompressTagged(value_, value_);
}
- __ CheckPageFlag(
- value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, eq,
- exit());
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
if (offset_ == no_reg) {
__ AddS64(scratch1_, object_, Operand(offset_immediate_));
} else {
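
The out-of-line write barrier first re-inflates the compressed value, then skips the slow path unless the value's page is interesting to the GC. CheckPageFlag relies on memory-chunk headers sitting at page-aligned addresses; a scalar sketch of the test follows, where the page size, flags offset, and mask are stand-in assumptions rather than V8's real constants:

    #include <cstdint>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 18;               // assumption
    constexpr uintptr_t kFlagsOffset = 0;                             // assumption
    constexpr uintptr_t kPointersToHereAreInterestingMask = 1u << 1;  // assumption

    // Mask the address down to its page-aligned chunk header and test the
    // flag word stored there.
    bool PointersToHereAreInteresting(uintptr_t object_address) {
      uintptr_t chunk = object_address & ~(kPageSize - 1);
      uintptr_t flags = *reinterpret_cast<const uintptr_t*>(chunk + kFlagsOffset);
      return (flags & kPointersToHereAreInterestingMask) != 0;
    }
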
@@ -321,6 +323,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kS390_Abs64:
case kS390_Abs32:
case kS390_Mul32:
+ case kS390_Mul64WithOverflow:
return nooverflow;
default:
break;
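
For context on kS390_Mul64WithOverflow mapping to nooverflow: a signed 64-bit multiply overflows exactly when its full 128-bit product does not round-trip through 64 bits. A portable check, assuming the GCC/Clang __int128 extension:

    #include <cstdint>

    // True iff a * b cannot be represented in int64_t.
    bool Mul64Overflows(int64_t a, int64_t b) {
      __int128 product = static_cast<__int128>(a) * b;
      return product != static_cast<int64_t>(product);
    }
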
@@ -618,7 +621,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
#define ASSEMBLE_FLOAT_MODULO() \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -630,7 +633,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -642,7 +645,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -1020,20 +1023,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
-void FlushPendingPushRegisters(TurboAssembler* tasm,
+void FlushPendingPushRegisters(MacroAssembler* masm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- tasm->Push((*pending_pushes)[0]);
+ masm->Push((*pending_pushes)[0]);
break;
case 2:
- tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -1044,7 +1047,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
}
void AdjustStackPointerForTailCall(
- TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
+ MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -1052,15 +1055,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(tasm, state, pending_pushes);
+ FlushPendingPushRegisters(masm, state, pending_pushes);
}
- tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
+ masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(tasm, state, pending_pushes);
+ FlushPendingPushRegisters(masm, state, pending_pushes);
}
- tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
+ masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
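
The renamed helper's behavior is unchanged: it computes the slot delta between where sp is and where the tail call needs it, flushes any deferred pushes, and moves sp in one AddS64. A simplified sketch of just the delta arithmetic (a positive delta grows the stack, i.e. moves sp down):

    #include <cstdint>

    constexpr int kSystemPointerSize = 8;  // s390x

    // Byte adjustment applied to sp so the first unused slot lands at
    // new_slot_above_sp; mirrors AddS64(sp, sp, -delta * pointer size).
    intptr_t SpAdjustmentBytes(int new_slot_above_sp, int current_sp_offset) {
      int stack_slot_delta = new_slot_above_sp - current_sp_offset;
      return -static_cast<intptr_t>(stack_slot_delta) * kSystemPointerSize;
    }
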
@@ -1082,7 +1085,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- tasm(), frame_access_state(),
+ masm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
// Pushes of non-register data types are not supported.
@@ -1092,20 +1095,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
// TODO(arm): We can push more than 3 registers at once. Add support in
// the macro-assembler for pushing a list of registers.
if (pending_pushes.size() == 3) {
- FlushPendingPushRegisters(tasm(), frame_access_state(),
+ FlushPendingPushRegisters(masm(), frame_access_state(),
&pending_pushes);
}
move->Eliminate();
}
- FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
@@ -1121,7 +1124,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -1132,11 +1135,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadTaggedPointerField(
- ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
- __ LoadS32(ip,
- FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
+ __ LoadTaggedField(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset),
+ r0);
+ __ LoadU16(ip, FieldMemOperand(ip, Code::kKindSpecificFlagsOffset));
__ TestBit(ip, Code::kMarkedForDeoptimizationBit);
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne);
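
Putting the three steps together, the test the generated code performs on the loaded 16-bit flag word amounts to the following (the bit position is an assumption):

    #include <cstdint>

    constexpr int kMarkedForDeoptimizationBit = 0;  // assumption

    // The LoadU16 above fetches the Code object's kind-specific flags;
    // this is the bit test that decides whether to jump to the builtin.
    bool MarkedForDeoptimization(uint16_t kind_specific_flags) {
      return (kind_specific_flags >> kMarkedForDeoptimizationBit) & 1u;
    }
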
@@ -1218,7 +1220,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
- ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
}
frame_access_state()->ClearSPDelta();
@@ -1240,14 +1242,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
// Check the function's context matches the context argument.
- __ LoadTaggedPointerField(
- kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ LoadTaggedField(kScratchReg,
+ FieldMemOperand(func, JSFunction::kContextOffset));
__ CmpS64(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadTaggedPointerField(r4,
- FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ LoadTaggedField(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(r4);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1351,7 +1352,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -1410,8 +1411,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
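
RecordWriteModeField::decode replaces the raw static_cast over MiscField with a typed bit-field accessor. The pattern behind it is a BitField template along these lines, where the shift and width shown are hypothetical, not the actual opcode encoding:

    #include <cstdint>

    enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

    // Minimal BitField-style helper: extract kSize bits at kShift and cast
    // them to the field's type in one step.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static T decode(uint32_t word) {
        return static_cast<T>((word >> kShift) & ((1u << kSize) - 1u));
      }
    };

    using RecordWriteModeField = BitField<RecordWriteMode, 22, 3>;  // hypothetical
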
@@ -1725,7 +1725,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_BIN_OP(RRRInstr(MulHighU64), nullInstr, nullInstr);
break;
case kS390_MulHighS64:
- ASSEMBLE_BIN_OP(RRRInstr(MulHighS64), nullInstr, nullInstr);
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ ASSEMBLE_BIN_OP(RRRInstr(MulHighS64), nullInstr, nullInstr);
+ } else {
+ __ Push(r2, r3, i.InputRegister(0), i.InputRegister(1));
+ __ Pop(r2, r3);
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, 0, kScratchReg);
+ __ CallCFunction(ExternalReference::int64_mul_high_function(), 2, 0);
+ }
+ __ mov(kScratchReg, r2);
+ __ Pop(r2, r3);
+ __ mov(i.OutputRegister(), kScratchReg);
+ }
break;
case kS390_MulFloat:
ASSEMBLE_BIN_OP(DDInstr(meebr), DMTInstr(MulFloat32), nullInstr);
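
The new fallback spills r2/r3, feeds the operands to a C helper under an internal frame, and moves the result out through the scratch register, because pre-MISC_INSTR_EXT2 cores lack a 64-bit multiply-high instruction. What the helper has to compute is the upper half of the full signed product; a portable equivalent (relies on __int128 and arithmetic right shift, as on GCC/Clang):

    #include <cstdint>

    // Upper 64 bits of the signed 128-bit product: the value the opcode
    // must leave in the output register.
    int64_t MulHighS64(int64_t a, int64_t b) {
      return static_cast<int64_t>((static_cast<__int128>(a) * b) >> 64);
    }
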
@@ -3004,6 +3017,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
+
+ case kS390_I16x8DotI8x16S: {
+ __ I16x8DotI8x16S(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ case kS390_I32x4DotI8x16AddS: {
+ __ I32x4DotI8x16AddS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg, i.TempSimd128Register(0));
+ break;
+ }
case kS390_I16x8Q15MulRSatS: {
__ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), kScratchDoubleReg,
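
The two new opcodes back Wasm's relaxed dot-product instructions: adjacent pairs of signed bytes are multiplied and pair-summed into 16-bit lanes, and the AddS form further pair-sums into 32-bit lanes with an accumulator. A scalar reference for the first, intended as a semantic sketch rather than V8's lowering:

    #include <cstdint>

    void I16x8DotI8x16S(int16_t out[8], const int8_t a[16], const int8_t b[16]) {
      for (int i = 0; i < 8; ++i) {
        // Each int8 product fits in 16 bits; the pair sum may wrap, matching
        // the instruction's wrapping 16-bit lane arithmetic.
        out[i] = static_cast<int16_t>(a[2 * i] * b[2 * i] +
                                      a[2 * i + 1] * b[2 * i + 1]);
      }
    }
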
@@ -3175,14 +3200,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
break;
}
- case kS390_LoadDecompressTaggedPointer: {
- CHECK(instr->HasOutput());
- __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
- break;
- }
- case kS390_LoadDecompressAnyTagged: {
+ case kS390_LoadDecompressTagged: {
CHECK(instr->HasOutput());
- __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+ __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
break;
}
default:
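
Collapsing kS390_LoadDecompressTaggedPointer and kS390_LoadDecompressAnyTagged into a single kS390_LoadDecompressTagged mirrors the unified DecompressTagged macro: under pointer compression every tagged value is reconstructed the same way, by adding the 32-bit compressed word to the cage base. In scalar form:

    #include <cstdint>

    // A compressed tagged value is the low 32 bits of the full pointer;
    // decompression adds it back onto the 4GB-aligned cage base.
    uintptr_t DecompressTagged(uintptr_t cage_base, uint32_t compressed) {
      return cage_base + compressed;
    }
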
@@ -3300,7 +3320,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Overflow checked for add/sub only.
DCHECK((condition != kOverflow && condition != kNotOverflow) ||
(op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
- op == kS390_Sub64 || op == kS390_Mul32));
+ op == kS390_Sub64 || op == kS390_Mul32 ||
+ op == kS390_Mul64WithOverflow));
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
@@ -3404,6 +3425,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsWasmFunctionCall() ||
call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
+ // For import wrappers and C-API functions, this stack slot is only used
+ // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef
+ // instead of the instance itself, which is taken care of in the frames
+ // accessors.
__ Push(kWasmInstanceRegister);
}
if (call_descriptor->IsWasmCapiFunction()) {
@@ -3575,9 +3600,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ mov(argc_reg, Operand(parameter_slots));
__ bind(&skip);
}
- __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
@@ -3596,9 +3621,59 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
- // Must be kept in sync with {MoveTempLocationTo}.
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = ElementSizeInPointers(rep);
+ S390OperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsFloatStackSlot() || source->IsDoubleStackSlot()) {
+ __ LoadU64(r1, g.ToMemOperand(source));
+ __ Push(r1);
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // Bump the stack pointer and assemble the move.
+ __ lay(sp, MemOperand(sp, -(new_slots * kSystemPointerSize)));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ int new_slots = ElementSizeInPointers(rep);
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ S390OperandConverter g(this, nullptr);
+ if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
+ __ Pop(r1);
+ __ StoreU64(r1, g.ToMemOperand(dest));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ lay(sp, MemOperand(sp, new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ lay(sp, MemOperand(sp, temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
+ // Must be kept in sync with {MoveTempLocationTo}.
if (!IsFloatingPoint(rep) ||
((IsFloatingPoint(rep) &&
!move_cycle_.pending_double_scratch_register_use))) {
@@ -3609,24 +3684,8 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
DCHECK(!AreAliased(kScratchReg, r0, r1));
AssembleMove(source, &scratch);
} else {
- DCHECK(!source->IsRegister() && !source->IsStackSlot());
// The scratch register is blocked by pending moves. Use the stack instead.
- int new_slots = ElementSizeInPointers(rep);
- S390OperandConverter g(this, nullptr);
- if (source->IsFloatStackSlot() || source->IsDoubleStackSlot()) {
- __ LoadU64(r1, g.ToMemOperand(source));
- __ Push(r1);
- } else {
- // Bump the stack pointer and assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ lay(sp, MemOperand(sp, -(new_slots * kSystemPointerSize)));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
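
MoveToTempLocation and MoveTempLocationTo now share the stack fallback through the Push/Pop helpers introduced above. The slot-id arithmetic they rely on: stack slots are numbered from the frame base, so a freshly pushed temporary occupies last_frame_slot_id + sp_delta + new_slots. As a standalone sketch:

    // Slot id of a value just pushed below the frame: one past the last
    // frame slot, offset by the current sp drift and the value's own size.
    int TempSlotId(int total_frame_slot_count, int sp_delta, int new_slots) {
      int last_frame_slot_id = total_frame_slot_count - 1;
      return last_frame_slot_id + sp_delta + new_slots;
    }
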
@@ -3641,32 +3700,14 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
DCHECK(!AreAliased(kScratchReg, r0, r1));
AssembleMove(&scratch, dest);
} else {
- DCHECK(!dest->IsRegister() && !dest->IsStackSlot());
- S390OperandConverter g(this, nullptr);
- int new_slots = ElementSizeInPointers(rep);
- frame_access_state()->IncreaseSPDelta(-new_slots);
- if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
- __ Pop(r1);
- __ StoreU64(r1, g.ToMemOperand(dest));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ lay(sp, MemOperand(sp, new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
move_cycle_ = MoveCycleState();
}
void CodeGenerator::SetPendingMove(MoveOperands* move) {
- if (move->source().IsFPStackSlot() && !move->destination().IsFPRegister()) {
- move_cycle_.pending_double_scratch_register_use = true;
- } else if (move->source().IsConstant() &&
- (move->destination().IsDoubleStackSlot() ||
- move->destination().IsFloatStackSlot())) {
+ if ((move->source().IsConstant() || move->source().IsFPStackSlot()) &&
+ !move->destination().IsFPRegister()) {
move_cycle_.pending_double_scratch_register_use = true;
}
}
@@ -3736,7 +3777,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Handle<HeapObject> src_object = src.ToHeapObject();
RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
+ __ LoadTaggedRoot(dst, index);
} else {
__ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
}