path: root/deps/v8/src/s390/macro-assembler-s390.cc
Diffstat (limited to 'deps/v8/src/s390/macro-assembler-s390.cc')
-rw-r--r--  deps/v8/src/s390/macro-assembler-s390.cc | 471
1 file changed, 158 insertions, 313 deletions
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index fbf82ccbc5..f087cc4c8a 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -1306,17 +1306,16 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- mov(r6, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ mov(r6, Operand(debug_hook_active));
LoadB(r6, MemOperand(r6));
- CmpP(r6, Operand(StepIn));
- blt(&skip_flooding);
+ CmpP(r6, Operand::Zero());
+ beq(&skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1332,7 +1331,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
Push(new_target);
}
Push(fun, fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -1346,7 +1345,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
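
The hook check above replaces the old step-flooding test with a single byte read: the isolate exposes a flag that is non-zero only while a debugger has installed a function-call hook, so the common path costs one load, one compare and one untaken branch. A minimal C++ sketch of the shape of the generated code, using hypothetical stand-ins for the flag and the runtime call (neither name is part of this patch):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins: the per-isolate byte read through r6 and the
    // Runtime::kDebugOnFunctionCall notification it guards.
    static uint8_t debug_hook_on_function_call = 0;
    static void NotifyDebugOnFunctionCall() { std::puts("debug hook"); }

    static void CheckDebugHookSketch() {
      // CmpP(r6, Operand::Zero()); beq(&skip_hook): skip when no hook is set.
      if (debug_hook_on_function_call != 0) {
        NotifyDebugOnFunctionCall();  // fun/new_target/expected are saved and restored around this
      }
      // skip_hook: fall through to the normal invocation.
    }
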
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -1360,8 +1359,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(r3));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -1579,25 +1578,18 @@ void MacroAssembler::Allocate(int object_size, Register result,
// Set up allocation top address register.
Register top_address = scratch1;
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- Register alloc_limit = ip;
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result.
LoadP(result, MemOperand(top_address));
- LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- LoadP(alloc_limit, MemOperand(top_address));
- CmpP(result, alloc_limit);
+ CmpP(result, MemOperand(top_address));
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit. Result already contains allocation top.
- LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1611,7 +1603,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
Label aligned;
beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
- CmpLogicalP(result, alloc_limit);
+ CmpLogicalP(result, MemOperand(top_address, limit - top));
bge(gc_required);
}
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
@@ -1621,27 +1613,26 @@ void MacroAssembler::Allocate(int object_size, Register result,
#endif
}
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- SubP(r0, alloc_limit, result);
- if (is_int16(object_size)) {
- CmpP(r0, Operand(object_size));
- blt(gc_required);
- AddP(result_end, result, Operand(object_size));
- } else {
- mov(result_end, Operand(object_size));
- CmpP(r0, result_end);
- blt(gc_required);
- AddP(result_end, result, result_end);
- }
+ AddP(result_end, result, Operand(object_size));
+
+ // Compare with allocation limit.
+ CmpLogicalP(result_end, MemOperand(top_address, limit - top));
+ bge(gc_required);
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
StoreP(result_end, MemOperand(top_address));
}
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ // Prefetch the allocation_top's next cache line in advance to
+ // help alleviate potential cache misses.
+ // Mode 2 - Prefetch the data into a cache line for store access.
+ pfd(r2, MemOperand(result, 256));
+ }
+
// Tag object.
- AddP(result, result, Operand(kHeapObjectTag));
+ la(result, MemOperand(result, kHeapObjectTag));
}
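
The rewritten fast path no longer stages the allocation limit in ip: it computes the candidate new top directly and compares it against the limit word that sits next to the top in memory. A plain C++ sketch of this bump-pointer pattern (struct and names are illustrative, not V8's):

    #include <cstddef>
    #include <cstdint>

    // "top" and "limit" play the roles of MemOperand(top_address) and
    // MemOperand(top_address, limit - top) in the assembly above.
    struct NewSpaceSketch {
      uintptr_t top;
      uintptr_t limit;
    };

    // Returns the untagged start address, or 0 to signal gc_required.
    uintptr_t AllocateRawSketch(NewSpaceSketch* space, size_t object_size) {
      uintptr_t result = space->top;                // LoadP(result, MemOperand(top_address))
      uintptr_t result_end = result + object_size;  // AddP(result_end, result, Operand(object_size))
      if (result_end >= space->limit) return 0;     // CmpLogicalP(...); bge(gc_required)
      space->top = result_end;                      // StoreP(result_end, MemOperand(top_address))
      return result;                                // tagging with kHeapObjectTag happens afterwards
    }
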
void MacroAssembler::Allocate(Register object_size, Register result,
@@ -1676,24 +1667,17 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- Register alloc_limit = ip;
mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into alloc_limit..
+ // Load allocation top into result
LoadP(result, MemOperand(top_address));
- LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- LoadP(alloc_limit, MemOperand(top_address));
- CmpP(result, alloc_limit);
+ CmpP(result, MemOperand(top_address));
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit. Result already contains allocation top.
- LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1707,7 +1691,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Label aligned;
beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
- CmpLogicalP(result, alloc_limit);
+ CmpLogicalP(result, MemOperand(top_address, limit - top));
bge(gc_required);
}
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
@@ -1720,17 +1704,14 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
- SubP(r0, alloc_limit, result);
if ((flags & SIZE_IN_WORDS) != 0) {
ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
- CmpP(r0, result_end);
- blt(gc_required);
AddP(result_end, result, result_end);
} else {
- CmpP(r0, object_size);
- blt(gc_required);
AddP(result_end, result, object_size);
}
+ CmpLogicalP(result_end, MemOperand(top_address, limit - top));
+ bge(gc_required);
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
@@ -1742,8 +1723,15 @@ void MacroAssembler::Allocate(Register object_size, Register result,
StoreP(result_end, MemOperand(top_address));
}
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ // Prefetch the allocation_top's next cache line in advance to
+ // help alleviate potential cache misses.
+ // Mode 2 - Prefetch the data into a cache line for store access.
+ pfd(r2, MemOperand(result, 256));
+ }
+
// Tag object.
- AddP(result, result, Operand(kHeapObjectTag));
+ la(result, MemOperand(result, kHeapObjectTag));
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
@@ -1795,8 +1783,15 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
}
StoreP(result_end, MemOperand(top_address));
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ // Prefetch the allocation_top's next cache line in advance to
+ // help alleviate potential cache misses.
+ // Mode 2 - Prefetch the data into a cache line for store access.
+ pfd(r2, MemOperand(result, 256));
+ }
+
// Tag object.
- AddP(result, result, Operand(kHeapObjectTag));
+ la(result, MemOperand(result, kHeapObjectTag));
}
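
The pfd with mode 2 that now follows each successful allocation asks the hardware to pull the cache line 256 bytes past the object start into the cache for store access, since that memory is about to be written during object initialization. In portable terms it is roughly the write-hinted form of the GCC/Clang prefetch builtin:

    // Rough analogue of pfd(r2, MemOperand(result, 256)); the second argument
    // (1) requests a prefetch for write, the third is the temporal-locality hint.
    inline void PrefetchForStoreSketch(const void* result) {
      __builtin_prefetch(static_cast<const char*>(result) + 256, 1, 3);
    }
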
void MacroAssembler::FastAllocate(int object_size, Register result,
@@ -1837,103 +1832,34 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
#endif
}
+#if V8_TARGET_ARCH_S390X
+ // Limit to 64-bit only, as double alignment check above may adjust
+ // allocation top by an extra kDoubleSize/2.
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(object_size)) {
+ // Update allocation top.
+ AddP(MemOperand(top_address), Operand(object_size));
+ } else {
+ // Calculate new top using result.
+ AddP(result_end, result, Operand(object_size));
+ // Update allocation top.
+ StoreP(result_end, MemOperand(top_address));
+ }
+#else
// Calculate new top using result.
AddP(result_end, result, Operand(object_size));
-
- // The top pointer is not updated for allocation folding dominators.
+ // Update allocation top.
StoreP(result_end, MemOperand(top_address));
+#endif
- // Tag object.
- AddP(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::AllocateTwoByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-
- ShiftLeftP(scratch1, length, Operand(1)); // Length in bytes, not chars.
- AddP(scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
-
- AndP(scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
- scratch2);
-}
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kCharSize == 1);
- AddP(scratch1, length,
- Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
- AndP(scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
- scratch2);
-}
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
- scratch2);
-}
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ // Prefetch the allocation_top's next cache line in advance to
+ // help alleviate potential cache misses.
+ // Mode 2 - Prefetch the data into a cache line for store access.
+ pfd(r2, MemOperand(result, 256));
+ }
- InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
- scratch1, scratch2);
+ // Tag object.
+ la(result, MemOperand(result, kHeapObjectTag));
}
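
On 64-bit targets with GENERAL_INSTR_EXT, a constant object size that fits in a signed 8-bit immediate lets the allocation top be bumped with a single add-to-storage instruction instead of the load/add/store sequence through result_end. A sketch of that dispatch (helper names are illustrative):

    #include <cstdint>

    inline bool is_int8_sketch(intptr_t value) { return value >= -128 && value <= 127; }

    void BumpAllocationTopSketch(uintptr_t* top_address, uintptr_t result,
                                 int object_size, bool have_general_instr_ext) {
      if (have_general_instr_ext && is_int8_sketch(object_size)) {
        // AddP(MemOperand(top_address), Operand(object_size)): one storage add.
        *top_address += object_size;
      } else {
        // Fallback: compute the new top in a register, then store it.
        uintptr_t result_end = result + object_size;
        *top_address = result_end;
      }
    }

The 32-bit build keeps the register sequence unconditionally because the DOUBLE_ALIGNMENT filler above may have advanced result past the stored top, in which case adding only object_size to memory would update the top by too little.
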
void MacroAssembler::CompareObjectType(Register object, Register map,
@@ -1956,62 +1882,10 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
}
-void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- ble(fail);
- CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
- bgt(fail);
-}
-
-void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- bgt(fail);
-}
-
void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
SmiUntag(ip, smi);
ConvertIntToDouble(ip, value);
}
-void MacroAssembler::StoreNumberToDoubleElements(
- Register value_reg, Register key_reg, Register elements_reg,
- Register scratch1, DoubleRegister double_scratch, Label* fail,
- int elements_offset) {
- DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
- Label smi_value, store;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
- DONT_DO_SMI_CHECK);
-
- LoadDouble(double_scratch,
- FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- // Force a canonical NaN.
- CanonicalizeNaN(double_scratch);
- b(&store);
-
- bind(&smi_value);
- SmiToDouble(double_scratch, value_reg);
-
- bind(&store);
- SmiToDoubleArrayOffset(scratch1, key_reg);
- StoreDouble(double_scratch,
- FieldMemOperand(elements_reg, scratch1,
- FixedDoubleArray::kHeaderSize - elements_offset));
-}
void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
Label* early_success) {
@@ -2491,23 +2365,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind, ElementsKind transitioned_kind,
- Register map_in_out, Register scratch, Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- LoadP(scratch, NativeContextMemOperand());
- LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- CmpP(map_in_out, ip);
- bne(no_map_match);
-
- // Use the transitioned cached map.
- LoadP(map_in_out,
- ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadP(dst, NativeContextMemOperand());
LoadP(dst, ContextMemOperand(dst, index));
@@ -2592,25 +2449,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
beq(smi_case);
}
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
- Label* non_smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- // We can more optimally use TestIfSmi if dst != src
- // otherwise, the UnTag operation will kill the CC and we cannot
- // test the Tag bit.
- if (src.code() != dst.code()) {
- SmiUntag(dst, src);
- TestIfSmi(src);
- } else {
- TestBit(src, 0, r0);
- SmiUntag(dst, src);
- LoadAndTestRR(r0, r0);
- }
- bne(non_smi_case);
-}
-
void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
Label* on_either_smi) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2881,20 +2719,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
bne(failure);
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
-
- if (!scratch.is(type)) LoadRR(scratch, type);
- nilf(scratch, Operand(kFlatOneByteStringMask));
- CmpP(scratch, Operand(kFlatOneByteStringTag));
- bne(failure);
-}
-
static const int kRegisterPassedArguments = 5;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -3307,12 +3131,10 @@ void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
LoadB(dst, mem);
- lgbr(dst, dst);
} else if (r.IsUInteger8()) {
LoadlB(dst, mem);
} else if (r.IsInteger16()) {
LoadHalfWordP(dst, mem, scratch);
- lghr(dst, dst);
} else if (r.IsUInteger16()) {
LoadHalfWordP(dst, mem, scratch);
#if V8_TARGET_ARCH_S390X
@@ -3413,42 +3235,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
return no_reg;
}
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Register current = scratch0;
- Label loop_again, end;
-
- // scratch contained elements pointer.
- LoadRR(current, object);
- LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
- LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- beq(&end);
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
-
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- LoadlB(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
- CmpP(scratch1, Operand(JS_OBJECT_TYPE));
- blt(found);
-
- LoadlB(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- CmpP(scratch1, Operand(DICTIONARY_ELEMENTS));
- beq(found);
- LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- bne(&loop_again);
-
- bind(&end);
-}
-
void MacroAssembler::mov(Register dst, const Operand& src) {
if (src.rmode_ != kRelocInfo_NONEPTR) {
// some form of relocation needed
@@ -3499,13 +3285,17 @@ void MacroAssembler::Mul64(Register dst, const Operand& src1) {
}
void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
- if (dst.is(src2)) {
- MulP(dst, src1);
- } else if (dst.is(src1)) {
- MulP(dst, src2);
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ MulPWithCondition(dst, src1, src2);
} else {
- Move(dst, src1);
- MulP(dst, src2);
+ if (dst.is(src2)) {
+ MulP(dst, src1);
+ } else if (dst.is(src1)) {
+ MulP(dst, src2);
+ } else {
+ Move(dst, src1);
+ MulP(dst, src2);
+ }
}
}
@@ -3535,6 +3325,16 @@ void MacroAssembler::MulP(Register dst, Register src) {
#endif
}
+void MacroAssembler::MulPWithCondition(Register dst, Register src1,
+ Register src2) {
+ CHECK(CpuFeatures::IsSupported(MISC_INSTR_EXT2));
+#if V8_TARGET_ARCH_S390X
+ msgrkc(dst, src1, src2);
+#else
+ msrkc(dst, src1, src2);
+#endif
+}
+
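
msgrkc/msrkc, available with the MISC_INSTR_EXT2 facility, are non-destructive three-operand multiplies that also set the condition code on overflow, so Mul can produce the product and an overflow indication in one instruction. The visible behaviour is close to the GCC/Clang overflow-checking builtin; a sketch of the 32-bit case:

    #include <cstdint>

    // Rough model of the msrkc semantics (not V8 code): multiply without
    // clobbering the sources and report overflow, which the instruction
    // signals through the condition code.
    bool MulWithOverflowSketch(int32_t src1, int32_t src2, int32_t* dst) {
      return __builtin_mul_overflow(src1, src2, dst);  // true => overflow
    }

Without the facility, MulP is a two-operand (destructive) multiply, which is why the fallback path has to pick whichever source already aliases dst, or emit a Move first.
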
void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
if (is_uint16(opnd.offset())) {
@@ -3553,6 +3353,17 @@ void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
#endif
}
+void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
+ sqdbr(result, input);
+}
+void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
+ if (is_uint12(input.offset())) {
+ sqdb(result, input);
+ } else {
+ ldy(result, input);
+ sqdbr(result, result);
+ }
+}
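
The memory form sqdb only encodes an unsigned 12-bit displacement, so Sqrt uses it when the offset fits and otherwise loads the operand with the long-displacement ldy before applying the register form sqdbr. Both branches of the sketch below compute the same value; the split only mirrors which instruction form is encodable:

    #include <cmath>
    #include <cstdint>

    inline bool is_uint12_sketch(intptr_t value) { return value >= 0 && value < (1 << 12); }

    double SqrtAtOffsetSketch(const char* base, intptr_t offset_in_bytes) {
      const double* p = reinterpret_cast<const double*>(base + offset_in_bytes);
      if (is_uint12_sketch(offset_in_bytes)) {
        return std::sqrt(*p);  // sqdb(result, input): displacement fits in the encoding
      }
      double tmp = *p;         // ldy(result, input): 20-bit signed displacement load
      return std::sqrt(tmp);   // sqdbr(result, result)
    }
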
//----------------------------------------------------------------------------
// Add Instructions
//----------------------------------------------------------------------------
@@ -3955,8 +3766,8 @@ void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
}
void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
- sllg(src, src, Operand(32));
- ldgr(dst, src);
+ sllg(r0, src, Operand(32));
+ ldgr(dst, r0);
}
void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
@@ -4339,7 +4150,7 @@ void MacroAssembler::Load(Register dst, const Operand& opnd) {
#endif
} else {
#if V8_TARGET_ARCH_S390X
- llilf(dst, opnd);
+ lgfi(dst, opnd);
#else
iilf(dst, opnd);
#endif
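
The switch from llilf to lgfi matters for negative immediates: llilf zero-extends the 32-bit value into the 64-bit register, while lgfi sign-extends it, which is what a signed Operand expects. In C++ terms:

    #include <cstdint>

    // llilf-style: the 32-bit immediate lands in the low word, upper word cleared.
    uint64_t LoadZeroExtendedSketch(int32_t imm) {
      return static_cast<uint64_t>(static_cast<uint32_t>(imm));
    }

    // lgfi-style: the 32-bit immediate is sign-extended to 64 bits.
    uint64_t LoadSignExtendedSketch(int32_t imm) {
      return static_cast<uint64_t>(static_cast<int64_t>(imm));
    }

    // For imm == -1 the first gives 0x00000000FFFFFFFF, the second
    // 0xFFFFFFFFFFFFFFFF, so only lgfi preserves the intended value.
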
@@ -4359,6 +4170,19 @@ void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
#endif
}
+void MacroAssembler::LoadPositiveP(Register result, Register input) {
+#if V8_TARGET_ARCH_S390X
+ lpgr(result, input);
+#else
+ lpr(result, input);
+#endif
+}
+
+void MacroAssembler::LoadPositive32(Register result, Register input) {
+ lpr(result, input);
+ lgfr(result, result);
+}
+
//-----------------------------------------------------------------------------
// Compare Helpers
//-----------------------------------------------------------------------------
@@ -4532,9 +4356,16 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
uint32_t lo_32 = static_cast<uint32_t>(value);
// Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
- iihf(scratch, Operand(hi_32));
- iilf(scratch, Operand(lo_32));
- ldgr(result, scratch);
+ if (value == 0) {
+ lzdr(result);
+ } else if (lo_32 == 0) {
+ llihf(scratch, Operand(hi_32));
+ ldgr(result, scratch);
+ } else {
+ iihf(scratch, Operand(hi_32));
+ iilf(scratch, Operand(lo_32));
+ ldgr(result, scratch);
+ }
}
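
The uint64_t literal loader now special-cases the two cheap bit patterns: an all-zero value becomes a single lzdr (load FP zero), and a value whose low word is zero (which covers -0.0, 1.0, 2.0 and many other round doubles) needs only one immediate insert into the high word before the ldgr transfer. A sketch of the case split over the raw IEEE-754 bits:

    #include <cstdint>

    enum class DoubleLiteralSequence { kLzdr, kLlihfLdgr, kIihfIilfLdgr };

    // Mirrors the selection above; "value" is the raw 64-bit bit pattern.
    DoubleLiteralSequence PickSequenceSketch(uint64_t value) {
      if (value == 0) return DoubleLiteralSequence::kLzdr;   // +0.0: no GPR needed
      if (static_cast<uint32_t>(value) == 0)
        return DoubleLiteralSequence::kLlihfLdgr;            // low word zero: one insert
      return DoubleLiteralSequence::kIihfIilfLdgr;           // general case: two inserts
    }
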
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
@@ -4545,19 +4376,19 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
Register scratch) {
- uint32_t hi_32 = bit_cast<uint32_t>(value);
- uint32_t lo_32 = 0;
-
- // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
- iihf(scratch, Operand(hi_32));
- iilf(scratch, Operand(lo_32));
- ldgr(result, scratch);
+ uint64_t int_val = static_cast<uint64_t>(bit_cast<uint32_t, float>(value))
+ << 32;
+ LoadDoubleLiteral(result, int_val, scratch);
}
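
LoadFloat32Literal now reuses that path by shifting the float's 32 bits into the high word of a 64-bit pattern, which is where a short-precision value sits in the FPR after ldgr; because the low word is zero, it always hits the single-insert llihf case. The bit construction, sketched:

    #include <cstdint>
    #include <cstring>

    // Builds the value handed to LoadDoubleLiteral(result, int_val, scratch).
    uint64_t Float32LiteralBitsSketch(float value) {
      uint32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // bit_cast<uint32_t, float>(value)
      return static_cast<uint64_t>(bits) << 32;
    }
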
void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#if V8_TARGET_ARCH_S390X
- LoadSmiLiteral(scratch, smi);
- cgr(src1, scratch);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ cih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+ } else {
+ LoadSmiLiteral(scratch, smi);
+ cgr(src1, scratch);
+ }
#else
// CFI takes 32-bit immediate.
cfi(src1, Operand(smi));
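
With the Smi encoding used here on 64-bit targets (the 32-bit payload in the upper word, the lower word zero), two valid Smis are equal exactly when their high words are equal, so cih/clih can compare src1 against the literal's payload directly and the scratch register is no longer needed. The equivalence, sketched:

    #include <cstdint>

    // 64-bit Smi encoding assumed above: payload << 32, low word zero.
    inline int64_t SmiBitsSketch(int32_t payload) { return static_cast<int64_t>(payload) << 32; }

    // Comparing high words is the same as comparing the full 64-bit Smis,
    // which is what cih (signed) and clih (unsigned) rely on.
    bool SmiEqualsLiteralSketch(int64_t smi_in_register, int32_t literal_payload) {
      int32_t high_word = static_cast<int32_t>(smi_in_register >> 32);  // the cih operand
      return high_word == literal_payload;
    }
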
@@ -4567,8 +4398,12 @@ void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
- LoadSmiLiteral(scratch, smi);
- clgr(src1, scratch);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ clih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+ } else {
+ LoadSmiLiteral(scratch, smi);
+ clgr(src1, scratch);
+ }
#else
// CLFI takes 32-bit immediate
clfi(src1, Operand(smi));
@@ -4578,8 +4413,13 @@ void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
- LoadSmiLiteral(scratch, smi);
- AddP(dst, src, scratch);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ if (!dst.is(src)) LoadRR(dst, src);
+ aih(dst, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+ } else {
+ LoadSmiLiteral(scratch, smi);
+ AddP(dst, src, scratch);
+ }
#else
AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
#endif
@@ -4588,8 +4428,13 @@ void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
- LoadSmiLiteral(scratch, smi);
- SubP(dst, src, scratch);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ if (!dst.is(src)) LoadRR(dst, src);
+ aih(dst, Operand((-reinterpret_cast<intptr_t>(smi)) >> 32));
+ } else {
+ LoadSmiLiteral(scratch, smi);
+ SubP(dst, src, scratch);
+ }
#else
AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
#endif
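
AddSmiLiteral and SubSmiLiteral get the same treatment: aih adds a 32-bit immediate to the high word only, and because the low word of a Smi is zero, adding smi >> 32 (or (-smi) >> 32 for subtraction) to the high word is exactly an add or subtract of the full Smi, with no carry or borrow possible from the low half. A small check of the subtraction identity:

    #include <cassert>
    #include <cstdint>

    inline int64_t SmiBitsSketch(int32_t payload) { return static_cast<int64_t>(payload) << 32; }

    // dst = src - smi, implemented as "add (-smi) >> 32 to the high word".
    int64_t SubSmiViaHighWordAddSketch(int64_t src, int32_t smi_payload) {
      int64_t smi = SmiBitsSketch(smi_payload);
      int32_t high_addend = static_cast<int32_t>((-smi) >> 32);  // Operand((-smi) >> 32)
      return src + (static_cast<int64_t>(high_addend) << 32);    // aih dst, high_addend
    }

    int main() {
      assert(SubSmiViaHighWordAddSketch(SmiBitsSketch(7), 3) == SmiBitsSketch(4));
      assert(SubSmiViaHighWordAddSketch(SmiBitsSketch(-2), 5) == SmiBitsSketch(-7));
      return 0;
    }
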