author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/compiler/backend/ppc
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download   qtwebengine-chromium-85-based.tar.gz

BASELINE: Update Chromium to 85.0.4183.140

Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/compiler/backend/ppc')
-rw-r--r--  chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc         683
-rw-r--r--  chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h        93
-rw-r--r--  chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc   93
-rw-r--r--  chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc   307
4 files changed, 1001 insertions, 175 deletions
diff --git a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index b7fece3f72d..56c5003d2e8 100644
--- a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -1039,7 +1039,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
int offset = (FLAG_enable_embedded_constant_pool ? 20 : 23) * kInstrSize;
-#if defined(_AIX)
+#if ABI_USES_FUNCTION_DESCRIPTORS
// AIX/PPC64BE Linux uses a function descriptor
int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
num_parameters = kNumParametersMask & misc_field;
@@ -2164,6 +2164,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
__ vsro(dst, dst, kScratchDoubleReg);
// reload
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mtvsrd(kScratchDoubleReg, r0);
__ vor(dst, dst, kScratchDoubleReg);
break;
@@ -2186,6 +2187,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
__ vsro(dst, dst, kScratchDoubleReg);
// reload
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mtvsrd(kScratchDoubleReg, src);
__ vor(dst, dst, kScratchDoubleReg);
break;
@@ -2208,46 +2210,709 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(dst, dst, Operand(7));
break;
}
+#define SHIFT_TO_CORRECT_LANE(starting_lane_number, lane_input, \
+ lane_width_in_bytes, input_register) \
+ int shift_bits = abs(lane_input - starting_lane_number) * \
+ lane_width_in_bytes * kBitsPerByte; \
+ if (shift_bits > 0) { \
+ __ li(ip, Operand(shift_bits)); \
+ __ mtvsrd(kScratchDoubleReg, ip); \
+ __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
+ if (lane_input < starting_lane_number) { \
+ __ vsro(kScratchDoubleReg, input_register, kScratchDoubleReg); \
+ } else { \
+ DCHECK(lane_input > starting_lane_number); \
+ __ vslo(kScratchDoubleReg, input_register, kScratchDoubleReg); \
+ } \
+ input_register = kScratchDoubleReg; \
+ }
case kPPC_F64x2ExtractLane: {
- __ mfvsrd(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 1 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(0, lane, 8, src);
+ __ mfvsrd(kScratchReg, src);
__ MovInt64ToDouble(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_F32x4ExtractLane: {
- __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 3 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
+ __ mfvsrwz(kScratchReg, src);
__ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_I64x2ExtractLane: {
- __ mfvsrd(i.OutputRegister(), i.InputSimd128Register(0));
+ int32_t lane = 1 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(0, lane, 8, src)
+ __ mfvsrd(i.OutputRegister(), src);
break;
}
case kPPC_I32x4ExtractLane: {
- __ mfvsrwz(i.OutputRegister(), i.InputSimd128Register(0));
+ int32_t lane = 3 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
+ __ mfvsrwz(i.OutputRegister(), src);
break;
}
case kPPC_I16x8ExtractLaneU: {
- __ mfvsrwz(r0, i.InputSimd128Register(0));
+ int32_t lane = 7 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
+ __ mfvsrwz(r0, src);
__ li(ip, Operand(16));
__ srd(i.OutputRegister(), r0, ip);
break;
}
case kPPC_I16x8ExtractLaneS: {
- __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 7 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
+ __ mfvsrwz(kScratchReg, src);
__ sradi(i.OutputRegister(), kScratchReg, 16);
break;
}
case kPPC_I8x16ExtractLaneU: {
- __ mfvsrwz(r0, i.InputSimd128Register(0));
+ int32_t lane = 15 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
+ __ mfvsrwz(r0, src);
__ li(ip, Operand(24));
__ srd(i.OutputRegister(), r0, ip);
break;
}
case kPPC_I8x16ExtractLaneS: {
- __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 15 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
+ __ mfvsrwz(kScratchReg, src);
__ sradi(i.OutputRegister(), kScratchReg, 24);
break;
}
+#undef SHIFT_TO_CORRECT_LANE
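Note on the extract-lane cases above: the WebAssembly lane index is first mirrored (lane = lane_count - 1 - input), and SHIFT_TO_CORRECT_LANE then shifts that lane, in whole bytes via vslo/vsro, into the element position that the following mfvsrd/mfvsrwz reads. A minimal standalone sketch of that shift arithmetic; ShiftToCorrectLane and the sample values are illustrative only, not part of the patch:

#include <cstdio>
#include <cstdlib>

constexpr int kBitsPerByte = 8;

// Mirrors the arithmetic of SHIFT_TO_CORRECT_LANE: a positive result means a
// left shift (vslo), a negative one a right shift (vsro), zero means no shift.
int ShiftToCorrectLane(int starting_lane, int wasm_lane, int lane_count,
                       int lane_width_in_bytes) {
  int lane = (lane_count - 1) - wasm_lane;  // mirror into the backend's lane order
  int shift_bits =
      std::abs(lane - starting_lane) * lane_width_in_bytes * kBitsPerByte;
  return lane < starting_lane ? -shift_bits : shift_bits;
}

int main() {
  // F32x4ExtractLane uses starting lane 1: wasm lane 2 needs no shift,
  // wasm lane 0 a 64-bit left shift, wasm lane 3 a 32-bit right shift.
  std::printf("%d %d %d\n", ShiftToCorrectLane(1, 2, 4, 4),
              ShiftToCorrectLane(1, 0, 4, 4), ShiftToCorrectLane(1, 3, 4, 4));
  // F64x2ExtractLane uses starting lane 0: wasm lane 0 needs a 64-bit shift.
  std::printf("%d\n", ShiftToCorrectLane(0, 0, 2, 8));
}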
+#define GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane, \
+ lane_width_in_bytes) \
+ uint64_t mask = 0; \
+ for (int i = 0, j = 0; i <= kSimd128Size - 1; i++) { \
+ mask <<= kBitsPerByte; \
+ if (i >= lane * lane_width_in_bytes && \
+ i < lane * lane_width_in_bytes + lane_width_in_bytes) { \
+ mask |= replacement_value_byte_lane + j; \
+ j++; \
+ } else { \
+ mask |= i; \
+ } \
+ if (i == (kSimd128Size / 2) - 1) { \
+ __ mov(r0, Operand(mask)); \
+ mask = 0; \
+ } else if (i >= kSimd128Size - 1) { \
+ __ mov(ip, Operand(mask)); \
+ } \
+ } \
+ /* Need to maintain 16 byte alignment for lvx */ \
+ __ addi(sp, sp, Operand(-24)); \
+ __ StoreP(ip, MemOperand(sp, 0)); \
+ __ StoreP(r0, MemOperand(sp, 8)); \
+ __ li(r0, Operand(0)); \
+ __ lvx(kScratchDoubleReg, MemOperand(sp, r0)); \
+ __ addi(sp, sp, Operand(24));
+ case kPPC_F64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 1 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 16;
+ constexpr int lane_width_in_bytes = 8;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ MovDoubleToInt64(r0, i.InputDoubleRegister(2));
+ __ mtvsrd(dst, r0);
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 3 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 20;
+ constexpr int lane_width_in_bytes = 4;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(2));
+ __ mtvsrd(dst, kScratchReg);
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 1 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 16;
+ constexpr int lane_width_in_bytes = 8;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 3 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 20;
+ constexpr int lane_width_in_bytes = 4;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 7 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 22;
+ constexpr int lane_width_in_bytes = 2;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 15 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 23;
+ constexpr int lane_width_in_bytes = 1;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+#undef GENERATE_REPLACE_LANE_MASK
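GENERATE_REPLACE_LANE_MASK builds the vperm byte selector on the stack: bytes outside the replaced lane keep their own index (selecting from the first vperm operand), while the bytes inside the replaced lane select the replacement value, whose bytes start at replacement_value_byte_lane in the second operand. A small standalone sketch of the selector it produces; BuildReplaceLaneMask is an illustrative helper, not part of the patch:

#include <cstdint>
#include <cstdio>

// Same byte-selection rule as GENERATE_REPLACE_LANE_MASK, written into a
// plain array instead of two GPRs pushed to the stack.
void BuildReplaceLaneMask(uint8_t mask[16], int lane,
                          int replacement_value_byte_lane,
                          int lane_width_in_bytes) {
  for (int i = 0, j = 0; i < 16; i++) {
    if (i >= lane * lane_width_in_bytes &&
        i < lane * lane_width_in_bytes + lane_width_in_bytes) {
      mask[i] = replacement_value_byte_lane + j++;  // take replacement bytes
    } else {
      mask[i] = i;  // keep the original byte
    }
  }
}

int main() {
  uint8_t mask[16];
  // kPPC_I32x4ReplaceLane: 4-byte lanes, replacement bytes start at index 20.
  // Replacing backend lane 1 selects bytes 20..23 for bytes 4..7 of the result.
  BuildReplaceLaneMask(mask, 1, 20, 4);
  for (int b : mask) std::printf("%d ", b);
  std::printf("\n");  // 0 1 2 3 20 21 22 23 8 9 10 11 12 13 14 15
}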
+ case kPPC_F64x2Add: {
+ __ xvadddp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Sub: {
+ __ xvsubdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Mul: {
+ __ xvmuldp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Add: {
+ __ vaddfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4AddHoriz: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ Simd128Register tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
+ constexpr int shift_bits = 32;
+ // generate first operand
+ __ vpkudum(dst, src1, src0);
+ // generate second operand
+ __ li(ip, Operand(shift_bits));
+ __ mtvsrd(tempFPReg2, ip);
+ __ vspltb(tempFPReg2, tempFPReg2, Operand(7));
+ __ vsro(tempFPReg1, src0, tempFPReg2);
+ __ vsro(tempFPReg2, src1, tempFPReg2);
+ __ vpkudum(kScratchDoubleReg, tempFPReg2, tempFPReg1);
+ // add the operands
+ __ vaddfp(dst, kScratchDoubleReg, dst);
+ break;
+ }
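The AddHoriz cases (here and the integer variants below) pack the even and odd lanes of both inputs into two vectors and add them, i.e. pairwise sums of each input concatenated. A scalar sketch of the intended semantics, assuming the standard wasm add_horizontal definition; the helper name is illustrative:

#include <cstdio>

// result = [a0+a1, a2+a3, b0+b1, b2+b3]
void F32x4AddHoriz(float dst[4], const float a[4], const float b[4]) {
  dst[0] = a[0] + a[1];
  dst[1] = a[2] + a[3];
  dst[2] = b[0] + b[1];
  dst[3] = b[2] + b[3];
}

int main() {
  float a[4] = {1, 2, 3, 4}, b[4] = {10, 20, 30, 40}, d[4];
  F32x4AddHoriz(d, a, b);
  std::printf("%g %g %g %g\n", d[0], d[1], d[2], d[3]);  // 3 7 30 70
}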
+ case kPPC_F32x4Sub: {
+ __ vsubfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Mul: {
+ __ xvmulsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Add: {
+ __ vaddudm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Sub: {
+ __ vsubudm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Mul: {
+ // Need to maintain 16 byte alignment for stvx and lvx.
+ __ addi(sp, sp, Operand(-40));
+ __ li(r0, Operand(0));
+ __ stvx(i.InputSimd128Register(0), MemOperand(sp, r0));
+ __ li(r0, Operand(16));
+ __ stvx(i.InputSimd128Register(1), MemOperand(sp, r0));
+ for (int i = 0; i < 2; i++) {
+ __ LoadP(r0, MemOperand(sp, kBitsPerByte * i));
+ __ LoadP(ip, MemOperand(sp, (kBitsPerByte * i) + kSimd128Size));
+ __ mulld(r0, r0, ip);
+ __ StoreP(r0, MemOperand(sp, i * kBitsPerByte));
+ }
+ __ li(r0, Operand(0));
+ __ lvx(i.OutputSimd128Register(), MemOperand(sp, r0));
+ __ addi(sp, sp, Operand(40));
+ break;
+ }
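kPPC_I64x2Mul above is a scalar fallback: both operands are spilled to the stack, each 64-bit half is multiplied with mulld, and the result is reloaded as a vector. A standalone sketch of the per-lane semantics; the helper name and sample values are illustrative:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Lane-wise equivalent of the stack round-trip plus mulld.
void I64x2Mul(uint64_t dst[2], const uint64_t a[2], const uint64_t b[2]) {
  for (int i = 0; i < 2; i++) dst[i] = a[i] * b[i];  // mod 2^64, like mulld
}

int main() {
  uint64_t a[2] = {3, UINT64_MAX}, b[2] = {5, 2}, d[2];
  I64x2Mul(d, a, b);
  std::printf("%" PRIu64 " %" PRIu64 "\n", d[0], d[1]);  // 15 18446744073709551614
}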
+ case kPPC_I32x4Add: {
+ __ vadduwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4AddHoriz: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vsum2sws(dst, src0, kScratchDoubleReg);
+ __ vsum2sws(kScratchDoubleReg, src1, kScratchDoubleReg);
+ __ vpkudum(dst, kScratchDoubleReg, dst);
+ break;
+ }
+ case kPPC_I32x4Sub: {
+ __ vsubuwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4Mul: {
+ __ vmuluwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8Add: {
+ __ vadduhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8AddHoriz: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vsum4shs(dst, src0, kScratchDoubleReg);
+ __ vsum4shs(kScratchDoubleReg, src1, kScratchDoubleReg);
+ __ vpkuwus(dst, kScratchDoubleReg, dst);
+ break;
+ }
+ case kPPC_I16x8Sub: {
+ __ vsubuhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8Mul: {
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vmladduhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16Add: {
+ __ vaddubm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16Sub: {
+ __ vsububm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16Mul: {
+ __ vmuleub(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vmuloub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vpkuhum(i.OutputSimd128Register(), kScratchDoubleReg,
+ i.OutputSimd128Register());
+ break;
+ }
+ case kPPC_I64x2MinS: {
+ __ vminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MinS: {
+ __ vminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2MinU: {
+ __ vminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MinU: {
+ __ vminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MinS: {
+ __ vminsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MinU: {
+ __ vminuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MinS: {
+ __ vminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MinU: {
+ __ vminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2MaxS: {
+ __ vmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MaxS: {
+ __ vmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2MaxU: {
+ __ vmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MaxU: {
+ __ vmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MaxS: {
+ __ vmaxsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MaxU: {
+ __ vmaxuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MaxS: {
+ __ vmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MaxU: {
+ __ vmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Eq: {
+ __ xvcmpeqdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Ne: {
+ __ xvcmpeqdp(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F64x2Le: {
+ __ xvcmpgedp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F64x2Lt: {
+ __ xvcmpgtdp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4Eq: {
+ __ xvcmpeqsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Eq: {
+ __ vcmpequd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4Eq: {
+ __ vcmpequw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8Eq: {
+ __ vcmpequh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16Eq: {
+ __ vcmpequb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Ne: {
+ __ xvcmpeqsp(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I64x2Ne: {
+ __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4Ne: {
+ __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8Ne: {
+ __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16Ne: {
+ __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F32x4Lt: {
+ __ xvcmpgtsp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4Le: {
+ __ xvcmpgesp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_I64x2GtS: {
+ __ vcmpgtsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4GtS: {
+ __ vcmpgtsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2GeS: {
+ __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4GeS: {
+ __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I64x2GtU: {
+ __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4GtU: {
+ __ vcmpgtuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+
+ break;
+ }
+ case kPPC_I64x2GeU: {
+ __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+
+ break;
+ }
+ case kPPC_I32x4GeU: {
+ __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8GtS: {
+ __ vcmpgtsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8GeS: {
+ __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8GtU: {
+ __ vcmpgtuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8GeU: {
+ __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16GtS: {
+ __ vcmpgtsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16GeS: {
+ __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16GtU: {
+ __ vcmpgtub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16GeU: {
+ __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+#define VECTOR_SHIFT(op) \
+ { \
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(1)); \
+ __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ kScratchDoubleReg); \
+ }
+ case kPPC_I64x2Shl: {
+ VECTOR_SHIFT(vsld)
+ break;
+ }
+ case kPPC_I64x2ShrS: {
+ VECTOR_SHIFT(vsrad)
+ break;
+ }
+ case kPPC_I64x2ShrU: {
+ VECTOR_SHIFT(vsrd)
+ break;
+ }
+ case kPPC_I32x4Shl: {
+ VECTOR_SHIFT(vslw)
+ break;
+ }
+ case kPPC_I32x4ShrS: {
+ VECTOR_SHIFT(vsraw)
+ break;
+ }
+ case kPPC_I32x4ShrU: {
+ VECTOR_SHIFT(vsrw)
+ break;
+ }
+ case kPPC_I16x8Shl: {
+ VECTOR_SHIFT(vslh)
+ break;
+ }
+ case kPPC_I16x8ShrS: {
+ VECTOR_SHIFT(vsrah)
+ break;
+ }
+ case kPPC_I16x8ShrU: {
+ VECTOR_SHIFT(vsrh)
+ break;
+ }
+ case kPPC_I8x16Shl: {
+ VECTOR_SHIFT(vslb)
+ break;
+ }
+ case kPPC_I8x16ShrS: {
+ VECTOR_SHIFT(vsrab)
+ break;
+ }
+ case kPPC_I8x16ShrU: {
+ VECTOR_SHIFT(vsrb)
+ break;
+ }
+#undef VECTOR_SHIFT
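VECTOR_SHIFT moves the scalar shift count into a vector register and splats it to every byte lane, so each element shifts by the same amount; only the low-order bits relevant to the lane width take effect, matching WebAssembly's shift-count wrapping. A scalar sketch for the 32-bit case, assuming that modulo-lane-width behaviour of vslw; the helper is illustrative:

#include <cstdint>
#include <cstdio>

// Per-lane left shift with the count taken modulo the lane width, as a scalar
// stand-in for the splat-then-vslw sequence generated by VECTOR_SHIFT.
void I32x4Shl(uint32_t v[4], uint32_t shift) {
  shift &= 31;
  for (int i = 0; i < 4; i++) v[i] <<= shift;
}

int main() {
  uint32_t v[4] = {1, 2, 3, 4};
  I32x4Shl(v, 33);  // same effect as shifting by 1
  std::printf("%u %u %u %u\n", v[0], v[1], v[2], v[3]);  // 2 4 6 8
}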
+ case kPPC_S128And: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vand(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Or: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vor(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Xor: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vxor(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vxor(dst, dst, dst);
+ break;
+ }
+ case kPPC_S128Not: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vnor(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Select: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register mask = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register src2 = i.InputSimd128Register(2);
+ __ vsel(dst, src2, src1, mask);
+ break;
+ }
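kPPC_S128Select is a bitwise select via vsel: wherever a mask bit is set the result bit is taken from the first value operand, elsewhere from the second. A scalar sketch of that per-bit rule; the helper name is illustrative:

#include <cstdint>
#include <cstdio>

// Bitwise select: mask bit 1 -> take the bit from on_true, 0 -> from on_false.
uint64_t BitSelect(uint64_t mask, uint64_t on_true, uint64_t on_false) {
  return (on_true & mask) | (on_false & ~mask);
}

int main() {
  std::printf("%#llx\n",
              (unsigned long long)BitSelect(0x00ff00ff00ff00ffull,
                                            0x1111111111111111ull,
                                            0x2222222222222222ull));
  // prints 0x2211221122112211
}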
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 4f6aeced6da..fdffc5f0963 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -192,18 +192,111 @@ namespace compiler {
V(PPC_AtomicXorInt64) \
V(PPC_F64x2Splat) \
V(PPC_F64x2ExtractLane) \
+ V(PPC_F64x2ReplaceLane) \
+ V(PPC_F64x2Add) \
+ V(PPC_F64x2Sub) \
+ V(PPC_F64x2Mul) \
+ V(PPC_F64x2Eq) \
+ V(PPC_F64x2Ne) \
+ V(PPC_F64x2Le) \
+ V(PPC_F64x2Lt) \
V(PPC_F32x4Splat) \
V(PPC_F32x4ExtractLane) \
+ V(PPC_F32x4ReplaceLane) \
+ V(PPC_F32x4Add) \
+ V(PPC_F32x4AddHoriz) \
+ V(PPC_F32x4Sub) \
+ V(PPC_F32x4Mul) \
+ V(PPC_F32x4Eq) \
+ V(PPC_F32x4Ne) \
+ V(PPC_F32x4Lt) \
+ V(PPC_F32x4Le) \
V(PPC_I64x2Splat) \
V(PPC_I64x2ExtractLane) \
+ V(PPC_I64x2ReplaceLane) \
+ V(PPC_I64x2Add) \
+ V(PPC_I64x2Sub) \
+ V(PPC_I64x2Mul) \
+ V(PPC_I64x2MinS) \
+ V(PPC_I64x2MinU) \
+ V(PPC_I64x2MaxS) \
+ V(PPC_I64x2MaxU) \
+ V(PPC_I64x2Eq) \
+ V(PPC_I64x2Ne) \
+ V(PPC_I64x2GtS) \
+ V(PPC_I64x2GtU) \
+ V(PPC_I64x2GeU) \
+ V(PPC_I64x2GeS) \
+ V(PPC_I64x2Shl) \
+ V(PPC_I64x2ShrS) \
+ V(PPC_I64x2ShrU) \
V(PPC_I32x4Splat) \
V(PPC_I32x4ExtractLane) \
+ V(PPC_I32x4ReplaceLane) \
+ V(PPC_I32x4Add) \
+ V(PPC_I32x4AddHoriz) \
+ V(PPC_I32x4Sub) \
+ V(PPC_I32x4Mul) \
+ V(PPC_I32x4MinS) \
+ V(PPC_I32x4MinU) \
+ V(PPC_I32x4MaxS) \
+ V(PPC_I32x4MaxU) \
+ V(PPC_I32x4Eq) \
+ V(PPC_I32x4Ne) \
+ V(PPC_I32x4GtS) \
+ V(PPC_I32x4GeS) \
+ V(PPC_I32x4GtU) \
+ V(PPC_I32x4GeU) \
+ V(PPC_I32x4Shl) \
+ V(PPC_I32x4ShrS) \
+ V(PPC_I32x4ShrU) \
V(PPC_I16x8Splat) \
V(PPC_I16x8ExtractLaneU) \
V(PPC_I16x8ExtractLaneS) \
+ V(PPC_I16x8ReplaceLane) \
+ V(PPC_I16x8Add) \
+ V(PPC_I16x8AddHoriz) \
+ V(PPC_I16x8Sub) \
+ V(PPC_I16x8Mul) \
+ V(PPC_I16x8MinS) \
+ V(PPC_I16x8MinU) \
+ V(PPC_I16x8MaxS) \
+ V(PPC_I16x8MaxU) \
+ V(PPC_I16x8Eq) \
+ V(PPC_I16x8Ne) \
+ V(PPC_I16x8GtS) \
+ V(PPC_I16x8GeS) \
+ V(PPC_I16x8GtU) \
+ V(PPC_I16x8GeU) \
+ V(PPC_I16x8Shl) \
+ V(PPC_I16x8ShrS) \
+ V(PPC_I16x8ShrU) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
+ V(PPC_I8x16ReplaceLane) \
+ V(PPC_I8x16Add) \
+ V(PPC_I8x16Sub) \
+ V(PPC_I8x16Mul) \
+ V(PPC_I8x16MinS) \
+ V(PPC_I8x16MinU) \
+ V(PPC_I8x16MaxS) \
+ V(PPC_I8x16MaxU) \
+ V(PPC_I8x16Eq) \
+ V(PPC_I8x16Ne) \
+ V(PPC_I8x16GtS) \
+ V(PPC_I8x16GeS) \
+ V(PPC_I8x16GtU) \
+ V(PPC_I8x16GeU) \
+ V(PPC_I8x16Shl) \
+ V(PPC_I8x16ShrS) \
+ V(PPC_I8x16ShrU) \
+ V(PPC_S128And) \
+ V(PPC_S128Or) \
+ V(PPC_S128Xor) \
+ V(PPC_S128Zero) \
+ V(PPC_S128Not) \
+ V(PPC_S128Select) \
V(PPC_StoreCompressTagged) \
V(PPC_LoadDecompressTaggedSigned) \
V(PPC_LoadDecompressTaggedPointer) \
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 68d0aaedc4b..b1d124432ef 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -115,18 +115,111 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_CompressAny:
case kPPC_F64x2Splat:
case kPPC_F64x2ExtractLane:
+ case kPPC_F64x2ReplaceLane:
+ case kPPC_F64x2Add:
+ case kPPC_F64x2Sub:
+ case kPPC_F64x2Mul:
+ case kPPC_F64x2Eq:
+ case kPPC_F64x2Ne:
+ case kPPC_F64x2Le:
+ case kPPC_F64x2Lt:
case kPPC_F32x4Splat:
case kPPC_F32x4ExtractLane:
+ case kPPC_F32x4ReplaceLane:
+ case kPPC_F32x4Add:
+ case kPPC_F32x4AddHoriz:
+ case kPPC_F32x4Sub:
+ case kPPC_F32x4Mul:
+ case kPPC_F32x4Eq:
+ case kPPC_F32x4Ne:
+ case kPPC_F32x4Lt:
+ case kPPC_F32x4Le:
case kPPC_I64x2Splat:
case kPPC_I64x2ExtractLane:
+ case kPPC_I64x2ReplaceLane:
+ case kPPC_I64x2Add:
+ case kPPC_I64x2Sub:
+ case kPPC_I64x2Mul:
+ case kPPC_I64x2MinS:
+ case kPPC_I64x2MinU:
+ case kPPC_I64x2MaxS:
+ case kPPC_I64x2MaxU:
+ case kPPC_I64x2Eq:
+ case kPPC_I64x2Ne:
+ case kPPC_I64x2GtS:
+ case kPPC_I64x2GtU:
+ case kPPC_I64x2GeU:
+ case kPPC_I64x2GeS:
+ case kPPC_I64x2Shl:
+ case kPPC_I64x2ShrS:
+ case kPPC_I64x2ShrU:
case kPPC_I32x4Splat:
case kPPC_I32x4ExtractLane:
+ case kPPC_I32x4ReplaceLane:
+ case kPPC_I32x4Add:
+ case kPPC_I32x4AddHoriz:
+ case kPPC_I32x4Sub:
+ case kPPC_I32x4Mul:
+ case kPPC_I32x4MinS:
+ case kPPC_I32x4MinU:
+ case kPPC_I32x4MaxS:
+ case kPPC_I32x4MaxU:
+ case kPPC_I32x4Eq:
+ case kPPC_I32x4Ne:
+ case kPPC_I32x4GtS:
+ case kPPC_I32x4GeS:
+ case kPPC_I32x4GtU:
+ case kPPC_I32x4GeU:
+ case kPPC_I32x4Shl:
+ case kPPC_I32x4ShrS:
+ case kPPC_I32x4ShrU:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
+ case kPPC_I16x8ReplaceLane:
+ case kPPC_I16x8Add:
+ case kPPC_I16x8AddHoriz:
+ case kPPC_I16x8Sub:
+ case kPPC_I16x8Mul:
+ case kPPC_I16x8MinS:
+ case kPPC_I16x8MinU:
+ case kPPC_I16x8MaxS:
+ case kPPC_I16x8MaxU:
+ case kPPC_I16x8Eq:
+ case kPPC_I16x8Ne:
+ case kPPC_I16x8GtS:
+ case kPPC_I16x8GeS:
+ case kPPC_I16x8GtU:
+ case kPPC_I16x8GeU:
+ case kPPC_I16x8Shl:
+ case kPPC_I16x8ShrS:
+ case kPPC_I16x8ShrU:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
+ case kPPC_I8x16ReplaceLane:
+ case kPPC_I8x16Add:
+ case kPPC_I8x16Sub:
+ case kPPC_I8x16Mul:
+ case kPPC_I8x16MinS:
+ case kPPC_I8x16MinU:
+ case kPPC_I8x16MaxS:
+ case kPPC_I8x16MaxU:
+ case kPPC_I8x16Eq:
+ case kPPC_I8x16Ne:
+ case kPPC_I8x16GtS:
+ case kPPC_I8x16GeS:
+ case kPPC_I8x16GtU:
+ case kPPC_I8x16GeU:
+ case kPPC_I8x16Shl:
+ case kPPC_I8x16ShrS:
+ case kPPC_I8x16ShrU:
+ case kPPC_S128And:
+ case kPPC_S128Or:
+ case kPPC_S128Xor:
+ case kPPC_S128Zero:
+ case kPPC_S128Not:
+ case kPPC_S128Select:
return kNoOpcodeFlags;
case kPPC_LoadWordS8:
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 1598fbad041..d5ec475a808 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -2127,6 +2127,86 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Le) \
+ V(F64x2Lt) \
+ V(F32x4Add) \
+ V(F32x4AddHoriz) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I32x4Add) \
+ V(I32x4AddHoriz) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MinU) \
+ V(I32x4MaxS) \
+ V(I32x4MaxU) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
+ V(I16x8Add) \
+ V(I16x8AddHoriz) \
+ V(I16x8Sub) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MinU) \
+ V(I16x8MaxS) \
+ V(I16x8MaxU) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
+ V(I8x16Add) \
+ V(I8x16Sub) \
+ V(I8x16Mul) \
+ V(I8x16MinS) \
+ V(I8x16MinU) \
+ V(I8x16MaxS) \
+ V(I8x16MaxU) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16GtU) \
+ V(I8x16GeU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
+
+#define SIMD_UNOP_LIST(V) V(S128Not)
+
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
#define SIMD_VISIT_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
PPCOperandGenerator g(this); \
@@ -2135,7 +2215,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
}
SIMD_TYPES(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT
-#undef SIMD_TYPES
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
@@ -2153,72 +2232,74 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ PPCOperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(kPPC_##Type##ReplaceLane, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), g.UseImmediate(lane), \
+ g.UseUniqueRegister(node->InputAt(1))); \
+ }
+SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempSimd128Register(), \
+ g.TempSimd128Register()}; \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+#undef SIMD_BINOP_LIST
+
+#define SIMD_VISIT_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+#undef SIMD_UNOP_LIST
+
+#define SIMD_VISIT_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1))); \
+ }
+SIMD_SHIFT_LIST(SIMD_VISIT_SHIFT)
+#undef SIMD_VISIT_SHIFT
+#undef SIMD_SHIFT_LIST
+#undef SIMD_TYPES
-void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Zero(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_S128Zero, g.DefineAsRegister(node));
+}
-void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Select(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_S128Select, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2227,20 +2308,8 @@ void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8RoundingAverageU(Node* node) {
UNIMPLEMENTED();
}
@@ -2251,32 +2320,14 @@ void InstructionSelector::VisitI8x16RoundingAverageU(Node* node) {
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2285,36 +2336,8 @@ void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS128AndNot(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
@@ -2338,12 +2361,6 @@ void InstructionSelector::EmitPrepareResults(
}
}
-void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
@@ -2352,8 +2369,6 @@ void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
@@ -2364,10 +2379,6 @@ void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2431,68 +2442,32 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV32x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV32x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV16x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV16x8AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV8x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV8x16AllTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }