author     Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/compiler/backend/mips64
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download   qtwebengine-chromium-85-based.tar.gz
BASELINE: Update Chromium to 85.0.4183.140  [85-based]
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/compiler/backend/mips64')
-rw-r--r--  chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc        102
-rw-r--r--  chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h       19
-rw-r--r--  chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc  19
-rw-r--r--  chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc   39
4 files changed, 155 insertions, 24 deletions
diff --git a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 197167c01cd..9acd6459de5 100644
--- a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -2265,6 +2265,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt8(1));
break;
}
+ case kMips64F64x2Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_d(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMips64F64x2Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_d(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMips64I64x2ReplaceLane: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
@@ -2581,6 +2601,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMips64F32x4Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_w(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMips64F32x4Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_w(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMips64I32x4SConvertF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -2634,6 +2674,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I32x4BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_w(scratch0, src, 31);
+ __ srli_d(scratch1, scratch0, 31);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 2);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMips64I16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2820,6 +2875,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I16x8BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_h(scratch0, src, 15);
+ __ srli_w(scratch1, scratch0, 15);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 30);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 4);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMips64I8x16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
@@ -3006,6 +3078,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I8x16BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_b(scratch0, src, 7);
+ __ srli_h(scratch1, scratch0, 7);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_w(scratch1, scratch0, 14);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 28);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ ilvev_b(scratch0, scratch1, scratch0);
+ __ copy_u_h(dst, scratch0, 0);
+ break;
+ }
case kMips64S128And: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3030,9 +3120,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMips64S1x4AnyTrue:
- case kMips64S1x8AnyTrue:
- case kMips64S1x16AnyTrue: {
+ case kMips64V32x4AnyTrue:
+ case kMips64V16x8AnyTrue:
+ case kMips64V8x16AnyTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
@@ -3043,7 +3133,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
- case kMips64S1x4AllTrue: {
+ case kMips64V32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3054,7 +3144,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMips64S1x8AllTrue: {
+ case kMips64V16x8AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3065,7 +3155,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMips64S1x16AllTrue: {
+ case kMips64V8x16AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
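
The fclt_d/fclt_w + bsel_v pairs in the new Pmin/Pmax cases implement WebAssembly's pseudo-minimum/maximum, which simply select one operand from a strict comparison rather than following IEEE min/max rules. A minimal scalar sketch of the per-lane semantics, matching the inline comments in the hunks above (illustrative only; these helpers are not part of V8):

    // Per-lane model of kMips64F64x2Pmin / kMips64F64x2Pmax.
    // fclt_* leaves an all-ones mask in dst where the compare holds;
    // bsel_v then picks rhs where the mask is set and lhs elsewhere.
    double PseudoMin(double lhs, double rhs) {
      return rhs < lhs ? rhs : lhs;  // false compare (incl. NaN) keeps lhs
    }
    double PseudoMax(double lhs, double rhs) {
      return lhs < rhs ? rhs : lhs;  // rhs selected only on strict lhs < rhs
    }
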
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 9303b4572f3..0c42c059ea5 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -203,6 +203,8 @@ namespace compiler {
V(Mips64F64x2Splat) \
V(Mips64F64x2ExtractLane) \
V(Mips64F64x2ReplaceLane) \
+ V(Mips64F64x2Pmin) \
+ V(Mips64F64x2Pmax) \
V(Mips64I64x2Splat) \
V(Mips64I64x2ExtractLane) \
V(Mips64I64x2ReplaceLane) \
@@ -229,6 +231,8 @@ namespace compiler {
V(Mips64F32x4Ne) \
V(Mips64F32x4Lt) \
V(Mips64F32x4Le) \
+ V(Mips64F32x4Pmin) \
+ V(Mips64F32x4Pmax) \
V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4) \
V(Mips64I32x4Neg) \
@@ -237,6 +241,7 @@ namespace compiler {
V(Mips64I32x4GtU) \
V(Mips64I32x4GeU) \
V(Mips64I32x4Abs) \
+ V(Mips64I32x4BitMask) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \
@@ -265,6 +270,7 @@ namespace compiler {
V(Mips64I16x8GeU) \
V(Mips64I16x8RoundingAverageU) \
V(Mips64I16x8Abs) \
+ V(Mips64I16x8BitMask) \
V(Mips64I8x16Splat) \
V(Mips64I8x16ExtractLaneU) \
V(Mips64I8x16ExtractLaneS) \
@@ -292,18 +298,19 @@ namespace compiler {
V(Mips64I8x16GeU) \
V(Mips64I8x16RoundingAverageU) \
V(Mips64I8x16Abs) \
+ V(Mips64I8x16BitMask) \
V(Mips64S128And) \
V(Mips64S128Or) \
V(Mips64S128Xor) \
V(Mips64S128Not) \
V(Mips64S128Select) \
V(Mips64S128AndNot) \
- V(Mips64S1x4AnyTrue) \
- V(Mips64S1x4AllTrue) \
- V(Mips64S1x8AnyTrue) \
- V(Mips64S1x8AllTrue) \
- V(Mips64S1x16AnyTrue) \
- V(Mips64S1x16AllTrue) \
+ V(Mips64V32x4AnyTrue) \
+ V(Mips64V32x4AllTrue) \
+ V(Mips64V16x8AnyTrue) \
+ V(Mips64V16x8AllTrue) \
+ V(Mips64V8x16AnyTrue) \
+ V(Mips64V8x16AllTrue) \
V(Mips64S32x4InterleaveRight) \
V(Mips64S32x4InterleaveLeft) \
V(Mips64S32x4PackEven) \
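
The *BitMask opcodes declared here are lowered by the code-generator cases above: each lane's sign bit is shifted down to bit 0 of its lane and then folded into the lowest byte(s) with shift-and-or steps. A scalar sketch of what the I32x4 variant computes, assuming a plain array stands in for the MSA register (the helper name is made up for illustration):

    #include <cstdint>

    // Equivalent of kMips64I32x4BitMask: bit i of the result is the sign
    // bit of lane i of the 4 x int32 vector.
    uint32_t I32x4BitMask(const int32_t lane[4]) {
      uint32_t mask = 0;
      for (int i = 0; i < 4; ++i) {
        mask |= (static_cast<uint32_t>(lane[i]) >> 31) << i;
      }
      return mask;
    }
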
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 81fc3b2ca9a..2f8a2722015 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -82,6 +82,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F64x2Ne:
case kMips64F64x2Lt:
case kMips64F64x2Le:
+ case kMips64F64x2Pmin:
+ case kMips64F64x2Pmax:
case kMips64I64x2Splat:
case kMips64I64x2ExtractLane:
case kMips64I64x2ReplaceLane:
@@ -113,6 +115,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F32x4Splat:
case kMips64F32x4Sub:
case kMips64F32x4UConvertI32x4:
+ case kMips64F32x4Pmin:
+ case kMips64F32x4Pmax:
case kMips64F64x2Splat:
case kMips64F64x2ExtractLane:
case kMips64F64x2ReplaceLane:
@@ -171,6 +175,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I16x8UConvertI8x16Low:
case kMips64I16x8RoundingAverageU:
case kMips64I16x8Abs:
+ case kMips64I16x8BitMask:
case kMips64I32x4Add:
case kMips64I32x4AddHoriz:
case kMips64I32x4Eq:
@@ -199,6 +204,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I32x4UConvertI16x8High:
case kMips64I32x4UConvertI16x8Low:
case kMips64I32x4Abs:
+ case kMips64I32x4BitMask:
case kMips64I8x16Add:
case kMips64I8x16AddSaturateS:
case kMips64I8x16AddSaturateU:
@@ -226,6 +232,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I8x16SubSaturateU:
case kMips64I8x16RoundingAverageU:
case kMips64I8x16Abs:
+ case kMips64I8x16BitMask:
case kMips64Ins:
case kMips64Lsa:
case kMips64MaxD:
@@ -265,12 +272,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S16x8PackOdd:
case kMips64S16x2Reverse:
case kMips64S16x4Reverse:
- case kMips64S1x16AllTrue:
- case kMips64S1x16AnyTrue:
- case kMips64S1x4AllTrue:
- case kMips64S1x4AnyTrue:
- case kMips64S1x8AllTrue:
- case kMips64S1x8AnyTrue:
+ case kMips64V8x16AllTrue:
+ case kMips64V8x16AnyTrue:
+ case kMips64V32x4AllTrue:
+ case kMips64V32x4AnyTrue:
+ case kMips64V16x8AllTrue:
+ case kMips64V16x8AnyTrue:
case kMips64S32x4InterleaveEven:
case kMips64S32x4InterleaveOdd:
case kMips64S32x4InterleaveLeft:
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 719a916b6a5..2c9c8d439b6 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -163,6 +163,14 @@ static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(1)));
}
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Mips64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Mips64OperandGenerator g(selector);
selector->Emit(
@@ -2778,21 +2786,24 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
V(I32x4Abs, kMips64I32x4Abs) \
+ V(I32x4BitMask, kMips64I32x4BitMask) \
V(I16x8Neg, kMips64I16x8Neg) \
V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
V(I16x8Abs, kMips64I16x8Abs) \
+ V(I16x8BitMask, kMips64I16x8BitMask) \
V(I8x16Neg, kMips64I8x16Neg) \
V(I8x16Abs, kMips64I8x16Abs) \
+ V(I8x16BitMask, kMips64I8x16BitMask) \
V(S128Not, kMips64S128Not) \
- V(S1x4AnyTrue, kMips64S1x4AnyTrue) \
- V(S1x4AllTrue, kMips64S1x4AllTrue) \
- V(S1x8AnyTrue, kMips64S1x8AnyTrue) \
- V(S1x8AllTrue, kMips64S1x8AllTrue) \
- V(S1x16AnyTrue, kMips64S1x16AnyTrue) \
- V(S1x16AllTrue, kMips64S1x16AllTrue)
+ V(V32x4AnyTrue, kMips64V32x4AnyTrue) \
+ V(V32x4AllTrue, kMips64V32x4AllTrue) \
+ V(V16x8AnyTrue, kMips64V16x8AnyTrue) \
+ V(V16x8AllTrue, kMips64V16x8AllTrue) \
+ V(V8x16AnyTrue, kMips64V8x16AnyTrue) \
+ V(V8x16AllTrue, kMips64V8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -3099,6 +3110,22 @@ void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
g.TempImmediate(0));
}
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kMips64F32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kMips64F32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kMips64F64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kMips64F64x2Pmax, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
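
The Pmin/Pmax visitors above go through the new VisitUniqueRRR helper, i.e. both inputs are allocated with UseUniqueRegister instead of UseRegister. A plausible reading (not spelled out in the patch) is that the emitted sequence writes the compare mask into the output register before bsel_v reads the inputs, so the output must not alias either input:

    // Sketch of the aliasing hazard avoided by unique registers
    // (hypothetical; MSA mnemonics stand in for the TurboAssembler calls):
    //   fclt_d(dst, rhs, lhs);  // dst <- mask; if dst aliased lhs, lhs is
    //                           // clobbered here...
    //   bsel_v(dst, lhs, rhs);  // ...and this would then read the bad lhs.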