author    Michaël Zasso <targos@protonmail.com>  2017-06-06 10:28:14 +0200
committer Michaël Zasso <targos@protonmail.com>  2017-06-07 10:33:31 +0200
commit    3dc8c3bed4cf3a77607edbb0b015e33f8b60fc09 (patch)
tree      9dee56e142638b34f1eccbd0ad88c3bce5377c29 /deps/v8/src/compiler
parent    91a1bbe3055a660194ca4d403795aa0c03e9d056 (diff)
download  node-new-3dc8c3bed4cf3a77607edbb0b015e33f8b60fc09.tar.gz

deps: update V8 to 5.9.211.32
PR-URL: https://github.com/nodejs/node/pull/13263
Reviewed-By: Gibson Fahnestock <gibfahn@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/OWNERS | 7
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 10
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 4
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 17
-rw-r--r--  deps/v8/src/compiler/access-info.h | 5
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc | 627
-rw-r--r--  deps/v8/src/compiler/arm/instruction-codes-arm.h | 206
-rw-r--r--  deps/v8/src/compiler/arm/instruction-scheduler-arm.cc | 206
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc | 515
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc | 167
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 151
-rw-r--r--  deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc | 2
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.cc | 39
-rw-r--r--  deps/v8/src/compiler/ast-loop-assignment-analyzer.cc | 8
-rw-r--r--  deps/v8/src/compiler/branch-elimination.cc | 3
-rw-r--r--  deps/v8/src/compiler/branch-elimination.h | 2
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 688
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 72
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 2
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 201
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 73
-rw-r--r--  deps/v8/src/compiler/code-generator-impl.h | 9
-rw-r--r--  deps/v8/src/compiler/code-generator.cc | 19
-rw-r--r--  deps/v8/src/compiler/code-generator.h | 2
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 25
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 8
-rw-r--r--  deps/v8/src/compiler/control-equivalence.cc | 16
-rw-r--r--  deps/v8/src/compiler/control-equivalence.h | 46
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 288
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h | 6
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 136
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.h | 4
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 78
-rw-r--r--  deps/v8/src/compiler/escape-analysis.h | 2
-rw-r--r--  deps/v8/src/compiler/gap-resolver.cc | 7
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 8
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 3
-rw-r--r--  deps/v8/src/compiler/graph-replay.cc | 92
-rw-r--r--  deps/v8/src/compiler/graph-replay.h | 40
-rw-r--r--  deps/v8/src/compiler/graph.h | 53
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc | 190
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-codes-ia32.h | 10
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc | 12
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 226
-rw-r--r--  deps/v8/src/compiler/instruction-codes.h | 35
-rw-r--r--  deps/v8/src/compiler/instruction-scheduler.cc | 37
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc | 822
-rw-r--r--  deps/v8/src/compiler/instruction-selector.h | 5
-rw-r--r--  deps/v8/src/compiler/instruction.h | 74
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc | 61
-rw-r--r--  deps/v8/src/compiler/int64-lowering.h | 6
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.cc | 332
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.h | 3
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 110
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 15
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 17
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.h | 11
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 109
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 215
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 26
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 89
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 41
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.h | 4
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 287
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 21
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 112
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 94
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc | 215
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.h | 56
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 293
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 7
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 2
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 57
-rw-r--r--  deps/v8/src/compiler/loop-variable-optimizer.cc | 5
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc | 50
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 607
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 347
-rw-r--r--  deps/v8/src/compiler/mips/code-generator-mips.cc | 212
-rw-r--r--  deps/v8/src/compiler/mips/instruction-codes-mips.h | 24
-rw-r--r--  deps/v8/src/compiler/mips/instruction-selector-mips.cc | 152
-rw-r--r--  deps/v8/src/compiler/mips64/code-generator-mips64.cc | 184
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 24
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 158
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 3
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 7
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 5
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 396
-rw-r--r--  deps/v8/src/compiler/operation-typer.cc | 44
-rw-r--r--  deps/v8/src/compiler/operation-typer.h | 7
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 2
-rw-r--r--  deps/v8/src/compiler/osr.cc | 3
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 82
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc | 69
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 60
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 40
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 41
-rw-r--r--  deps/v8/src/compiler/register-allocator.cc | 7
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 93
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 82
-rw-r--r--  deps/v8/src/compiler/s390/code-generator-s390.cc | 962
-rw-r--r--  deps/v8/src/compiler/s390/instruction-codes-s390.h | 2
-rw-r--r--  deps/v8/src/compiler/s390/instruction-scheduler-s390.cc | 2
-rw-r--r--  deps/v8/src/compiler/s390/instruction-selector-s390.cc | 1446
-rw-r--r--  deps/v8/src/compiler/schedule.cc | 25
-rw-r--r--  deps/v8/src/compiler/schedule.h | 12
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 64
-rw-r--r--  deps/v8/src/compiler/scheduler.h | 10
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 423
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.h | 40
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 269
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 206
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 41
-rw-r--r--  deps/v8/src/compiler/tail-call-optimization.cc | 90
-rw-r--r--  deps/v8/src/compiler/typed-optimization.cc | 29
-rw-r--r--  deps/v8/src/compiler/typed-optimization.h | 2
-rw-r--r--  deps/v8/src/compiler/typer.cc | 128
-rw-r--r--  deps/v8/src/compiler/types.cc | 4
-rw-r--r--  deps/v8/src/compiler/types.h | 4
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 92
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 1363
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 57
-rw-r--r--  deps/v8/src/compiler/wasm-linkage.cc | 2
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc | 282
-rw-r--r--  deps/v8/src/compiler/x64/instruction-codes-x64.h | 25
-rw-r--r--  deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 27
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 282
-rw-r--r--  deps/v8/src/compiler/x87/code-generator-x87.cc | 1
-rw-r--r--  deps/v8/src/compiler/x87/instruction-selector-x87.cc | 8
128 files changed, 10559 insertions(+), 5484 deletions(-)
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 10ffcb0f1a..015bf85758 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,9 +1,14 @@
set noparent
bmeurer@chromium.org
-epertoso@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
mtrofin@chromium.org
titzer@chromium.org
danno@chromium.org
+tebbi@chromium.org
+
+per-file wasm-*=ahaas@chromium.org
+per-file wasm-*=clemensh@chromium.org
+
+per-file int64-lowering.*=ahaas@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 2722590c76..11925a84db 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -200,6 +200,16 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
}
// static
+FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos() {
+ FieldAccess access = {
+ kTaggedBase, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSGeneratorObjectRegisterFile() {
FieldAccess access = {
kTaggedBase, JSGeneratorObject::kRegisterFileOffset,
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 9d23220e82..668a720740 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -82,6 +82,10 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSGeneratorObject::input_or_debug_pos() field.
static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
+ // Provides access to JSAsyncGeneratorObject::await_input_or_debug_pos()
+ // field.
+ static FieldAccess ForJSAsyncGeneratorObjectAwaitInputOrDebugPos();
+
// Provides access to JSGeneratorObject::register_file() field.
static FieldAccess ForJSGeneratorObjectRegisterFile();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 8fef2f079c..c3096e9974 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -97,12 +97,6 @@ PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
return PropertyAccessInfo(kAccessorConstant, holder, constant, receiver_maps);
}
-// static
-PropertyAccessInfo PropertyAccessInfo::Generic(MapList const& receiver_maps) {
- return PropertyAccessInfo(kGeneric, MaybeHandle<JSObject>(), Handle<Object>(),
- receiver_maps);
-}
-
PropertyAccessInfo::PropertyAccessInfo()
: kind_(kInvalid),
field_representation_(MachineRepresentation::kNone),
@@ -177,8 +171,7 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
return false;
}
- case kNotFound:
- case kGeneric: {
+ case kNotFound: {
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
that->receiver_maps_.end());
@@ -236,6 +229,7 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
if (transition_target == nullptr) {
receiver_maps.Add(map);
} else {
+ DCHECK(!map->is_stable());
transitions.push_back(std::make_pair(map, handle(transition_target)));
}
}
@@ -372,9 +366,6 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
if (!optimization.is_simple_api_call()) {
return false;
}
- if (optimization.api_call_info()->fast_handler()->IsCode()) {
- return false;
- }
if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
}
if (access_mode == AccessMode::kLoad) {
@@ -518,10 +509,6 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
MaybeHandle<JSObject> holder,
PropertyAccessInfo* access_info) {
// Check if the {map} has a data transition with the given {name}.
- if (map->unused_property_fields() == 0) {
- *access_info = PropertyAccessInfo::Generic(MapList{map});
- return true;
- }
Handle<Map> transition_map;
if (TransitionArray::SearchTransition(map, kData, name, NONE)
.ToHandle(&transition_map)) {
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 42fa1db1ad..809aa83e47 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -63,8 +63,7 @@ class PropertyAccessInfo final {
kDataConstant,
kDataField,
kDataConstantField,
- kAccessorConstant,
- kGeneric
+ kAccessorConstant
};
static PropertyAccessInfo NotFound(MapList const& receiver_maps,
@@ -81,7 +80,6 @@ class PropertyAccessInfo final {
static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
Handle<Object> constant,
MaybeHandle<JSObject> holder);
- static PropertyAccessInfo Generic(MapList const& receiver_maps);
PropertyAccessInfo();
@@ -94,7 +92,6 @@ class PropertyAccessInfo final {
// is done.
bool IsDataConstantField() const { return kind() == kDataConstantField; }
bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
- bool IsGeneric() const { return kind() == kGeneric; }
bool HasTransitionMap() const { return !transition_map().is_null(); }
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 82039c8d2e..f2b7912ec5 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -5,11 +5,13 @@
#include "src/compiler/code-generator.h"
#include "src/arm/macro-assembler-arm.h"
+#include "src/assembler-inl.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/heap/heap-inl.h"
namespace v8 {
namespace internal {
@@ -420,6 +422,51 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ dmb(ISH); \
} while (0)
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label exchange; \
+ __ dmb(ISH); \
+ __ bind(&exchange); \
+ __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(0)); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(2), i.TempRegister(0)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &exchange); \
+ __ dmb(ISH); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ dmb(ISH); \
+ __ bind(&compareExchange); \
+ __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(0)); \
+ __ teq(i.TempRegister(1), Operand(i.OutputRegister(0))); \
+ __ b(ne, &exit); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(3), i.TempRegister(0)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &compareExchange); \
+ __ bind(&exit); \
+ __ dmb(ISH); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_instr(i.TempRegister(1), i.TempRegister(1), i.TempRegister(0)); \
+ __ teq(i.TempRegister(1), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
+ } while (0)
+
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -1411,6 +1458,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vstr(i.InputFloatRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArmVld1F64: {
+ __ vld1(Neon8, NeonListOperand(i.OutputDoubleRegister()),
+ NeonMemOperand(i.InputRegister(0)));
+ break;
+ }
+ case kArmVst1F64: {
+ __ vst1(Neon8, NeonListOperand(i.InputDoubleRegister(0)),
+ NeonMemOperand(i.InputRegister(1)));
+ break;
+ }
+ case kArmVld1S128: {
+ __ vld1(Neon8, NeonListOperand(i.OutputSimd128Register()),
+ NeonMemOperand(i.InputRegister(0)));
+ break;
+ }
+ case kArmVst1S128: {
+ __ vst1(Neon8, NeonListOperand(i.InputSimd128Register(0)),
+ NeonMemOperand(i.InputRegister(1)));
+ break;
+ }
case kArmVldrF64:
__ vldr(i.OutputDoubleRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1504,438 +1571,650 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmFloat32x4Splat: {
+ case kArmF32x4Splat: {
__ vdup(i.OutputSimd128Register(), i.InputFloatRegister(0));
break;
}
- case kArmFloat32x4ExtractLane: {
+ case kArmF32x4ExtractLane: {
__ ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
kScratchReg, i.InputInt8(1));
break;
}
- case kArmFloat32x4ReplaceLane: {
+ case kArmF32x4ReplaceLane: {
__ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputFloatRegister(2), kScratchReg, i.InputInt8(1));
break;
}
- case kArmFloat32x4FromInt32x4: {
+ case kArmF32x4SConvertI32x4: {
__ vcvt_f32_s32(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmFloat32x4FromUint32x4: {
+ case kArmF32x4UConvertI32x4: {
__ vcvt_f32_u32(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmFloat32x4Abs: {
+ case kArmF32x4Abs: {
__ vabs(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmFloat32x4Neg: {
+ case kArmF32x4Neg: {
__ vneg(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmFloat32x4Add: {
+ case kArmF32x4RecipApprox: {
+ __ vrecpe(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmF32x4RecipSqrtApprox: {
+ __ vrsqrte(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmF32x4Add: {
__ vadd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmFloat32x4Sub: {
+ case kArmF32x4Sub: {
__ vsub(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmFloat32x4Equal: {
+ case kArmF32x4Mul: {
+ __ vmul(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmF32x4Min: {
+ __ vmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmF32x4Max: {
+ __ vmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmF32x4RecipRefine: {
+ __ vrecps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmF32x4RecipSqrtRefine: {
+ __ vrsqrts(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmF32x4Eq: {
__ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmFloat32x4NotEqual: {
+ case kArmF32x4Ne: {
Simd128Register dst = i.OutputSimd128Register();
__ vceq(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ vmvn(dst, dst);
break;
}
- case kArmInt32x4Splat: {
+ case kArmF32x4Lt: {
+ __ vcgt(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kArmF32x4Le: {
+ __ vcge(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kArmI32x4Splat: {
__ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
break;
}
- case kArmInt32x4ExtractLane: {
+ case kArmI32x4ExtractLane: {
__ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS32,
i.InputInt8(1));
break;
}
- case kArmInt32x4ReplaceLane: {
+ case kArmI32x4ReplaceLane: {
__ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputRegister(2), NeonS32, i.InputInt8(1));
break;
}
- case kArmInt32x4FromFloat32x4: {
+ case kArmI32x4SConvertF32x4: {
__ vcvt_s32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmUint32x4FromFloat32x4: {
- __ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ case kArmI32x4SConvertI16x8Low: {
+ __ vmovl(NeonS16, i.OutputSimd128Register(),
+ i.InputSimd128Register(0).low());
+ break;
+ }
+ case kArmI32x4SConvertI16x8High: {
+ __ vmovl(NeonS16, i.OutputSimd128Register(),
+ i.InputSimd128Register(0).high());
break;
}
- case kArmInt32x4Neg: {
+ case kArmI32x4Neg: {
__ vneg(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmInt32x4ShiftLeftByScalar: {
+ case kArmI32x4Shl: {
__ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
- case kArmInt32x4ShiftRightByScalar: {
+ case kArmI32x4ShrS: {
__ vshr(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
- case kArmInt32x4Add: {
+ case kArmI32x4Add: {
__ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt32x4Sub: {
+ case kArmI32x4Sub: {
__ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt32x4Mul: {
+ case kArmI32x4Mul: {
__ vmul(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt32x4Min: {
+ case kArmI32x4MinS: {
__ vmin(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt32x4Max: {
+ case kArmI32x4MaxS: {
__ vmax(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt32x4Equal: {
+ case kArmI32x4Eq: {
__ vceq(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt32x4NotEqual: {
+ case kArmI32x4Ne: {
Simd128Register dst = i.OutputSimd128Register();
__ vceq(Neon32, dst, i.InputSimd128Register(0),
i.InputSimd128Register(1));
__ vmvn(dst, dst);
break;
}
- case kArmInt32x4GreaterThan: {
- __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI32x4LtS: {
+ __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmInt32x4GreaterThanOrEqual: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vcge(NeonS32, dst, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI32x4LeS: {
+ __ vcge(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kArmI32x4UConvertF32x4: {
+ __ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmI32x4UConvertI16x8Low: {
+ __ vmovl(NeonU16, i.OutputSimd128Register(),
+ i.InputSimd128Register(0).low());
break;
}
- case kArmUint32x4ShiftRightByScalar: {
+ case kArmI32x4UConvertI16x8High: {
+ __ vmovl(NeonU16, i.OutputSimd128Register(),
+ i.InputSimd128Register(0).high());
+ break;
+ }
+ case kArmI32x4ShrU: {
__ vshr(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
- case kArmUint32x4Min: {
+ case kArmI32x4MinU: {
__ vmin(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint32x4Max: {
+ case kArmI32x4MaxU: {
__ vmax(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint32x4GreaterThan: {
- __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI32x4LtU: {
+ __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmUint32x4GreaterThanOrEqual: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vcge(NeonU32, dst, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI32x4LeU: {
+ __ vcge(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmInt16x8Splat: {
+ case kArmI16x8Splat: {
__ vdup(Neon16, i.OutputSimd128Register(), i.InputRegister(0));
break;
}
- case kArmInt16x8ExtractLane: {
+ case kArmI16x8ExtractLane: {
__ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS16,
i.InputInt8(1));
break;
}
- case kArmInt16x8ReplaceLane: {
+ case kArmI16x8ReplaceLane: {
__ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputRegister(2), NeonS16, i.InputInt8(1));
break;
}
- case kArmInt16x8Neg: {
+ case kArmI16x8SConvertI8x16Low: {
+ __ vmovl(NeonS8, i.OutputSimd128Register(),
+ i.InputSimd128Register(0).low());
+ break;
+ }
+ case kArmI16x8SConvertI8x16High: {
+ __ vmovl(NeonS8, i.OutputSimd128Register(),
+ i.InputSimd128Register(0).high());
+ break;
+ }
+ case kArmI16x8Neg: {
__ vneg(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmInt16x8ShiftLeftByScalar: {
+ case kArmI16x8Shl: {
__ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
- case kArmInt16x8ShiftRightByScalar: {
+ case kArmI16x8ShrS: {
__ vshr(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
- case kArmInt16x8Add: {
+ case kArmI16x8SConvertI32x4: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // Take care not to overwrite a source register before it's used.
+ if (dst.is(src0) && dst.is(src1)) {
+ __ vqmovn(NeonS16, dst.low(), src0);
+ __ vmov(dst.high(), dst.low());
+ } else if (dst.is(src0)) {
+ // dst is src0, so narrow src0 first.
+ __ vqmovn(NeonS16, dst.low(), src0);
+ __ vqmovn(NeonS16, dst.high(), src1);
+ } else {
+ // dst may alias src1, so narrow src1 first.
+ __ vqmovn(NeonS16, dst.high(), src1);
+ __ vqmovn(NeonS16, dst.low(), src0);
+ }
+ break;
+ }
+ case kArmI16x8Add: {
__ vadd(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt16x8AddSaturate: {
+ case kArmI16x8AddSaturateS: {
__ vqadd(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt16x8Sub: {
+ case kArmI16x8Sub: {
__ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt16x8SubSaturate: {
+ case kArmI16x8SubSaturateS: {
__ vqsub(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt16x8Mul: {
+ case kArmI16x8Mul: {
__ vmul(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt16x8Min: {
+ case kArmI16x8MinS: {
__ vmin(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt16x8Max: {
+ case kArmI16x8MaxS: {
__ vmax(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt16x8Equal: {
+ case kArmI16x8Eq: {
__ vceq(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt16x8NotEqual: {
+ case kArmI16x8Ne: {
Simd128Register dst = i.OutputSimd128Register();
__ vceq(Neon16, dst, i.InputSimd128Register(0),
i.InputSimd128Register(1));
__ vmvn(dst, dst);
break;
}
- case kArmInt16x8GreaterThan: {
- __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI16x8LtS: {
+ __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmInt16x8GreaterThanOrEqual: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vcge(NeonS16, dst, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI16x8LeS: {
+ __ vcge(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kArmI16x8UConvertI8x16Low: {
+ __ vmovl(NeonU8, i.OutputSimd128Register(),
+ i.InputSimd128Register(0).low());
+ break;
+ }
+ case kArmI16x8UConvertI8x16High: {
+ __ vmovl(NeonU8, i.OutputSimd128Register(),
+ i.InputSimd128Register(0).high());
break;
}
- case kArmUint16x8ShiftRightByScalar: {
+ case kArmI16x8ShrU: {
__ vshr(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
- case kArmUint16x8AddSaturate: {
+ case kArmI16x8UConvertI32x4: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // Take care not to overwrite a source register before it's used.
+ if (dst.is(src0) && dst.is(src1)) {
+ __ vqmovn(NeonU16, dst.low(), src0);
+ __ vmov(dst.high(), dst.low());
+ } else if (dst.is(src0)) {
+ // dst is src0, so narrow src0 first.
+ __ vqmovn(NeonU16, dst.low(), src0);
+ __ vqmovn(NeonU16, dst.high(), src1);
+ } else {
+ // dst may alias src1, so narrow src1 first.
+ __ vqmovn(NeonU16, dst.high(), src1);
+ __ vqmovn(NeonU16, dst.low(), src0);
+ }
+ break;
+ }
+ case kArmI16x8AddSaturateU: {
__ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint16x8SubSaturate: {
+ case kArmI16x8SubSaturateU: {
__ vqsub(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint16x8Min: {
+ case kArmI16x8MinU: {
__ vmin(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint16x8Max: {
+ case kArmI16x8MaxU: {
__ vmax(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint16x8GreaterThan: {
- __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI16x8LtU: {
+ __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmUint16x8GreaterThanOrEqual: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vcge(NeonU16, dst, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI16x8LeU: {
+ __ vcge(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmInt8x16Splat: {
+ case kArmI8x16Splat: {
__ vdup(Neon8, i.OutputSimd128Register(), i.InputRegister(0));
break;
}
- case kArmInt8x16ExtractLane: {
+ case kArmI8x16ExtractLane: {
__ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8,
i.InputInt8(1));
break;
}
- case kArmInt8x16ReplaceLane: {
+ case kArmI8x16ReplaceLane: {
__ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputRegister(2), NeonS8, i.InputInt8(1));
break;
}
- case kArmInt8x16Neg: {
+ case kArmI8x16Neg: {
__ vneg(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmInt8x16ShiftLeftByScalar: {
+ case kArmI8x16Shl: {
__ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
- case kArmInt8x16ShiftRightByScalar: {
+ case kArmI8x16ShrS: {
__ vshr(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
- case kArmInt8x16Add: {
+ case kArmI8x16SConvertI16x8: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // Take care not to overwrite a source register before it's used.
+ if (dst.is(src0) && dst.is(src1)) {
+ __ vqmovn(NeonS8, dst.low(), src0);
+ __ vmov(dst.high(), dst.low());
+ } else if (dst.is(src0)) {
+ // dst is src0, so narrow src0 first.
+ __ vqmovn(NeonS8, dst.low(), src0);
+ __ vqmovn(NeonS8, dst.high(), src1);
+ } else {
+ // dst may alias src1, so narrow src1 first.
+ __ vqmovn(NeonS8, dst.high(), src1);
+ __ vqmovn(NeonS8, dst.low(), src0);
+ }
+ break;
+ }
+ case kArmI8x16Add: {
__ vadd(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt8x16AddSaturate: {
+ case kArmI8x16AddSaturateS: {
__ vqadd(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt8x16Sub: {
+ case kArmI8x16Sub: {
__ vsub(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt8x16SubSaturate: {
+ case kArmI8x16SubSaturateS: {
__ vqsub(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt8x16Mul: {
+ case kArmI8x16Mul: {
__ vmul(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt8x16Min: {
+ case kArmI8x16MinS: {
__ vmin(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt8x16Max: {
+ case kArmI8x16MaxS: {
__ vmax(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt8x16Equal: {
+ case kArmI8x16Eq: {
__ vceq(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt8x16NotEqual: {
+ case kArmI8x16Ne: {
Simd128Register dst = i.OutputSimd128Register();
__ vceq(Neon8, dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ vmvn(dst, dst);
break;
}
- case kArmInt8x16GreaterThan: {
- __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI8x16LtS: {
+ __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmInt8x16GreaterThanOrEqual: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vcge(NeonS8, dst, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI8x16LeS: {
+ __ vcge(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmUint8x16ShiftRightByScalar: {
+ case kArmI8x16ShrU: {
__ vshr(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
- case kArmUint8x16AddSaturate: {
+ case kArmI8x16UConvertI16x8: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // Take care not to overwrite a source register before it's used.
+ if (dst.is(src0) && dst.is(src1)) {
+ __ vqmovn(NeonU8, dst.low(), src0);
+ __ vmov(dst.high(), dst.low());
+ } else if (dst.is(src0)) {
+ // dst is src0, so narrow src0 first.
+ __ vqmovn(NeonU8, dst.low(), src0);
+ __ vqmovn(NeonU8, dst.high(), src1);
+ } else {
+ // dst may alias src1, so narrow src1 first.
+ __ vqmovn(NeonU8, dst.high(), src1);
+ __ vqmovn(NeonU8, dst.low(), src0);
+ }
+ break;
+ }
+ case kArmI8x16AddSaturateU: {
__ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint8x16SubSaturate: {
+ case kArmI8x16SubSaturateU: {
__ vqsub(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint8x16Min: {
+ case kArmI8x16MinU: {
__ vmin(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint8x16Max: {
+ case kArmI8x16MaxU: {
__ vmax(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmUint8x16GreaterThan: {
- __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI8x16LtU: {
+ __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmUint8x16GreaterThanOrEqual: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vcge(NeonU8, dst, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kArmI8x16LeU: {
+ __ vcge(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kArmSimd128And: {
+ case kArmS128Zero: {
+ __ veor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kArmS128And: {
__ vand(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmSimd128Or: {
+ case kArmS128Or: {
__ vorr(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmSimd128Xor: {
+ case kArmS128Xor: {
__ veor(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmSimd128Not: {
+ case kArmS128Not: {
__ vmvn(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmSimd32x4Select:
- case kArmSimd16x8Select:
- case kArmSimd8x16Select: {
+ case kArmS128Select: {
// vbsl clobbers the mask input so make sure it was DefineSameAsFirst.
DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ vbsl(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(2));
break;
}
+ case kArmS1x4AnyTrue: {
+ const QwNeonRegister& src = i.InputSimd128Register(0);
+ __ vpmax(NeonU32, kScratchDoubleReg, src.low(), src.high());
+ __ vpmax(NeonU32, kScratchDoubleReg, kScratchDoubleReg,
+ kScratchDoubleReg);
+ __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS32, 0);
+ break;
+ }
+ case kArmS1x4AllTrue: {
+ const QwNeonRegister& src = i.InputSimd128Register(0);
+ __ vpmin(NeonU32, kScratchDoubleReg, src.low(), src.high());
+ __ vpmin(NeonU32, kScratchDoubleReg, kScratchDoubleReg,
+ kScratchDoubleReg);
+ __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS32, 0);
+ break;
+ }
+ case kArmS1x8AnyTrue: {
+ const QwNeonRegister& src = i.InputSimd128Register(0);
+ __ vpmax(NeonU16, kScratchDoubleReg, src.low(), src.high());
+ __ vpmax(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
+ kScratchDoubleReg);
+ __ vpmax(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
+ kScratchDoubleReg);
+ __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS16, 0);
+ break;
+ }
+ case kArmS1x8AllTrue: {
+ const QwNeonRegister& src = i.InputSimd128Register(0);
+ __ vpmin(NeonU16, kScratchDoubleReg, src.low(), src.high());
+ __ vpmin(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
+ kScratchDoubleReg);
+ __ vpmin(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
+ kScratchDoubleReg);
+ __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS16, 0);
+ break;
+ }
+ case kArmS1x16AnyTrue: {
+ const QwNeonRegister& src = i.InputSimd128Register(0);
+ __ vpmax(NeonU8, kScratchDoubleReg, src.low(), src.high());
+ __ vpmax(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ // vtst to detect any bits in the bottom 32 bits of kScratchDoubleReg.
+ // This saves an instruction vs. the naive sequence of vpmax.
+ // kDoubleRegZero is not changed, since it is 0.
+ __ vtst(Neon32, kScratchQuadReg, kScratchQuadReg, kScratchQuadReg);
+ __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS32, 0);
+ break;
+ }
+ case kArmS1x16AllTrue: {
+ const QwNeonRegister& src = i.InputSimd128Register(0);
+ __ vpmin(NeonU8, kScratchDoubleReg, src.low(), src.high());
+ __ vpmin(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpmin(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpmin(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS8, 0);
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
break;
@@ -2002,6 +2281,69 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(str);
break;
+ case kAtomicExchangeInt8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
+ __ sxtb(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
+ break;
+ case kAtomicExchangeInt16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
+ __ sxth(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
+ break;
+ case kAtomicExchangeWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
+ break;
+ case kAtomicCompareExchangeInt8:
+ __ uxtb(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
+ __ sxtb(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicCompareExchangeUint8:
+ __ uxtb(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
+ break;
+ case kAtomicCompareExchangeInt16:
+ __ uxth(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
+ __ sxth(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicCompareExchangeUint16:
+ __ uxth(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
+ break;
+ case kAtomicCompareExchangeWord32:
+ __ mov(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex);
+ break;
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kAtomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
+ __ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
+ break; \
+ case kAtomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
+ break; \
+ case kAtomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
+ __ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
+ break; \
+ case kAtomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
+ break; \
+ case kAtomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
+ break;
+ ATOMIC_BINOP_CASE(Add, add)
+ ATOMIC_BINOP_CASE(Sub, sub)
+ ATOMIC_BINOP_CASE(And, and_)
+ ATOMIC_BINOP_CASE(Or, orr)
+ ATOMIC_BINOP_CASE(Xor, eor)
+#undef ATOMIC_BINOP_CASE
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2141,7 +2483,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ }
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
__ CheckConstPool(false, false);
return kSuccess;
@@ -2218,6 +2562,47 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
+ if (info()->IsWasm()) {
+ if (shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ __ Move(kScratchReg,
+ Operand(ExternalReference::address_of_real_stack_limit(
+ isolate())));
+ __ ldr(kScratchReg, MemOperand(kScratchReg));
+ __ add(kScratchReg, kScratchReg,
+ Operand(shrink_slots * kPointerSize));
+ __ cmp(sp, kScratchReg);
+ __ b(cs, &done);
+ }
+
+ if (!frame_access_state()->has_frame()) {
+ __ set_has_frame(true);
+ // There is no need to leave the frame, we will not return from the
+ // runtime call.
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ __ Move(cp, Smi::kZero);
+ __ CallRuntime(Runtime::kThrowWasmStackOverflow);
+ // We come from WebAssembly, there are no references for the GC.
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromThrow));
+ }
+
+ __ bind(&done);
+ }
+ }
__ sub(sp, sp, Operand(shrink_slots * kPointerSize));
}
@@ -2292,6 +2677,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
+void CodeGenerator::FinishCode() { masm()->CheckConstPool(true, false); }
+
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
ArmOperandConverter g(this, nullptr);
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 0c19debad7..e709a23f5c 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -104,7 +104,11 @@ namespace compiler {
V(ArmVldrF32) \
V(ArmVstrF32) \
V(ArmVldrF64) \
+ V(ArmVld1F64) \
V(ArmVstrF64) \
+ V(ArmVst1F64) \
+ V(ArmVld1S128) \
+ V(ArmVst1S128) \
V(ArmFloat32Max) \
V(ArmFloat64Max) \
V(ArmFloat32Min) \
@@ -120,94 +124,120 @@ namespace compiler {
V(ArmStr) \
V(ArmPush) \
V(ArmPoke) \
- V(ArmFloat32x4Splat) \
- V(ArmFloat32x4ExtractLane) \
- V(ArmFloat32x4ReplaceLane) \
- V(ArmFloat32x4FromInt32x4) \
- V(ArmFloat32x4FromUint32x4) \
- V(ArmFloat32x4Abs) \
- V(ArmFloat32x4Neg) \
- V(ArmFloat32x4Add) \
- V(ArmFloat32x4Sub) \
- V(ArmFloat32x4Equal) \
- V(ArmFloat32x4NotEqual) \
- V(ArmInt32x4Splat) \
- V(ArmInt32x4ExtractLane) \
- V(ArmInt32x4ReplaceLane) \
- V(ArmInt32x4FromFloat32x4) \
- V(ArmUint32x4FromFloat32x4) \
- V(ArmInt32x4Neg) \
- V(ArmInt32x4ShiftLeftByScalar) \
- V(ArmInt32x4ShiftRightByScalar) \
- V(ArmInt32x4Add) \
- V(ArmInt32x4Sub) \
- V(ArmInt32x4Mul) \
- V(ArmInt32x4Min) \
- V(ArmInt32x4Max) \
- V(ArmInt32x4Equal) \
- V(ArmInt32x4NotEqual) \
- V(ArmInt32x4GreaterThan) \
- V(ArmInt32x4GreaterThanOrEqual) \
- V(ArmUint32x4ShiftRightByScalar) \
- V(ArmUint32x4Min) \
- V(ArmUint32x4Max) \
- V(ArmUint32x4GreaterThan) \
- V(ArmUint32x4GreaterThanOrEqual) \
- V(ArmInt16x8Splat) \
- V(ArmInt16x8ExtractLane) \
- V(ArmInt16x8ReplaceLane) \
- V(ArmInt16x8Neg) \
- V(ArmInt16x8ShiftLeftByScalar) \
- V(ArmInt16x8ShiftRightByScalar) \
- V(ArmInt16x8Add) \
- V(ArmInt16x8AddSaturate) \
- V(ArmInt16x8Sub) \
- V(ArmInt16x8SubSaturate) \
- V(ArmInt16x8Mul) \
- V(ArmInt16x8Min) \
- V(ArmInt16x8Max) \
- V(ArmInt16x8Equal) \
- V(ArmInt16x8NotEqual) \
- V(ArmInt16x8GreaterThan) \
- V(ArmInt16x8GreaterThanOrEqual) \
- V(ArmUint16x8ShiftRightByScalar) \
- V(ArmUint16x8AddSaturate) \
- V(ArmUint16x8SubSaturate) \
- V(ArmUint16x8Min) \
- V(ArmUint16x8Max) \
- V(ArmUint16x8GreaterThan) \
- V(ArmUint16x8GreaterThanOrEqual) \
- V(ArmInt8x16Splat) \
- V(ArmInt8x16ExtractLane) \
- V(ArmInt8x16ReplaceLane) \
- V(ArmInt8x16Neg) \
- V(ArmInt8x16ShiftLeftByScalar) \
- V(ArmInt8x16ShiftRightByScalar) \
- V(ArmInt8x16Add) \
- V(ArmInt8x16AddSaturate) \
- V(ArmInt8x16Sub) \
- V(ArmInt8x16SubSaturate) \
- V(ArmInt8x16Mul) \
- V(ArmInt8x16Min) \
- V(ArmInt8x16Max) \
- V(ArmInt8x16Equal) \
- V(ArmInt8x16NotEqual) \
- V(ArmInt8x16GreaterThan) \
- V(ArmInt8x16GreaterThanOrEqual) \
- V(ArmUint8x16ShiftRightByScalar) \
- V(ArmUint8x16AddSaturate) \
- V(ArmUint8x16SubSaturate) \
- V(ArmUint8x16Min) \
- V(ArmUint8x16Max) \
- V(ArmUint8x16GreaterThan) \
- V(ArmUint8x16GreaterThanOrEqual) \
- V(ArmSimd128And) \
- V(ArmSimd128Or) \
- V(ArmSimd128Xor) \
- V(ArmSimd128Not) \
- V(ArmSimd32x4Select) \
- V(ArmSimd16x8Select) \
- V(ArmSimd8x16Select)
+ V(ArmF32x4Splat) \
+ V(ArmF32x4ExtractLane) \
+ V(ArmF32x4ReplaceLane) \
+ V(ArmF32x4SConvertI32x4) \
+ V(ArmF32x4UConvertI32x4) \
+ V(ArmF32x4Abs) \
+ V(ArmF32x4Neg) \
+ V(ArmF32x4RecipApprox) \
+ V(ArmF32x4RecipSqrtApprox) \
+ V(ArmF32x4Add) \
+ V(ArmF32x4Sub) \
+ V(ArmF32x4Mul) \
+ V(ArmF32x4Min) \
+ V(ArmF32x4Max) \
+ V(ArmF32x4RecipRefine) \
+ V(ArmF32x4RecipSqrtRefine) \
+ V(ArmF32x4Eq) \
+ V(ArmF32x4Ne) \
+ V(ArmF32x4Lt) \
+ V(ArmF32x4Le) \
+ V(ArmI32x4Splat) \
+ V(ArmI32x4ExtractLane) \
+ V(ArmI32x4ReplaceLane) \
+ V(ArmI32x4SConvertF32x4) \
+ V(ArmI32x4SConvertI16x8Low) \
+ V(ArmI32x4SConvertI16x8High) \
+ V(ArmI32x4Neg) \
+ V(ArmI32x4Shl) \
+ V(ArmI32x4ShrS) \
+ V(ArmI32x4Add) \
+ V(ArmI32x4Sub) \
+ V(ArmI32x4Mul) \
+ V(ArmI32x4MinS) \
+ V(ArmI32x4MaxS) \
+ V(ArmI32x4Eq) \
+ V(ArmI32x4Ne) \
+ V(ArmI32x4LtS) \
+ V(ArmI32x4LeS) \
+ V(ArmI32x4UConvertF32x4) \
+ V(ArmI32x4UConvertI16x8Low) \
+ V(ArmI32x4UConvertI16x8High) \
+ V(ArmI32x4ShrU) \
+ V(ArmI32x4MinU) \
+ V(ArmI32x4MaxU) \
+ V(ArmI32x4LtU) \
+ V(ArmI32x4LeU) \
+ V(ArmI16x8Splat) \
+ V(ArmI16x8ExtractLane) \
+ V(ArmI16x8ReplaceLane) \
+ V(ArmI16x8SConvertI8x16Low) \
+ V(ArmI16x8SConvertI8x16High) \
+ V(ArmI16x8Neg) \
+ V(ArmI16x8Shl) \
+ V(ArmI16x8ShrS) \
+ V(ArmI16x8SConvertI32x4) \
+ V(ArmI16x8Add) \
+ V(ArmI16x8AddSaturateS) \
+ V(ArmI16x8Sub) \
+ V(ArmI16x8SubSaturateS) \
+ V(ArmI16x8Mul) \
+ V(ArmI16x8MinS) \
+ V(ArmI16x8MaxS) \
+ V(ArmI16x8Eq) \
+ V(ArmI16x8Ne) \
+ V(ArmI16x8LtS) \
+ V(ArmI16x8LeS) \
+ V(ArmI16x8UConvertI8x16Low) \
+ V(ArmI16x8UConvertI8x16High) \
+ V(ArmI16x8ShrU) \
+ V(ArmI16x8UConvertI32x4) \
+ V(ArmI16x8AddSaturateU) \
+ V(ArmI16x8SubSaturateU) \
+ V(ArmI16x8MinU) \
+ V(ArmI16x8MaxU) \
+ V(ArmI16x8LtU) \
+ V(ArmI16x8LeU) \
+ V(ArmI8x16Splat) \
+ V(ArmI8x16ExtractLane) \
+ V(ArmI8x16ReplaceLane) \
+ V(ArmI8x16Neg) \
+ V(ArmI8x16Shl) \
+ V(ArmI8x16ShrS) \
+ V(ArmI8x16SConvertI16x8) \
+ V(ArmI8x16Add) \
+ V(ArmI8x16AddSaturateS) \
+ V(ArmI8x16Sub) \
+ V(ArmI8x16SubSaturateS) \
+ V(ArmI8x16Mul) \
+ V(ArmI8x16MinS) \
+ V(ArmI8x16MaxS) \
+ V(ArmI8x16Eq) \
+ V(ArmI8x16Ne) \
+ V(ArmI8x16LtS) \
+ V(ArmI8x16LeS) \
+ V(ArmI8x16ShrU) \
+ V(ArmI8x16UConvertI16x8) \
+ V(ArmI8x16AddSaturateU) \
+ V(ArmI8x16SubSaturateU) \
+ V(ArmI8x16MinU) \
+ V(ArmI8x16MaxU) \
+ V(ArmI8x16LtU) \
+ V(ArmI8x16LeU) \
+ V(ArmS128Zero) \
+ V(ArmS128And) \
+ V(ArmS128Or) \
+ V(ArmS128Xor) \
+ V(ArmS128Not) \
+ V(ArmS128Select) \
+ V(ArmS1x4AnyTrue) \
+ V(ArmS1x4AllTrue) \
+ V(ArmS1x8AnyTrue) \
+ V(ArmS1x8AllTrue) \
+ V(ArmS1x16AnyTrue) \
+ V(ArmS1x16AllTrue)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index ba2f21943a..e6f3464bb5 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -108,98 +108,126 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmFloat32Min:
case kArmFloat64Min:
case kArmFloat64SilenceNaN:
- case kArmFloat32x4Splat:
- case kArmFloat32x4ExtractLane:
- case kArmFloat32x4ReplaceLane:
- case kArmFloat32x4FromInt32x4:
- case kArmFloat32x4FromUint32x4:
- case kArmFloat32x4Abs:
- case kArmFloat32x4Neg:
- case kArmFloat32x4Add:
- case kArmFloat32x4Sub:
- case kArmFloat32x4Equal:
- case kArmFloat32x4NotEqual:
- case kArmInt32x4Splat:
- case kArmInt32x4ExtractLane:
- case kArmInt32x4ReplaceLane:
- case kArmInt32x4FromFloat32x4:
- case kArmUint32x4FromFloat32x4:
- case kArmInt32x4Neg:
- case kArmInt32x4ShiftLeftByScalar:
- case kArmInt32x4ShiftRightByScalar:
- case kArmInt32x4Add:
- case kArmInt32x4Sub:
- case kArmInt32x4Mul:
- case kArmInt32x4Min:
- case kArmInt32x4Max:
- case kArmInt32x4Equal:
- case kArmInt32x4NotEqual:
- case kArmInt32x4GreaterThan:
- case kArmInt32x4GreaterThanOrEqual:
- case kArmUint32x4ShiftRightByScalar:
- case kArmUint32x4Min:
- case kArmUint32x4Max:
- case kArmUint32x4GreaterThan:
- case kArmUint32x4GreaterThanOrEqual:
- case kArmInt16x8Splat:
- case kArmInt16x8ExtractLane:
- case kArmInt16x8ReplaceLane:
- case kArmInt16x8Neg:
- case kArmInt16x8ShiftLeftByScalar:
- case kArmInt16x8ShiftRightByScalar:
- case kArmInt16x8Add:
- case kArmInt16x8AddSaturate:
- case kArmInt16x8Sub:
- case kArmInt16x8SubSaturate:
- case kArmInt16x8Mul:
- case kArmInt16x8Min:
- case kArmInt16x8Max:
- case kArmInt16x8Equal:
- case kArmInt16x8NotEqual:
- case kArmInt16x8GreaterThan:
- case kArmInt16x8GreaterThanOrEqual:
- case kArmUint16x8ShiftRightByScalar:
- case kArmUint16x8AddSaturate:
- case kArmUint16x8SubSaturate:
- case kArmUint16x8Min:
- case kArmUint16x8Max:
- case kArmUint16x8GreaterThan:
- case kArmUint16x8GreaterThanOrEqual:
- case kArmInt8x16Splat:
- case kArmInt8x16ExtractLane:
- case kArmInt8x16ReplaceLane:
- case kArmInt8x16Neg:
- case kArmInt8x16ShiftLeftByScalar:
- case kArmInt8x16ShiftRightByScalar:
- case kArmInt8x16Add:
- case kArmInt8x16AddSaturate:
- case kArmInt8x16Sub:
- case kArmInt8x16SubSaturate:
- case kArmInt8x16Mul:
- case kArmInt8x16Min:
- case kArmInt8x16Max:
- case kArmInt8x16Equal:
- case kArmInt8x16NotEqual:
- case kArmInt8x16GreaterThan:
- case kArmInt8x16GreaterThanOrEqual:
- case kArmUint8x16ShiftRightByScalar:
- case kArmUint8x16AddSaturate:
- case kArmUint8x16SubSaturate:
- case kArmUint8x16Min:
- case kArmUint8x16Max:
- case kArmUint8x16GreaterThan:
- case kArmUint8x16GreaterThanOrEqual:
- case kArmSimd128And:
- case kArmSimd128Or:
- case kArmSimd128Xor:
- case kArmSimd128Not:
- case kArmSimd32x4Select:
- case kArmSimd16x8Select:
- case kArmSimd8x16Select:
+ case kArmF32x4Splat:
+ case kArmF32x4ExtractLane:
+ case kArmF32x4ReplaceLane:
+ case kArmF32x4SConvertI32x4:
+ case kArmF32x4UConvertI32x4:
+ case kArmF32x4Abs:
+ case kArmF32x4Neg:
+ case kArmF32x4RecipApprox:
+ case kArmF32x4RecipSqrtApprox:
+ case kArmF32x4Add:
+ case kArmF32x4Sub:
+ case kArmF32x4Mul:
+ case kArmF32x4Min:
+ case kArmF32x4Max:
+ case kArmF32x4RecipRefine:
+ case kArmF32x4RecipSqrtRefine:
+ case kArmF32x4Eq:
+ case kArmF32x4Ne:
+ case kArmF32x4Lt:
+ case kArmF32x4Le:
+ case kArmI32x4Splat:
+ case kArmI32x4ExtractLane:
+ case kArmI32x4ReplaceLane:
+ case kArmI32x4SConvertF32x4:
+ case kArmI32x4SConvertI16x8Low:
+ case kArmI32x4SConvertI16x8High:
+ case kArmI32x4Neg:
+ case kArmI32x4Shl:
+ case kArmI32x4ShrS:
+ case kArmI32x4Add:
+ case kArmI32x4Sub:
+ case kArmI32x4Mul:
+ case kArmI32x4MinS:
+ case kArmI32x4MaxS:
+ case kArmI32x4Eq:
+ case kArmI32x4Ne:
+ case kArmI32x4LtS:
+ case kArmI32x4LeS:
+ case kArmI32x4UConvertF32x4:
+ case kArmI32x4UConvertI16x8Low:
+ case kArmI32x4UConvertI16x8High:
+ case kArmI32x4ShrU:
+ case kArmI32x4MinU:
+ case kArmI32x4MaxU:
+ case kArmI32x4LtU:
+ case kArmI32x4LeU:
+ case kArmI16x8Splat:
+ case kArmI16x8ExtractLane:
+ case kArmI16x8ReplaceLane:
+ case kArmI16x8SConvertI8x16Low:
+ case kArmI16x8SConvertI8x16High:
+ case kArmI16x8Neg:
+ case kArmI16x8Shl:
+ case kArmI16x8ShrS:
+ case kArmI16x8SConvertI32x4:
+ case kArmI16x8Add:
+ case kArmI16x8AddSaturateS:
+ case kArmI16x8Sub:
+ case kArmI16x8SubSaturateS:
+ case kArmI16x8Mul:
+ case kArmI16x8MinS:
+ case kArmI16x8MaxS:
+ case kArmI16x8Eq:
+ case kArmI16x8Ne:
+ case kArmI16x8LtS:
+ case kArmI16x8LeS:
+ case kArmI16x8UConvertI8x16Low:
+ case kArmI16x8UConvertI8x16High:
+ case kArmI16x8ShrU:
+ case kArmI16x8UConvertI32x4:
+ case kArmI16x8AddSaturateU:
+ case kArmI16x8SubSaturateU:
+ case kArmI16x8MinU:
+ case kArmI16x8MaxU:
+ case kArmI16x8LtU:
+ case kArmI16x8LeU:
+ case kArmI8x16Splat:
+ case kArmI8x16ExtractLane:
+ case kArmI8x16ReplaceLane:
+ case kArmI8x16Neg:
+ case kArmI8x16Shl:
+ case kArmI8x16ShrS:
+ case kArmI8x16SConvertI16x8:
+ case kArmI8x16Add:
+ case kArmI8x16AddSaturateS:
+ case kArmI8x16Sub:
+ case kArmI8x16SubSaturateS:
+ case kArmI8x16Mul:
+ case kArmI8x16MinS:
+ case kArmI8x16MaxS:
+ case kArmI8x16Eq:
+ case kArmI8x16Ne:
+ case kArmI8x16LtS:
+ case kArmI8x16LeS:
+ case kArmI8x16UConvertI16x8:
+ case kArmI8x16AddSaturateU:
+ case kArmI8x16SubSaturateU:
+ case kArmI8x16ShrU:
+ case kArmI8x16MinU:
+ case kArmI8x16MaxU:
+ case kArmI8x16LtU:
+ case kArmI8x16LeU:
+ case kArmS128Zero:
+ case kArmS128And:
+ case kArmS128Or:
+ case kArmS128Xor:
+ case kArmS128Not:
+ case kArmS128Select:
+ case kArmS1x4AnyTrue:
+ case kArmS1x4AllTrue:
+ case kArmS1x8AnyTrue:
+ case kArmS1x8AllTrue:
+ case kArmS1x16AnyTrue:
+ case kArmS1x16AllTrue:
return kNoOpcodeFlags;
case kArmVldrF32:
case kArmVldrF64:
+ case kArmVld1F64:
+ case kArmVld1S128:
case kArmLdrb:
case kArmLdrsb:
case kArmLdrh:
@@ -209,6 +237,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmVstrF32:
case kArmVstrF64:
+ case kArmVst1F64:
+ case kArmVst1S128:
case kArmStrb:
case kArmStrh:
case kArmStr:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 0cffff7a1c..d69a82c608 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -426,8 +426,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kArmLdr;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kArmVld1S128;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kSimd1x4: // Fall through.
case MachineRepresentation::kSimd1x8: // Fall through.
case MachineRepresentation::kSimd1x16: // Fall through.
@@ -514,8 +516,10 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kArmStr;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kArmVst1S128;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kSimd1x4: // Fall through.
case MachineRepresentation::kSimd1x8: // Fall through.
case MachineRepresentation::kSimd1x16: // Fall through.
@@ -538,8 +542,8 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
}
void InstructionSelector::VisitUnalignedLoad(Node* node) {
- UnalignedLoadRepresentation load_rep =
- UnalignedLoadRepresentationOf(node->op());
+ MachineRepresentation load_rep =
+ UnalignedLoadRepresentationOf(node->op()).representation();
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -547,17 +551,18 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
InstructionCode opcode = kArmLdr;
// Only floating point loads need to be specially handled; integer loads
// support unaligned access. We support unaligned FP loads by loading to
- // integer registers first, then moving to the destination FP register.
- switch (load_rep.representation()) {
+ // integer registers first, then moving to the destination FP register. If
+ // NEON is supported, we use the vld1.8 instruction.
+ switch (load_rep) {
case MachineRepresentation::kFloat32: {
InstructionOperand temp = g.TempRegister();
EmitLoad(this, opcode, &temp, base, index);
Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp);
return;
}
- case MachineRepresentation::kFloat64: {
- // TODO(arm): use vld1.8 for this when NEON is available.
- // Compute the address of the least-significant half of the FP value.
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128: {
+ // Compute the address of the least-significant byte of the FP value.
// We assume that the base node is unlikely to be an encodable immediate
// or the result of a shift operation, so only consider the addressing
// mode that should be used for the index node.
@@ -568,8 +573,8 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
size_t input_count;
if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
&inputs[1])) {
- // input_count has been set by TryMatchImmediateOrShift(), so increment
- // it to account for the base register in inputs[0].
+ // input_count has been set by TryMatchImmediateOrShift(), so
+ // increment it to account for the base register in inputs[0].
input_count++;
} else {
add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
@@ -580,13 +585,22 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
InstructionOperand addr = g.TempRegister();
Emit(add_opcode, 1, &addr, input_count, inputs);
- // Load both halves and move to an FP register.
- InstructionOperand fp_lo = g.TempRegister();
- InstructionOperand fp_hi = g.TempRegister();
- opcode |= AddressingModeField::encode(kMode_Offset_RI);
- Emit(opcode, fp_lo, addr, g.TempImmediate(0));
- Emit(opcode, fp_hi, addr, g.TempImmediate(4));
- Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), fp_lo, fp_hi);
+ if (CpuFeatures::IsSupported(NEON)) {
+ // With NEON we can load directly from the calculated address.
+ ArchOpcode op = load_rep == MachineRepresentation::kFloat64
+ ? kArmVld1F64
+ : kArmVld1S128;
+ Emit(op, g.DefineAsRegister(node), addr);
+ } else {
+ DCHECK_NE(MachineRepresentation::kSimd128, load_rep);
+ // Load both halves and move to an FP register.
+ InstructionOperand fp_lo = g.TempRegister();
+ InstructionOperand fp_hi = g.TempRegister();
+ opcode |= AddressingModeField::encode(kMode_Offset_RI);
+ Emit(opcode, fp_lo, addr, g.TempImmediate(0));
+ Emit(opcode, fp_hi, addr, g.TempImmediate(4));
+ Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), fp_lo, fp_hi);
+ }
return;
}
default:
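
The hunk above gives VisitUnalignedLoad two strategies: with NEON available, a single vld1 from the computed address serves both kFloat64 and kSimd128; without it, the value is assembled from two word loads plus a kArmVmovF64U32U32. A minimal portable sketch of what the non-NEON path computes, assuming a little-endian layout (illustrative C++, not V8 API):

#include <cstdint>
#include <cstring>

// Load a float64 from a possibly unaligned address by reading two
// 32-bit halves, mirroring the two-load fallback above.
double UnalignedLoadF64(const uint8_t* addr) {
  uint32_t lo, hi;
  std::memcpy(&lo, addr, sizeof(lo));      // least-significant half
  std::memcpy(&hi, addr + 4, sizeof(hi));  // most-significant half
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // the vmov step
  return result;
}
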
@@ -611,6 +625,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
// Only floating point stores need to be specially handled; integer stores
// support unaligned access. We support unaligned FP stores by moving the
// value to integer registers first, then storing to the destination address.
+ // If NEON is supported, we use the vst1.8 instruction.
switch (store_rep) {
case MachineRepresentation::kFloat32: {
inputs[input_count++] = g.TempRegister();
@@ -619,31 +634,63 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
EmitStore(this, kArmStr, input_count, inputs, index);
return;
}
- case MachineRepresentation::kFloat64: {
- // TODO(arm): use vst1.8 for this when NEON is available.
- // Store a 64-bit floating point value using two 32-bit integer stores.
- // Computing the store address here would require three live temporary
- // registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after
- // storing the least-significant half of the value.
-
- // First, move the 64-bit FP value into two temporary integer registers.
- InstructionOperand fp[] = {g.TempRegister(), g.TempRegister()};
- inputs[input_count++] = g.UseRegister(value);
- Emit(kArmVmovU32U32F64, arraysize(fp), fp, input_count,
- inputs);
-
- // Store the least-significant half.
- inputs[0] = fp[0]; // Low 32-bits of FP value.
- inputs[input_count++] = g.UseRegister(base); // First store base address.
- EmitStore(this, kArmStr, input_count, inputs, index);
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128: {
+ if (CpuFeatures::IsSupported(NEON)) {
+ InstructionOperand address = g.TempRegister();
+ {
+          // First, compute the actual store address.
+ InstructionCode add_opcode = kArmAdd;
+ InstructionOperand inputs[3];
+ inputs[0] = g.UseRegister(base);
+
+ size_t input_count;
+ if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
+ &inputs[1])) {
+ // input_count has been set by TryMatchImmediateOrShift(), so
+ // increment it to account for the base register in inputs[0].
+ input_count++;
+ } else {
+ add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
+ inputs[1] = g.UseRegister(index);
+ input_count = 2; // Base register and index.
+ }
- // Store the most-significant half.
- InstructionOperand base4 = g.TempRegister();
- Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_I), base4,
- g.UseRegister(base), g.TempImmediate(4)); // Compute base + 4.
- inputs[0] = fp[1]; // High 32-bits of FP value.
- inputs[1] = base4; // Second store base + 4 address.
- EmitStore(this, kArmStr, input_count, inputs, index);
+ Emit(add_opcode, 1, &address, input_count, inputs);
+ }
+
+ inputs[input_count++] = g.UseRegister(value);
+ inputs[input_count++] = address;
+ ArchOpcode op = store_rep == MachineRepresentation::kFloat64
+ ? kArmVst1F64
+ : kArmVst1S128;
+ Emit(op, 0, nullptr, input_count, inputs);
+ } else {
+ DCHECK_NE(MachineRepresentation::kSimd128, store_rep);
+ // Store a 64-bit floating point value using two 32-bit integer stores.
+ // Computing the store address here would require three live temporary
+ // registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after
+ // storing the least-significant half of the value.
+
+ // First, move the 64-bit FP value into two temporary integer registers.
+ InstructionOperand fp[] = {g.TempRegister(), g.TempRegister()};
+ inputs[input_count++] = g.UseRegister(value);
+ Emit(kArmVmovU32U32F64, arraysize(fp), fp, input_count, inputs);
+
+ // Store the least-significant half.
+ inputs[0] = fp[0]; // Low 32-bits of FP value.
+ inputs[input_count++] =
+ g.UseRegister(base); // First store base address.
+ EmitStore(this, kArmStr, input_count, inputs, index);
+
+ // Store the most-significant half.
+ InstructionOperand base4 = g.TempRegister();
+ Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_I), base4,
+ g.UseRegister(base), g.TempImmediate(4)); // Compute base + 4.
+ inputs[0] = fp[1]; // High 32-bits of FP value.
+ inputs[1] = base4; // Second store base + 4 address.
+ EmitStore(this, kArmStr, input_count, inputs, index);
+ }
return;
}
default:
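
Symmetrically, the non-NEON store path first splits the double into two 32-bit halves (the kArmVmovU32U32F64 step) and then writes them at base and base + 4. A hedged sketch of the equivalent operation, again assuming a little-endian layout:

#include <cstdint>
#include <cstring>

// Store a float64 to a possibly unaligned address as two 32-bit
// integer stores, as in the fallback path above.
void UnalignedStoreF64(uint8_t* addr, double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);        // stored at base
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // stored at base + 4
  std::memcpy(addr, &lo, sizeof(lo));
  std::memcpy(addr + 4, &hi, sizeof(hi));
}
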
@@ -2181,99 +2228,271 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
+void InstructionSelector::VisitAtomicExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[1];
+ temp[0] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 1, temp);
+}
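
The opcode selected here denotes an atomic swap: store the new value and yield the previous contents in one indivisible step. In portable terms the contract is that of std::atomic::exchange (a semantic sketch, not the emitted code):

#include <atomic>
#include <cstdint>

// Semantic model of kAtomicExchangeWord32: atomically store `value`
// and return the previous contents of `*slot`.
uint32_t AtomicExchangeWord32(std::atomic<uint32_t>* slot, uint32_t value) {
  return slot->exchange(value, std::memory_order_seq_cst);
}
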
+
+void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[2];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 2, temp);
+}
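
The compare-exchange opcodes carry the usual CAS contract: the store happens only if the slot still holds old_value, and the value actually observed is produced either way. A semantic sketch under the same caveat:

#include <atomic>
#include <cstdint>

// Semantic model of kAtomicCompareExchangeWord32: returns the value
// observed in *slot; the store happens only if it equaled `expected`.
uint32_t AtomicCompareExchangeWord32(std::atomic<uint32_t>* slot,
                                     uint32_t expected, uint32_t new_value) {
  // compare_exchange_strong rewrites `expected` to the observed value
  // on failure, so returning it covers both outcomes.
  slot->compare_exchange_strong(expected, new_value,
                                std::memory_order_seq_cst);
  return expected;
}
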
+
+void InstructionSelector::VisitAtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[2];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 2, temps);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitAtomic##op(Node* node) { \
+ VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
+ kAtomic##op##Int16, kAtomic##op##Uint16, \
+ kAtomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
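
For readers unfamiliar with the X-macro idiom, VISIT_ATOMIC_BINOP(Add) above expands to roughly the following member definition (expansion reconstructed by hand, so treat it as illustrative):

void InstructionSelector::VisitAtomicAdd(Node* node) {
  VisitAtomicBinaryOperation(node, kAtomicAddInt8, kAtomicAddUint8,
                             kAtomicAddInt16, kAtomicAddUint16,
                             kAtomicAddWord32);
}
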
+
#define SIMD_TYPE_LIST(V) \
- V(Float32x4) \
- V(Int32x4) \
- V(Int16x8) \
- V(Int8x16)
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
#define SIMD_FORMAT_LIST(V) \
V(32x4) \
V(16x8) \
V(8x16)
-#define SIMD_UNOP_LIST(V) \
- V(Float32x4FromInt32x4) \
- V(Float32x4FromUint32x4) \
- V(Float32x4Abs) \
- V(Float32x4Neg) \
- V(Int32x4FromFloat32x4) \
- V(Uint32x4FromFloat32x4) \
- V(Int32x4Neg) \
- V(Int16x8Neg) \
- V(Int8x16Neg) \
- V(Simd128Not)
-
-#define SIMD_BINOP_LIST(V) \
- V(Float32x4Add) \
- V(Float32x4Sub) \
- V(Float32x4Equal) \
- V(Float32x4NotEqual) \
- V(Int32x4Add) \
- V(Int32x4Sub) \
- V(Int32x4Mul) \
- V(Int32x4Min) \
- V(Int32x4Max) \
- V(Int32x4Equal) \
- V(Int32x4NotEqual) \
- V(Int32x4GreaterThan) \
- V(Int32x4GreaterThanOrEqual) \
- V(Uint32x4Min) \
- V(Uint32x4Max) \
- V(Uint32x4GreaterThan) \
- V(Uint32x4GreaterThanOrEqual) \
- V(Int16x8Add) \
- V(Int16x8AddSaturate) \
- V(Int16x8Sub) \
- V(Int16x8SubSaturate) \
- V(Int16x8Mul) \
- V(Int16x8Min) \
- V(Int16x8Max) \
- V(Int16x8Equal) \
- V(Int16x8NotEqual) \
- V(Int16x8GreaterThan) \
- V(Int16x8GreaterThanOrEqual) \
- V(Uint16x8AddSaturate) \
- V(Uint16x8SubSaturate) \
- V(Uint16x8Min) \
- V(Uint16x8Max) \
- V(Uint16x8GreaterThan) \
- V(Uint16x8GreaterThanOrEqual) \
- V(Int8x16Add) \
- V(Int8x16AddSaturate) \
- V(Int8x16Sub) \
- V(Int8x16SubSaturate) \
- V(Int8x16Mul) \
- V(Int8x16Min) \
- V(Int8x16Max) \
- V(Int8x16Equal) \
- V(Int8x16NotEqual) \
- V(Int8x16GreaterThan) \
- V(Int8x16GreaterThanOrEqual) \
- V(Uint8x16AddSaturate) \
- V(Uint8x16SubSaturate) \
- V(Uint8x16Min) \
- V(Uint8x16Max) \
- V(Uint8x16GreaterThan) \
- V(Uint8x16GreaterThanOrEqual) \
- V(Simd128And) \
- V(Simd128Or) \
- V(Simd128Xor)
-
-#define SIMD_SHIFT_OP_LIST(V) \
- V(Int32x4ShiftLeftByScalar) \
- V(Int32x4ShiftRightByScalar) \
- V(Uint32x4ShiftRightByScalar) \
- V(Int16x8ShiftLeftByScalar) \
- V(Int16x8ShiftRightByScalar) \
- V(Uint16x8ShiftRightByScalar) \
- V(Int8x16ShiftLeftByScalar) \
- V(Int8x16ShiftRightByScalar) \
- V(Uint8x16ShiftRightByScalar)
-
-#define SIMD_VISIT_SPLAT(Type) \
- void InstructionSelector::VisitCreate##Type(Node* node) { \
- VisitRR(this, kArm##Type##Splat, node); \
+#define SIMD_ZERO_OP_LIST(V) \
+ V(S128Zero) \
+ V(S1x4Zero) \
+ V(S1x8Zero) \
+ V(S1x16Zero)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4, kArmF32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kArmF32x4UConvertI32x4) \
+ V(F32x4Abs, kArmF32x4Abs) \
+ V(F32x4Neg, kArmF32x4Neg) \
+ V(F32x4RecipApprox, kArmF32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kArmF32x4RecipSqrtApprox) \
+ V(I32x4SConvertF32x4, kArmI32x4SConvertF32x4) \
+ V(I32x4SConvertI16x8Low, kArmI32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kArmI32x4SConvertI16x8High) \
+ V(I32x4Neg, kArmI32x4Neg) \
+ V(I32x4UConvertF32x4, kArmI32x4UConvertF32x4) \
+ V(I32x4UConvertI16x8Low, kArmI32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kArmI32x4UConvertI16x8High) \
+ V(I16x8SConvertI8x16Low, kArmI16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kArmI16x8SConvertI8x16High) \
+ V(I16x8Neg, kArmI16x8Neg) \
+ V(I16x8UConvertI8x16Low, kArmI16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kArmI16x8UConvertI8x16High) \
+ V(I8x16Neg, kArmI8x16Neg) \
+ V(S128Not, kArmS128Not) \
+ V(S1x4Not, kArmS128Not) \
+ V(S1x4AnyTrue, kArmS1x4AnyTrue) \
+ V(S1x4AllTrue, kArmS1x4AllTrue) \
+ V(S1x8Not, kArmS128Not) \
+ V(S1x8AnyTrue, kArmS1x8AnyTrue) \
+ V(S1x8AllTrue, kArmS1x8AllTrue) \
+ V(S1x16Not, kArmS128Not) \
+ V(S1x16AnyTrue, kArmS1x16AnyTrue) \
+ V(S1x16AllTrue, kArmS1x16AllTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add, kArmF32x4Add) \
+ V(F32x4Sub, kArmF32x4Sub) \
+ V(F32x4Mul, kArmF32x4Mul) \
+ V(F32x4Min, kArmF32x4Min) \
+ V(F32x4Max, kArmF32x4Max) \
+ V(F32x4RecipRefine, kArmF32x4RecipRefine) \
+ V(F32x4RecipSqrtRefine, kArmF32x4RecipSqrtRefine) \
+ V(F32x4Eq, kArmF32x4Eq) \
+ V(F32x4Ne, kArmF32x4Ne) \
+ V(F32x4Lt, kArmF32x4Lt) \
+ V(F32x4Le, kArmF32x4Le) \
+ V(I32x4Add, kArmI32x4Add) \
+ V(I32x4Sub, kArmI32x4Sub) \
+ V(I32x4Mul, kArmI32x4Mul) \
+ V(I32x4MinS, kArmI32x4MinS) \
+ V(I32x4MaxS, kArmI32x4MaxS) \
+ V(I32x4Eq, kArmI32x4Eq) \
+ V(I32x4Ne, kArmI32x4Ne) \
+ V(I32x4LtS, kArmI32x4LtS) \
+ V(I32x4LeS, kArmI32x4LeS) \
+ V(I32x4MinU, kArmI32x4MinU) \
+ V(I32x4MaxU, kArmI32x4MaxU) \
+ V(I32x4LtU, kArmI32x4LtU) \
+ V(I32x4LeU, kArmI32x4LeU) \
+ V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
+ V(I16x8Add, kArmI16x8Add) \
+ V(I16x8AddSaturateS, kArmI16x8AddSaturateS) \
+ V(I16x8Sub, kArmI16x8Sub) \
+ V(I16x8SubSaturateS, kArmI16x8SubSaturateS) \
+ V(I16x8Mul, kArmI16x8Mul) \
+ V(I16x8MinS, kArmI16x8MinS) \
+ V(I16x8MaxS, kArmI16x8MaxS) \
+ V(I16x8Eq, kArmI16x8Eq) \
+ V(I16x8Ne, kArmI16x8Ne) \
+ V(I16x8LtS, kArmI16x8LtS) \
+ V(I16x8LeS, kArmI16x8LeS) \
+ V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
+ V(I16x8AddSaturateU, kArmI16x8AddSaturateU) \
+ V(I16x8SubSaturateU, kArmI16x8SubSaturateU) \
+ V(I16x8MinU, kArmI16x8MinU) \
+ V(I16x8MaxU, kArmI16x8MaxU) \
+ V(I16x8LtU, kArmI16x8LtU) \
+ V(I16x8LeU, kArmI16x8LeU) \
+ V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
+ V(I8x16Add, kArmI8x16Add) \
+ V(I8x16AddSaturateS, kArmI8x16AddSaturateS) \
+ V(I8x16Sub, kArmI8x16Sub) \
+ V(I8x16SubSaturateS, kArmI8x16SubSaturateS) \
+ V(I8x16Mul, kArmI8x16Mul) \
+ V(I8x16MinS, kArmI8x16MinS) \
+ V(I8x16MaxS, kArmI8x16MaxS) \
+ V(I8x16Eq, kArmI8x16Eq) \
+ V(I8x16Ne, kArmI8x16Ne) \
+ V(I8x16LtS, kArmI8x16LtS) \
+ V(I8x16LeS, kArmI8x16LeS) \
+ V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
+ V(I8x16AddSaturateU, kArmI8x16AddSaturateU) \
+ V(I8x16SubSaturateU, kArmI8x16SubSaturateU) \
+ V(I8x16MinU, kArmI8x16MinU) \
+ V(I8x16MaxU, kArmI8x16MaxU) \
+ V(I8x16LtU, kArmI8x16LtU) \
+ V(I8x16LeU, kArmI8x16LeU) \
+ V(S128And, kArmS128And) \
+ V(S128Or, kArmS128Or) \
+ V(S128Xor, kArmS128Xor) \
+ V(S1x4And, kArmS128And) \
+ V(S1x4Or, kArmS128Or) \
+ V(S1x4Xor, kArmS128Xor) \
+ V(S1x8And, kArmS128And) \
+ V(S1x8Or, kArmS128Or) \
+ V(S1x8Xor, kArmS128Xor) \
+ V(S1x16And, kArmS128And) \
+ V(S1x16Or, kArmS128Or) \
+ V(S1x16Xor, kArmS128Xor)
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kArm##Type##Splat, node); \
}
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT
@@ -2292,19 +2511,20 @@ SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
-#define SIMD_VISIT_UNOP(Name) \
- void InstructionSelector::Visit##Name(Node* node) { \
- VisitRR(this, kArm##Name, node); \
+#define SIMD_VISIT_ZERO_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ ArmOperandGenerator g(this); \
+ Emit(kArmS128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
}
-SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
-#undef SIMD_VISIT_UNOP
+SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
+#undef SIMD_VISIT_ZERO_OP
-#define SIMD_VISIT_BINOP(Name) \
+#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
- VisitRRR(this, kArm##Name, node); \
+ VisitRR(this, instruction, node); \
}
-SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
-#undef SIMD_VISIT_BINOP
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
#define SIMD_VISIT_SHIFT_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2313,13 +2533,28 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
-#define SIMD_VISIT_SELECT_OP(format) \
- void InstructionSelector::VisitSimd##format##Select(Node* node) { \
- VisitRRRR(this, kArmSimd##format##Select, node); \
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+#define SIMD_VISIT_SELECT_OP(format) \
+ void InstructionSelector::VisitS##format##Select(Node* node) { \
+ VisitRRRR(this, kArmS128Select, node); \
}
SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
#undef SIMD_VISIT_SELECT_OP
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 1cdedb0f9e..a72070a06d 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -4,13 +4,15 @@
#include "src/compiler/code-generator.h"
+#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
-#include "src/arm64/macro-assembler-arm64.h"
+#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/heap/heap-inl.h"
namespace v8 {
namespace internal {
@@ -99,6 +101,10 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
+ Register TempRegister32(size_t index) {
+ return ToRegister(instr_->TempAt(index)).W();
+ }
+
Operand InputOperand2_32(size_t index) {
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
@@ -512,19 +518,55 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} \
} while (0)
-#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
- do { \
- __ asm_instr(i.OutputRegister(), \
- MemOperand(i.InputRegister(0), i.InputRegister(1))); \
- __ Dmb(InnerShareable, BarrierAll); \
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ asm_instr(i.OutputRegister32(), i.TempRegister(0)); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ asm_instr(i.InputRegister32(2), i.TempRegister(0)); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label exchange; \
+ __ bind(&exchange); \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
+ __ store_instr(i.TempRegister32(0), i.InputRegister32(2), \
+ i.TempRegister(0)); \
+ __ cbnz(i.TempRegister32(0), &exchange); \
} while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
- do { \
- __ Dmb(InnerShareable, BarrierAll); \
- __ asm_instr(i.InputRegister(2), \
- MemOperand(i.InputRegister(0), i.InputRegister(1))); \
- __ Dmb(InnerShareable, BarrierAll); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ bind(&compareExchange); \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
+ __ cmp(i.TempRegister32(1), i.OutputRegister32()); \
+ __ B(ne, &exit); \
+ __ store_instr(i.TempRegister32(0), i.InputRegister32(3), \
+ i.TempRegister(0)); \
+ __ cbnz(i.TempRegister32(0), &compareExchange); \
+ __ bind(&exit); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+ do { \
+ Label binop; \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ bind(&binop); \
+ __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
+ __ bin_instr(i.TempRegister32(1), i.OutputRegister32(), \
+ Operand(i.InputRegister32(2))); \
+ __ store_instr(i.TempRegister32(1), i.TempRegister32(1), \
+ i.TempRegister(0)); \
+ __ cbnz(i.TempRegister32(1), &binop); \
} while (0)
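
These macros wrap load-acquire-exclusive / store-release-exclusive pairs (ldaxr/stlxr and their byte and halfword variants) in a retry loop: the exclusive store writes a status flag that is non-zero if another agent touched the location in between, and cbnz branches back to retry. The same retry shape, sketched with standard C++ atomics rather than ARM64 instructions:

#include <atomic>
#include <cstdint>

// Sketch of the ASSEMBLE_ATOMIC_BINOP retry loop for Add: keep trying
// until the read-modify-write completes without interference.
uint32_t AtomicAddWord32(std::atomic<uint32_t>* slot, uint32_t value) {
  uint32_t old = slot->load(std::memory_order_relaxed);
  // A failed weak CAS plays the role of a failed stlxr, which the
  // emitted code answers by branching back to re-run the ldaxr.
  while (!slot->compare_exchange_weak(old, old + value,
                                      std::memory_order_seq_cst,
                                      std::memory_order_relaxed)) {
  }
  return old;  // the loaded value, like i.OutputRegister32()
}
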
#define ASSEMBLE_IEEE754_BINOP(name) \
@@ -1159,11 +1201,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Ubfx:
__ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
- i.InputInt6(2));
+ i.InputInt32(2));
break;
case kArm64Ubfx32:
__ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
- i.InputInt5(2));
+ i.InputInt32(2));
break;
case kArm64Ubfiz32:
__ Ubfiz(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
@@ -1278,10 +1320,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Tst:
- __ Tst(i.InputOrZeroRegister64(0), i.InputOperand(1));
+ __ Tst(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
break;
case kArm64Tst32:
- __ Tst(i.InputOrZeroRegister32(0), i.InputOperand32(1));
+ __ Tst(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Float32Cmp:
if (instr->InputAt(1)->IsFPRegister()) {
@@ -1603,34 +1645,94 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
case kAtomicLoadInt8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsb);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrb);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
break;
case kAtomicLoadInt16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsh);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh);
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrh);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh);
break;
case kAtomicLoadWord32:
- __ Ldr(i.OutputRegister32(),
- MemOperand(i.InputRegister(0), i.InputRegister(1)));
- __ Dmb(InnerShareable, BarrierAll);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar);
break;
case kAtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Strb);
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb);
break;
case kAtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Strh);
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh);
break;
case kAtomicStoreWord32:
- __ Dmb(InnerShareable, BarrierAll);
- __ Str(i.InputRegister32(2),
- MemOperand(i.InputRegister(0), i.InputRegister(1)));
- __ Dmb(InnerShareable, BarrierAll);
- break;
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr);
+ break;
+ case kAtomicExchangeInt8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb);
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb);
+ break;
+ case kAtomicExchangeInt16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh);
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh);
+ break;
+ case kAtomicExchangeWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr);
+ break;
+ case kAtomicCompareExchangeInt8:
+ __ Uxtb(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb);
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicCompareExchangeUint8:
+ __ Uxtb(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb);
+ break;
+ case kAtomicCompareExchangeInt16:
+ __ Uxth(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh);
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicCompareExchangeUint16:
+ __ Uxth(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh);
+ break;
+ case kAtomicCompareExchangeWord32:
+ __ mov(i.TempRegister(1), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr);
+ break;
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kAtomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
+ break; \
+ case kAtomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
+ break; \
+ case kAtomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
+ break; \
+ case kAtomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
+ break; \
+ case kAtomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \
+ break;
+ ATOMIC_BINOP_CASE(Add, Add)
+ ATOMIC_BINOP_CASE(Sub, Sub)
+ ATOMIC_BINOP_CASE(And, And)
+ ATOMIC_BINOP_CASE(Or, Orr)
+ ATOMIC_BINOP_CASE(Xor, Eor)
+#undef ATOMIC_BINOP_CASE
}
return kSuccess;
} // NOLINT(readability/fn_size)
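
The atomic load and store cases above drop the explicit Dmb barrier pairs in favor of single-copy atomic acquire/release instructions (Ldarb/Ldarh/Ldar and Stlrb/Stlrh/Stlr), the conventional ARM64 lowering for sequentially consistent accesses. Their source-level semantics, as a sketch:

#include <atomic>
#include <cstdint>

// kAtomicLoadWord32 / kAtomicStoreWord32 now map to ldar / stlr rather
// than plain accesses bracketed by dmb barriers.
uint32_t AtomicLoadWord32(const std::atomic<uint32_t>* slot) {
  return slot->load(std::memory_order_seq_cst);  // ldar
}

void AtomicStoreWord32(std::atomic<uint32_t>* slot, uint32_t value) {
  slot->store(value, std::memory_order_seq_cst);  // stlr
}
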
@@ -1818,7 +1920,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ }
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -1992,6 +2096,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
+void CodeGenerator::FinishCode() { masm()->CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index bacf7921b7..a471a2b8b3 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/assembler-inl.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -1111,7 +1112,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
if (m.left().IsWord32And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x1f;
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
@@ -1155,7 +1156,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
if (m.left().IsWord64And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x3f;
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
@@ -1258,6 +1259,7 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(ChangeFloat64ToInt32, kArm64Float64ToInt32) \
V(TruncateFloat32ToUint32, kArm64Float32ToUint32) \
V(ChangeFloat64ToUint32, kArm64Float64ToUint32) \
+ V(ChangeFloat64ToUint64, kArm64Float64ToUint64) \
V(TruncateFloat64ToUint32, kArm64Float64ToUint32) \
V(TruncateFloat64ToFloat32, kArm64Float64ToFloat32) \
V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
@@ -2665,8 +2667,12 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
UNREACHABLE();
return;
}
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
}
void InstructionSelector::VisitAtomicStore(Node* node) {
@@ -2698,7 +2704,142 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, input_count, inputs);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(code, 0, nullptr, input_count, inputs, arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitAtomicExchange(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[2];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 2, temp);
+}
+
+void InstructionSelector::VisitAtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[2];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 2, temps);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitAtomic##op(Node* node) { \
+ VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
+ kAtomic##op##Int16, kAtomic##op##Uint16, \
+ kAtomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
}
// static
diff --git a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc
index 3095423854..edf96026e7 100644
--- a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc
+++ b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc
@@ -24,6 +24,7 @@ void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
eh_frame_writer_.AdvanceLocation(pc_offset);
if (initial_state->saved_lr_) {
eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+ eh_frame_writer_.RecordRegisterSavedToStack(fp, 0);
} else {
eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
}
@@ -76,6 +77,7 @@ void UnwindingInfoWriter::MarkFrameConstructed(int at_pc) {
// the construction, since the LR itself is not modified in the process.
eh_frame_writer_.AdvanceLocation(at_pc);
eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+ eh_frame_writer_.RecordRegisterSavedToStack(fp, 0);
saved_lr_ = true;
}
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index e199a032a8..b92a205600 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -1565,8 +1565,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
ast_context()->ProduceValue(expr, value);
}
-
-void AstGraphBuilder::VisitYield(Yield* expr) {
+void AstGraphBuilder::VisitSuspend(Suspend* expr) {
// Generator functions are supported only by going through Ignition first.
UNREACHABLE();
}
@@ -1972,9 +1971,10 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// with the full codegen: We don't push both left and right values onto
// the expression stack when one side is a special-case literal.
Expression* sub_expr = nullptr;
- Handle<String> check;
- if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
- return VisitLiteralCompareTypeof(expr, sub_expr, check);
+ Literal* literal;
+ if (expr->IsLiteralCompareTypeof(&sub_expr, &literal)) {
+ return VisitLiteralCompareTypeof(expr, sub_expr,
+ Handle<String>::cast(literal->value()));
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
return VisitLiteralCompareNil(expr, sub_expr,
@@ -1990,15 +1990,9 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ:
op = javascript()->Equal(hint);
break;
- case Token::NE:
- op = javascript()->NotEqual(hint);
- break;
case Token::EQ_STRICT:
op = javascript()->StrictEqual(hint);
break;
- case Token::NE_STRICT:
- op = javascript()->StrictNotEqual(hint);
- break;
case Token::LT:
op = javascript()->LessThan(hint);
break;
@@ -2047,6 +2041,11 @@ void AstGraphBuilder::VisitGetIterator(GetIterator* expr) {
UNREACHABLE();
}
+void AstGraphBuilder::VisitImportCallExpression(ImportCallExpression* expr) {
+ // ImportCallExpression is supported only by going through Ignition first.
+ UNREACHABLE();
+}
+
void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
Node* value = GetFunctionClosure();
ast_context()->ProduceValue(expr, value);
@@ -2700,7 +2699,7 @@ Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
const Operator* op = javascript()->CallRuntime(Runtime::kThrow);
Node* call = NewNode(op, exception);
PrepareFrameState(call, bailout_id);
- Node* control = NewNode(common()->Throw(), call);
+ Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return call;
}
@@ -2712,7 +2711,7 @@ Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
const Operator* op = javascript()->CallRuntime(Runtime::kThrowReferenceError);
Node* call = NewNode(op, variable_name);
PrepareFrameState(call, bailout_id);
- Node* control = NewNode(common()->Throw(), call);
+ Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return call;
}
@@ -2723,7 +2722,7 @@ Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
javascript()->CallRuntime(Runtime::kThrowConstAssignError);
Node* call = NewNode(op);
PrepareFrameState(call, bailout_id);
- Node* control = NewNode(common()->Throw(), call);
+ Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return call;
}
@@ -2744,7 +2743,7 @@ Node* AstGraphBuilder::BuildReturn(Node* return_value) {
Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
NewNode(javascript()->CallRuntime(Runtime::kReThrow), exception_value);
- Node* control = NewNode(common()->Throw(), exception_value);
+ Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return control;
}
@@ -2814,9 +2813,7 @@ Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
return jsgraph_->BooleanConstant(object->BooleanValue());
}
case IrOpcode::kJSEqual:
- case IrOpcode::kJSNotEqual:
case IrOpcode::kJSStrictEqual:
- case IrOpcode::kJSStrictNotEqual:
case IrOpcode::kJSLessThan:
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThan:
@@ -2931,19 +2928,13 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
if (!environment()->IsMarkedAsUnreachable()) {
// Update the current control dependency for control-producing nodes.
- if (NodeProperties::IsControl(result)) {
+ if (result->op()->ControlOutputCount() > 0) {
environment_->UpdateControlDependency(result);
}
// Update the current effect dependency for effect-producing nodes.
if (result->op()->EffectOutputCount() > 0) {
environment_->UpdateEffectDependency(result);
}
- // Add implicit success continuation for throwing nodes.
- if (!result->op()->HasProperty(Operator::kNoThrow)) {
- const Operator* op = common()->IfSuccess();
- Node* on_success = graph()->NewNode(op, result);
- environment_->UpdateControlDependency(on_success);
- }
}
}
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index 8239e3a058..ff66bf4976 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -149,8 +149,7 @@ void ALAA::VisitObjectLiteral(ObjectLiteral* e) {
void ALAA::VisitArrayLiteral(ArrayLiteral* e) { VisitExpressions(e->values()); }
-
-void ALAA::VisitYield(Yield* stmt) {
+void ALAA::VisitSuspend(Suspend* stmt) {
Visit(stmt->generator_object());
Visit(stmt->expression());
}
@@ -204,6 +203,8 @@ void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
void ALAA::VisitGetIterator(GetIterator* e) { UNREACHABLE(); }
+void ALAA::VisitImportCallExpression(ImportCallExpression* e) { UNREACHABLE(); }
+
void ALAA::VisitCaseClause(CaseClause* cc) {
if (!cc->is_default()) Visit(cc->label());
VisitStatements(cc->statements());
@@ -222,8 +223,7 @@ void ALAA::VisitSloppyBlockFunctionStatement(
void ALAA::VisitTryCatchStatement(TryCatchStatement* stmt) {
Visit(stmt->try_block());
Visit(stmt->catch_block());
- // TODO(turbofan): are catch variables well-scoped?
- AnalyzeAssignment(stmt->variable());
+ AnalyzeAssignment(stmt->scope()->catch_variable());
}
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 2d9a084e21..96327e7856 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -175,9 +175,8 @@ Reduction BranchElimination::ReduceStart(Node* node) {
return UpdateConditions(node, ControlPathConditions::Empty(zone_));
}
-
const BranchElimination::ControlPathConditions*
-BranchElimination::PathConditionsForControlNodes::Get(Node* node) {
+BranchElimination::PathConditionsForControlNodes::Get(Node* node) const {
if (static_cast<size_t>(node->id()) < info_for_node_.size()) {
return info_for_node_[node->id()];
}
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 3c2cdb25a8..c1431523e5 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -69,7 +69,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
public:
PathConditionsForControlNodes(Zone* zone, size_t size_hint)
: info_for_node_(size_hint, nullptr, zone) {}
- const ControlPathConditions* Get(Node* node);
+ const ControlPathConditions* Get(Node* node) const;
void Set(Node* node, const ControlPathConditions* conditions);
private:
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index aaeee666aa..dcaed97481 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -6,9 +6,8 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/compilation-info.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/compiler-source-position-table.h"
-#include "src/compiler/js-type-hint-lowering.h"
#include "src/compiler/linkage.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -465,7 +464,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
Handle<FeedbackVector> feedback_vector, BailoutId osr_ast_id,
JSGraph* jsgraph, float invocation_frequency,
- SourcePositionTable* source_positions, int inlining_id)
+ SourcePositionTable* source_positions, int inlining_id,
+ JSTypeHintLowering::Flags flags)
: local_zone_(local_zone),
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
@@ -473,6 +473,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
exception_handler_table_(
handle(HandlerTable::cast(bytecode_array()->handler_table()))),
feedback_vector_(feedback_vector),
+ type_hint_lowering_(jsgraph, feedback_vector, flags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
bytecode_array()->parameter_count(),
@@ -802,6 +803,20 @@ void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitCollectTypeProfile() {
+ PrepareEagerCheckpoint();
+
+ Node* position =
+ jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
+ Node* value = environment()->LookupAccumulator();
+ Node* vector = jsgraph()->Constant(feedback_vector());
+
+ const Operator* op = javascript()->CallRuntime(Runtime::kCollectTypeProfile);
+
+ Node* node = NewNode(op, position, value, vector);
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitLdaContextSlot() {
const Operator* op = javascript()->LoadContext(
bytecode_iterator().GetUnsignedImmediateOperand(2),
@@ -1046,9 +1061,17 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
-
const Operator* op = javascript()->LoadNamed(name, feedback);
- Node* node = NewNode(op, object);
+
+ Node* node = nullptr;
+ if (Node* simplified =
+ TryBuildSimplifiedLoadNamed(op, object, feedback.slot())) {
+ if (environment() == nullptr) return;
+ node = simplified;
+ } else {
+ node = NewNode(op, object);
+ }
+
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -1059,9 +1082,17 @@ void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
-
const Operator* op = javascript()->LoadProperty(feedback);
- Node* node = NewNode(op, object, key);
+
+ Node* node = nullptr;
+ if (Node* simplified =
+ TryBuildSimplifiedLoadKeyed(op, object, key, feedback.slot())) {
+ if (environment() == nullptr) return;
+ node = simplified;
+ } else {
+ node = NewNode(op, object, key);
+ }
+
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -1087,7 +1118,16 @@ void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode,
language_mode);
op = javascript()->StoreNamed(language_mode, name, feedback);
}
- Node* node = NewNode(op, object, value);
+
+ Node* node = nullptr;
+ if (Node* simplified =
+ TryBuildSimplifiedStoreNamed(op, object, value, feedback.slot())) {
+ if (environment() == nullptr) return;
+ node = simplified;
+ } else {
+ node = NewNode(op, object, value);
+ }
+
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -1112,10 +1152,18 @@ void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
-
DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()), language_mode);
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
- Node* node = NewNode(op, object, key, value);
+
+ Node* node = nullptr;
+ if (Node* simplified = TryBuildSimplifiedStoreKeyed(op, object, key, value,
+ feedback.slot())) {
+ if (environment() == nullptr) return;
+ node = simplified;
+ } else {
+ node = NewNode(op, object, key, value);
+ }
+
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -1293,46 +1341,187 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
literal, Environment::kAttachFrameState);
}
+Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegister(
+ Node* callee, Node* receiver, interpreter::Register first_arg,
+ int arg_count) {
+  // The arity of the Call node includes the callee, the receiver, and the
+  // function arguments.
+ int arity = 2 + arg_count;
+
+ Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
+
+ all[0] = callee;
+ all[1] = receiver;
+
+ // The function arguments are in consecutive registers.
+ int arg_base = first_arg.index();
+ for (int i = 0; i < arg_count; ++i) {
+ all[2 + i] =
+ environment()->LookupRegister(interpreter::Register(arg_base + i));
+ }
+
+ return all;
+}
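
GetCallArgumentsFromRegister packs the inputs of a call into one flat buffer: callee at index 0, receiver at index 1, then arg_count values read from consecutive interpreter registers, giving an arity of 2 + arg_count. A toy illustration of that layout (plain C++ with invented names, not V8 types):

#include <cstdio>

// For callee C, receiver R and three register arguments a0..a2, the
// buffer handed to MakeNode is {C, R, a0, a1, a2} with arity 2 + 3.
int main() {
  const char* args[] = {"a0", "a1", "a2"};
  const int arg_count = 3;
  const char* all[2 + arg_count] = {"C", "R"};
  for (int i = 0; i < arg_count; ++i) all[2 + i] = args[i];
  for (int i = 0; i < 2 + arg_count; ++i) std::printf("%s ", all[i]);
  std::printf("\n");  // prints: C R a0 a1 a2
  return 0;
}
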
+
+Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
+ Node* const* args,
+ int arg_count) {
+ return MakeNode(call_op, arg_count, args, false);
+}
+
Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
Node* callee,
interpreter::Register receiver,
- size_t arity) {
- Node** all = local_zone()->NewArray<Node*>(static_cast<int>(arity));
- all[0] = callee;
- all[1] = environment()->LookupRegister(receiver);
- int receiver_index = receiver.index();
- for (int i = 2; i < static_cast<int>(arity); ++i) {
- all[i] = environment()->LookupRegister(
- interpreter::Register(receiver_index + i - 1));
- }
- Node* value = MakeNode(call_op, static_cast<int>(arity), all, false);
- return value;
+ size_t reg_count) {
+ Node* receiver_node = environment()->LookupRegister(receiver);
+  // The receiver is followed by the arguments in consecutive registers.
+ DCHECK_GE(reg_count, 1);
+ interpreter::Register first_arg = interpreter::Register(receiver.index() + 1);
+ int arg_count = static_cast<int>(reg_count) - 1;
+
+ Node* const* call_args =
+ GetCallArgumentsFromRegister(callee, receiver_node, first_arg, arg_count);
+ return ProcessCallArguments(call_op, call_args, 2 + arg_count);
}
void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
- ConvertReceiverMode receiver_hint) {
+ ConvertReceiverMode receiver_mode,
+ Node* const* args, size_t arg_count,
+ int slot_id) {
+ DCHECK_EQ(interpreter::Bytecodes::GetReceiverMode(
+ bytecode_iterator().current_bytecode()),
+ receiver_mode);
PrepareEagerCheckpoint();
- Node* callee =
- environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
- size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
-
  // Slot index of 0 is used to indicate that no feedback slot is available.
  // Assert the assumption that slot index 0 is never a valid feedback slot.
STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
- int const slot_id = bytecode_iterator().GetIndexOperand(3);
VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
float const frequency = ComputeCallFrequency(slot_id);
- const Operator* call = javascript()->Call(arg_count + 1, frequency, feedback,
- receiver_hint, tail_call_mode);
- Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
+ const Operator* call = javascript()->Call(arg_count, frequency, feedback,
+ receiver_mode, tail_call_mode);
+ Node* value = ProcessCallArguments(call, args, static_cast<int>(arg_count));
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitCall() {
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kAny);
+void BytecodeGraphBuilder::BuildCallVarArgs(TailCallMode tail_call_mode,
+ ConvertReceiverMode receiver_mode) {
+ DCHECK_EQ(interpreter::Bytecodes::GetReceiverMode(
+ bytecode_iterator().current_bytecode()),
+ receiver_mode);
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ int const slot_id = bytecode_iterator().GetIndexOperand(3);
+
+ Node* receiver_node;
+ interpreter::Register first_arg;
+ int arg_count;
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+    // The receiver is implicit (and undefined); the arguments are in
+ // consecutive registers.
+ receiver_node = jsgraph()->UndefinedConstant();
+ first_arg = first_reg;
+ arg_count = static_cast<int>(reg_count);
+ } else {
+    // The receiver is in the first register, followed by the arguments in
+    // consecutive registers.
+ DCHECK_GE(reg_count, 1);
+ receiver_node = environment()->LookupRegister(first_reg);
+ first_arg = interpreter::Register(first_reg.index() + 1);
+ arg_count = static_cast<int>(reg_count) - 1;
+ }
+
+ Node* const* call_args =
+ GetCallArgumentsFromRegister(callee, receiver_node, first_arg, arg_count);
+ BuildCall(tail_call_mode, receiver_mode, call_args,
+ static_cast<size_t>(2 + arg_count), slot_id);
+}
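
The two branches differ only in how the register run is carved up: with an implicit undefined receiver every register is an argument, otherwise the first register holds the receiver and the arguments follow it. A hedged sketch of just that bookkeeping, with invented names:

#include <cassert>

// Given a run of `reg_count` registers starting at `first_reg`, locate
// the arguments for each receiver mode.
struct ArgRange { int first_arg_reg; int arg_count; };

ArgRange ComputeArgRange(bool receiver_is_undefined, int first_reg,
                         int reg_count) {
  if (receiver_is_undefined) {
    // Implicit undefined receiver: every register is an argument.
    return {first_reg, reg_count};
  }
  // Explicit receiver in the first register; arguments follow it.
  assert(reg_count >= 1);
  return {first_reg + 1, reg_count - 1};
}
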
+
+void BytecodeGraphBuilder::VisitCallAnyReceiver() {
+ BuildCallVarArgs(TailCallMode::kDisallow, ConvertReceiverMode::kAny);
+}
+
+void BytecodeGraphBuilder::VisitCallProperty() {
+ BuildCallVarArgs(TailCallMode::kDisallow,
+ ConvertReceiverMode::kNotNullOrUndefined);
+}
+
+void BytecodeGraphBuilder::VisitCallProperty0() {
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* receiver =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ int const slot_id = bytecode_iterator().GetIndexOperand(2);
+ BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined,
+ {callee, receiver}, slot_id);
+}
+
+void BytecodeGraphBuilder::VisitCallProperty1() {
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* receiver =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ Node* arg0 =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(2));
+ int const slot_id = bytecode_iterator().GetIndexOperand(3);
+ BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined,
+ {callee, receiver, arg0}, slot_id);
+}
+
+void BytecodeGraphBuilder::VisitCallProperty2() {
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* receiver =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ Node* arg0 =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(2));
+ Node* arg1 =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(3));
+ int const slot_id = bytecode_iterator().GetIndexOperand(4);
+ BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined,
+ {callee, receiver, arg0, arg1}, slot_id);
+}
+
+void BytecodeGraphBuilder::VisitCallUndefinedReceiver() {
+ BuildCallVarArgs(TailCallMode::kDisallow,
+ ConvertReceiverMode::kNullOrUndefined);
+}
+
+void BytecodeGraphBuilder::VisitCallUndefinedReceiver0() {
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* receiver = jsgraph()->UndefinedConstant();
+ int const slot_id = bytecode_iterator().GetIndexOperand(1);
+ BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNullOrUndefined,
+ {callee, receiver}, slot_id);
+}
+
+void BytecodeGraphBuilder::VisitCallUndefinedReceiver1() {
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* receiver = jsgraph()->UndefinedConstant();
+ Node* arg0 =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ int const slot_id = bytecode_iterator().GetIndexOperand(2);
+ BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNullOrUndefined,
+ {callee, receiver, arg0}, slot_id);
+}
+
+void BytecodeGraphBuilder::VisitCallUndefinedReceiver2() {
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* receiver = jsgraph()->UndefinedConstant();
+ Node* arg0 =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ Node* arg1 =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(2));
+ int const slot_id = bytecode_iterator().GetIndexOperand(3);
+ BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNullOrUndefined,
+ {callee, receiver, arg0, arg1}, slot_id);
}
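// The 0/1/2-argument visitors above are fixed-arity fast paths: each reads
// its operand registers directly and hands BuildCall an initializer_list,
// skipping the register-window walk that BuildCallVarArgs performs.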
void BytecodeGraphBuilder::VisitCallWithSpread() {
@@ -1340,24 +1529,20 @@ void BytecodeGraphBuilder::VisitCallWithSpread() {
Node* callee =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
- size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
const Operator* call =
- javascript()->CallWithSpread(static_cast<int>(arg_count + 1));
+ javascript()->CallWithSpread(static_cast<int>(reg_count + 1));
- Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
+ Node* value = ProcessCallArguments(call, callee, receiver, reg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitCallProperty() {
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined);
-}
-
void BytecodeGraphBuilder::VisitTailCall() {
TailCallMode tail_call_mode =
bytecode_array_->GetIsolate()->is_tail_call_elimination_enabled()
? TailCallMode::kAllow
: TailCallMode::kDisallow;
- BuildCall(tail_call_mode, ConvertReceiverMode::kAny);
+ BuildCallVarArgs(tail_call_mode, ConvertReceiverMode::kAny);
}
void BytecodeGraphBuilder::VisitCallJSRuntime() {
@@ -1365,118 +1550,127 @@ void BytecodeGraphBuilder::VisitCallJSRuntime() {
Node* callee =
BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
- size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
// Create node to perform the JS runtime call.
- const Operator* call = javascript()->Call(arg_count + 1);
- Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
+ const Operator* call = javascript()->Call(reg_count + 1);
+ Node* value = ProcessCallArguments(call, callee, receiver, reg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
- const Operator* call_runtime_op, interpreter::Register first_arg,
- size_t arity) {
- Node** all = local_zone()->NewArray<Node*>(arity);
- int first_arg_index = first_arg.index();
- for (int i = 0; i < static_cast<int>(arity); ++i) {
+ const Operator* call_runtime_op, interpreter::Register receiver,
+ size_t reg_count) {
+ int arg_count = static_cast<int>(reg_count);
+  // The arity is just the argument count; runtime calls add no extra inputs.
+ int arity = arg_count;
+ Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
+ int first_arg_index = receiver.index();
+ for (int i = 0; i < static_cast<int>(reg_count); ++i) {
all[i] = environment()->LookupRegister(
interpreter::Register(first_arg_index + i));
}
- Node* value = MakeNode(call_runtime_op, static_cast<int>(arity), all, false);
+ Node* value = MakeNode(call_runtime_op, arity, all, false);
return value;
}
void BytecodeGraphBuilder::VisitCallRuntime() {
PrepareEagerCheckpoint();
Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
- interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
- size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
// Create node to perform the runtime call.
- const Operator* call = javascript()->CallRuntime(functionId, arg_count);
- Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ const Operator* call = javascript()->CallRuntime(functionId, reg_count);
+ Node* value = ProcessCallRuntimeArguments(call, receiver, reg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
PrepareEagerCheckpoint();
Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
- interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
- size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
interpreter::Register first_return =
bytecode_iterator().GetRegisterOperand(3);
// Create node to perform the runtime call.
- const Operator* call = javascript()->CallRuntime(functionId, arg_count);
- Node* return_pair = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ const Operator* call = javascript()->CallRuntime(functionId, reg_count);
+ Node* return_pair = ProcessCallRuntimeArguments(call, receiver, reg_count);
environment()->BindRegistersToProjections(first_return, return_pair,
Environment::kAttachFrameState);
}
Node* BytecodeGraphBuilder::ProcessConstructWithSpreadArguments(
const Operator* op, Node* callee, Node* new_target,
- interpreter::Register first_arg, size_t arity) {
- Node** all = local_zone()->NewArray<Node*>(arity);
+ interpreter::Register receiver, size_t reg_count) {
+ int arg_count = static_cast<int>(reg_count);
+  // The arity is the argument count plus two: the callee and the new_target.
+ int arity = arg_count + 2;
+ Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
all[0] = callee;
- int first_arg_index = first_arg.index();
- for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
- all[i] = environment()->LookupRegister(
- interpreter::Register(first_arg_index + i - 1));
+ int first_arg_index = receiver.index();
+ for (int i = 0; i < arg_count; ++i) {
+ all[1 + i] = environment()->LookupRegister(
+ interpreter::Register(first_arg_index + i));
}
all[arity - 1] = new_target;
- Node* value = MakeNode(op, static_cast<int>(arity), all, false);
+ Node* value = MakeNode(op, arity, all, false);
return value;
}
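// Resulting input layout (illustration): all = {callee, arg_0, ..., arg_n-1,
// new_target}, i.e. the Construct-style operators expect the callee as the
// first value input and the new_target as the last.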
void BytecodeGraphBuilder::VisitConstructWithSpread() {
PrepareEagerCheckpoint();
interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
- interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
- size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
Node* new_target = environment()->LookupAccumulator();
Node* callee = environment()->LookupRegister(callee_reg);
const Operator* op =
- javascript()->ConstructWithSpread(static_cast<int>(arg_count) + 2);
+ javascript()->ConstructWithSpread(static_cast<uint32_t>(reg_count + 2));
Node* value = ProcessConstructWithSpreadArguments(op, callee, new_target,
- first_arg, arg_count + 2);
+ receiver, reg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
PrepareEagerCheckpoint();
Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
- interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
- size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
// Create node to perform the runtime call. TurboFan will take care of the
// lowering.
- const Operator* call = javascript()->CallRuntime(functionId, arg_count);
- Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ const Operator* call = javascript()->CallRuntime(functionId, reg_count);
+ Node* value = ProcessCallRuntimeArguments(call, receiver, reg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
Node* BytecodeGraphBuilder::ProcessConstructArguments(
const Operator* call_new_op, Node* callee, Node* new_target,
- interpreter::Register first_arg, size_t arity) {
- Node** all = local_zone()->NewArray<Node*>(arity);
+ interpreter::Register receiver, size_t reg_count) {
+ int arg_count = static_cast<int>(reg_count);
+  // The arity is the argument count plus two: the callee and the new_target.
+ int arity = arg_count + 2;
+ Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
all[0] = callee;
- int first_arg_index = first_arg.index();
- for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
- all[i] = environment()->LookupRegister(
- interpreter::Register(first_arg_index + i - 1));
+ int first_arg_index = receiver.index();
+ for (int i = 0; i < arg_count; ++i) {
+ all[1 + i] = environment()->LookupRegister(
+ interpreter::Register(first_arg_index + i));
}
all[arity - 1] = new_target;
- Node* value = MakeNode(call_new_op, static_cast<int>(arity), all, false);
+ Node* value = MakeNode(call_new_op, arity, all, false);
return value;
}
void BytecodeGraphBuilder::VisitConstruct() {
PrepareEagerCheckpoint();
interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
- interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
- size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
// A slot index of 0 is used to indicate that no feedback slot is available. Assert
// the assumption that slot index 0 is never a valid feedback slot.
STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
@@ -1488,9 +1682,9 @@ void BytecodeGraphBuilder::VisitConstruct() {
float const frequency = ComputeCallFrequency(slot_id);
const Operator* call = javascript()->Construct(
- static_cast<int>(arg_count) + 2, frequency, feedback);
- Node* value = ProcessConstructArguments(call, callee, new_target, first_arg,
- arg_count + 2);
+ static_cast<uint32_t>(reg_count + 2), frequency, feedback);
+ Node* value =
+ ProcessConstructArguments(call, callee, new_target, receiver, reg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
@@ -1499,36 +1693,18 @@ void BytecodeGraphBuilder::VisitThrow() {
Node* value = environment()->LookupAccumulator();
Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
environment()->BindAccumulator(call, Environment::kAttachFrameState);
- Node* control = NewNode(common()->Throw(), call);
+ Node* control = NewNode(common()->Throw());
MergeControlToLeaveFunction(control);
}
void BytecodeGraphBuilder::VisitReThrow() {
BuildLoopExitsForFunctionExit();
Node* value = environment()->LookupAccumulator();
- Node* call = NewNode(javascript()->CallRuntime(Runtime::kReThrow), value);
- Node* control = NewNode(common()->Throw(), call);
+ NewNode(javascript()->CallRuntime(Runtime::kReThrow), value);
+ Node* control = NewNode(common()->Throw());
MergeControlToLeaveFunction(control);
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op,
- Node* left, Node* right,
- FeedbackSlot slot) {
- Node* effect = environment()->GetEffectDependency();
- Node* control = environment()->GetControlDependency();
- JSTypeHintLowering type_hint_lowering(jsgraph(), feedback_vector());
- Reduction early_reduction = type_hint_lowering.ReduceBinaryOperation(
- op, left, right, effect, control, slot);
- if (early_reduction.Changed()) {
- Node* node = early_reduction.replacement();
- if (node->op()->EffectOutputCount() > 0) {
- environment()->UpdateEffectDependency(node);
- }
- return node;
- }
- return nullptr;
-}
-
void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
PrepareEagerCheckpoint();
Node* left =
@@ -1622,8 +1798,7 @@ void BytecodeGraphBuilder::VisitShiftRightLogical() {
void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
PrepareEagerCheckpoint();
- Node* left =
- environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ Node* left = environment()->LookupAccumulator();
Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
Node* node = nullptr;
@@ -1647,10 +1822,26 @@ void BytecodeGraphBuilder::VisitSubSmi() {
BuildBinaryOpWithImmediate(javascript()->Subtract());
}
+void BytecodeGraphBuilder::VisitMulSmi() {
+ BuildBinaryOpWithImmediate(javascript()->Multiply());
+}
+
+void BytecodeGraphBuilder::VisitDivSmi() {
+ BuildBinaryOpWithImmediate(javascript()->Divide());
+}
+
+void BytecodeGraphBuilder::VisitModSmi() {
+ BuildBinaryOpWithImmediate(javascript()->Modulus());
+}
+
void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
BuildBinaryOpWithImmediate(javascript()->BitwiseOr());
}
+void BytecodeGraphBuilder::VisitBitwiseXorSmi() {
+ BuildBinaryOpWithImmediate(javascript()->BitwiseXor());
+}
+
void BytecodeGraphBuilder::VisitBitwiseAndSmi() {
BuildBinaryOpWithImmediate(javascript()->BitwiseAnd());
}
@@ -1663,6 +1854,10 @@ void BytecodeGraphBuilder::VisitShiftRightSmi() {
BuildBinaryOpWithImmediate(javascript()->ShiftRight());
}
+void BytecodeGraphBuilder::VisitShiftRightLogicalSmi() {
+ BuildBinaryOpWithImmediate(javascript()->ShiftRightLogical());
+}
+
void BytecodeGraphBuilder::VisitInc() {
PrepareEagerCheckpoint();
// Note: Use subtract -1 here instead of add 1 to ensure we always convert to
@@ -1745,12 +1940,21 @@ void BytecodeGraphBuilder::VisitGetSuperConstructor() {
Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
+void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
- Node* node = NewNode(js_op, left, right);
+
+ int slot_index = bytecode_iterator().GetIndexOperand(1);
+ DCHECK(slot_index != 0);
+ FeedbackSlot slot = feedback_vector()->ToSlot(slot_index);
+ Node* node = nullptr;
+ if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+ node = simplified;
+ } else {
+ node = NewNode(op, left, right);
+ }
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
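// BuildCompareOp now mirrors the speculative pattern used for binary ops:
// consult the feedback slot first, and only fall back to the generic JS
// operator when no simplified lowering applies.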
@@ -1758,10 +1962,6 @@ void BytecodeGraphBuilder::VisitTestEqual() {
BuildCompareOp(javascript()->Equal(GetCompareOperationHint()));
}
-void BytecodeGraphBuilder::VisitTestNotEqual() {
- BuildCompareOp(javascript()->NotEqual(GetCompareOperationHint()));
-}
-
void BytecodeGraphBuilder::VisitTestEqualStrict() {
BuildCompareOp(javascript()->StrictEqual(GetCompareOperationHint()));
}
@@ -1782,37 +1982,109 @@ void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
BuildCompareOp(javascript()->GreaterThanOrEqual(GetCompareOperationHint()));
}
+void BytecodeGraphBuilder::VisitTestEqualStrictNoFeedback() {
+  // TODO(5310): Currently this is used with both Smi operands and with
+  // string operands. We pass string operands for the static property check in
+  // VisitClassLiteralProperties. This should be changed so that we only use
+  // this for Smi operations and lower it to
+  // SpeculativeNumberEqual[kSignedSmall].
+ PrepareEagerCheckpoint();
+ Node* left =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* right = environment()->LookupAccumulator();
+
+ Node* node = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ left, right);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
+void BytecodeGraphBuilder::BuildTestingOp(const Operator* op) {
+ PrepareEagerCheckpoint();
+ Node* left =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* right = environment()->LookupAccumulator();
+ Node* node = NewNode(op, left, right);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitTestIn() {
- BuildCompareOp(javascript()->HasProperty());
+ BuildTestingOp(javascript()->HasProperty());
}
void BytecodeGraphBuilder::VisitTestInstanceOf() {
- BuildCompareOp(javascript()->InstanceOf());
+ BuildTestingOp(javascript()->InstanceOf());
}
void BytecodeGraphBuilder::VisitTestUndetectable() {
- Node* object =
- environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* object = environment()->LookupAccumulator();
Node* node = NewNode(jsgraph()->simplified()->ObjectIsUndetectable(), object);
environment()->BindAccumulator(node);
}
void BytecodeGraphBuilder::VisitTestNull() {
- Node* object =
- environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* object = environment()->LookupAccumulator();
Node* result = NewNode(simplified()->ReferenceEqual(), object,
jsgraph()->NullConstant());
environment()->BindAccumulator(result);
}
void BytecodeGraphBuilder::VisitTestUndefined() {
- Node* object =
- environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* object = environment()->LookupAccumulator();
Node* result = NewNode(simplified()->ReferenceEqual(), object,
jsgraph()->UndefinedConstant());
environment()->BindAccumulator(result);
}
+void BytecodeGraphBuilder::VisitTestTypeOf() {
+ Node* object = environment()->LookupAccumulator();
+ auto literal_flag = interpreter::TestTypeOfFlags::Decode(
+ bytecode_iterator().GetFlagOperand(0));
+ Node* result;
+ switch (literal_flag) {
+ case interpreter::TestTypeOfFlags::LiteralFlag::kNumber:
+ result = NewNode(simplified()->ObjectIsNumber(), object);
+ break;
+ case interpreter::TestTypeOfFlags::LiteralFlag::kString:
+ result = NewNode(simplified()->ObjectIsString(), object);
+ break;
+ case interpreter::TestTypeOfFlags::LiteralFlag::kSymbol:
+ result = NewNode(simplified()->ObjectIsSymbol(), object);
+ break;
+ case interpreter::TestTypeOfFlags::LiteralFlag::kBoolean:
+ result = NewNode(common()->Select(MachineRepresentation::kTagged),
+ NewNode(simplified()->ReferenceEqual(), object,
+ jsgraph()->TrueConstant()),
+ jsgraph()->TrueConstant(),
+ NewNode(simplified()->ReferenceEqual(), object,
+ jsgraph()->FalseConstant()));
+ break;
+ case interpreter::TestTypeOfFlags::LiteralFlag::kUndefined:
+ result = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(), object,
+ jsgraph()->NullConstant()),
+ jsgraph()->FalseConstant(),
+ graph()->NewNode(simplified()->ObjectIsUndetectable(), object));
+ break;
+ case interpreter::TestTypeOfFlags::LiteralFlag::kFunction:
+ result =
+ graph()->NewNode(simplified()->ObjectIsDetectableCallable(), object);
+ break;
+ case interpreter::TestTypeOfFlags::LiteralFlag::kObject:
+ result = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ObjectIsNonCallable(), object),
+ jsgraph()->TrueConstant(),
+ graph()->NewNode(simplified()->ReferenceEqual(), object,
+ jsgraph()->NullConstant()));
+ break;
+ case interpreter::TestTypeOfFlags::LiteralFlag::kOther:
+ UNREACHABLE(); // Should never be emitted.
+ result = nullptr;
+ break;
+ }
+ environment()->BindAccumulator(result);
+}
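// A minimal standalone sketch (assumed names, not V8 API) of the predicate
// built for the kUndefined case: "undetectable but not null". Both null and
// undefined are marked undetectable, but typeof null is "object", so null
// has to be filtered out explicitly.
static bool TypeofIsUndefined(bool is_null, bool is_undetectable) {
  return is_null ? false : is_undetectable;
}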
+
void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
Node* value = NewNode(js_op, environment()->LookupAccumulator());
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value,
@@ -1828,7 +2100,20 @@ void BytecodeGraphBuilder::VisitToObject() {
}
void BytecodeGraphBuilder::VisitToNumber() {
- BuildCastOperator(javascript()->ToNumber());
+ PrepareEagerCheckpoint();
+ Node* object = environment()->LookupAccumulator();
+
+ Node* node = nullptr;
+ FeedbackSlot slot =
+ feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
+ if (Node* simplified = TryBuildSimplifiedToNumber(object, slot)) {
+ node = simplified;
+ } else {
+ node = NewNode(javascript()->ToNumber(), object);
+ }
+
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node,
+ Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
@@ -1879,6 +2164,14 @@ void BytecodeGraphBuilder::VisitJumpIfNullConstant() {
BuildJumpIfEqual(jsgraph()->NullConstant());
}
+void BytecodeGraphBuilder::VisitJumpIfNotNull() {
+ BuildJumpIfNotEqual(jsgraph()->NullConstant());
+}
+
+void BytecodeGraphBuilder::VisitJumpIfNotNullConstant() {
+ BuildJumpIfNotEqual(jsgraph()->NullConstant());
+}
+
void BytecodeGraphBuilder::VisitJumpIfUndefined() {
BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
@@ -1887,6 +2180,14 @@ void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant() {
BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
+void BytecodeGraphBuilder::VisitJumpIfNotUndefined() {
+ BuildJumpIfNotEqual(jsgraph()->UndefinedConstant());
+}
+
+void BytecodeGraphBuilder::VisitJumpIfNotUndefinedConstant() {
+ BuildJumpIfNotEqual(jsgraph()->UndefinedConstant());
+}
+
void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); }
void BytecodeGraphBuilder::VisitStackCheck() {
@@ -1921,7 +2222,7 @@ void BytecodeGraphBuilder::VisitDebugger() {
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
-void BytecodeGraphBuilder::BuildForInPrepare() {
+void BytecodeGraphBuilder::VisitForInPrepare() {
PrepareEagerCheckpoint();
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1931,8 +2232,6 @@ void BytecodeGraphBuilder::BuildForInPrepare() {
Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
-
void BytecodeGraphBuilder::VisitForInContinue() {
PrepareEagerCheckpoint();
Node* index =
@@ -1945,7 +2244,7 @@ void BytecodeGraphBuilder::VisitForInContinue() {
environment()->BindAccumulator(exit_cond, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildForInNext() {
+void BytecodeGraphBuilder::VisitForInNext() {
PrepareEagerCheckpoint();
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1962,8 +2261,6 @@ void BytecodeGraphBuilder::BuildForInNext() {
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitForInNext() { BuildForInNext(); }
-
void BytecodeGraphBuilder::VisitForInStep() {
PrepareEagerCheckpoint();
Node* index =
@@ -1978,6 +2275,8 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
Node* state = environment()->LookupAccumulator();
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
+ SuspendFlags flags = interpreter::SuspendGeneratorBytecodeFlags::Decode(
+ bytecode_iterator().GetFlagOperand(1));
// The offsets used by the bytecode iterator are relative to a different base
// than what is used in the interpreter, hence the addition.
Node* offset =
@@ -1996,8 +2295,8 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
environment()->LookupRegister(interpreter::Register(i));
}
- MakeNode(javascript()->GeneratorStore(register_count), value_input_count,
- value_inputs, false);
+ MakeNode(javascript()->GeneratorStore(register_count, flags),
+ value_input_count, value_inputs, false);
}
void BytecodeGraphBuilder::VisitResumeGenerator() {
@@ -2164,6 +2463,13 @@ void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
BuildJumpIf(condition);
}
+void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* condition =
+ NewNode(simplified()->ReferenceEqual(), accumulator, comperand);
+ BuildJumpIfNot(condition);
+}
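// Same ReferenceEqual condition as BuildJumpIfEqual above; only the branch
// polarity differs, keeping the two helpers trivially in sync.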
+
void BytecodeGraphBuilder::BuildJumpIfFalse() {
NewBranch(environment()->LookupAccumulator());
Environment* if_true_environment = environment()->Copy();
@@ -2213,6 +2519,119 @@ void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
BuildJumpIf(condition);
}
+Node* BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op,
+ Node* left, Node* right,
+ FeedbackSlot slot) {
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceBinaryOperation(
+ op, left, right, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+Node* BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value,
+ FeedbackSlot slot) {
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceToNumberOperation(
+ value, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
+ Node* receiver,
+ FeedbackSlot slot) {
+ // TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
+ // pruned from the graph by a soft-deopt. It can happen that a LoadIC that
+ // control-dominates the OSR entry is still in "uninitialized" state.
+ if (!osr_ast_id_.IsNone()) return nullptr;
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceLoadNamedOperation(
+ op, receiver, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadKeyed(const Operator* op,
+ Node* receiver,
+ Node* key,
+ FeedbackSlot slot) {
+ // TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
+ // pruned from the graph by a soft-deopt. It can happen that a LoadIC that
+ // control-dominates the OSR entry is still in "uninitialized" state.
+ if (!osr_ast_id_.IsNone()) return nullptr;
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceLoadKeyedOperation(
+ op, receiver, key, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+Node* BytecodeGraphBuilder::TryBuildSimplifiedStoreNamed(const Operator* op,
+ Node* receiver,
+ Node* value,
+ FeedbackSlot slot) {
+ // TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
+  // pruned from the graph by a soft-deopt. It can happen that a StoreIC that
+ // control-dominates the OSR entry is still in "uninitialized" state.
+ if (!osr_ast_id_.IsNone()) return nullptr;
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceStoreNamedOperation(
+ op, receiver, value, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+Node* BytecodeGraphBuilder::TryBuildSimplifiedStoreKeyed(const Operator* op,
+ Node* receiver,
+ Node* key, Node* value,
+ FeedbackSlot slot) {
+ // TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
+  // pruned from the graph by a soft-deopt. It can happen that a StoreIC that
+ // control-dominates the OSR entry is still in "uninitialized" state.
+ if (!osr_ast_id_.IsNone()) return nullptr;
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceStoreKeyedOperation(
+ op, receiver, key, value, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+void BytecodeGraphBuilder::ApplyEarlyReduction(Reduction reduction) {
+ Node* node = reduction.replacement();
+ DCHECK(node->op()->HasProperty(Operator::kNoWrite));
+ if (node->op()->EffectOutputCount() > 0) {
+ environment()->UpdateEffectDependency(node);
+ }
+ if (IrOpcode::IsGraphTerminator(node->opcode())) {
+ MergeControlToLeaveFunction(node);
+ }
+}
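// A reduction may replace the node with a graph terminator (e.g. an
// unconditional Deoptimize when feedback is insufficient); merging control
// here ends the current block instead of extending a dead control chain.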
+
Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
if (size > input_buffer_size_) {
size = size + kInputBufferSizeIncrement + input_buffer_size_;
@@ -2247,7 +2666,8 @@ void BytecodeGraphBuilder::EnterAndExitExceptionHandlers(int current_offset) {
}
Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
- Node** value_inputs, bool incomplete) {
+ Node* const* value_inputs,
+ bool incomplete) {
DCHECK_EQ(op->ValueInputCount(), value_input_count);
bool has_context = OperatorProperties::HasContextInput(op);
@@ -2288,7 +2708,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
}
result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
// Update the current control dependency for control-producing nodes.
- if (NodeProperties::IsControl(result)) {
+ if (result->op()->ControlOutputCount() > 0) {
environment()->UpdateControlDependency(result);
}
// Update the current effect dependency for effect-producing nodes.
@@ -2313,7 +2733,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
set_environment(success_env);
}
// Add implicit success continuation for throwing nodes.
- if (!result->op()->HasProperty(Operator::kNoThrow)) {
+ if (!result->op()->HasProperty(Operator::kNoThrow) && inside_handler) {
const Operator* if_success = common()->IfSuccess();
Node* on_success = graph()->NewNode(if_success, result);
environment()->UpdateControlDependency(on_success);
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 41fcf6851f..809a995dff 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -7,6 +7,7 @@
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-type-hint-lowering.h"
#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
#include "src/interpreter/bytecode-array-iterator.h"
@@ -18,18 +19,20 @@ namespace v8 {
namespace internal {
namespace compiler {
+class Reduction;
class SourcePositionTable;
// The BytecodeGraphBuilder produces a high-level IR graph based on
// interpreter bytecodes.
class BytecodeGraphBuilder {
public:
- BytecodeGraphBuilder(Zone* local_zone, Handle<SharedFunctionInfo> shared,
- Handle<FeedbackVector> feedback_vector,
- BailoutId osr_ast_id, JSGraph* jsgraph,
- float invocation_frequency,
- SourcePositionTable* source_positions,
- int inlining_id = SourcePosition::kNotInlined);
+ BytecodeGraphBuilder(
+ Zone* local_zone, Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector, BailoutId osr_ast_id,
+ JSGraph* jsgraph, float invocation_frequency,
+ SourcePositionTable* source_positions,
+ int inlining_id = SourcePosition::kNotInlined,
+ JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags);
// Creates a graph by visiting bytecodes.
bool CreateGraph(bool stack_check = true);
@@ -104,24 +107,29 @@ class BytecodeGraphBuilder {
// The main node creation chokepoint. Adds context, frame state, effect,
// and control dependencies depending on the operator.
- Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
- bool incomplete);
+ Node* MakeNode(const Operator* op, int value_input_count,
+ Node* const* value_inputs, bool incomplete);
Node** EnsureInputBufferSize(int size);
+ Node* const* GetCallArgumentsFromRegister(Node* callee, Node* receiver,
+ interpreter::Register first_arg,
+ int arg_count);
+ Node* ProcessCallArguments(const Operator* call_op, Node* const* args,
+ int arg_count);
Node* ProcessCallArguments(const Operator* call_op, Node* callee,
- interpreter::Register receiver, size_t arity);
+ interpreter::Register receiver, size_t reg_count);
Node* ProcessConstructArguments(const Operator* call_new_op, Node* callee,
Node* new_target,
- interpreter::Register first_arg,
- size_t arity);
+ interpreter::Register receiver,
+ size_t reg_count);
Node* ProcessConstructWithSpreadArguments(const Operator* op, Node* callee,
Node* new_target,
- interpreter::Register first_arg,
- size_t arity);
+ interpreter::Register receiver,
+ size_t reg_count);
Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
- interpreter::Register first_arg,
- size_t arity);
+ interpreter::Register receiver,
+ size_t reg_count);
// Prepare information for eager deoptimization. This information is carried
// by dedicated {Checkpoint} nodes that are wired into the effect chain.
@@ -150,16 +158,21 @@ class BytecodeGraphBuilder {
void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
void BuildStaLookupSlot(LanguageMode language_mode);
- void BuildCall(TailCallMode tail_call_mode,
- ConvertReceiverMode receiver_hint);
+ void BuildCallVarArgs(TailCallMode tail_call_mode,
+ ConvertReceiverMode receiver_mode);
+ void BuildCall(TailCallMode tail_call_mode, ConvertReceiverMode receiver_mode,
+ Node* const* args, size_t arg_count, int slot_id);
+ void BuildCall(TailCallMode tail_call_mode, ConvertReceiverMode receiver_mode,
+ std::initializer_list<Node*> args, int slot_id) {
+ BuildCall(tail_call_mode, receiver_mode, args.begin(), args.size(),
+ slot_id);
+ }
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
void BuildCompareOp(const Operator* op);
+ void BuildTestingOp(const Operator* op);
void BuildDelete(LanguageMode language_mode);
void BuildCastOperator(const Operator* op);
- void BuildForInPrepare();
- void BuildForInNext();
- void BuildInvokeIntrinsic();
// Optional early lowering to the simplified operator level. Returns the node
// representing the lowered operation or {nullptr} if no lowering available.
@@ -167,6 +180,18 @@ class BytecodeGraphBuilder {
// any other invocation of {NewNode} would do.
Node* TryBuildSimplifiedBinaryOp(const Operator* op, Node* left, Node* right,
FeedbackSlot slot);
+ Node* TryBuildSimplifiedToNumber(Node* input, FeedbackSlot slot);
+ Node* TryBuildSimplifiedLoadNamed(const Operator* op, Node* receiver,
+ FeedbackSlot slot);
+ Node* TryBuildSimplifiedLoadKeyed(const Operator* op, Node* receiver,
+ Node* key, FeedbackSlot slot);
+ Node* TryBuildSimplifiedStoreNamed(const Operator* op, Node* receiver,
+ Node* value, FeedbackSlot slot);
+ Node* TryBuildSimplifiedStoreKeyed(const Operator* op, Node* receiver,
+ Node* key, Node* value, FeedbackSlot slot);
+
+ // Applies the given early reduction onto the current environment.
+ void ApplyEarlyReduction(Reduction reduction);
// Check the context chain for extensions, for lookup fast paths.
Environment* CheckContextExtensions(uint32_t depth);
@@ -188,6 +213,7 @@ class BytecodeGraphBuilder {
void BuildJumpIf(Node* condition);
void BuildJumpIfNot(Node* condition);
void BuildJumpIfEqual(Node* comperand);
+ void BuildJumpIfNotEqual(Node* comperand);
void BuildJumpIfTrue();
void BuildJumpIfFalse();
void BuildJumpIfToBooleanTrue();
@@ -254,6 +280,9 @@ class BytecodeGraphBuilder {
const Handle<FeedbackVector>& feedback_vector() const {
return feedback_vector_;
}
+ const JSTypeHintLowering& type_hint_lowering() const {
+ return type_hint_lowering_;
+ }
const FrameStateFunctionInfo* frame_state_function_info() const {
return frame_state_function_info_;
}
@@ -290,6 +319,7 @@ class BytecodeGraphBuilder {
Handle<BytecodeArray> bytecode_array_;
Handle<HandlerTable> exception_handler_table_;
Handle<FeedbackVector> feedback_vector_;
+ const JSTypeHintLowering type_hint_lowering_;
const FrameStateFunctionInfo* frame_state_function_info_;
const interpreter::BytecodeArrayIterator* bytecode_iterator_;
const BytecodeAnalysis* bytecode_analysis_;
@@ -332,7 +362,7 @@ class BytecodeGraphBuilder {
static int const kBinaryOperationHintIndex = 1;
static int const kCountOperationHintIndex = 0;
- static int const kBinaryOperationSmiHintIndex = 2;
+ static int const kBinaryOperationSmiHintIndex = 1;
DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
};
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 690a52be15..e4795ad0b2 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler.h"
+#include "src/assembler-inl.h"
#include "src/macro-assembler.h"
#include "src/compiler/linkage.h"
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 1ace7dae5e..1bde4c6a4c 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -31,6 +31,11 @@
#define REPEAT_1_TO_7(V, T) REPEAT_1_TO_6(V, T) V(T, T, T, T, T, T, T)
#define REPEAT_1_TO_8(V, T) REPEAT_1_TO_7(V, T) V(T, T, T, T, T, T, T, T)
#define REPEAT_1_TO_9(V, T) REPEAT_1_TO_8(V, T) V(T, T, T, T, T, T, T, T, T)
+#define REPEAT_1_TO_10(V, T) REPEAT_1_TO_9(V, T) V(T, T, T, T, T, T, T, T, T, T)
+#define REPEAT_1_TO_11(V, T) \
+ REPEAT_1_TO_10(V, T) V(T, T, T, T, T, T, T, T, T, T, T)
+#define REPEAT_1_TO_12(V, T) \
+ REPEAT_1_TO_11(V, T) V(T, T, T, T, T, T, T, T, T, T, T, T)
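// Expansion sketch: REPEAT_1_TO_3(V, T) emits V(T) V(T, T) V(T, T, T), so
// each REPEAT_1_TO_N invocation below stamps out one V(...) per arity from
// 1 to N.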
namespace v8 {
namespace internal {
@@ -79,6 +84,21 @@ int CodeAssemblerState::parameter_count() const {
CodeAssembler::~CodeAssembler() {}
+#if DEBUG
+void CodeAssemblerState::PrintCurrentBlock(std::ostream& os) {
+ raw_assembler_->PrintCurrentBlock(os);
+}
+#endif
+
+void CodeAssemblerState::SetInitialDebugInformation(const char* msg,
+ const char* file,
+ int line) {
+#if DEBUG
+ AssemblerDebugInfo debug_info = {msg, file, line};
+ raw_assembler_->SetInitialDebugInformation(debug_info);
+#endif // DEBUG
+}
+
class BreakOnNodeDecorator final : public GraphDecorator {
public:
explicit BreakOnNodeDecorator(NodeId node_id) : node_id_(node_id) {}
@@ -160,6 +180,19 @@ bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
return raw_assembler()->machine()->Float64RoundTruncate().IsSupported();
}
+bool CodeAssembler::IsInt32AbsWithOverflowSupported() const {
+ return raw_assembler()->machine()->Int32AbsWithOverflow().IsSupported();
+}
+
+bool CodeAssembler::IsInt64AbsWithOverflowSupported() const {
+ return raw_assembler()->machine()->Int64AbsWithOverflow().IsSupported();
+}
+
+bool CodeAssembler::IsIntPtrAbsWithOverflowSupported() const {
+ return Is64() ? IsInt64AbsWithOverflowSupported()
+ : IsInt32AbsWithOverflowSupported();
+}
+
Node* CodeAssembler::Int32Constant(int32_t value) {
return raw_assembler()->Int32Constant(value);
}
@@ -277,6 +310,14 @@ void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
return raw_assembler()->PopAndReturn(pop, value);
}
+void CodeAssembler::ReturnIf(Node* condition, Node* value) {
+ Label if_return(this), if_continue(this);
+ Branch(condition, &if_return, &if_continue);
+ Bind(&if_return);
+ Return(value);
+ Bind(&if_continue);
+}
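// Usage sketch (TaggedIsSmi is an assumed helper from a subclass): this
// collapses the common early-return shape
//   Label out(this); GotoIfNot(cond, &out); Return(value); Bind(&out);
// into a single call, e.g. ReturnIf(TaggedIsSmi(object), object);.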
+
void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
void CodeAssembler::Unreachable() {
@@ -306,6 +347,12 @@ void CodeAssembler::Comment(const char* format, ...) {
void CodeAssembler::Bind(Label* label) { return label->Bind(); }
+#if DEBUG
+void CodeAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
+ return label->Bind(debug_info);
+}
+#endif // DEBUG
+
Node* CodeAssembler::LoadFramePointer() {
return raw_assembler()->LoadFramePointer();
}
@@ -391,6 +438,13 @@ Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
return value;
}
+Node* CodeAssembler::ChangeFloat64ToUintPtr(Node* value) {
+ if (raw_assembler()->machine()->Is64()) {
+ return raw_assembler()->ChangeFloat64ToUint64(value);
+ }
+ return raw_assembler()->ChangeFloat64ToUint32(value);
+}
+
Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
if (raw_assembler()->machine()->Is64()) {
return raw_assembler()->RoundInt64ToFloat64(value);
@@ -462,6 +516,26 @@ Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
return raw_assembler()->AtomicStore(rep, base, offset, value);
}
+#define ATOMIC_FUNCTION(name) \
+ Node* CodeAssembler::Atomic##name(MachineType type, Node* base, \
+ Node* offset, Node* value) { \
+ return raw_assembler()->Atomic##name(type, base, offset, value); \
+ }
+ATOMIC_FUNCTION(Exchange);
+ATOMIC_FUNCTION(Add);
+ATOMIC_FUNCTION(Sub);
+ATOMIC_FUNCTION(And);
+ATOMIC_FUNCTION(Or);
+ATOMIC_FUNCTION(Xor);
+#undef ATOMIC_FUNCTION
+
+Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
+ Node* offset, Node* old_value,
+ Node* new_value) {
+ return raw_assembler()->AtomicCompareExchange(type, base, offset, old_value,
+ new_value);
+}
+
Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
DCHECK(Heap::RootCanBeWrittenAfterInitialization(root_index));
Node* roots_array_start =
@@ -520,7 +594,7 @@ Node* CodeAssembler::CallRuntime(Runtime::FunctionId function, Node* context,
return return_value;
}
-// Instantiate CallRuntime() with up to 6 arguments.
+// Instantiate CallRuntime() for argument counts used by CSA-generated code.
#define INSTANTIATE(...) \
template V8_EXPORT_PRIVATE Node* CodeAssembler::CallRuntime( \
Runtime::FunctionId, __VA_ARGS__);
@@ -546,7 +620,7 @@ Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function,
return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
-// Instantiate TailCallRuntime() with up to 6 arguments.
+// Instantiate TailCallRuntime() for argument counts used by CSA-generated code.
#define INSTANTIATE(...) \
template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallRuntime( \
Runtime::FunctionId, __VA_ARGS__);
@@ -561,11 +635,11 @@ Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
return CallStubN(descriptor, result_size, arraysize(nodes), nodes);
}
-// Instantiate CallStubR() with up to 6 arguments.
+// Instantiate CallStubR() for argument counts used by CSA-generated code.
#define INSTANTIATE(...) \
template V8_EXPORT_PRIVATE Node* CodeAssembler::CallStubR( \
const CallInterfaceDescriptor& descriptor, size_t, Node*, __VA_ARGS__);
-REPEAT_1_TO_7(INSTANTIATE, Node*)
+REPEAT_1_TO_8(INSTANTIATE, Node*)
#undef INSTANTIATE
Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
@@ -600,15 +674,15 @@ Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
MachineType::AnyTagged(), result_size);
Node* nodes[] = {target, args..., context};
-
+ CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes));
return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
-// Instantiate TailCallStub() with up to 6 arguments.
+// Instantiate TailCallStub() for argument counts used by CSA-generated code.
#define INSTANTIATE(...) \
template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallStub( \
const CallInterfaceDescriptor& descriptor, Node*, __VA_ARGS__);
-REPEAT_1_TO_7(INSTANTIATE, Node*)
+REPEAT_1_TO_12(INSTANTIATE, Node*)
#undef INSTANTIATE
template <class... TArgs>
@@ -619,10 +693,12 @@ Node* CodeAssembler::TailCallBytecodeDispatch(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount());
Node* nodes[] = {target, args...};
+ CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes));
return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
-// Instantiate TailCallBytecodeDispatch() with 4 arguments.
+// Instantiate TailCallBytecodeDispatch() for argument counts used by
+// CSA-generated code.
template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
Node*, Node*);
@@ -690,6 +766,17 @@ void CodeAssembler::Switch(Node* index, Label* default_label,
labels, case_count);
}
+bool CodeAssembler::UnalignedLoadSupported(const MachineType& machineType,
+ uint8_t alignment) const {
+ return raw_assembler()->machine()->UnalignedLoadSupported(machineType,
+ alignment);
+}
+bool CodeAssembler::UnalignedStoreSupported(const MachineType& machineType,
+ uint8_t alignment) const {
+ return raw_assembler()->machine()->UnalignedStoreSupported(machineType,
+ alignment);
+}
+
// RawMachineAssembler delegate helpers:
Isolate* CodeAssembler::isolate() const { return raw_assembler()->isolate(); }
@@ -707,7 +794,23 @@ RawMachineAssembler* CodeAssembler::raw_assembler() const {
// properly be verified.
class CodeAssemblerVariable::Impl : public ZoneObject {
public:
- explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
+ explicit Impl(MachineRepresentation rep)
+ :
+#if DEBUG
+ debug_info_(AssemblerDebugInfo(nullptr, nullptr, -1)),
+#endif
+ value_(nullptr),
+ rep_(rep) {
+ }
+
+#if DEBUG
+ AssemblerDebugInfo debug_info() const { return debug_info_; }
+ void set_debug_info(AssemblerDebugInfo debug_info) {
+ debug_info_ = debug_info;
+ }
+
+ AssemblerDebugInfo debug_info_;
+#endif // DEBUG
Node* value_;
MachineRepresentation rep_;
};
@@ -725,6 +828,25 @@ CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
Bind(initial_value);
}
+#if DEBUG
+CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
+ AssemblerDebugInfo debug_info,
+ MachineRepresentation rep)
+ : impl_(new (assembler->zone()) Impl(rep)), state_(assembler->state()) {
+ impl_->set_debug_info(debug_info);
+ state_->variables_.insert(impl_);
+}
+
+CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
+ AssemblerDebugInfo debug_info,
+ MachineRepresentation rep,
+ Node* initial_value)
+ : CodeAssemblerVariable(assembler, debug_info, rep) {
+ impl_->set_debug_info(debug_info);
+ Bind(initial_value);
+}
+#endif // DEBUG
+
CodeAssemblerVariable::~CodeAssemblerVariable() {
state_->variables_.erase(impl_);
}
@@ -732,7 +854,18 @@ CodeAssemblerVariable::~CodeAssemblerVariable() {
void CodeAssemblerVariable::Bind(Node* value) { impl_->value_ = value; }
Node* CodeAssemblerVariable::value() const {
- DCHECK_NOT_NULL(impl_->value_);
+#if DEBUG
+ if (!IsBound()) {
+ std::stringstream str;
+ str << "#Use of unbound variable:"
+ << "#\n Variable: " << *this;
+ if (state_) {
+ str << "#\n Current Block: ";
+ state_->PrintCurrentBlock(str);
+ }
+ FATAL(str.str().c_str());
+ }
+#endif // DEBUG
return impl_->value_;
}
@@ -740,9 +873,24 @@ MachineRepresentation CodeAssemblerVariable::rep() const { return impl_->rep_; }
bool CodeAssemblerVariable::IsBound() const { return impl_->value_ != nullptr; }
+std::ostream& operator<<(std::ostream& os,
+ const CodeAssemblerVariable::Impl& impl) {
+#if DEBUG
+ AssemblerDebugInfo info = impl.debug_info();
+ if (info.name) os << "V" << info;
+#endif // DEBUG
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const CodeAssemblerVariable& variable) {
+ os << *variable.impl_;
+ return os;
+}
+
CodeAssemblerLabel::CodeAssemblerLabel(CodeAssembler* assembler,
size_t vars_count,
- CodeAssemblerVariable** vars,
+ CodeAssemblerVariable* const* vars,
CodeAssemblerLabel::Type type)
: bound_(false),
merge_count_(0),
@@ -761,7 +909,7 @@ CodeAssemblerLabel::~CodeAssemblerLabel() { label_->~RawMachineLabel(); }
void CodeAssemblerLabel::MergeVariables() {
++merge_count_;
- for (auto var : state_->variables_) {
+ for (CodeAssemblerVariable::Impl* var : state_->variables_) {
size_t count = 0;
Node* node = var->value_;
if (node != nullptr) {
@@ -796,19 +944,42 @@ void CodeAssemblerLabel::MergeVariables() {
// the variable after the label bind (it's not possible to add phis to
// the bound label after the fact, just make sure to list the variable
// in the label's constructor's list of merged variables).
- DCHECK(find_if(i->second.begin(), i->second.end(),
- [node](Node* e) -> bool { return node != e; }) ==
- i->second.end());
+#if DEBUG
+ if (find_if(i->second.begin(), i->second.end(),
+ [node](Node* e) -> bool { return node != e; }) !=
+ i->second.end()) {
+ std::stringstream str;
+ str << "Unmerged variable found when jumping to block. \n"
+ << "# Variable: " << *var;
+ if (bound_) {
+ str << "\n# Target block: " << *label_->block();
+ }
+ str << "\n# Current Block: ";
+ state_->PrintCurrentBlock(str);
+ FATAL(str.str().c_str());
+ }
+#endif // DEBUG
}
}
}
}
}
+#if DEBUG
+void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
+ DCHECK(!bound_);
+ state_->raw_assembler_->Bind(label_, debug_info);
+ UpdateVariablesAfterBind();
+}
+#endif // DEBUG
+
void CodeAssemblerLabel::Bind() {
DCHECK(!bound_);
state_->raw_assembler_->Bind(label_);
+ UpdateVariablesAfterBind();
+}
+void CodeAssemblerLabel::UpdateVariablesAfterBind() {
// Make sure that all variables that have changed along any path up to this
// point are marked as merge variables.
for (auto var : state_->variables_) {
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 8808a82f8e..86275ee0a0 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -85,6 +85,8 @@ typedef std::function<void()> CodeAssemblerCallback;
V(Float64Mod) \
V(Float64Atan2) \
V(Float64Pow) \
+ V(Float64Max) \
+ V(Float64Min) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(IntPtrAddWithOverflow) \
@@ -151,6 +153,7 @@ typedef std::function<void()> CodeAssemblerCallback;
V(TruncateInt64ToInt32) \
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToUint32) \
+ V(ChangeFloat64ToUint64) \
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
@@ -164,6 +167,9 @@ typedef std::function<void()> CodeAssemblerCallback;
V(Float64RoundTruncate) \
V(Word32Clz) \
V(Word32Not) \
+ V(Int32AbsWithOverflow) \
+ V(Int64AbsWithOverflow) \
+ V(IntPtrAbsWithOverflow) \
V(Word32BinaryNot)
// A "public" interface used by components outside of compiler directory to
@@ -198,6 +204,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool IsFloat64RoundDownSupported() const;
bool IsFloat64RoundTiesEvenSupported() const;
bool IsFloat64RoundTruncateSupported() const;
+ bool IsInt32AbsWithOverflowSupported() const;
+ bool IsInt64AbsWithOverflowSupported() const;
+ bool IsIntPtrAbsWithOverflowSupported() const;
// Shortened aliases for use in CodeAssembler subclasses.
typedef CodeAssemblerLabel Label;
@@ -234,11 +243,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Return(Node* value1, Node* value2, Node* value3);
void PopAndReturn(Node* pop, Node* value);
+ void ReturnIf(Node* condition, Node* value);
+
void DebugBreak();
void Unreachable();
void Comment(const char* format, ...);
void Bind(Label* label);
+#if DEBUG
+ void Bind(Label* label, AssemblerDebugInfo debug_info);
+#endif // DEBUG
void Goto(Label* label);
void GotoIf(Node* condition, Label* true_label);
void GotoIfNot(Node* condition, Label* false_label);
@@ -272,6 +286,23 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
+  // Exchanges the value at a raw memory location.
+ Node* AtomicExchange(MachineType type, Node* base, Node* offset, Node* value);
+
+  // Compares and exchanges the value at a raw memory location.
+ Node* AtomicCompareExchange(MachineType type, Node* base, Node* offset,
+ Node* old_value, Node* new_value);
+
+ Node* AtomicAdd(MachineType type, Node* base, Node* offset, Node* value);
+
+ Node* AtomicSub(MachineType type, Node* base, Node* offset, Node* value);
+
+ Node* AtomicAnd(MachineType type, Node* base, Node* offset, Node* value);
+
+ Node* AtomicOr(MachineType type, Node* base, Node* offset, Node* value);
+
+ Node* AtomicXor(MachineType type, Node* base, Node* offset, Node* value);
+
// Store a value to the root array.
Node* StoreRoot(Heap::RootListIndex root_index, Node* value);
@@ -292,6 +323,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
+  // Changes a double to a uintptr_t for pointer arithmetic outside of Smi
+  // range. Assumes that the double can be exactly represented as an integer.
+ Node* ChangeFloat64ToUintPtr(Node* value);
+
// Changes an intptr_t to a double, e.g. for storing an element index
// outside Smi range in a HeapNumber. Lossless on 32-bit,
// rounds on 64-bit (which doesn't affect valid element indices).
@@ -395,6 +430,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void BreakOnNode(int node_id);
+ bool UnalignedLoadSupported(const MachineType& machineType,
+ uint8_t alignment) const;
+ bool UnalignedStoreSupported(const MachineType& machineType,
+ uint8_t alignment) const;
+
protected:
void RegisterCallGenerationCallbacks(
const CodeAssemblerCallback& call_prologue,
@@ -419,6 +459,13 @@ class CodeAssemblerVariable {
MachineRepresentation rep);
CodeAssemblerVariable(CodeAssembler* assembler, MachineRepresentation rep,
Node* initial_value);
+#if DEBUG
+ CodeAssemblerVariable(CodeAssembler* assembler, AssemblerDebugInfo debug_info,
+ MachineRepresentation rep);
+ CodeAssemblerVariable(CodeAssembler* assembler, AssemblerDebugInfo debug_info,
+ MachineRepresentation rep, Node* initial_value);
+#endif // DEBUG
+
~CodeAssemblerVariable();
void Bind(Node* value);
Node* value() const;
@@ -426,13 +473,18 @@ class CodeAssemblerVariable {
bool IsBound() const;
private:
+ class Impl;
friend class CodeAssemblerLabel;
friend class CodeAssemblerState;
- class Impl;
+ friend std::ostream& operator<<(std::ostream&, const Impl&);
+ friend std::ostream& operator<<(std::ostream&, const CodeAssemblerVariable&);
Impl* impl_;
CodeAssemblerState* state_;
};
+std::ostream& operator<<(std::ostream&, const CodeAssemblerVariable&);
+std::ostream& operator<<(std::ostream&, const CodeAssemblerVariable::Impl&);
+
class CodeAssemblerLabel {
public:
enum Type { kDeferred, kNonDeferred };
@@ -448,18 +500,30 @@ class CodeAssemblerLabel {
: CodeAssemblerLabel(assembler, merged_variables.length(),
&(merged_variables[0]), type) {}
CodeAssemblerLabel(
- CodeAssembler* assembler, size_t count, CodeAssemblerVariable** vars,
+ CodeAssembler* assembler, size_t count,
+ CodeAssemblerVariable* const* vars,
CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred);
CodeAssemblerLabel(
+ CodeAssembler* assembler,
+ std::initializer_list<CodeAssemblerVariable*> vars,
+ CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+ : CodeAssemblerLabel(assembler, vars.size(), vars.begin(), type) {}
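// Usage sketch (assumed variable names): the initializer_list overload lets
// call sites write
//   Label done(this, {&var_result, &var_index});
// instead of building a CodeAssemblerVariable* array by hand.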
+ CodeAssemblerLabel(
CodeAssembler* assembler, CodeAssemblerVariable* merged_variable,
CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
: CodeAssemblerLabel(assembler, 1, &merged_variable, type) {}
~CodeAssemblerLabel();
+ inline bool is_bound() const { return bound_; }
+
private:
friend class CodeAssembler;
void Bind();
+#if DEBUG
+ void Bind(AssemblerDebugInfo debug_info);
+#endif // DEBUG
+ void UpdateVariablesAfterBind();
void MergeVariables();
bool bound_;
@@ -493,6 +557,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
const char* name() const { return name_; }
int parameter_count() const;
+#if DEBUG
+ void PrintCurrentBlock(std::ostream& os);
+#endif // DEBUG
+ void SetInitialDebugInformation(const char* msg, const char* file, int line);
+
private:
friend class CodeAssembler;
friend class CodeAssemblerLabel;
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index bdedbecf26..7f09b8524e 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -214,15 +214,6 @@ class OutOfLineCode : public ZoneObject {
OutOfLineCode* const next_;
};
-
-// TODO(dcarney): generify this on bleeding_edge and replace this call
-// when merged.
-static inline void FinishCode(MacroAssembler* masm) {
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
- masm->CheckConstPool(true, false);
-#endif
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index bbd9452c84..3723a98ebe 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -5,12 +5,14 @@
#include "src/compiler/code-generator.h"
#include "src/address-map.h"
+#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/frames-inl.h"
+#include "src/macro-assembler-inl.h"
namespace v8 {
namespace internal {
@@ -207,7 +209,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
- FinishCode(masm());
+ FinishCode();
// Emit the jump tables.
if (jump_tables_) {
@@ -218,10 +220,13 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
- safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
-
+ // The PerfJitLogger logs code up until here, excluding the safepoint
+ // table. Resolve the unwinding info now so it is aware of the same code size
+ // as reported by perf.
unwinding_info_writer_.Finish(masm()->pc_offset());
+ safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
+
Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
masm(), unwinding_info_writer_.eh_frame_writer(), info, Handle<Object>());
result->set_is_turbofanned(true);
@@ -698,9 +703,13 @@ void CodeGenerator::TranslateStateValueDescriptor(
TranslateStateValueDescriptor(field.desc, field.nested, translation,
iter);
}
- } else if (desc->IsArguments()) {
+ } else if (desc->IsArgumentsElements()) {
+ if (translation != nullptr) {
+ translation->ArgumentsElements(desc->is_rest());
+ }
+ } else if (desc->IsArgumentsLength()) {
if (translation != nullptr) {
- translation->BeginArgumentsObject(0);
+ translation->ArgumentsLength(desc->is_rest());
}
} else if (desc->IsDuplicate()) {
if (translation != nullptr) {
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 74958d05f3..b4873ff2d8 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -178,6 +178,8 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot);
+ void FinishCode();
+
// ===========================================================================
// ============== Architecture-specific gap resolver methods. ================
// ===========================================================================
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 637b0646b5..0b98d575b1 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -354,7 +354,7 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfException, Operator::kKontrol, 0, 1, 1, 1, 1, 1) \
V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
+ V(Throw, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
V(OsrLoopEntry, Operator::kFoldable | Operator::kNoThrow, 0, 1, 1, 0, 1, 1) \
@@ -1232,11 +1232,24 @@ const Operator* CommonOperatorBuilder::TypedStateValues(
TypedStateValueInfo(types, bitmask)); // parameters
}
-const Operator* CommonOperatorBuilder::ArgumentsObjectState() {
- return new (zone()) Operator( // --
- IrOpcode::kArgumentsObjectState, Operator::kPure, // opcode
- "ArgumentsObjectState", // name
- 0, 0, 0, 1, 0, 0); // counts
+const Operator* CommonOperatorBuilder::ArgumentsElementsState(bool is_rest) {
+ return new (zone()) Operator1<bool>( // --
+ IrOpcode::kArgumentsElementsState, Operator::kPure, // opcode
+ "ArgumentsElementsState", // name
+ 0, 0, 0, 1, 0, 0, is_rest); // counts
+}
+
+const Operator* CommonOperatorBuilder::ArgumentsLengthState(bool is_rest) {
+ return new (zone()) Operator1<bool>( // --
+ IrOpcode::kArgumentsLengthState, Operator::kPure, // opcode
+ "ArgumentsLengthState", // name
+ 0, 0, 0, 1, 0, 0, is_rest); // counts
+}
+
+bool IsRestOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kArgumentsElementsState ||
+ op->opcode() == IrOpcode::kArgumentsLengthState);
+ return OpParameter<bool>(op);
}
const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots) {
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 46829593a4..d54bcc5311 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -297,6 +297,11 @@ SparseInputMask SparseInputMaskOf(Operator const*);
ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
WARN_UNUSED_RESULT;
+// The ArgumentsElementsState and ArgumentsLengthState can either describe an
+// unmapped arguments backing store or the backing store of the rest parameters.
+// IsRestOf(op) is true in the second case.
+bool IsRestOf(Operator const*);
+
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
class V8_EXPORT_PRIVATE CommonOperatorBuilder final
@@ -362,7 +367,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* StateValues(int arguments, SparseInputMask bitmask);
const Operator* TypedStateValues(const ZoneVector<MachineType>* types,
SparseInputMask bitmask);
- const Operator* ArgumentsObjectState();
+ const Operator* ArgumentsElementsState(bool is_rest);
+ const Operator* ArgumentsLengthState(bool is_rest);
const Operator* ObjectState(int pointer_slots);
const Operator* TypedObjectState(const ZoneVector<MachineType>* types);
const Operator* FrameState(BailoutId bailout_id,
diff --git a/deps/v8/src/compiler/control-equivalence.cc b/deps/v8/src/compiler/control-equivalence.cc
index af1a11565c..2144b844a3 100644
--- a/deps/v8/src/compiler/control-equivalence.cc
+++ b/deps/v8/src/compiler/control-equivalence.cc
@@ -15,7 +15,7 @@ namespace internal {
namespace compiler {
void ControlEquivalence::Run(Node* exit) {
- if (GetClass(exit) == kInvalidClass) {
+ if (!Participates(exit) || GetClass(exit) == kInvalidClass) {
DetermineParticipation(exit);
RunUndirectedDFS(exit);
}
@@ -28,10 +28,6 @@ STATIC_CONST_MEMBER_DEFINITION const size_t ControlEquivalence::kInvalidClass;
void ControlEquivalence::VisitPre(Node* node) {
TRACE("CEQ: Pre-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
-
- // Dispense a new pre-order number.
- SetNumber(node, NewDFSNumber());
- TRACE(" Assigned DFS number is %zu\n", GetNumber(node));
}
@@ -105,7 +101,7 @@ void ControlEquivalence::RunUndirectedDFS(Node* exit) {
++(entry.input);
if (NodeProperties::IsControlEdge(edge)) {
// Visit next control input.
- if (!GetData(input)->participates) continue;
+ if (!Participates(input)) continue;
if (GetData(input)->visited) continue;
if (GetData(input)->on_stack) {
// Found backedge if input is on stack.
@@ -135,7 +131,7 @@ void ControlEquivalence::RunUndirectedDFS(Node* exit) {
++(entry.use);
if (NodeProperties::IsControlEdge(edge)) {
// Visit next control use.
- if (!GetData(use)->participates) continue;
+ if (!Participates(use)) continue;
if (GetData(use)->visited) continue;
if (GetData(use)->on_stack) {
// Found backedge if use is on stack.
@@ -168,8 +164,8 @@ void ControlEquivalence::RunUndirectedDFS(Node* exit) {
void ControlEquivalence::DetermineParticipationEnqueue(ZoneQueue<Node*>& queue,
Node* node) {
- if (!GetData(node)->participates) {
- GetData(node)->participates = true;
+ if (!Participates(node)) {
+ AllocateData(node);
queue.push(node);
}
}
@@ -191,7 +187,7 @@ void ControlEquivalence::DetermineParticipation(Node* exit) {
void ControlEquivalence::DFSPush(DFSStack& stack, Node* node, Node* from,
DFSDirection dir) {
- DCHECK(GetData(node)->participates);
+ DCHECK(Participates(node));
DCHECK(!GetData(node)->visited);
GetData(node)->on_stack = true;
Node::InputEdges::iterator input = node->input_edges().begin();
diff --git a/deps/v8/src/compiler/control-equivalence.h b/deps/v8/src/compiler/control-equivalence.h
index b76e04fe43..e6aa7f5072 100644
--- a/deps/v8/src/compiler/control-equivalence.h
+++ b/deps/v8/src/compiler/control-equivalence.h
@@ -38,7 +38,7 @@ class V8_EXPORT_PRIVATE ControlEquivalence final
graph_(graph),
dfs_number_(0),
class_number_(1),
- node_data_(graph->NodeCount(), EmptyData(), zone) {}
+ node_data_(graph->NodeCount(), zone) {}
// Run the main algorithm starting from the {exit} control node. This causes
// the following iterations over control edges of the graph:
@@ -80,17 +80,21 @@ class V8_EXPORT_PRIVATE ControlEquivalence final
// The stack is used during the undirected DFS walk.
typedef ZoneStack<DFSStackEntry> DFSStack;
- struct NodeData {
+ struct NodeData : ZoneObject {
+ explicit NodeData(Zone* zone)
+ : class_number(kInvalidClass),
+ blist(BracketList(zone)),
+ visited(false),
+ on_stack(false) {}
+
size_t class_number; // Equivalence class number assigned to node.
- size_t dfs_number; // Pre-order DFS number assigned to node.
- bool visited; // Indicates node has already been visited.
- bool on_stack; // Indicates node is on DFS stack during walk.
- bool participates; // Indicates node participates in DFS walk.
BracketList blist; // List of brackets per node.
+ bool visited : 1; // Indicates node has already been visited.
+ bool on_stack : 1; // Indicates node is on DFS stack during walk.
};
// The per-node data computed during the DFS walk.
- typedef ZoneVector<NodeData> Data;
+ typedef ZoneVector<NodeData*> Data;
// Called at pre-visit during DFS walk.
void VisitPre(Node* node);
@@ -126,32 +130,34 @@ class V8_EXPORT_PRIVATE ControlEquivalence final
private:
NodeData* GetData(Node* node) {
size_t const index = node->id();
- if (index >= node_data_.size()) node_data_.resize(index + 1, EmptyData());
- return &node_data_[index];
+ if (index >= node_data_.size()) node_data_.resize(index + 1);
+ return node_data_[index];
+ }
+ void AllocateData(Node* node) {
+ size_t const index = node->id();
+ if (index >= node_data_.size()) node_data_.resize(index + 1);
+ node_data_[index] = new (zone_) NodeData(zone_);
}
+
int NewClassNumber() { return class_number_++; }
int NewDFSNumber() { return dfs_number_++; }
- // Template used to initialize per-node data.
- NodeData EmptyData() {
- return {kInvalidClass, 0, false, false, false, BracketList(zone_)};
- }
-
- // Accessors for the DFS number stored within the per-node data.
- size_t GetNumber(Node* node) { return GetData(node)->dfs_number; }
- void SetNumber(Node* node, size_t number) {
- GetData(node)->dfs_number = number;
- }
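+ // A node participates in the DFS walk exactly when per-node data has been
+ // allocated for it.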
+ bool Participates(Node* node) { return GetData(node) != nullptr; }
// Accessors for the equivalence class stored within the per-node data.
size_t GetClass(Node* node) { return GetData(node)->class_number; }
void SetClass(Node* node, size_t number) {
+ DCHECK(Participates(node));
GetData(node)->class_number = number;
}
// Accessors for the bracket list stored within the per-node data.
- BracketList& GetBracketList(Node* node) { return GetData(node)->blist; }
+ BracketList& GetBracketList(Node* node) {
+ DCHECK(Participates(node));
+ return GetData(node)->blist;
+ }
void SetBracketList(Node* node, BracketList& list) {
+ DCHECK(Participates(node));
GetData(node)->blist = list;
}
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 865e909ad8..0e48932c8d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -148,8 +148,8 @@ void RemoveRegionNode(Node* node) {
node->Kill();
}
-void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
- CommonOperatorBuilder* common,
+void TryCloneBranch(Node* node, BasicBlock* block, Zone* temp_zone,
+ Graph* graph, CommonOperatorBuilder* common,
BlockEffectControlMap* block_effects,
SourcePositionTable* source_positions) {
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
@@ -216,7 +216,7 @@ void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
// Grab the IfTrue/IfFalse projections of the Branch.
BranchMatcher matcher(branch);
// Check/collect other Phi/EffectPhi nodes hanging off the Merge.
- NodeVector phis(graph->zone());
+ NodeVector phis(temp_zone);
for (Node* const use : merge->uses()) {
if (use == branch || use == cond) continue;
// We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
@@ -456,8 +456,8 @@ void EffectControlLinearizer::Run() {
case BasicBlock::kBranch:
ProcessNode(block->control_input(), &frame_state, &effect, &control);
- TryCloneBranch(block->control_input(), block, graph(), common(),
- &block_effects, source_positions_);
+ TryCloneBranch(block->control_input(), block, temp_zone(), graph(),
+ common(), &block_effects, source_positions_);
break;
}
@@ -485,22 +485,6 @@ void EffectControlLinearizer::Run() {
}
}
-namespace {
-
-void TryScheduleCallIfSuccess(Node* node, Node** control) {
- // Schedule the call's IfSuccess node if there is no exception use.
- if (!NodeProperties::IsExceptionalCall(node)) {
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsControlEdge(edge) &&
- edge.from()->opcode() == IrOpcode::kIfSuccess) {
- *control = edge.from();
- }
- }
- }
-}
-
-} // namespace
-
void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
Node** effect, Node** control) {
SourcePositionTable::Scope scope(source_positions_,
@@ -584,13 +568,9 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
for (int i = 0; i < node->op()->ControlInputCount(); i++) {
NodeProperties::ReplaceControlInput(node, *control, i);
}
- // Update the current control and wire IfSuccess right after calls.
+ // Update the current control.
if (node->op()->ControlOutputCount() > 0) {
*control = node;
- if (node->opcode() == IrOpcode::kCall) {
- // Schedule the call's IfSuccess node (if there is no exception use).
- TryScheduleCallIfSuccess(node, control);
- }
}
}
@@ -640,6 +620,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTruncateTaggedToBit:
result = LowerTruncateTaggedToBit(node);
break;
+ case IrOpcode::kTruncateTaggedPointerToBit:
+ result = LowerTruncateTaggedPointerToBit(node);
+ break;
case IrOpcode::kTruncateTaggedToFloat64:
result = LowerTruncateTaggedToFloat64(node);
break;
@@ -721,6 +704,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kObjectIsDetectableCallable:
result = LowerObjectIsDetectableCallable(node);
break;
+ case IrOpcode::kObjectIsNaN:
+ result = LowerObjectIsNaN(node);
+ break;
case IrOpcode::kObjectIsNonCallable:
result = LowerObjectIsNonCallable(node);
break;
@@ -736,11 +722,17 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kObjectIsString:
result = LowerObjectIsString(node);
break;
+ case IrOpcode::kObjectIsSymbol:
+ result = LowerObjectIsSymbol(node);
+ break;
case IrOpcode::kObjectIsUndetectable:
result = LowerObjectIsUndetectable(node);
break;
- case IrOpcode::kNewRestParameterElements:
- result = LowerNewRestParameterElements(node);
+ case IrOpcode::kArgumentsFrame:
+ result = LowerArgumentsFrame(node);
+ break;
+ case IrOpcode::kArgumentsLength:
+ result = LowerArgumentsLength(node);
break;
case IrOpcode::kNewUnmappedArgumentsElements:
result = LowerNewUnmappedArgumentsElements(node);
@@ -837,8 +829,62 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
#define __ gasm()->
Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
+ CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
- return AllocateHeapNumberWithValue(value);
+
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+ auto if_heapnumber =
+ __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred,
+ 1 + (mode == CheckForMinusZeroMode::kCheckForMinusZero) +
+ !machine()->Is64());
+ auto if_int32 = __ MakeLabel<1>();
+
+ Node* value32 = __ RoundFloat64ToInt32(value);
+ __ GotoIf(__ Float64Equal(value, __ ChangeInt32ToFloat64(value32)),
+ &if_int32);
+ __ Goto(&if_heapnumber);
+
+ __ Bind(&if_int32);
+ {
+ if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+ Node* zero = __ Int32Constant(0);
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto if_smi = __ MakeLabel<2>();
+
+ __ GotoIf(__ Word32Equal(value32, zero), &if_zero);
+ __ Goto(&if_smi);
+
+ __ Bind(&if_zero);
+ {
+ // If the value is 0, check the high word's sign bit to detect the IEEE -0 pattern.
+ __ GotoIf(__ Int32LessThan(__ Float64ExtractHighWord32(value), zero),
+ &if_heapnumber);
+ __ Goto(&if_smi);
+ }
+
+ __ Bind(&if_smi);
+ }
+
+ if (machine()->Is64()) {
+ Node* value_smi = ChangeInt32ToSmi(value32);
+ __ Goto(&done, value_smi);
+ } else {
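+ // On 32-bit, a Smi is the value shifted left by one bit. Adding
+ // {value32} to itself performs that shift and sets the overflow flag
+ // whenever the result leaves Smi range.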
+ Node* add = __ Int32AddWithOverflow(value32, value32);
+ Node* ovf = __ Projection(1, add);
+ __ GotoIf(ovf, &if_heapnumber);
+ Node* value_smi = __ Projection(0, add);
+ __ Goto(&done, value_smi);
+ }
+ }
+
+ __ Bind(&if_heapnumber);
+ {
+ Node* value_number = AllocateHeapNumberWithValue(value);
+ __ Goto(&done, value_number);
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
}
Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
@@ -978,6 +1024,53 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerTruncateTaggedPointerToBit(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_heapnumber = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<5>(MachineRepresentation::kBit);
+
+ Node* zero = __ Int32Constant(0);
+ Node* fzero = __ Float64Constant(0.0);
+
+ // Check if {value} is false.
+ __ GotoIf(__ WordEqual(value, __ FalseConstant()), &done, zero);
+
+ // Check if {value} is the empty string.
+ __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), &done, zero);
+
+ // Load the map of {value}.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+
+ // Check if the {value} is undetectable and immediately return false.
+ Node* value_map_bitfield =
+ __ LoadField(AccessBuilder::ForMapBitField(), value_map);
+ __ GotoUnless(
+ __ Word32Equal(__ Word32And(value_map_bitfield,
+ __ Int32Constant(1 << Map::kIsUndetectable)),
+ zero),
+ &done, zero);
+
+ // Check if {value} is a HeapNumber.
+ __ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
+ &if_heapnumber);
+
+ // All other values that reach here are true.
+ __ Goto(&done, __ Int32Constant(1));
+
+ __ Bind(&if_heapnumber);
+ {
+ // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
+ // NaN.
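+ // The check 0.0 < |value| below is false for 0.0 and -0.0, and also for
+ // NaN, since any comparison against NaN yields false.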
+ Node* value_value =
+ __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
Node* value = node->InputAt(0);
@@ -1483,9 +1576,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
- Node* max_int = __ Int32Constant(std::numeric_limits<int32_t>::max());
- Node* is_safe = __ Uint32LessThanOrEqual(value, max_int);
- __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, is_safe, frame_state);
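+ // A uint32 value fits into int32 exactly when its sign bit is clear, so
+ // a signed comparison against zero suffices.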
+ Node* unsafe = __ Int32LessThan(value, __ Int32Constant(0));
+ __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, unsafe, frame_state);
return value;
}
@@ -1715,6 +1807,29 @@ Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerObjectIsNaN(Node* node) {
+ Node* value = node->InputAt(0);
+ Node* zero = __ Int32Constant(0);
+
+ auto done = __ MakeLabel<3>(MachineRepresentation::kBit);
+
+ // Check if {value} is a Smi.
+ __ GotoIf(ObjectIsSmi(value), &done, zero);
+
+ // Check if {value} is a HeapNumber.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ __ GotoUnless(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+ zero);
+
+ // Check if {value} contains a NaN.
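+ // NaN is the only float64 value that compares unequal to itself.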
+ Node* value_value = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ Goto(&done,
+ __ Word32Equal(__ Float64Equal(value_value, value_value), zero));
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
Node* value = node->InputAt(0);
@@ -1813,6 +1928,28 @@ Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerObjectIsSymbol(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* vfalse =
+ __ Word32Equal(value_instance_type, __ Uint32Constant(SYMBOL_TYPE));
+ __ Goto(&done, vfalse);
+
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
Node* value = node->InputAt(0);
@@ -1839,21 +1976,83 @@ Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerNewRestParameterElements(Node* node) {
- int const formal_parameter_count = ParameterCountOf(node->op());
+Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
+ Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
+ int formal_parameter_count = FormalParameterCountOf(node->op());
+ bool is_rest_length = IsRestLengthOf(node->op());
+ DCHECK(formal_parameter_count >= 0);
- Callable const callable = CodeFactory::NewRestParameterElements(isolate());
- Operator::Properties const properties = node->op()->properties();
- CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()),
- __ IntPtrConstant(formal_parameter_count),
- __ NoContextConstant());
+ if (is_rest_length) {
+ // The ArgumentsLength node is computing the number of rest parameters,
+ // which is max(0, actual_parameter_count - formal_parameter_count).
+ // We have to distinguish the case when there is an arguments adaptor frame
+ // (i.e., arguments_frame != LoadFramePointer()).
+ auto if_adaptor_frame = __ MakeLabel<1>();
+ auto done = __ MakeLabel<3>(MachineRepresentation::kTaggedSigned);
+
+ Node* frame = __ LoadFramePointer();
+ __ GotoIf(__ WordEqual(arguments_frame, frame), &done, __ SmiConstant(0));
+ __ Goto(&if_adaptor_frame);
+
+ __ Bind(&if_adaptor_frame);
+ Node* arguments_length = __ Load(
+ MachineType::TaggedSigned(), arguments_frame,
+ __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ Node* rest_length =
+ __ IntSub(arguments_length, __ SmiConstant(formal_parameter_count));
+ __ GotoIf(__ IntLessThan(rest_length, __ SmiConstant(0)), &done,
+ __ SmiConstant(0));
+ __ Goto(&done, rest_length);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+ } else {
+ // The ArgumentsLength node is computing the actual number of arguments.
+ // We have to distinguish the case when there is an arguments adaptor frame
+ // (i.e., arguments_frame != LoadFramePointer()).
+ auto if_adaptor_frame = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTaggedSigned);
+
+ Node* frame = __ LoadFramePointer();
+ __ GotoIf(__ WordEqual(arguments_frame, frame), &done,
+ __ SmiConstant(formal_parameter_count));
+ __ Goto(&if_adaptor_frame);
+
+ __ Bind(&if_adaptor_frame);
+ Node* arguments_length = __ Load(
+ MachineType::TaggedSigned(), arguments_frame,
+ __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Goto(&done, arguments_length);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+ }
+}
+
+Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
+ auto done = __ MakeLabel<2>(MachineType::PointerRepresentation());
+
+ Node* frame = __ LoadFramePointer();
+ Node* parent_frame =
+ __ Load(MachineType::AnyTagged(), frame,
+ __ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
+ Node* parent_frame_type = __ Load(
+ MachineType::AnyTagged(), parent_frame,
+ __ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset));
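+ // If the caller frame is an arguments adaptor frame, the arguments are
+ // stored there; otherwise they live in the current frame.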
+ __ GotoIf(__ WordEqual(parent_frame_type,
+ __ IntPtrConstant(StackFrame::TypeToMarker(
+ StackFrame::ARGUMENTS_ADAPTOR))),
+ &done, parent_frame);
+ __ Goto(&done, frame);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
}
Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
- int const formal_parameter_count = ParameterCountOf(node->op());
+ Node* frame = NodeProperties::GetValueInput(node, 0);
+ Node* length = NodeProperties::GetValueInput(node, 1);
Callable const callable =
CodeFactory::NewUnmappedArgumentsElements(isolate());
@@ -1861,8 +2060,7 @@ Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()),
- __ IntPtrConstant(formal_parameter_count),
+ return __ Call(desc, __ HeapConstant(callable.code()), frame, length,
__ NoContextConstant());
}
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 016d6025c1..a1eb03cd11 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -77,17 +77,21 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
Node* LowerChangeTaggedToFloat64(Node* node);
Node* LowerTruncateTaggedToBit(Node* node);
+ Node* LowerTruncateTaggedPointerToBit(Node* node);
Node* LowerTruncateTaggedToFloat64(Node* node);
Node* LowerTruncateTaggedToWord32(Node* node);
Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
Node* LowerObjectIsDetectableCallable(Node* node);
+ Node* LowerObjectIsNaN(Node* node);
Node* LowerObjectIsNonCallable(Node* node);
Node* LowerObjectIsNumber(Node* node);
Node* LowerObjectIsReceiver(Node* node);
Node* LowerObjectIsSmi(Node* node);
Node* LowerObjectIsString(Node* node);
+ Node* LowerObjectIsSymbol(Node* node);
Node* LowerObjectIsUndetectable(Node* node);
- Node* LowerNewRestParameterElements(Node* node);
+ Node* LowerArgumentsFrame(Node* node);
+ Node* LowerArgumentsLength(Node* node);
Node* LowerNewUnmappedArgumentsElements(Node* node);
Node* LowerArrayBufferWasNeutered(Node* node);
Node* LowerStringCharAt(Node* node);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index c05092e06e..d59f4a931b 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -6,6 +6,8 @@
#include "src/compiler/all-nodes.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
#include "src/counters.h"
namespace v8 {
@@ -44,6 +46,8 @@ Reduction EscapeAnalysisReducer::ReduceNode(Node* node) {
case IrOpcode::kStoreField:
case IrOpcode::kStoreElement:
return ReduceStore(node);
+ case IrOpcode::kCheckMaps:
+ return ReduceCheckMaps(node);
case IrOpcode::kAllocate:
return ReduceAllocate(node);
case IrOpcode::kFinishRegion:
@@ -84,6 +88,9 @@ Reduction EscapeAnalysisReducer::ReduceNode(Node* node) {
}
return NoChange();
}
+ case IrOpcode::kNewUnmappedArgumentsElements:
+ arguments_elements_.insert(node);
+ break;
default:
// TODO(sigurds): Change this to GetFrameStateInputCount once
// it is working. For now we use EffectInputCount > 0 to determine
@@ -166,6 +173,21 @@ Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
return NoChange();
}
+Reduction EscapeAnalysisReducer::ReduceCheckMaps(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kCheckMaps);
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+ fully_reduced_.Add(node->id());
+ }
+ if (escape_analysis()->IsVirtual(
+ SkipTypeGuards(NodeProperties::GetValueInput(node, 0))) &&
+ !escape_analysis()->IsEscaped(node)) {
+ TRACE("Removed #%d (%s) from effect chain\n", node->id(),
+ node->op()->mnemonic());
+ RelaxEffectsAndControls(node);
+ return Changed(node);
+ }
+ return NoChange();
+}
Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
@@ -380,6 +402,120 @@ void EscapeAnalysisReducer::VerifyReplacement() const {
#endif // DEBUG
}
+void EscapeAnalysisReducer::Finalize() {
+ for (Node* node : arguments_elements_) {
+ DCHECK(node->opcode() == IrOpcode::kNewUnmappedArgumentsElements);
+
+ Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
+ if (arguments_frame->opcode() != IrOpcode::kArgumentsFrame) continue;
+ Node* arguments_length = NodeProperties::GetValueInput(node, 1);
+ if (arguments_length->opcode() != IrOpcode::kArgumentsLength) continue;
+
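+ // Redirect frame-state uses of the length to an ArgumentsLengthState,
+ // which the deoptimizer can materialize from the frame itself.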
+ Node* arguments_length_state = nullptr;
+ for (Edge edge : arguments_length->use_edges()) {
+ Node* use = edge.from();
+ switch (use->opcode()) {
+ case IrOpcode::kObjectState:
+ case IrOpcode::kTypedObjectState:
+ case IrOpcode::kStateValues:
+ case IrOpcode::kTypedStateValues:
+ if (!arguments_length_state) {
+ arguments_length_state = jsgraph()->graph()->NewNode(
+ jsgraph()->common()->ArgumentsLengthState(
+ IsRestLengthOf(arguments_length->op())));
+ NodeProperties::SetType(arguments_length_state,
+ Type::OtherInternal());
+ }
+ edge.UpdateTo(arguments_length_state);
+ break;
+ default:
+ break;
+ }
+ }
+
+ bool escaping_use = false;
+ ZoneVector<Node*> loads(zone());
+ for (Edge edge : node->use_edges()) {
+ Node* use = edge.from();
+ if (!NodeProperties::IsValueEdge(edge)) continue;
+ if (use->use_edges().empty()) {
+ // A node without uses is dead, so we don't need to worry about it.
+ continue;
+ }
+ switch (use->opcode()) {
+ case IrOpcode::kStateValues:
+ case IrOpcode::kTypedStateValues:
+ case IrOpcode::kObjectState:
+ case IrOpcode::kTypedObjectState:
+ break;
+ case IrOpcode::kLoadElement:
+ loads.push_back(use);
+ break;
+ case IrOpcode::kLoadField:
+ if (FieldAccessOf(use->op()).offset == FixedArray::kLengthOffset) {
+ loads.push_back(use);
+ } else {
+ escaping_use = true;
+ }
+ break;
+ default:
+ // If the arguments elements node is used by an unhandled node,
+ // then we cannot remove this allocation.
+ escaping_use = true;
+ break;
+ }
+ if (escaping_use) break;
+ }
+ if (!escaping_use) {
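+ // Every use is either a frame state or a load, so the elements
+ // allocation can be removed and the loads rewritten to read directly
+ // from the stack.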
+ Node* arguments_elements_state = jsgraph()->graph()->NewNode(
+ jsgraph()->common()->ArgumentsElementsState(
+ IsRestLengthOf(arguments_length->op())));
+ NodeProperties::SetType(arguments_elements_state, Type::OtherInternal());
+ ReplaceWithValue(node, arguments_elements_state);
+
+ ElementAccess stack_access;
+ stack_access.base_is_tagged = BaseTaggedness::kUntaggedBase;
+ // Reduce base address by {kPointerSize} such that (length - index)
+ // resolves to the right position.
+ stack_access.header_size =
+ CommonFrameConstants::kFixedFrameSizeAboveFp - kPointerSize;
+ stack_access.type = Type::NonInternal();
+ stack_access.machine_type = MachineType::AnyTagged();
+ stack_access.write_barrier_kind = WriteBarrierKind::kNoWriteBarrier;
+ const Operator* load_stack_op =
+ jsgraph()->simplified()->LoadElement(stack_access);
+
+ for (Node* load : loads) {
+ switch (load->opcode()) {
+ case IrOpcode::kLoadElement: {
+ Node* index = NodeProperties::GetValueInput(load, 1);
+ // {offset} is a reversed index starting from 1. The base address is
+ // adapted to allow offsets starting from 1.
+ Node* offset = jsgraph()->graph()->NewNode(
+ jsgraph()->simplified()->NumberSubtract(), arguments_length,
+ index);
+ NodeProperties::SetType(offset,
+ TypeCache::Get().kArgumentsLengthType);
+ NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
+ NodeProperties::ReplaceValueInput(load, offset, 1);
+ NodeProperties::ChangeOp(load, load_stack_op);
+ break;
+ }
+ case IrOpcode::kLoadField: {
+ DCHECK_EQ(FieldAccessOf(load->op()).offset,
+ FixedArray::kLengthOffset);
+ Node* length = NodeProperties::GetValueInput(node, 1);
+ ReplaceWithValue(load, length);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 01c2ae118b..4373fa4c66 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -26,6 +26,8 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
Reduction Reduce(Node* node) final;
+ void Finalize() override;
+
// Verifies that all virtual allocation nodes have been dealt with. Run it
// after this reducer has been applied. Has no effect in release mode.
void VerifyReplacement() const;
@@ -36,6 +38,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
Reduction ReduceNode(Node* node);
Reduction ReduceLoad(Node* node);
Reduction ReduceStore(Node* node);
+ Reduction ReduceCheckMaps(Node* node);
Reduction ReduceAllocate(Node* node);
Reduction ReduceFinishRegion(Node* node);
Reduction ReduceReferenceEqual(Node* node);
@@ -57,6 +60,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
// and nodes that do not need a visit from ReduceDeoptState etc.
BitVector fully_reduced_;
bool exists_virtual_allocate_;
+ std::set<Node*> arguments_elements_;
bool compilation_failed_ = false;
DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 255e74eac1..2e0adc6f85 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -635,6 +635,11 @@ void EscapeStatusAnalysis::ResizeStatusVector() {
size_t EscapeStatusAnalysis::GetStatusVectorSize() { return status_.size(); }
void EscapeStatusAnalysis::RunStatusAnalysis() {
+ // TODO(tebbi): This checks for faulty VirtualObject states, which can happen
+ // due to bug https://bugs.chromium.org/p/v8/issues/detail?id=6302. As a
+ // workaround, we set everything to escaped if such a faulty state is
+ // detected.
+ bool all_objects_complete = object_analysis_->AllObjectsComplete();
ResizeStatusVector();
while (!status_stack_.empty()) {
Node* node = status_stack_.back();
@@ -642,6 +647,7 @@ void EscapeStatusAnalysis::RunStatusAnalysis() {
status_[node->id()] &= ~kOnStack;
Process(node);
status_[node->id()] |= kVisited;
+ if (!all_objects_complete) SetEscaped(node);
}
}
@@ -807,6 +813,7 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kStateValues:
case IrOpcode::kReferenceEqual:
case IrOpcode::kFinishRegion:
+ case IrOpcode::kCheckMaps:
if (IsEscaped(use) && SetEscaped(rep)) {
TRACE(
"Setting #%d (%s) to escaped because of use by escaping node "
@@ -839,10 +846,12 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kStringCharCodeAt:
case IrOpcode::kStringIndexOf:
case IrOpcode::kObjectIsDetectableCallable:
+ case IrOpcode::kObjectIsNaN:
case IrOpcode::kObjectIsNonCallable:
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsString:
+ case IrOpcode::kObjectIsSymbol:
case IrOpcode::kObjectIsUndetectable:
if (SetEscaped(rep)) {
TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
@@ -989,6 +998,25 @@ bool EscapeStatusAnalysis::IsNotReachable(Node* node) {
return aliases_[node->id()] == kNotReachable;
}
+bool EscapeAnalysis::AllObjectsComplete() {
+ for (VirtualState* state : virtual_states_) {
+ if (state) {
+ for (size_t i = 0; i < state->size(); ++i) {
+ if (VirtualObject* object = state->VirtualObjectFromAlias(i)) {
+ if (!object->AllFieldsClear()) {
+ for (size_t i = 0; i < object->field_count(); ++i) {
+ if (object->GetField(i) == nullptr) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+
void EscapeAnalysis::RunObjectAnalysis() {
virtual_states_.resize(graph()->NodeCount());
ZoneDeque<Node*> queue(zone());
@@ -1032,6 +1060,7 @@ void EscapeAnalysis::RunObjectAnalysis() {
danglers.clear();
}
}
+
#ifdef DEBUG
if (FLAG_trace_turbo_escape) {
DebugPrint();
@@ -1125,6 +1154,9 @@ bool EscapeAnalysis::Process(Node* node) {
case IrOpcode::kLoadElement:
ProcessLoadElement(node);
break;
+ case IrOpcode::kCheckMaps:
+ ProcessCheckMaps(node);
+ break;
case IrOpcode::kStart:
ProcessStart(node);
break;
@@ -1162,6 +1194,10 @@ void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
case IrOpcode::kFinishRegion:
case IrOpcode::kObjectIsSmi:
break;
+ case IrOpcode::kCheckMaps: {
+ CheckMapsParameters params = CheckMapsParametersOf(node->op());
+ if (params.flags() == CheckMapsFlag::kNone) break;
+ } // Fallthrough.
default:
VirtualState* state = virtual_states_[node->id()];
if (VirtualObject* obj =
@@ -1515,6 +1551,46 @@ void EscapeAnalysis::ProcessLoadField(Node* node) {
}
}
+void EscapeAnalysis::ProcessCheckMaps(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kCheckMaps);
+ ForwardVirtualState(node);
+ Node* checked = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
+ if (FLAG_turbo_experimental) {
+ VirtualState* state = virtual_states_[node->id()];
+ if (VirtualObject* object = GetVirtualObject(state, checked)) {
+ if (!object->IsTracked()) {
+ if (status_analysis_->SetEscaped(node)) {
+ TRACE(
+ "Setting #%d (%s) to escaped because checked object #%i is not "
+ "tracked\n",
+ node->id(), node->op()->mnemonic(), object->id());
+ }
+ return;
+ }
+ CheckMapsParameters params = CheckMapsParametersOf(node->op());
+
+ Node* value = object->GetField(HeapObject::kMapOffset / kPointerSize);
+ if (value) {
+ value = ResolveReplacement(value);
+ // TODO(tebbi): We want to extend this beyond constant folding with a
+ // CheckMapsValue operator that takes the load-eliminated map value as
+ // input.
+ if (value->opcode() == IrOpcode::kHeapConstant &&
+ params.maps().contains(ZoneHandleSet<Map>(
+ Handle<Map>::cast(OpParameter<Handle<HeapObject>>(value))))) {
+ TRACE("CheckMaps #%i seems to be redundant (until now).\n",
+ node->id());
+ return;
+ }
+ }
+ }
+ }
+ if (status_analysis_->SetEscaped(node)) {
+ TRACE("Setting #%d (%s) to escaped (checking #%i)\n", node->id(),
+ node->op()->mnemonic(), checked->id());
+ }
+}
+
void EscapeAnalysis::ProcessLoadElement(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kLoadElement);
ForwardVirtualState(node);
@@ -1650,6 +1726,8 @@ Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
for (size_t i = 0; i < vobj->field_count(); ++i) {
if (Node* field = vobj->GetField(i)) {
cache_->fields().push_back(ResolveReplacement(field));
+ } else {
+ return nullptr;
}
}
int input_count = static_cast<int>(cache_->fields().size());
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 52edc4be0b..e5e8aa362a 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -37,6 +37,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
bool IsCyclicObjectState(Node* effect, Node* node);
bool ExistsVirtualAllocate();
bool SetReplacement(Node* node, Node* rep);
+ bool AllObjectsComplete();
private:
void RunObjectAnalysis();
@@ -45,6 +46,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
void ProcessStoreField(Node* node);
void ProcessLoadElement(Node* node);
void ProcessStoreElement(Node* node);
+ void ProcessCheckMaps(Node* node);
void ProcessAllocationUsers(Node* node);
void ProcessAllocation(Node* node);
void ProcessFinishRegion(Node* node);
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index 1ba1044eab..be90a33a21 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -148,7 +148,8 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
move->SetPending();
// We may need to split moves between FP locations differently.
- bool is_fp_loc_move = !kSimpleFPAliasing && destination.IsFPLocationOperand();
+ const bool is_fp_loc_move =
+ !kSimpleFPAliasing && destination.IsFPLocationOperand();
// Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, unpending move with a source the same as this one's
@@ -158,7 +159,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
if (other->IsEliminated()) continue;
if (other->IsPending()) continue;
if (other->source().InterferesWith(destination)) {
- if (!kSimpleFPAliasing && is_fp_loc_move &&
+ if (is_fp_loc_move &&
LocationOperand::cast(other->source()).representation() >
split_rep_) {
// 'other' must also be an FP location move. Break it into fragments
@@ -213,7 +214,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
move->Eliminate();
// Update outstanding moves whose source may now have been moved.
- if (!kSimpleFPAliasing && is_fp_loc_move) {
+ if (is_fp_loc_move) {
// We may have to split larger moves.
for (size_t i = 0; i < moves->size(); ++i) {
auto other = (*moves)[i];
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index dbeff87ee0..12746c2b13 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -56,6 +56,10 @@ Node* GraphAssembler::CEntryStubConstant(int result_size) {
return jsgraph()->CEntryStubConstant(result_size);
}
+Node* GraphAssembler::LoadFramePointer() {
+ return graph()->NewNode(machine()->LoadFramePointer());
+}
+
#define SINGLETON_CONST_DEF(Name) \
Node* GraphAssembler::Name() { return jsgraph()->Name(); }
JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DEF)
@@ -95,8 +99,8 @@ Node* GraphAssembler::Projection(int index, Node* value) {
Node* GraphAssembler::Allocate(PretenureFlag pretenure, Node* size) {
return current_effect_ =
- graph()->NewNode(simplified()->Allocate(NOT_TENURED), size,
- current_effect_, current_control_);
+ graph()->NewNode(simplified()->Allocate(Type::Any(), NOT_TENURED),
+ size, current_effect_, current_control_);
}
Node* GraphAssembler::LoadField(FieldAccess const& access, Node* object) {
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 057e78184e..8b4f0c37a7 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -41,6 +41,7 @@ namespace compiler {
V(Word32Shl) \
V(IntAdd) \
V(IntSub) \
+ V(IntLessThan) \
V(UintLessThan) \
V(Int32Add) \
V(Int32Sub) \
@@ -244,6 +245,8 @@ class GraphAssembler {
Node* CEntryStubConstant(int result_size);
Node* ExternalConstant(ExternalReference ref);
+ Node* LoadFramePointer();
+
#define SINGLETON_CONST_DECL(Name) Node* Name();
JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DECL)
#undef SINGLETON_CONST_DECL
diff --git a/deps/v8/src/compiler/graph-replay.cc b/deps/v8/src/compiler/graph-replay.cc
deleted file mode 100644
index df0160d46c..0000000000
--- a/deps/v8/src/compiler/graph-replay.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/graph-replay.h"
-
-#include "src/compiler/all-nodes.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
-#include "src/compiler/operator.h"
-#include "src/compiler/operator-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#ifdef DEBUG
-
-void GraphReplayPrinter::PrintReplay(Graph* graph) {
- GraphReplayPrinter replay;
- PrintF(" Node* nil = graph()->NewNode(common()->Dead());\n");
- Zone zone(graph->zone()->allocator(), ZONE_NAME);
- AllNodes nodes(&zone, graph);
-
- // Allocate the nodes first.
- for (Node* node : nodes.reachable) {
- PrintReplayOpCreator(node->op());
- PrintF(" Node* n%d = graph()->NewNode(op", node->id());
- for (int i = 0; i < node->InputCount(); ++i) {
- PrintF(", nil");
- }
- PrintF("); USE(n%d);\n", node->id());
- }
-
- // Connect the nodes to their inputs.
- for (Node* node : nodes.reachable) {
- for (int i = 0; i < node->InputCount(); i++) {
- PrintF(" n%d->ReplaceInput(%d, n%d);\n", node->id(), i,
- node->InputAt(i)->id());
- }
- }
-}
-
-
-void GraphReplayPrinter::PrintReplayOpCreator(const Operator* op) {
- IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
- const char* builder = IrOpcode::IsCommonOpcode(opcode) ? "common" : "js";
- const char* mnemonic = IrOpcode::IsCommonOpcode(opcode)
- ? IrOpcode::Mnemonic(opcode)
- : IrOpcode::Mnemonic(opcode) + 2;
- PrintF(" op = %s()->%s(", builder, mnemonic);
- switch (opcode) {
- case IrOpcode::kParameter:
- PrintF("%d", ParameterIndexOf(op));
- break;
- case IrOpcode::kNumberConstant:
- PrintF("%g", OpParameter<double>(op));
- break;
- case IrOpcode::kHeapConstant:
- PrintF("unique_constant");
- break;
- case IrOpcode::kPhi:
- PrintF("kMachAnyTagged, %d", op->ValueInputCount());
- break;
- case IrOpcode::kStateValues:
- PrintF("%d", op->ValueInputCount());
- break;
- case IrOpcode::kEffectPhi:
- PrintF("%d", op->EffectInputCount());
- break;
- case IrOpcode::kLoop:
- case IrOpcode::kMerge:
- PrintF("%d", op->ControlInputCount());
- break;
- case IrOpcode::kStart:
- PrintF("%d", op->ValueOutputCount() - 3);
- break;
- case IrOpcode::kFrameState:
- PrintF("JS_FRAME, BailoutId(-1), OutputFrameStateCombine::Ignore()");
- break;
- default:
- break;
- }
- PrintF(");\n");
-}
-
-#endif // DEBUG
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/graph-replay.h b/deps/v8/src/compiler/graph-replay.h
deleted file mode 100644
index be89ebd045..0000000000
--- a/deps/v8/src/compiler/graph-replay.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GRAPH_REPLAY_H_
-#define V8_COMPILER_GRAPH_REPLAY_H_
-
-#include "src/compiler/node.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Forward declarations.
-class Graph;
-
-// Helper class to print a full replay of a graph. This replay can be used to
-// materialize the same graph within a C++ unit test and hence test subsequent
-// optimization passes on a graph without going through the construction steps.
-class GraphReplayPrinter {
- public:
-#ifdef DEBUG
- static void PrintReplay(Graph* graph);
-#else
- static void PrintReplay(Graph* graph) {}
-#endif
-
- private:
- GraphReplayPrinter() {}
-
- static void PrintReplayOpCreator(const Operator* op);
-
- DISALLOW_COPY_AND_ASSIGN(GraphReplayPrinter);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_GRAPH_REPLAY_H_
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 6fb7cfa644..1e861c7b15 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -104,59 +104,6 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9};
return NewNode(op, arraysize(nodes), nodes);
}
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
- Node* n11) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
- Node* n11, Node* n12) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
- Node* n11, Node* n12, Node* n13) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
- Node* n11, Node* n12, Node* n13, Node* n14) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7,
- n8, n9, n10, n11, n12, n13, n14};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
- Node* n11, Node* n12, Node* n13, Node* n14, Node* n15) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8,
- n9, n10, n11, n12, n13, n14, n15};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
- Node* n11, Node* n12, Node* n13, Node* n14, Node* n15,
- Node* n16) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8,
- n9, n10, n11, n12, n13, n14, n15, n16};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
- Node* n11, Node* n12, Node* n13, Node* n14, Node* n15,
- Node* n16, Node* n17) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9,
- n10, n11, n12, n13, n14, n15, n16, n17};
- return NewNode(op, arraysize(nodes), nodes);
- }
// Clone the {node}, and assign a new node id to the copy.
Node* CloneNode(const Node* node);
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 369699067e..9dbf19c3f5 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -760,6 +760,33 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ add(esp, Immediate(kDoubleSize)); \
} while (false)
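+// Dispatches on the instruction's addressing mode: a register/memory form
+// when a memory operand was selected, otherwise the immediate or register
+// forms.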
+#define ASSEMBLE_BINOP(asm_instr) \
+ do { \
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+ size_t index = 1; \
+ Operand right = i.MemoryOperand(&index); \
+ __ asm_instr(i.InputRegister(0), right); \
+ } else { \
+ if (HasImmediateInput(instr, 1)) { \
+ __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
+ } else { \
+ __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
+ } \
+ } \
+ } while (0)
+
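+// Expands to a compare-and-swap loop; ASSEMBLE_ATOMIC_BINOP(add, mov, cmpxchg)
+// becomes, roughly:
+//   binop: mov eax, [mem]        ; load the old value
+//          mov tmp, eax
+//          add tmp, src          ; compute the new value
+//          lock cmpxchg [mem], tmp
+//          jne binop             ; retry if another thread wrote in between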
+#define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
+ do { \
+ Label binop; \
+ __ bind(&binop); \
+ __ mov_inst(eax, i.MemoryOperand(1)); \
+ __ mov_inst(i.TempRegister(0), Operand(eax)); \
+ __ bin_inst(i.TempRegister(0), i.InputRegister(0)); \
+ __ lock(); \
+ __ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0)); \
+ __ j(not_equal, &binop); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -1130,18 +1157,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(tanh);
break;
case kIA32Add:
- if (HasImmediateInput(instr, 1)) {
- __ add(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ add(i.InputRegister(0), i.InputOperand(1));
- }
+ ASSEMBLE_BINOP(add);
break;
case kIA32And:
- if (HasImmediateInput(instr, 1)) {
- __ and_(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ and_(i.InputRegister(0), i.InputOperand(1));
- }
+ ASSEMBLE_BINOP(and_);
break;
case kIA32Cmp:
ASSEMBLE_COMPARE(cmp);
@@ -1189,25 +1208,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ neg(i.OutputOperand());
break;
case kIA32Or:
- if (HasImmediateInput(instr, 1)) {
- __ or_(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ or_(i.InputRegister(0), i.InputOperand(1));
- }
+ ASSEMBLE_BINOP(or_);
break;
case kIA32Xor:
- if (HasImmediateInput(instr, 1)) {
- __ xor_(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ xor_(i.InputRegister(0), i.InputOperand(1));
- }
+ ASSEMBLE_BINOP(xor_);
break;
case kIA32Sub:
- if (HasImmediateInput(instr, 1)) {
- __ sub(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ sub(i.InputRegister(0), i.InputOperand(1));
- }
+ ASSEMBLE_BINOP(sub);
break;
case kIA32Shl:
if (HasImmediateInput(instr, 1)) {
@@ -1614,10 +1621,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kSSEFloat64InsertLowWord32:
- __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0, true);
break;
case kSSEFloat64InsertHighWord32:
- __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1, true);
break;
case kSSEFloat64LoadLowWord32:
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
@@ -1888,22 +1895,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kIA32Xchgb: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchg_b(i.InputRegister(index), operand);
+ case kIA32I32x4Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ movd(dst, i.InputOperand(0));
+ __ pshufd(dst, dst, 0x0);
break;
}
- case kIA32Xchgw: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchg_w(i.InputRegister(index), operand);
+ case kIA32I32x4ExtractLane: {
+ __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
- case kIA32Xchgl: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchg(i.InputRegister(index), operand);
+ case kIA32I32x4ReplaceLane: {
+ __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ break;
+ }
+ case kSSEI32x4Add: {
+ __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kSSEI32x4Sub: {
+ __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI32x4Add: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kAVXI32x4Sub: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kCheckedLoadInt8:
@@ -1952,6 +1975,90 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
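+ // On ia32, xchg with a memory operand is implicitly locked, so no lock
+ // prefix is needed; the narrow variants then sign- or zero-extend the
+ // result to 32 bits.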
+ case kAtomicExchangeInt8: {
+ __ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
+ __ movsx_b(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kAtomicExchangeUint8: {
+ __ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
+ __ movzx_b(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kAtomicExchangeInt16: {
+ __ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
+ __ movsx_w(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kAtomicExchangeUint16: {
+ __ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
+ __ movzx_w(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kAtomicExchangeWord32: {
+ __ xchg(i.InputRegister(0), i.MemoryOperand(1));
+ break;
+ }
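+ // cmpxchg implicitly takes the expected old value in eax and leaves the
+ // observed value there, which doubles as the instruction's result.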
+ case kAtomicCompareExchangeInt8: {
+ __ lock();
+ __ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
+ __ movsx_b(eax, eax);
+ break;
+ }
+ case kAtomicCompareExchangeUint8: {
+ __ lock();
+ __ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzx_b(eax, eax);
+ break;
+ }
+ case kAtomicCompareExchangeInt16: {
+ __ lock();
+ __ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
+ __ movsx_w(eax, eax);
+ break;
+ }
+ case kAtomicCompareExchangeUint16: {
+ __ lock();
+ __ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzx_w(eax, eax);
+ break;
+ }
+ case kAtomicCompareExchangeWord32: {
+ __ lock();
+ __ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
+ break;
+ }
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kAtomic##op##Int8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movsx_b(eax, eax); \
+ break; \
+ } \
+ case kAtomic##op##Uint8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movzx_b(eax, eax); \
+ break; \
+ } \
+ case kAtomic##op##Int16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movsx_w(eax, eax); \
+ break; \
+ } \
+ case kAtomic##op##Uint16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movzx_w(eax, eax); \
+ break; \
+ } \
+ case kAtomic##op##Word32: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
+ break; \
+ }
+ ATOMIC_BINOP_CASE(Add, add)
+ ATOMIC_BINOP_CASE(Sub, sub)
+ ATOMIC_BINOP_CASE(And, and_)
+ ATOMIC_BINOP_CASE(Or, or_)
+ ATOMIC_BINOP_CASE(Xor, xor_)
+#undef ATOMIC_BINOP_CASE
case kAtomicLoadInt8:
case kAtomicLoadUint8:
case kAtomicLoadInt16:
@@ -2179,7 +2286,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ }
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2423,6 +2532,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
}
+void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 7cf0a11045..8bdfd0988d 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -111,9 +111,13 @@ namespace compiler {
V(IA32PushFloat64) \
V(IA32Poke) \
V(IA32StackCheck) \
- V(IA32Xchgb) \
- V(IA32Xchgw) \
- V(IA32Xchgl)
+ V(IA32I32x4Splat) \
+ V(IA32I32x4ExtractLane) \
+ V(IA32I32x4ReplaceLane) \
+ V(SSEI32x4Add) \
+ V(SSEI32x4Sub) \
+ V(AVXI32x4Add) \
+ V(AVXI32x4Sub)
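+// Opcodes prefixed SSE/AVX come in pairs; the instruction selector is
+// expected to pick the AVX encoding when CpuFeatures::IsSupported(AVX) holds
+// and to fall back to the SSE form otherwise.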
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 3216b1de0b..68db94fcff 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -97,6 +97,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
+ case kIA32I32x4Splat:
+ case kIA32I32x4ExtractLane:
+ case kIA32I32x4ReplaceLane:
+ case kSSEI32x4Add:
+ case kSSEI32x4Sub:
+ case kAVXI32x4Add:
+ case kAVXI32x4Sub:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -128,11 +135,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Poke:
return kHasSideEffect;
- case kIA32Xchgb:
- case kIA32Xchgw:
- case kIA32Xchgl:
- return kIsLoadOperation | kHasSideEffect;
-
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index a5f72c70b2..6fd1ad5656 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -39,6 +39,11 @@ class IA32OperandGenerator final : public OperandGenerator {
MachineRepresentation rep =
LoadRepresentationOf(input->op()).representation();
switch (opcode) {
+ case kIA32And:
+ case kIA32Or:
+ case kIA32Xor:
+ case kIA32Add:
+ case kIA32Sub:
case kIA32Cmp:
case kIA32Test:
return rep == MachineRepresentation::kWord32 ||
@@ -533,7 +538,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- InstructionOperand inputs[4];
+ InstructionOperand inputs[6];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
@@ -554,12 +559,26 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegister(left);
inputs[input_count++] = g.UseImmediate(right);
} else {
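+    // When the result feeds a branch, the instruction is emitted at the
+    // branch, so the effect level is taken from there. A load on the
+    // right-hand side can then be folded in as a memory operand provided its
+    // effect level matches, i.e. nothing side-effecting intervenes between
+    // the load and this use.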
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
if (node->op()->HasProperty(Operator::kCommutative) &&
- g.CanBeBetterLeftOperand(right)) {
+ g.CanBeBetterLeftOperand(right) &&
+ (!g.CanBeBetterLeftOperand(left) ||
+ !g.CanBeMemoryOperand(opcode, node, right, effect_level))) {
std::swap(left, right);
}
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.Use(right);
+ if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
+ inputs[input_count++] = g.UseRegister(left);
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ } else {
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.Use(right);
+ }
}
if (cont->IsBranch()) {
@@ -873,7 +892,9 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
- V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)
+ V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
+ V(I32x4Add, kAVXI32x4Add, kSSEI32x4Add) \
+ V(I32x4Sub, kAVXI32x4Sub, kSSEI32x4Sub)
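+// Entries in this list pair a three-operand, non-destructive AVX opcode with
+// a two-operand SSE opcode whose destination doubles as the first source;
+// which one is emitted depends on AVX support at selection time.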
#define FLOAT_UNOP_LIST(V) \
V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
@@ -1688,13 +1709,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kIA32Xchgb;
+ opcode = kAtomicExchangeInt8;
break;
case MachineRepresentation::kWord16:
- opcode = kIA32Xchgw;
+ opcode = kAtomicExchangeInt16;
break;
case MachineRepresentation::kWord32:
- opcode = kIA32Xchgl;
+ opcode = kAtomicExchangeWord32;
break;
default:
UNREACHABLE();
@@ -1703,6 +1724,11 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
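+  // The value is input 0 because the code generator builds the memory
+  // operand from index 1 onwards; an 8-bit xchg additionally needs a
+  // byte-addressable register (eax..edx), hence UseByteRegister.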
+ if (rep == MachineRepresentation::kWord8) {
+ inputs[input_count++] = g.UseByteRegister(value);
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ }
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
@@ -1711,11 +1737,193 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
- inputs[input_count++] = g.UseUniqueRegister(value);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, input_count, inputs);
}
+void InstructionSelector::VisitAtomicExchange(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[1];
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+ inputs[input_count++] = g.UseFixed(value, edx);
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ }
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+    // DefineSameAsFirst requires an unallocated register, which conflicts
+    // with the byte-register constraint here, so the value and the result
+    // are both pinned to edx instead.
+ outputs[0] = g.DefineAsFixed(node, edx);
+ } else {
+ outputs[0] = g.DefineSameAsFirst(node);
+ }
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
+void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[1];
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseFixed(old_value, eax);
+ if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+ inputs[input_count++] = g.UseByteRegister(new_value);
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ }
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ outputs[0] = g.DefineAsFixed(node, eax);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
+void InstructionSelector::VisitAtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[1];
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+ inputs[input_count++] = g.UseByteRegister(value);
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ }
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ outputs[0] = g.DefineAsFixed(node, eax);
+ InstructionOperand temp[1];
+ temp[0] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 1, temp);
+}
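+// The fixed eax output mirrors cmpxchg's contract (the old value is returned
+// in eax); the temp register presumably holds the freshly computed value
+// inside the retry loop emitted by the code generator.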
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitAtomic##op(Node* node) { \
+ VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
+ kAtomic##op##Int16, kAtomic##op##Uint16, \
+ kAtomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitI32x4Splat(Node* node) {
+ VisitRO(this, node, kIA32I32x4Splat);
+}
+
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
+ IA32OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kIA32I32x4ExtractLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+}
+
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
+ IA32OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kIA32I32x4ReplaceLane, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.Use(node->InputAt(1)));
+}
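+// ReplaceLane updates the vector in place (pinsrd), so the output is
+// constrained to reuse the first input's register, whereas ExtractLane
+// defines a fresh general-purpose register for the scalar result.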
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 360069c5d5..d4e0449ad9 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -89,6 +89,41 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(AtomicStoreWord8) \
V(AtomicStoreWord16) \
V(AtomicStoreWord32) \
+ V(AtomicExchangeInt8) \
+ V(AtomicExchangeUint8) \
+ V(AtomicExchangeInt16) \
+ V(AtomicExchangeUint16) \
+ V(AtomicExchangeWord32) \
+ V(AtomicCompareExchangeInt8) \
+ V(AtomicCompareExchangeUint8) \
+ V(AtomicCompareExchangeInt16) \
+ V(AtomicCompareExchangeUint16) \
+ V(AtomicCompareExchangeWord32) \
+ V(AtomicAddInt8) \
+ V(AtomicAddUint8) \
+ V(AtomicAddInt16) \
+ V(AtomicAddUint16) \
+ V(AtomicAddWord32) \
+ V(AtomicSubInt8) \
+ V(AtomicSubUint8) \
+ V(AtomicSubInt16) \
+ V(AtomicSubUint16) \
+ V(AtomicSubWord32) \
+ V(AtomicAndInt8) \
+ V(AtomicAndUint8) \
+ V(AtomicAndInt16) \
+ V(AtomicAndUint16) \
+ V(AtomicAndWord32) \
+ V(AtomicOrInt8) \
+ V(AtomicOrUint8) \
+ V(AtomicOrInt16) \
+ V(AtomicOrUint16) \
+ V(AtomicOrWord32) \
+ V(AtomicXorInt8) \
+ V(AtomicXorUint8) \
+ V(AtomicXorInt16) \
+ V(AtomicXorUint16) \
+ V(AtomicXorWord32) \
V(Ieee754Float64Acos) \
V(Ieee754Float64Acosh) \
V(Ieee754Float64Asin) \
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index 8ba287b1bc..cb3c2d66c6 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -326,6 +326,43 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kAtomicStoreWord32:
return kHasSideEffect;
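+    // Atomic read-modify-write opcodes are modeled conservatively as
+    // side-effecting so the scheduler never reorders them relative to other
+    // memory operations.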
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8:
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16:
+ case kAtomicExchangeWord32:
+ case kAtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeWord32:
+ case kAtomicAddInt8:
+ case kAtomicAddUint8:
+ case kAtomicAddInt16:
+ case kAtomicAddUint16:
+ case kAtomicAddWord32:
+ case kAtomicSubInt8:
+ case kAtomicSubUint8:
+ case kAtomicSubInt16:
+ case kAtomicSubUint16:
+ case kAtomicSubWord32:
+ case kAtomicAndInt8:
+ case kAtomicAndUint8:
+ case kAtomicAndInt16:
+ case kAtomicAndUint16:
+ case kAtomicAndWord32:
+ case kAtomicOrInt8:
+ case kAtomicOrUint8:
+ case kAtomicOrInt16:
+ case kAtomicOrUint16:
+ case kAtomicOrWord32:
+ case kAtomicXorInt8:
+ case kAtomicXorUint8:
+ case kAtomicXorInt16:
+ case kAtomicXorUint16:
+ case kAtomicXorWord32:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
TARGET_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 57b6028a1b..a9b935d5b6 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -6,6 +6,7 @@
#include <limits>
+#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/instruction-selector-impl.h"
@@ -453,7 +454,8 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
return g->UseImmediate(input);
}
- case IrOpcode::kArgumentsObjectState:
+ case IrOpcode::kArgumentsElementsState:
+ case IrOpcode::kArgumentsLengthState:
case IrOpcode::kObjectState:
case IrOpcode::kTypedObjectState:
UNREACHABLE();
@@ -509,8 +511,12 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
}
switch (input->opcode()) {
- case IrOpcode::kArgumentsObjectState: {
- values->PushArguments();
+ case IrOpcode::kArgumentsElementsState: {
+ values->PushArgumentsElements(IsRestOf(input->op()));
+ return 0;
+ }
+ case IrOpcode::kArgumentsLengthState: {
+ values->PushArgumentsLength(IsRestOf(input->op()));
return 0;
}
case IrOpcode::kObjectState: {
@@ -676,12 +682,13 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->output_nodes.push_back(call);
} else {
buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
- for (auto use : call->uses()) {
- if (use->opcode() != IrOpcode::kProjection) continue;
- size_t const index = ProjectionIndexOf(use->op());
+ for (Edge const edge : call->use_edges()) {
+ if (!NodeProperties::IsValueEdge(edge)) continue;
+ DCHECK(edge.from()->opcode() == IrOpcode::kProjection);
+ size_t const index = ProjectionIndexOf(edge.from()->op());
DCHECK_LT(index, buffer->output_nodes.size());
DCHECK(!buffer->output_nodes[index]);
- buffer->output_nodes[index] = use;
+ buffer->output_nodes[index] = edge.from();
}
}
@@ -982,7 +989,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
- return VisitThrow(input->InputAt(0));
+ return VisitThrow(input);
case BasicBlock::kNone: {
// Exit block doesn't have control.
DCHECK_NULL(input);
@@ -1119,6 +1126,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitWord32ReverseBits(node);
case IrOpcode::kWord32ReverseBytes:
return MarkAsWord32(node), VisitWord32ReverseBytes(node);
+ case IrOpcode::kInt32AbsWithOverflow:
+ return MarkAsWord32(node), VisitInt32AbsWithOverflow(node);
case IrOpcode::kWord32Popcnt:
return MarkAsWord32(node), VisitWord32Popcnt(node);
case IrOpcode::kWord64Popcnt:
@@ -1145,6 +1154,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitWord64ReverseBits(node);
case IrOpcode::kWord64ReverseBytes:
return MarkAsWord64(node), VisitWord64ReverseBytes(node);
+ case IrOpcode::kInt64AbsWithOverflow:
+ return MarkAsWord64(node), VisitInt64AbsWithOverflow(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
@@ -1223,6 +1234,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kChangeFloat64ToUint64:
+ return MarkAsWord64(node), VisitChangeFloat64ToUint64(node);
case IrOpcode::kFloat64SilenceNaN:
MarkAsFloat64(node);
if (CanProduceSignalingNaN(node->InputAt(0))) {
@@ -1449,6 +1462,20 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kAtomicStore:
return VisitAtomicStore(node);
+#define ATOMIC_CASE(name) \
+ case IrOpcode::kAtomic##name: { \
+ MachineType type = AtomicOpRepresentationOf(node->op()); \
+ MarkAsRepresentation(type.representation(), node); \
+ return VisitAtomic##name(node); \
+ }
+ ATOMIC_CASE(Exchange)
+ ATOMIC_CASE(CompareExchange)
+ ATOMIC_CASE(Add)
+ ATOMIC_CASE(Sub)
+ ATOMIC_CASE(And)
+ ATOMIC_CASE(Or)
+ ATOMIC_CASE(Xor)
+#undef ATOMIC_CASE
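+    // The ATOMIC_CASE expansion above tags each node with its memory
+    // representation before dispatching to the architecture-specific
+    // visitor, so the register allocator sees a correctly typed virtual
+    // register.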
case IrOpcode::kProtectedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -1457,182 +1484,268 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
- case IrOpcode::kCreateFloat32x4:
- return MarkAsSimd128(node), VisitCreateFloat32x4(node);
- case IrOpcode::kFloat32x4ExtractLane:
- return MarkAsFloat32(node), VisitFloat32x4ExtractLane(node);
- case IrOpcode::kFloat32x4ReplaceLane:
- return MarkAsSimd128(node), VisitFloat32x4ReplaceLane(node);
- case IrOpcode::kFloat32x4FromInt32x4:
- return MarkAsSimd128(node), VisitFloat32x4FromInt32x4(node);
- case IrOpcode::kFloat32x4FromUint32x4:
- return MarkAsSimd128(node), VisitFloat32x4FromUint32x4(node);
- case IrOpcode::kFloat32x4Abs:
- return MarkAsSimd128(node), VisitFloat32x4Abs(node);
- case IrOpcode::kFloat32x4Neg:
- return MarkAsSimd128(node), VisitFloat32x4Neg(node);
- case IrOpcode::kFloat32x4Add:
- return MarkAsSimd128(node), VisitFloat32x4Add(node);
- case IrOpcode::kFloat32x4Sub:
- return MarkAsSimd128(node), VisitFloat32x4Sub(node);
- case IrOpcode::kFloat32x4Equal:
- return MarkAsSimd1x4(node), VisitFloat32x4Equal(node);
- case IrOpcode::kFloat32x4NotEqual:
- return MarkAsSimd1x4(node), VisitFloat32x4NotEqual(node);
- case IrOpcode::kCreateInt32x4:
- return MarkAsSimd128(node), VisitCreateInt32x4(node);
- case IrOpcode::kInt32x4ExtractLane:
- return MarkAsWord32(node), VisitInt32x4ExtractLane(node);
- case IrOpcode::kInt32x4ReplaceLane:
- return MarkAsSimd128(node), VisitInt32x4ReplaceLane(node);
- case IrOpcode::kInt32x4FromFloat32x4:
- return MarkAsSimd128(node), VisitInt32x4FromFloat32x4(node);
- case IrOpcode::kUint32x4FromFloat32x4:
- return MarkAsSimd128(node), VisitUint32x4FromFloat32x4(node);
- case IrOpcode::kInt32x4Neg:
- return MarkAsSimd128(node), VisitInt32x4Neg(node);
- case IrOpcode::kInt32x4ShiftLeftByScalar:
- return MarkAsSimd128(node), VisitInt32x4ShiftLeftByScalar(node);
- case IrOpcode::kInt32x4ShiftRightByScalar:
- return MarkAsSimd128(node), VisitInt32x4ShiftRightByScalar(node);
- case IrOpcode::kInt32x4Add:
- return MarkAsSimd128(node), VisitInt32x4Add(node);
- case IrOpcode::kInt32x4Sub:
- return MarkAsSimd128(node), VisitInt32x4Sub(node);
- case IrOpcode::kInt32x4Mul:
- return MarkAsSimd128(node), VisitInt32x4Mul(node);
- case IrOpcode::kInt32x4Min:
- return MarkAsSimd128(node), VisitInt32x4Min(node);
- case IrOpcode::kInt32x4Max:
- return MarkAsSimd128(node), VisitInt32x4Max(node);
- case IrOpcode::kInt32x4Equal:
- return MarkAsSimd1x4(node), VisitInt32x4Equal(node);
- case IrOpcode::kInt32x4NotEqual:
- return MarkAsSimd1x4(node), VisitInt32x4NotEqual(node);
- case IrOpcode::kInt32x4GreaterThan:
- return MarkAsSimd1x4(node), VisitInt32x4GreaterThan(node);
- case IrOpcode::kInt32x4GreaterThanOrEqual:
- return MarkAsSimd1x4(node), VisitInt32x4GreaterThanOrEqual(node);
- case IrOpcode::kUint32x4ShiftRightByScalar:
- return MarkAsSimd128(node), VisitUint32x4ShiftRightByScalar(node);
- case IrOpcode::kUint32x4Min:
- return MarkAsSimd128(node), VisitUint32x4Min(node);
- case IrOpcode::kUint32x4Max:
- return MarkAsSimd128(node), VisitUint32x4Max(node);
- case IrOpcode::kUint32x4GreaterThan:
- return MarkAsSimd1x4(node), VisitUint32x4GreaterThan(node);
- case IrOpcode::kUint32x4GreaterThanOrEqual:
- return MarkAsSimd1x4(node), VisitUint32x4GreaterThanOrEqual(node);
- case IrOpcode::kCreateInt16x8:
- return MarkAsSimd128(node), VisitCreateInt16x8(node);
- case IrOpcode::kInt16x8ExtractLane:
- return MarkAsWord32(node), VisitInt16x8ExtractLane(node);
- case IrOpcode::kInt16x8ReplaceLane:
- return MarkAsSimd128(node), VisitInt16x8ReplaceLane(node);
- case IrOpcode::kInt16x8Neg:
- return MarkAsSimd128(node), VisitInt16x8Neg(node);
- case IrOpcode::kInt16x8ShiftLeftByScalar:
- return MarkAsSimd128(node), VisitInt16x8ShiftLeftByScalar(node);
- case IrOpcode::kInt16x8ShiftRightByScalar:
- return MarkAsSimd128(node), VisitInt16x8ShiftRightByScalar(node);
- case IrOpcode::kInt16x8Add:
- return MarkAsSimd128(node), VisitInt16x8Add(node);
- case IrOpcode::kInt16x8AddSaturate:
- return MarkAsSimd128(node), VisitInt16x8AddSaturate(node);
- case IrOpcode::kInt16x8Sub:
- return MarkAsSimd128(node), VisitInt16x8Sub(node);
- case IrOpcode::kInt16x8SubSaturate:
- return MarkAsSimd128(node), VisitInt16x8SubSaturate(node);
- case IrOpcode::kInt16x8Mul:
- return MarkAsSimd128(node), VisitInt16x8Mul(node);
- case IrOpcode::kInt16x8Min:
- return MarkAsSimd128(node), VisitInt16x8Min(node);
- case IrOpcode::kInt16x8Max:
- return MarkAsSimd128(node), VisitInt16x8Max(node);
- case IrOpcode::kInt16x8Equal:
- return MarkAsSimd1x8(node), VisitInt16x8Equal(node);
- case IrOpcode::kInt16x8NotEqual:
- return MarkAsSimd1x8(node), VisitInt16x8NotEqual(node);
- case IrOpcode::kInt16x8GreaterThan:
- return MarkAsSimd1x8(node), VisitInt16x8GreaterThan(node);
- case IrOpcode::kInt16x8GreaterThanOrEqual:
- return MarkAsSimd1x8(node), VisitInt16x8GreaterThanOrEqual(node);
- case IrOpcode::kUint16x8ShiftRightByScalar:
- return MarkAsSimd128(node), VisitUint16x8ShiftRightByScalar(node);
- case IrOpcode::kUint16x8AddSaturate:
- return MarkAsSimd128(node), VisitUint16x8AddSaturate(node);
- case IrOpcode::kUint16x8SubSaturate:
- return MarkAsSimd128(node), VisitUint16x8SubSaturate(node);
- case IrOpcode::kUint16x8Min:
- return MarkAsSimd128(node), VisitUint16x8Min(node);
- case IrOpcode::kUint16x8Max:
- return MarkAsSimd128(node), VisitUint16x8Max(node);
- case IrOpcode::kUint16x8GreaterThan:
- return MarkAsSimd1x8(node), VisitUint16x8GreaterThan(node);
- case IrOpcode::kUint16x8GreaterThanOrEqual:
- return MarkAsSimd1x8(node), VisitUint16x8GreaterThanOrEqual(node);
- case IrOpcode::kCreateInt8x16:
- return MarkAsSimd128(node), VisitCreateInt8x16(node);
- case IrOpcode::kInt8x16ExtractLane:
- return MarkAsWord32(node), VisitInt8x16ExtractLane(node);
- case IrOpcode::kInt8x16ReplaceLane:
- return MarkAsSimd128(node), VisitInt8x16ReplaceLane(node);
- case IrOpcode::kInt8x16Neg:
- return MarkAsSimd128(node), VisitInt8x16Neg(node);
- case IrOpcode::kInt8x16ShiftLeftByScalar:
- return MarkAsSimd128(node), VisitInt8x16ShiftLeftByScalar(node);
- case IrOpcode::kInt8x16ShiftRightByScalar:
- return MarkAsSimd128(node), VisitInt8x16ShiftRightByScalar(node);
- case IrOpcode::kInt8x16Add:
- return MarkAsSimd128(node), VisitInt8x16Add(node);
- case IrOpcode::kInt8x16AddSaturate:
- return MarkAsSimd128(node), VisitInt8x16AddSaturate(node);
- case IrOpcode::kInt8x16Sub:
- return MarkAsSimd128(node), VisitInt8x16Sub(node);
- case IrOpcode::kInt8x16SubSaturate:
- return MarkAsSimd128(node), VisitInt8x16SubSaturate(node);
- case IrOpcode::kInt8x16Mul:
- return MarkAsSimd128(node), VisitInt8x16Mul(node);
- case IrOpcode::kInt8x16Min:
- return MarkAsSimd128(node), VisitInt8x16Min(node);
- case IrOpcode::kInt8x16Max:
- return MarkAsSimd128(node), VisitInt8x16Max(node);
- case IrOpcode::kInt8x16Equal:
- return MarkAsSimd1x16(node), VisitInt8x16Equal(node);
- case IrOpcode::kInt8x16NotEqual:
- return MarkAsSimd1x16(node), VisitInt8x16NotEqual(node);
- case IrOpcode::kInt8x16GreaterThan:
- return MarkAsSimd1x16(node), VisitInt8x16GreaterThan(node);
- case IrOpcode::kInt8x16GreaterThanOrEqual:
- return MarkAsSimd1x16(node), VisitInt8x16GreaterThanOrEqual(node);
- case IrOpcode::kUint8x16ShiftRightByScalar:
- return MarkAsSimd128(node), VisitUint8x16ShiftRightByScalar(node);
- case IrOpcode::kUint8x16AddSaturate:
- return MarkAsSimd128(node), VisitUint8x16AddSaturate(node);
- case IrOpcode::kUint8x16SubSaturate:
- return MarkAsSimd128(node), VisitUint8x16SubSaturate(node);
- case IrOpcode::kUint8x16Min:
- return MarkAsSimd128(node), VisitUint8x16Min(node);
- case IrOpcode::kUint8x16Max:
- return MarkAsSimd128(node), VisitUint8x16Max(node);
- case IrOpcode::kUint8x16GreaterThan:
- return MarkAsSimd1x16(node), VisitUint8x16GreaterThan(node);
- case IrOpcode::kUint8x16GreaterThanOrEqual:
- return MarkAsSimd1x16(node), VisitUint16x8GreaterThanOrEqual(node);
- case IrOpcode::kSimd128And:
- return MarkAsSimd128(node), VisitSimd128And(node);
- case IrOpcode::kSimd128Or:
- return MarkAsSimd128(node), VisitSimd128Or(node);
- case IrOpcode::kSimd128Xor:
- return MarkAsSimd128(node), VisitSimd128Xor(node);
- case IrOpcode::kSimd128Not:
- return MarkAsSimd128(node), VisitSimd128Not(node);
- case IrOpcode::kSimd32x4Select:
- return MarkAsSimd128(node), VisitSimd32x4Select(node);
- case IrOpcode::kSimd16x8Select:
- return MarkAsSimd128(node), VisitSimd16x8Select(node);
- case IrOpcode::kSimd8x16Select:
- return MarkAsSimd128(node), VisitSimd8x16Select(node);
+ case IrOpcode::kF32x4Splat:
+ return MarkAsSimd128(node), VisitF32x4Splat(node);
+ case IrOpcode::kF32x4ExtractLane:
+ return MarkAsFloat32(node), VisitF32x4ExtractLane(node);
+ case IrOpcode::kF32x4ReplaceLane:
+ return MarkAsSimd128(node), VisitF32x4ReplaceLane(node);
+ case IrOpcode::kF32x4SConvertI32x4:
+ return MarkAsSimd128(node), VisitF32x4SConvertI32x4(node);
+ case IrOpcode::kF32x4UConvertI32x4:
+ return MarkAsSimd128(node), VisitF32x4UConvertI32x4(node);
+ case IrOpcode::kF32x4Abs:
+ return MarkAsSimd128(node), VisitF32x4Abs(node);
+ case IrOpcode::kF32x4Neg:
+ return MarkAsSimd128(node), VisitF32x4Neg(node);
+ case IrOpcode::kF32x4RecipApprox:
+ return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
+ case IrOpcode::kF32x4RecipRefine:
+ return MarkAsSimd128(node), VisitF32x4RecipRefine(node);
+ case IrOpcode::kF32x4Add:
+ return MarkAsSimd128(node), VisitF32x4Add(node);
+ case IrOpcode::kF32x4Sub:
+ return MarkAsSimd128(node), VisitF32x4Sub(node);
+ case IrOpcode::kF32x4Mul:
+ return MarkAsSimd128(node), VisitF32x4Mul(node);
+ case IrOpcode::kF32x4Min:
+ return MarkAsSimd128(node), VisitF32x4Min(node);
+ case IrOpcode::kF32x4Max:
+ return MarkAsSimd128(node), VisitF32x4Max(node);
+ case IrOpcode::kF32x4RecipSqrtApprox:
+ return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
+ case IrOpcode::kF32x4RecipSqrtRefine:
+ return MarkAsSimd128(node), VisitF32x4RecipSqrtRefine(node);
+ case IrOpcode::kF32x4Eq:
+ return MarkAsSimd1x4(node), VisitF32x4Eq(node);
+ case IrOpcode::kF32x4Ne:
+ return MarkAsSimd1x4(node), VisitF32x4Ne(node);
+ case IrOpcode::kF32x4Lt:
+ return MarkAsSimd1x4(node), VisitF32x4Lt(node);
+ case IrOpcode::kF32x4Le:
+ return MarkAsSimd1x4(node), VisitF32x4Le(node);
+ case IrOpcode::kI32x4Splat:
+ return MarkAsSimd128(node), VisitI32x4Splat(node);
+ case IrOpcode::kI32x4ExtractLane:
+ return MarkAsWord32(node), VisitI32x4ExtractLane(node);
+ case IrOpcode::kI32x4ReplaceLane:
+ return MarkAsSimd128(node), VisitI32x4ReplaceLane(node);
+ case IrOpcode::kI32x4SConvertF32x4:
+ return MarkAsSimd128(node), VisitI32x4SConvertF32x4(node);
+ case IrOpcode::kI32x4SConvertI16x8Low:
+ return MarkAsSimd128(node), VisitI32x4SConvertI16x8Low(node);
+ case IrOpcode::kI32x4SConvertI16x8High:
+ return MarkAsSimd128(node), VisitI32x4SConvertI16x8High(node);
+ case IrOpcode::kI32x4Neg:
+ return MarkAsSimd128(node), VisitI32x4Neg(node);
+ case IrOpcode::kI32x4Shl:
+ return MarkAsSimd128(node), VisitI32x4Shl(node);
+ case IrOpcode::kI32x4ShrS:
+ return MarkAsSimd128(node), VisitI32x4ShrS(node);
+ case IrOpcode::kI32x4Add:
+ return MarkAsSimd128(node), VisitI32x4Add(node);
+ case IrOpcode::kI32x4Sub:
+ return MarkAsSimd128(node), VisitI32x4Sub(node);
+ case IrOpcode::kI32x4Mul:
+ return MarkAsSimd128(node), VisitI32x4Mul(node);
+ case IrOpcode::kI32x4MinS:
+ return MarkAsSimd128(node), VisitI32x4MinS(node);
+ case IrOpcode::kI32x4MaxS:
+ return MarkAsSimd128(node), VisitI32x4MaxS(node);
+ case IrOpcode::kI32x4Eq:
+ return MarkAsSimd1x4(node), VisitI32x4Eq(node);
+ case IrOpcode::kI32x4Ne:
+ return MarkAsSimd1x4(node), VisitI32x4Ne(node);
+ case IrOpcode::kI32x4LtS:
+ return MarkAsSimd1x4(node), VisitI32x4LtS(node);
+ case IrOpcode::kI32x4LeS:
+ return MarkAsSimd1x4(node), VisitI32x4LeS(node);
+ case IrOpcode::kI32x4UConvertF32x4:
+ return MarkAsSimd128(node), VisitI32x4UConvertF32x4(node);
+ case IrOpcode::kI32x4UConvertI16x8Low:
+ return MarkAsSimd128(node), VisitI32x4UConvertI16x8Low(node);
+ case IrOpcode::kI32x4UConvertI16x8High:
+ return MarkAsSimd128(node), VisitI32x4UConvertI16x8High(node);
+ case IrOpcode::kI32x4ShrU:
+ return MarkAsSimd128(node), VisitI32x4ShrU(node);
+ case IrOpcode::kI32x4MinU:
+ return MarkAsSimd128(node), VisitI32x4MinU(node);
+ case IrOpcode::kI32x4MaxU:
+ return MarkAsSimd128(node), VisitI32x4MaxU(node);
+ case IrOpcode::kI32x4LtU:
+ return MarkAsSimd1x4(node), VisitI32x4LtU(node);
+ case IrOpcode::kI32x4LeU:
+ return MarkAsSimd1x4(node), VisitI32x4LeU(node);
+ case IrOpcode::kI16x8Splat:
+ return MarkAsSimd128(node), VisitI16x8Splat(node);
+ case IrOpcode::kI16x8ExtractLane:
+ return MarkAsWord32(node), VisitI16x8ExtractLane(node);
+ case IrOpcode::kI16x8ReplaceLane:
+ return MarkAsSimd128(node), VisitI16x8ReplaceLane(node);
+ case IrOpcode::kI16x8SConvertI8x16Low:
+ return MarkAsSimd128(node), VisitI16x8SConvertI8x16Low(node);
+ case IrOpcode::kI16x8SConvertI8x16High:
+ return MarkAsSimd128(node), VisitI16x8SConvertI8x16High(node);
+ case IrOpcode::kI16x8Neg:
+ return MarkAsSimd128(node), VisitI16x8Neg(node);
+ case IrOpcode::kI16x8Shl:
+ return MarkAsSimd128(node), VisitI16x8Shl(node);
+ case IrOpcode::kI16x8ShrS:
+ return MarkAsSimd128(node), VisitI16x8ShrS(node);
+ case IrOpcode::kI16x8SConvertI32x4:
+ return MarkAsSimd128(node), VisitI16x8SConvertI32x4(node);
+ case IrOpcode::kI16x8Add:
+ return MarkAsSimd128(node), VisitI16x8Add(node);
+ case IrOpcode::kI16x8AddSaturateS:
+ return MarkAsSimd128(node), VisitI16x8AddSaturateS(node);
+ case IrOpcode::kI16x8Sub:
+ return MarkAsSimd128(node), VisitI16x8Sub(node);
+ case IrOpcode::kI16x8SubSaturateS:
+ return MarkAsSimd128(node), VisitI16x8SubSaturateS(node);
+ case IrOpcode::kI16x8Mul:
+ return MarkAsSimd128(node), VisitI16x8Mul(node);
+ case IrOpcode::kI16x8MinS:
+ return MarkAsSimd128(node), VisitI16x8MinS(node);
+ case IrOpcode::kI16x8MaxS:
+ return MarkAsSimd128(node), VisitI16x8MaxS(node);
+ case IrOpcode::kI16x8Eq:
+ return MarkAsSimd1x8(node), VisitI16x8Eq(node);
+ case IrOpcode::kI16x8Ne:
+ return MarkAsSimd1x8(node), VisitI16x8Ne(node);
+ case IrOpcode::kI16x8LtS:
+ return MarkAsSimd1x8(node), VisitI16x8LtS(node);
+ case IrOpcode::kI16x8LeS:
+ return MarkAsSimd1x8(node), VisitI16x8LeS(node);
+ case IrOpcode::kI16x8UConvertI8x16Low:
+ return MarkAsSimd128(node), VisitI16x8UConvertI8x16Low(node);
+ case IrOpcode::kI16x8UConvertI8x16High:
+ return MarkAsSimd128(node), VisitI16x8UConvertI8x16High(node);
+ case IrOpcode::kI16x8ShrU:
+ return MarkAsSimd128(node), VisitI16x8ShrU(node);
+ case IrOpcode::kI16x8UConvertI32x4:
+ return MarkAsSimd128(node), VisitI16x8UConvertI32x4(node);
+ case IrOpcode::kI16x8AddSaturateU:
+ return MarkAsSimd128(node), VisitI16x8AddSaturateU(node);
+ case IrOpcode::kI16x8SubSaturateU:
+ return MarkAsSimd128(node), VisitI16x8SubSaturateU(node);
+ case IrOpcode::kI16x8MinU:
+ return MarkAsSimd128(node), VisitI16x8MinU(node);
+ case IrOpcode::kI16x8MaxU:
+ return MarkAsSimd128(node), VisitI16x8MaxU(node);
+ case IrOpcode::kI16x8LtU:
+ return MarkAsSimd1x8(node), VisitI16x8LtU(node);
+ case IrOpcode::kI16x8LeU:
+ return MarkAsSimd1x8(node), VisitI16x8LeU(node);
+ case IrOpcode::kI8x16Splat:
+ return MarkAsSimd128(node), VisitI8x16Splat(node);
+ case IrOpcode::kI8x16ExtractLane:
+ return MarkAsWord32(node), VisitI8x16ExtractLane(node);
+ case IrOpcode::kI8x16ReplaceLane:
+ return MarkAsSimd128(node), VisitI8x16ReplaceLane(node);
+ case IrOpcode::kI8x16Neg:
+ return MarkAsSimd128(node), VisitI8x16Neg(node);
+ case IrOpcode::kI8x16Shl:
+ return MarkAsSimd128(node), VisitI8x16Shl(node);
+ case IrOpcode::kI8x16ShrS:
+ return MarkAsSimd128(node), VisitI8x16ShrS(node);
+ case IrOpcode::kI8x16SConvertI16x8:
+ return MarkAsSimd128(node), VisitI8x16SConvertI16x8(node);
+ case IrOpcode::kI8x16Add:
+ return MarkAsSimd128(node), VisitI8x16Add(node);
+ case IrOpcode::kI8x16AddSaturateS:
+ return MarkAsSimd128(node), VisitI8x16AddSaturateS(node);
+ case IrOpcode::kI8x16Sub:
+ return MarkAsSimd128(node), VisitI8x16Sub(node);
+ case IrOpcode::kI8x16SubSaturateS:
+ return MarkAsSimd128(node), VisitI8x16SubSaturateS(node);
+ case IrOpcode::kI8x16Mul:
+ return MarkAsSimd128(node), VisitI8x16Mul(node);
+ case IrOpcode::kI8x16MinS:
+ return MarkAsSimd128(node), VisitI8x16MinS(node);
+ case IrOpcode::kI8x16MaxS:
+ return MarkAsSimd128(node), VisitI8x16MaxS(node);
+ case IrOpcode::kI8x16Eq:
+ return MarkAsSimd1x16(node), VisitI8x16Eq(node);
+ case IrOpcode::kI8x16Ne:
+ return MarkAsSimd1x16(node), VisitI8x16Ne(node);
+ case IrOpcode::kI8x16LtS:
+ return MarkAsSimd1x16(node), VisitI8x16LtS(node);
+ case IrOpcode::kI8x16LeS:
+ return MarkAsSimd1x16(node), VisitI8x16LeS(node);
+ case IrOpcode::kI8x16ShrU:
+ return MarkAsSimd128(node), VisitI8x16ShrU(node);
+ case IrOpcode::kI8x16UConvertI16x8:
+ return MarkAsSimd128(node), VisitI8x16UConvertI16x8(node);
+ case IrOpcode::kI8x16AddSaturateU:
+ return MarkAsSimd128(node), VisitI8x16AddSaturateU(node);
+ case IrOpcode::kI8x16SubSaturateU:
+ return MarkAsSimd128(node), VisitI8x16SubSaturateU(node);
+ case IrOpcode::kI8x16MinU:
+ return MarkAsSimd128(node), VisitI8x16MinU(node);
+ case IrOpcode::kI8x16MaxU:
+ return MarkAsSimd128(node), VisitI8x16MaxU(node);
+ case IrOpcode::kI8x16LtU:
+ return MarkAsSimd1x16(node), VisitI8x16LtU(node);
+ case IrOpcode::kI8x16LeU:
+ return MarkAsSimd1x16(node), VisitI8x16LeU(node);
+ case IrOpcode::kS128Zero:
+ return MarkAsSimd128(node), VisitS128Zero(node);
+ case IrOpcode::kS128And:
+ return MarkAsSimd128(node), VisitS128And(node);
+ case IrOpcode::kS128Or:
+ return MarkAsSimd128(node), VisitS128Or(node);
+ case IrOpcode::kS128Xor:
+ return MarkAsSimd128(node), VisitS128Xor(node);
+ case IrOpcode::kS128Not:
+ return MarkAsSimd128(node), VisitS128Not(node);
+ case IrOpcode::kS32x4Select:
+ return MarkAsSimd128(node), VisitS32x4Select(node);
+ case IrOpcode::kS16x8Select:
+ return MarkAsSimd128(node), VisitS16x8Select(node);
+ case IrOpcode::kS8x16Select:
+ return MarkAsSimd128(node), VisitS8x16Select(node);
+ case IrOpcode::kS1x4Zero:
+ return MarkAsSimd1x4(node), VisitS1x4Zero(node);
+ case IrOpcode::kS1x4And:
+ return MarkAsSimd1x4(node), VisitS1x4And(node);
+ case IrOpcode::kS1x4Or:
+ return MarkAsSimd1x4(node), VisitS1x4Or(node);
+ case IrOpcode::kS1x4Xor:
+ return MarkAsSimd1x4(node), VisitS1x4Xor(node);
+ case IrOpcode::kS1x4Not:
+ return MarkAsSimd1x4(node), VisitS1x4Not(node);
+ case IrOpcode::kS1x4AnyTrue:
+ return MarkAsWord32(node), VisitS1x4AnyTrue(node);
+ case IrOpcode::kS1x4AllTrue:
+ return MarkAsWord32(node), VisitS1x4AllTrue(node);
+ case IrOpcode::kS1x8Zero:
+ return MarkAsSimd1x8(node), VisitS1x8Zero(node);
+ case IrOpcode::kS1x8And:
+ return MarkAsSimd1x8(node), VisitS1x8And(node);
+ case IrOpcode::kS1x8Or:
+ return MarkAsSimd1x8(node), VisitS1x8Or(node);
+ case IrOpcode::kS1x8Xor:
+ return MarkAsSimd1x8(node), VisitS1x8Xor(node);
+ case IrOpcode::kS1x8Not:
+ return MarkAsSimd1x8(node), VisitS1x8Not(node);
+ case IrOpcode::kS1x8AnyTrue:
+ return MarkAsWord32(node), VisitS1x8AnyTrue(node);
+ case IrOpcode::kS1x8AllTrue:
+ return MarkAsWord32(node), VisitS1x8AllTrue(node);
+ case IrOpcode::kS1x16Zero:
+ return MarkAsSimd1x16(node), VisitS1x16Zero(node);
+ case IrOpcode::kS1x16And:
+ return MarkAsSimd1x16(node), VisitS1x16And(node);
+ case IrOpcode::kS1x16Or:
+ return MarkAsSimd1x16(node), VisitS1x16Or(node);
+ case IrOpcode::kS1x16Xor:
+ return MarkAsSimd1x16(node), VisitS1x16Xor(node);
+ case IrOpcode::kS1x16Not:
+ return MarkAsSimd1x16(node), VisitS1x16Not(node);
+ case IrOpcode::kS1x16AnyTrue:
+ return MarkAsWord32(node), VisitS1x16AnyTrue(node);
+ case IrOpcode::kS1x16AllTrue:
+ return MarkAsWord32(node), VisitS1x16AllTrue(node);
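+    // S1xN values are per-lane boolean masks; AnyTrue/AllTrue reduce such a
+    // mask to a scalar, which is why those nodes are marked as word32 rather
+    // than as simd1xN.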
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
@@ -1888,6 +2001,9 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
UNIMPLEMENTED();
@@ -1958,287 +2074,342 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitCreateInt32x4(Node* node) { UNIMPLEMENTED(); }
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-
-void InstructionSelector::VisitInt32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
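+// The UNIMPLEMENTED stubs in this region keep the build linking on targets
+// that have no SIMD lowering yet; each block is compiled out on the
+// architectures listed in its guard.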
#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitCreateFloat32x4(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
+void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
+void InstructionSelector::VisitF32x4RecipSqrtRefine(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitFloat32x4Abs(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitFloat32x4Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitFloat32x4Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitFloat32x4Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitFloat32x4Equal(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitFloat32x4NotEqual(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitF32x4RecipRefine(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4ShiftLeftByScalar(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
+#endif // V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitInt32x4ShiftRightByScalar(Node* node) {
- UNIMPLEMENTED();
-}
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 &&
+ // !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4LessThan(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4LessThanOrEqual(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt32x4GreaterThan(Node* node) {
+void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt32x4GreaterThanOrEqual(Node* node) {
+void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint32x4ShiftRightByScalar(Node* node) {
+void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint32x4Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4LtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint32x4Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4LeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint32x4GreaterThan(Node* node) {
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint32x4GreaterThanOrEqual(Node* node) {
+void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitCreateInt16x8(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt16x8ExtractLane(Node* node) {
+void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt16x8ReplaceLane(Node* node) {
+void InstructionSelector::VisitI32x4LtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4LeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt16x8Neg(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt16x8ShiftLeftByScalar(Node* node) {
+void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt16x8ShiftRightByScalar(Node* node) {
+void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt16x8Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt16x8AddSaturate(Node* node) {
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt16x8Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt16x8SubSaturate(Node* node) {
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt16x8Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt16x8Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt16x8Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt16x8Equal(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt16x8NotEqual(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt16x8LessThan(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8LtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt16x8LessThanOrEqual(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI16x8LeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt16x8GreaterThan(Node* node) {
+void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt16x8GreaterThanOrEqual(Node* node) {
+void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint16x8ShiftRightByScalar(Node* node) {
+void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint16x8AddSaturate(Node* node) {
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint16x8SubSaturate(Node* node) {
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint16x8Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint16x8Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint16x8GreaterThan(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI16x8LtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint16x8GreaterThanOrEqual(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI16x8LeU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitCreateInt8x16(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16ExtractLane(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16ReplaceLane(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16ShiftLeftByScalar(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16ShiftRightByScalar(Node* node) {
+void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt8x16Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16AddSaturate(Node* node) {
+void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt8x16Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16SubSaturate(Node* node) {
+void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitInt8x16Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16Equal(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16NotEqual(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16LessThan(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16LtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16LessThanOrEqual(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI8x16LeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16GreaterThan(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitInt8x16GreaterThanOrEqual(Node* node) {
+void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint8x16ShiftRightByScalar(Node* node) {
+void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint8x16AddSaturate(Node* node) {
+void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitUint8x16SubSaturate(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint8x16Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint8x16Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16LtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint8x16GreaterThan(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI8x16LeU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitUint8x16GreaterThanOrEqual(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitSimd128And(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitSimd128Or(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x4Zero(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x8Zero(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16Zero(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitS32x4Select(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitS16x8Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS8x16Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x4And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x4Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x4Xor(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitSimd128Xor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x4Not(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitSimd128Not(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitSimd32x4Select(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitSimd16x8Select(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x8And(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitSimd8x16Select(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x8Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x8Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x8Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -2313,6 +2484,8 @@ void InstructionSelector::VisitProjection(Node* node) {
case IrOpcode::kWord32PairShl:
case IrOpcode::kWord32PairShr:
case IrOpcode::kWord32PairSar:
+ case IrOpcode::kInt32AbsWithOverflow:
+ case IrOpcode::kInt64AbsWithOverflow:
if (ProjectionIndexOf(node->op()) == 0u) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
@@ -2547,8 +2720,7 @@ void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason, value);
}
-
-void InstructionSelector::VisitThrow(Node* value) {
+void InstructionSelector::VisitThrow(Node* node) {
OperandGenerator g(this);
Emit(kArchThrowTerminator, g.NoOutput());
}
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index d811aa4741..26cc85a81f 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -365,7 +365,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
Node* value);
void VisitReturn(Node* ret);
- void VisitThrow(Node* value);
+ void VisitThrow(Node* node);
void VisitRetain(Node* node);
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
@@ -389,6 +389,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void MarkPairProjectionsAsWord32(Node* node);
bool IsSourcePositionUsed(Node* node);
+ void VisitAtomicBinaryOperation(Node* node, ArchOpcode int8_op,
+ ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op);
// ===========================================================================
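// Editorial sketch (not part of the patch): a dispatch helper of this shape
// typically picks one ArchOpcode per access size and signedness, e.g.:
//
//   void InstructionSelector::VisitAtomicBinaryOperation(
//       Node* node, ArchOpcode int8_op, ArchOpcode uint8_op,
//       ArchOpcode int16_op, ArchOpcode uint16_op, ArchOpcode word32_op) {
//     MachineType type = ...;  // representation carried by the atomic op
//     ArchOpcode opcode = kArchNop;
//     if (type == MachineType::Int8()) opcode = int8_op;
//     else if (type == MachineType::Uint8()) opcode = uint8_op;
//     else if (type == MachineType::Int16()) opcode = int16_op;
//     else if (type == MachineType::Uint16()) opcode = uint16_op;
//     else if (type == MachineType::Int32()) opcode = word32_op;
//     else UNREACHABLE();
//     // ... emit the instruction with the selected opcode ...
//   }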
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index ee7865dec0..bbcd03d3ec 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1125,7 +1125,8 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant);
class FrameStateDescriptor;
enum class StateValueKind : uint8_t {
- kArguments,
+ kArgumentsElements,
+ kArgumentsLength,
kPlain,
kOptimizedOut,
kNested,
@@ -1135,45 +1136,72 @@ enum class StateValueKind : uint8_t {
class StateValueDescriptor {
public:
StateValueDescriptor()
- : kind_(StateValueKind::kPlain),
- type_(MachineType::AnyTagged()),
- id_(0) {}
+ : kind_(StateValueKind::kPlain), type_(MachineType::AnyTagged()) {}
- static StateValueDescriptor Arguments() {
- return StateValueDescriptor(StateValueKind::kArguments,
- MachineType::AnyTagged(), 0);
+ static StateValueDescriptor ArgumentsElements(bool is_rest) {
+ StateValueDescriptor descr(StateValueKind::kArgumentsElements,
+ MachineType::AnyTagged());
+ descr.is_rest_ = is_rest;
+ return descr;
+ }
+ static StateValueDescriptor ArgumentsLength(bool is_rest) {
+ StateValueDescriptor descr(StateValueKind::kArgumentsLength,
+ MachineType::AnyTagged());
+ descr.is_rest_ = is_rest;
+ return descr;
}
static StateValueDescriptor Plain(MachineType type) {
- return StateValueDescriptor(StateValueKind::kPlain, type, 0);
+ return StateValueDescriptor(StateValueKind::kPlain, type);
}
static StateValueDescriptor OptimizedOut() {
return StateValueDescriptor(StateValueKind::kOptimizedOut,
- MachineType::AnyTagged(), 0);
+ MachineType::AnyTagged());
}
static StateValueDescriptor Recursive(size_t id) {
- return StateValueDescriptor(StateValueKind::kNested,
- MachineType::AnyTagged(), id);
+ StateValueDescriptor descr(StateValueKind::kNested,
+ MachineType::AnyTagged());
+ descr.id_ = id;
+ return descr;
}
static StateValueDescriptor Duplicate(size_t id) {
- return StateValueDescriptor(StateValueKind::kDuplicate,
- MachineType::AnyTagged(), id);
+ StateValueDescriptor descr(StateValueKind::kDuplicate,
+ MachineType::AnyTagged());
+ descr.id_ = id;
+ return descr;
}
- bool IsArguments() const { return kind_ == StateValueKind::kArguments; }
+ bool IsArgumentsElements() const {
+ return kind_ == StateValueKind::kArgumentsElements;
+ }
+ bool IsArgumentsLength() const {
+ return kind_ == StateValueKind::kArgumentsLength;
+ }
bool IsPlain() const { return kind_ == StateValueKind::kPlain; }
bool IsOptimizedOut() const { return kind_ == StateValueKind::kOptimizedOut; }
bool IsNested() const { return kind_ == StateValueKind::kNested; }
bool IsDuplicate() const { return kind_ == StateValueKind::kDuplicate; }
MachineType type() const { return type_; }
- size_t id() const { return id_; }
+ size_t id() const {
+ DCHECK(kind_ == StateValueKind::kDuplicate ||
+ kind_ == StateValueKind::kNested);
+ return id_;
+ }
+ bool is_rest() const {
+ DCHECK(kind_ == StateValueKind::kArgumentsElements ||
+ kind_ == StateValueKind::kArgumentsLength);
+ return is_rest_;
+ }
private:
- StateValueDescriptor(StateValueKind kind, MachineType type, size_t id)
- : kind_(kind), type_(type), id_(id) {}
+ StateValueDescriptor(StateValueKind kind, MachineType type)
+ : kind_(kind), type_(type) {}
StateValueKind kind_;
MachineType type_;
- size_t id_;
+ union {
+ size_t id_;
+ bool is_rest_;
+ };
};
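// Design note (editorial): id_ and is_rest_ can share storage because they
// are mutually exclusive -- id_ is only meaningful for kNested/kDuplicate and
// is_rest_ only for the kArguments* kinds, which the DCHECKed accessors above
// enforce. A minimal standalone analogue of the pattern:
//
//   struct Descr {
//     enum class Kind { kNested, kArgumentsLength } kind;
//     union { size_t id; bool is_rest; };  // discriminated by |kind|
//   };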
class StateValueList {
@@ -1232,7 +1260,12 @@ class StateValueList {
nested_.push_back(nested);
return nested;
}
- void PushArguments() { fields_.push_back(StateValueDescriptor::Arguments()); }
+ void PushArgumentsElements(bool is_rest) {
+ fields_.push_back(StateValueDescriptor::ArgumentsElements(is_rest));
+ }
+ void PushArgumentsLength(bool is_rest) {
+ fields_.push_back(StateValueDescriptor::ArgumentsLength(is_rest));
+ }
void PushDuplicate(size_t id) {
fields_.push_back(StateValueDescriptor::Duplicate(id));
}
@@ -1436,7 +1469,8 @@ std::ostream& operator<<(std::ostream& os,
typedef ZoneDeque<Constant> ConstantDeque;
typedef std::map<int, Constant, std::less<int>,
- zone_allocator<std::pair<const int, Constant> > > ConstantMap;
+ ZoneAllocator<std::pair<const int, Constant> > >
+ ConstantMap;
typedef ZoneDeque<Instruction*> InstructionDeque;
typedef ZoneDeque<ReferenceMap*> ReferenceMapDeque;
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 06c927289e..82c91cc0eb 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -74,6 +74,8 @@ void Int64Lowering::LowerGraph() {
}
}
+namespace {
+
static int GetParameterIndexAfterLowering(
Signature<MachineRepresentation>* signature, int old_index) {
int result = old_index;
@@ -85,6 +87,19 @@ static int GetParameterIndexAfterLowering(
return result;
}
+int GetReturnCountAfterLowering(Signature<MachineRepresentation>* signature) {
+ int result = static_cast<int>(signature->return_count());
+ for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+ if (signature->GetReturn(i) == MachineRepresentation::kWord64) {
+ result++;
+ }
+ }
+ return result;
+}
+
+} // namespace
+
+// static
int Int64Lowering::GetParameterCountAfterLowering(
Signature<MachineRepresentation>* signature) {
// GetParameterIndexAfterLowering(parameter_count) returns the parameter count
@@ -93,15 +108,10 @@ int Int64Lowering::GetParameterCountAfterLowering(
signature, static_cast<int>(signature->parameter_count()));
}
-static int GetReturnCountAfterLowering(
- Signature<MachineRepresentation>* signature) {
- int result = static_cast<int>(signature->return_count());
- for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
- if (signature->GetReturn(i) == MachineRepresentation::kWord64) {
- result++;
- }
- }
- return result;
+// static
+bool Int64Lowering::IsI64AsTwoParameters(MachineOperatorBuilder* machine,
+ MachineRepresentation type) {
+ return machine->Is32() && type == MachineRepresentation::kWord64;
}
void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
@@ -120,14 +130,6 @@ void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
#endif
}
-#if defined(V8_TARGET_LITTLE_ENDIAN)
-const int Int64Lowering::kLowerWordOffset = 0;
-const int Int64Lowering::kHigherWordOffset = 4;
-#elif defined(V8_TARGET_BIG_ENDIAN)
-const int Int64Lowering::kLowerWordOffset = 4;
-const int Int64Lowering::kHigherWordOffset = 0;
-#endif
-
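// Editorial note: the constants removed above encoded the byte offsets of the
// two word32 halves of an i64 spilled to a stack slot -- little-endian: low
// word at +0, high word at +4; big-endian: low word at +4, high word at +0.
// The same layout is now expressed by the shared kInt64LowerHalfMemoryOffset
// and kInt64UpperHalfMemoryOffset constants used below.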
void Int64Lowering::LowerNode(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt64Constant: {
@@ -276,10 +278,13 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kReturn: {
+ int input_count = node->InputCount();
DefaultLowering(node);
- int new_return_count = GetReturnCountAfterLowering(signature());
- if (static_cast<int>(signature()->return_count()) != new_return_count) {
- NodeProperties::ChangeOp(node, common()->Return(new_return_count));
+ if (input_count != node->InputCount()) {
+ int new_return_count = GetReturnCountAfterLowering(signature());
+ if (static_cast<int>(signature()->return_count()) != new_return_count) {
+ NodeProperties::ChangeOp(node, common()->Return(new_return_count));
+ }
}
break;
}
@@ -561,7 +566,8 @@ void Int64Lowering::LowerNode(Node* node) {
StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier)),
stack_slot,
- graph()->NewNode(common()->Int32Constant(kHigherWordOffset)),
+ graph()->NewNode(
+ common()->Int32Constant(kInt64UpperHalfMemoryOffset)),
GetReplacementHigh(input), graph()->start(), graph()->start());
Node* store_low_word = graph()->NewNode(
@@ -569,7 +575,8 @@ void Int64Lowering::LowerNode(Node* node) {
StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier)),
stack_slot,
- graph()->NewNode(common()->Int32Constant(kLowerWordOffset)),
+ graph()->NewNode(
+ common()->Int32Constant(kInt64LowerHalfMemoryOffset)),
GetReplacementLow(input), store_high_word, graph()->start());
Node* load =
@@ -597,13 +604,15 @@ void Int64Lowering::LowerNode(Node* node) {
Node* high_node = graph()->NewNode(
machine()->Load(MachineType::Int32()), stack_slot,
- graph()->NewNode(common()->Int32Constant(kHigherWordOffset)), store,
- graph()->start());
+ graph()->NewNode(
+ common()->Int32Constant(kInt64UpperHalfMemoryOffset)),
+ store, graph()->start());
Node* low_node = graph()->NewNode(
machine()->Load(MachineType::Int32()), stack_slot,
- graph()->NewNode(common()->Int32Constant(kLowerWordOffset)), store,
- graph()->start());
+ graph()->NewNode(
+ common()->Int32Constant(kInt64LowerHalfMemoryOffset)),
+ store, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 811c2b2046..c14dc95d8b 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -27,8 +27,10 @@ class V8_EXPORT_PRIVATE Int64Lowering {
static int GetParameterCountAfterLowering(
Signature<MachineRepresentation>* signature);
- static const int kLowerWordOffset;
- static const int kHigherWordOffset;
+ // Determine whether the given type is i64 and, on the given machine, has
+ // to be passed as two word32 parameters.
+ static bool IsI64AsTwoParameters(MachineOperatorBuilder* machine,
+ MachineRepresentation type);
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
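// Editorial sketch of how a caller might use the new predicate (assumed
// helper, mirroring GetParameterIndexAfterLowering above): every i64 entry
// widens the lowered signature by one extra word32 slot on 32-bit targets.
//
//   int CountLoweredParams(MachineOperatorBuilder* machine,
//                          Signature<MachineRepresentation>* sig) {
//     int count = static_cast<int>(sig->parameter_count());
//     for (size_t i = 0; i < sig->parameter_count(); i++) {
//       if (Int64Lowering::IsI64AsTwoParameters(machine, sig->GetParam(i))) {
//         count++;  // the high word takes an extra parameter slot
//       }
//     }
//     return count;
//   }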
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 087015b6b1..a1c83ce1b6 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -5,7 +5,6 @@
#include "src/compiler/js-builtin-reducer.h"
#include "src/base/bits.h"
-#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
@@ -296,9 +295,8 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
effect = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect);
Node* value = effect = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
+ simplified()->Allocate(Type::OtherObject(), NOT_TENURED),
jsgraph()->Constant(JSArrayIterator::kSize), effect, control);
- NodeProperties::SetType(value, Type::OtherObject());
effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
value, jsgraph()->Constant(map), effect, control);
effect = graph()->NewNode(
@@ -726,6 +724,104 @@ Reduction JSBuiltinReducer::ReduceArrayIteratorNext(Node* node) {
return NoChange();
}
+// ES6 section 22.1.2.2 Array.isArray ( arg )
+Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
+ // We certainly know that undefined is not an array.
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ int count = 0;
+ Node* values[5];
+ Node* effects[5];
+ Node* controls[4];
+
+ // Check if the {value} is a Smi.
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ control =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ // The {value} is a Smi.
+ controls[count] = graph()->NewNode(common()->IfTrue(), control);
+ effects[count] = effect;
+ values[count] = jsgraph()->FalseConstant();
+ count++;
+
+ control = graph()->NewNode(common()->IfFalse(), control);
+
+ // Load the {value}'s instance type.
+ Node* value_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+ Node* value_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ effect, control);
+
+ // Check if the {value} is a JSArray.
+ check = graph()->NewNode(simplified()->NumberEqual(), value_instance_type,
+ jsgraph()->Constant(JS_ARRAY_TYPE));
+ control = graph()->NewNode(common()->Branch(), check, control);
+
+ // The {value} is a JSArray.
+ controls[count] = graph()->NewNode(common()->IfTrue(), control);
+ effects[count] = effect;
+ values[count] = jsgraph()->TrueConstant();
+ count++;
+
+ control = graph()->NewNode(common()->IfFalse(), control);
+
+ // Check if the {value} is a JSProxy.
+ check = graph()->NewNode(simplified()->NumberEqual(), value_instance_type,
+ jsgraph()->Constant(JS_PROXY_TYPE));
+ control =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ // The {value} is neither a JSArray nor a JSProxy.
+ controls[count] = graph()->NewNode(common()->IfFalse(), control);
+ effects[count] = effect;
+ values[count] = jsgraph()->FalseConstant();
+ count++;
+
+ control = graph()->NewNode(common()->IfTrue(), control);
+
+ // Let the %ArrayIsArray runtime function deal with the JSProxy {value}.
+ value = effect = control =
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kArrayIsArray), value,
+ context, frame_state, effect, control);
+ NodeProperties::SetType(value, Type::Boolean());
+
+ // Update potential {IfException} uses of {node} to point to the above
+ // %ArrayIsArray runtime call node instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, control);
+ NodeProperties::ReplaceEffectInput(on_exception, effect);
+ control = graph()->NewNode(common()->IfSuccess(), control);
+ Revisit(on_exception);
+ }
+
+ // The {value} is a JSProxy; merge in the %ArrayIsArray runtime result.
+ controls[count] = control;
+ effects[count] = effect;
+ values[count] = value;
+ count++;
+
+ control = graph()->NewNode(common()->Merge(count), count, controls);
+ effects[count] = control;
+ values[count] = control;
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1, effects);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, values);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
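// Reading aid (editorial): a TurboFan Phi/EffectPhi with N value inputs takes
// the owning Merge as input N, which is why the code above stores the merge
// node at index count and builds the phis with count + 1 inputs:
//
//   merge  = Merge(c0, c1, c2, c3)
//   effect = EffectPhi(e0, e1, e2, e3, merge)
//   value  = Phi(v0, v1, v2, v3, merge)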
+
// ES6 section 22.1.3.17 Array.prototype.pop ( )
Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
Handle<Map> receiver_map;
@@ -909,179 +1005,6 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
return NoChange();
}
-// ES6 section 22.1.3.22 Array.prototype.shift ( )
-Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
- Node* target = NodeProperties::GetValueInput(node, 0);
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // TODO(turbofan): Extend this to also handle fast holey double elements
- // once we got the hole NaN mess sorted out in TurboFan/V8.
- Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
- CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != FAST_HOLEY_DOUBLE_ELEMENTS) {
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->array_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // Load length of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
-
- // Return undefined if {receiver} has no elements.
- Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->UndefinedConstant();
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- // Check if we should take the fast-path.
- Node* check1 =
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
- jsgraph()->Constant(JSArray::kMaxCopyElements));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1;
- {
- Node* elements = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, etrue1, if_true1);
-
- // Load the first element here, which we return below.
- vtrue1 = etrue1 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
-
- // Ensure that we aren't shifting a copy-on-write backing store.
- if (IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
- elements = etrue1 =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, etrue1, if_true1);
- }
-
- // Shift the remaining {elements} by one towards the start.
- Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
- Node* eloop =
- graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
- Node* index = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->OneConstant(),
- jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
-
- {
- Node* check2 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
-
- if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
- etrue1 = eloop;
-
- Node* control = graph()->NewNode(common()->IfTrue(), branch2);
- Node* effect = etrue1;
-
- ElementAccess const access = AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind());
- Node* value = effect =
- graph()->NewNode(simplified()->LoadElement(access), elements,
- index, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreElement(access), elements,
- graph()->NewNode(simplified()->NumberSubtract(), index,
- jsgraph()->OneConstant()),
- value, effect, control);
-
- loop->ReplaceInput(1, control);
- eloop->ReplaceInput(1, effect);
- index->ReplaceInput(1,
- graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant()));
- }
-
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
-
- // Store the new {length} to the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, length, etrue1, if_true1);
-
- // Store a hole to the element we just removed from the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- GetHoleyElementsKind(receiver_map->elements_kind()))),
- elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- // Call the generic C++ implementation.
- const int builtin_index = Builtins::kArrayShift;
- CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor(
- graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
- Builtins::name(builtin_index), node->op()->properties(),
- CallDescriptor::kNeedsFrameState);
- Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs,
- kArgvOnStack, true);
- Address builtin_entry = Builtins::CppEntryOf(builtin_index);
- Node* entry = jsgraph()->ExternalConstant(
- ExternalReference(builtin_entry, isolate()));
- Node* argc =
- jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
- if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(desc), stub_code, receiver, argc,
- target, jsgraph()->UndefinedConstant(), entry,
- argc, context, frame_state, efalse1, if_false1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
-
- // Convert the hole to undefined. Do this last, so that we can optimize
- // conversion operator via some smart strength reduction in many cases.
- if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
- value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
namespace {
bool HasInstanceTypeWitness(Node* receiver, Node* effect,
@@ -1611,14 +1534,15 @@ Reduction JSBuiltinReducer::ReduceNumberIsInteger(Node* node) {
// ES6 section 20.1.2.4 Number.isNaN ( number )
Reduction JSBuiltinReducer::ReduceNumberIsNaN(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Number.isNaN(a:number) -> BooleanNot(NumberEqual(a, a))
- Node* input = r.GetJSCallInput(0);
- Node* check = graph()->NewNode(simplified()->NumberEqual(), input, input);
- Node* value = graph()->NewNode(simplified()->BooleanNot(), check);
+ if (r.InputsMatchZero()) {
+ // Number.isNaN() -> #false
+ Node* value = jsgraph()->FalseConstant();
return Replace(value);
}
- return NoChange();
+ // Number.isNaN(a:number) -> ObjectIsNaN(a)
+ Node* input = r.GetJSCallInput(0);
+ Node* value = graph()->NewNode(simplified()->ObjectIsNaN(), input);
+ return Replace(value);
}
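// Reading aid (editorial): the zero-argument case folds to #false because
// Number.isNaN(undefined) is false, and the general case can now drop the
// previous Type::Number() restriction -- ObjectIsNaN is false for every
// non-number input, which matches the spec semantics exactly.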
// ES6 section 20.1.2.5 Number.isSafeInteger ( number )
@@ -1679,7 +1603,7 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
common()->BeginRegion(RegionObservability::kNotObservable), effect);
Node* value = effect =
- graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ graph()->NewNode(simplified()->Allocate(Type::Any(), NOT_TENURED),
jsgraph()->Constant(size), effect, control);
effect =
graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
@@ -1732,7 +1656,7 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
effect = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect);
Node* value = effect =
- graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ graph()->NewNode(simplified()->Allocate(Type::Any(), NOT_TENURED),
jsgraph()->Constant(instance_size), effect, control);
effect =
graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()), value,
@@ -1904,6 +1828,37 @@ Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
return NoChange();
}
+// ES6 String.prototype.concat(...args)
+// #sec-string.prototype.concat
+Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
+ if (Node* receiver = GetStringWitness(node)) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // String.prototype.concat(lhs:string, rhs:plain-primitive)
+ // -> Call[StringAddStub](lhs, rhs)
+ StringAddFlags flags = r.InputsMatchOne(Type::String())
+ ? STRING_ADD_CHECK_NONE
+ : STRING_ADD_CONVERT_RIGHT;
+ // TODO(turbofan): Massage the FrameState of the {node} here once we
+ // have an artificial builtin frame type, so that it looks like the
+ // exception from StringAdd overflow came from the String.prototype.concat
+ // builtin instead of from the calling function.
+ Callable const callable =
+ CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState,
+ Operator::kNoDeopt | Operator::kNoWrite);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(callable.code()));
+ node->ReplaceInput(1, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ }
+ }
+
+ return NoChange();
+}
+
// ES6 String.prototype.indexOf(searchString [, position])
// #sec-string.prototype.indexof
Reduction JSBuiltinReducer::ReduceStringIndexOf(Node* node) {
@@ -1944,9 +1899,8 @@ Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
effect = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect);
Node* value = effect = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
+ simplified()->Allocate(Type::OtherObject(), NOT_TENURED),
jsgraph()->Constant(JSStringIterator::kSize), effect, control);
- NodeProperties::SetType(value, Type::OtherObject());
effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
value, map, effect, control);
effect = graph()->NewNode(
@@ -2165,12 +2119,12 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceArrayIterator(node, IterationKind::kValues);
case kArrayIteratorNext:
return ReduceArrayIteratorNext(node);
+ case kArrayIsArray:
+ return ReduceArrayIsArray(node);
case kArrayPop:
return ReduceArrayPop(node);
case kArrayPush:
return ReduceArrayPush(node);
- case kArrayShift:
- return ReduceArrayShift(node);
case kDateNow:
return ReduceDateNow(node);
case kDateGetTime:
@@ -2305,6 +2259,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceStringCharAt(node);
case kStringCharCodeAt:
return ReduceStringCharCodeAt(node);
+ case kStringConcat:
+ return ReduceStringConcat(node);
case kStringIndexOf:
return ReduceStringIndexOf(node);
case kStringIterator:
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index a694697070..e792ad3c3a 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -55,9 +55,9 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
IterationKind kind);
Reduction ReduceTypedArrayIteratorNext(Handle<Map> iterator_map, Node* node,
IterationKind kind);
+ Reduction ReduceArrayIsArray(Node* node);
Reduction ReduceArrayPop(Node* node);
Reduction ReduceArrayPush(Node* node);
- Reduction ReduceArrayShift(Node* node);
Reduction ReduceDateNow(Node* node);
Reduction ReduceDateGetTime(Node* node);
Reduction ReduceGlobalIsFinite(Node* node);
@@ -103,6 +103,7 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceObjectCreate(Node* node);
Reduction ReduceStringCharAt(Node* node);
Reduction ReduceStringCharCodeAt(Node* node);
+ Reduction ReduceStringConcat(Node* node);
Reduction ReduceStringFromCharCode(Node* node);
Reduction ReduceStringIndexOf(Node* node);
Reduction ReduceStringIterator(Node* node);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index c0deb915f8..f0febc4d26 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -63,6 +63,21 @@ Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
return Changed(node);
}
+// ES6 section 19.3.1.1 Boolean ( value )
+Reduction JSCallReducer::ReduceBooleanConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+
+ // Replace the {node} with a proper {JSToBoolean} operator.
+ DCHECK_LE(2u, p.arity());
+ Node* value = (p.arity() == 2) ? jsgraph()->UndefinedConstant()
+ : NodeProperties::GetValueInput(node, 2);
+ Node* context = NodeProperties::GetContextInput(node);
+ value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), value,
+ context);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
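// Reading aid (editorial): p.arity() counts the target and receiver inputs,
// so a zero-argument Boolean() call has arity 2 and coerces undefined (giving
// false); otherwise the first real argument, value input 2, feeds JSToBoolean.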
// ES6 section 20.1.1 The Number Constructor
Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
@@ -119,16 +134,25 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
CreateArgumentsType const type = CreateArgumentsTypeOf(arg_array->op());
Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
- int formal_parameter_count;
int start_index = 0;
- {
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- formal_parameter_count = shared->internal_formal_parameter_count();
- }
+ // Determine the formal parameter count.
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ int formal_parameter_count = shared->internal_formal_parameter_count();
if (type == CreateArgumentsType::kMappedArguments) {
- // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
- if (formal_parameter_count != 0) return NoChange();
+ // Mapped arguments (sloppy mode) that are aliased can only be handled
+ // here if there's no side-effect between the {node} and the {arg_array}.
+ // TODO(turbofan): Further relax this constraint.
+ if (formal_parameter_count != 0) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ while (effect != arg_array) {
+ if (effect->op()->EffectInputCount() != 1 ||
+ !(effect->op()->properties() & Operator::kNoWrite)) {
+ return NoChange();
+ }
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ }
} else if (type == CreateArgumentsType::kRestParameter) {
start_index = formal_parameter_count;
}
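// Reading aid (editorial): the loop added above walks the effect chain
// backwards from the apply call to the CreateArguments node; any intervening
// node with more than one effect input or without the kNoWrite property
// aborts the reduction, since a write in between could observe or mutate the
// aliased mapped arguments.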
@@ -136,20 +160,6 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// the outermost function.
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
if (outer_state->opcode() != IrOpcode::kFrameState) {
- // TODO(jarin,bmeurer): Support the NewUnmappedArgumentsElement and
- // NewRestParameterElements in the EscapeAnalysis and Deoptimizer
- // instead, then we don't need this hack.
- // Only works with zero formal parameters because of lacking deoptimizer
- // support.
- if (type != CreateArgumentsType::kRestParameter &&
- formal_parameter_count == 0) {
- // There are no other uses of the {arg_array} except in StateValues,
- // so we just replace {arg_array} with a marker for the Deoptimizer
- // that this refers to the arguments object.
- Node* arguments = graph()->NewNode(common()->ArgumentsObjectState());
- ReplaceWithValue(arg_array, arguments);
- }
-
// Reduce {node} to a JSCallForwardVarargs operation, which just
// re-pushes the incoming arguments and calls the {target}.
node->RemoveInput(0); // Function.prototype.apply
@@ -327,7 +337,7 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result == NodeProperties::kReliableReceiverMaps) {
+ if (result != NodeProperties::kNoReceiverMaps) {
Handle<Map> candidate_map(
receiver_maps[0]->GetPrototypeChainRootMap(isolate()));
Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
@@ -342,6 +352,15 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
receiver_map->prototype() != *candidate_prototype) {
return NoChange();
}
+ if (result == NodeProperties::kUnreliableReceiverMaps &&
+ !receiver_map->is_stable()) {
+ return NoChange();
+ }
+ }
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ dependencies()->AssumeMapStable(receiver_maps[i]);
+ }
}
Node* value = jsgraph()->Constant(candidate_prototype);
ReplaceWithValue(node, value);
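// Editorial note: with the relaxed check above, unreliable receiver-map
// information is made safe in two steps -- bail out if any unreliable map is
// not stable, then register an AssumeMapStable dependency on every map so the
// optimized code deoptimizes if a map transitions later.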
@@ -487,6 +506,32 @@ Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
return Changed(node);
}
+namespace {
+
+bool ShouldUseCallICFeedback(Node* node) {
+ HeapObjectMatcher m(node);
+ if (m.HasValue() || m.IsJSCreateClosure()) {
+ // Don't use CallIC feedback when we know the function
+ // being called, i.e. either know the closure itself or
+ // at least the SharedFunctionInfo.
+ return false;
+ } else if (m.IsPhi()) {
+ // Protect against endless loops here.
+ Node* control = NodeProperties::GetControlInput(node);
+ if (control->opcode() == IrOpcode::kLoop) return false;
+ // Check if {node} is a Phi of nodes which shouldn't
+ // use CallIC feedback (not looking through loops).
+ int const value_input_count = m.node()->op()->ValueInputCount();
+ for (int n = 0; n < value_input_count; ++n) {
+ if (ShouldUseCallICFeedback(node->InputAt(n))) return true;
+ }
+ return false;
+ }
+ return true;
+}
+
+} // namespace
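// Reading aid (editorial): the predicate above returns true ("consult the
// CallIC feedback") as soon as any phi input is an unknown callee; known
// closures and JSCreateClosure nodes are already precise, and loop phis are
// cut off so the recursion terminates on cyclic graphs.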
+
Reduction JSCallReducer::ReduceJSCall(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -515,6 +560,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Check for known builtin functions.
switch (shared->code()->builtin_index()) {
+ case Builtins::kBooleanConstructor:
+ return ReduceBooleanConstructor(node);
case Builtins::kFunctionPrototypeApply:
return ReduceFunctionPrototypeApply(node);
case Builtins::kFunctionPrototypeCall:
@@ -607,9 +654,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return Changed(node);
}
- // Not much we can do if deoptimization support is disabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
Handle<Object> feedback(nexus.GetFeedback(), isolate());
if (feedback->IsAllocationSite()) {
// Retrieve the Array function from the {node}.
@@ -626,6 +670,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
NodeProperties::ReplaceEffectInput(node, effect);
return ReduceArrayConstructor(node);
} else if (feedback->IsWeakCell()) {
+ // Check if we want to use CallIC feedback here.
+ if (!ShouldUseCallICFeedback(target)) return NoChange();
+
Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
if (cell->value()->IsJSFunction()) {
Node* target_function =
@@ -651,7 +698,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallWithSpread, node->opcode());
- CallWithSpreadParameters const& p = CallWithSpreadParametersOf(node->op());
+ SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
DCHECK_LE(3u, p.arity());
int arity = static_cast<int>(p.arity() - 1);
@@ -714,9 +761,6 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
- // Not much we can do if deoptimization support is disabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
if (!p.feedback().IsValid()) return NoChange();
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
Handle<Object> feedback(nexus.GetFeedback(), isolate());
@@ -746,6 +790,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
return Changed(node);
} else if (feedback->IsWeakCell()) {
+ // Check if we want to use CallIC feedback here.
+ if (!ShouldUseCallICFeedback(target)) return NoChange();
+
Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
if (cell->value()->IsJSFunction()) {
Node* target_function =
@@ -775,8 +822,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithSpread, node->opcode());
- ConstructWithSpreadParameters const& p =
- ConstructWithSpreadParametersOf(node->op());
+ SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
DCHECK_LE(3u, p.arity());
int arity = static_cast<int>(p.arity() - 2);
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 10b8ee8992..29ca61c100 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -27,19 +27,11 @@ class SimplifiedOperatorBuilder;
// which might allow inlining or other optimizations to be performed afterwards.
class JSCallReducer final : public AdvancedReducer {
public:
- // Flags that control the mode of operation.
- enum Flag {
- kNoFlags = 0u,
- kDeoptimizationEnabled = 1u << 0,
- };
- typedef base::Flags<Flag> Flags;
-
- JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
+ JSCallReducer(Editor* editor, JSGraph* jsgraph,
Handle<Context> native_context,
CompilationDependencies* dependencies)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- flags_(flags),
native_context_(native_context),
dependencies_(dependencies) {}
@@ -47,6 +39,7 @@ class JSCallReducer final : public AdvancedReducer {
private:
Reduction ReduceArrayConstructor(Node* node);
+ Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(
Node* node, Node* target,
Handle<FunctionTemplateInfo> function_template_info);
@@ -68,7 +61,6 @@ class JSCallReducer final : public AdvancedReducer {
Handle<JSObject>* holder);
Graph* graph() const;
- Flags flags() const { return flags_; }
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
Factory* factory() const;
@@ -79,13 +71,10 @@ class JSCallReducer final : public AdvancedReducer {
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
- Flags const flags_;
Handle<Context> const native_context_;
CompilationDependencies* const dependencies_;
};
-DEFINE_OPERATORS_FOR_FLAGS(JSCallReducer::Flags)
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 9a2edc13e3..0deb7cb38b 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -7,6 +7,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/contexts.h"
@@ -18,6 +19,8 @@ namespace compiler {
Reduction JSContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kParameter:
+ return ReduceParameter(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSStoreContext:
@@ -28,6 +31,20 @@ Reduction JSContextSpecialization::Reduce(Node* node) {
return NoChange();
}
+Reduction JSContextSpecialization::ReduceParameter(Node* node) {
+ DCHECK_EQ(IrOpcode::kParameter, node->opcode());
+ int const index = ParameterIndexOf(node->op());
+ if (index == Linkage::kJSCallClosureParamIndex) {
+ // Constant-fold the function parameter {node}.
+ Handle<JSFunction> function;
+ if (closure().ToHandle(&function)) {
+ Node* value = jsgraph()->HeapConstant(function);
+ return Replace(value);
+ }
+ }
+ return NoChange();
+}
+
Reduction JSContextSpecialization::SimplifyJSLoadContext(Node* node,
Node* new_context,
size_t new_depth) {
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index 99172af446..a38aca80bb 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -21,12 +21,17 @@ class JSOperatorBuilder;
class JSContextSpecialization final : public AdvancedReducer {
public:
JSContextSpecialization(Editor* editor, JSGraph* jsgraph,
- MaybeHandle<Context> context)
- : AdvancedReducer(editor), jsgraph_(jsgraph), context_(context) {}
+ MaybeHandle<Context> context,
+ MaybeHandle<JSFunction> closure)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ context_(context),
+ closure_(closure) {}
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceParameter(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
@@ -39,9 +44,11 @@ class JSContextSpecialization final : public AdvancedReducer {
JSOperatorBuilder* javascript() const;
JSGraph* jsgraph() const { return jsgraph_; }
MaybeHandle<Context> context() const { return context_; }
+ MaybeHandle<JSFunction> closure() const { return closure_; }
JSGraph* const jsgraph_;
MaybeHandle<Context> context_;
+ MaybeHandle<JSFunction> closure_;
DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
};
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index f3ceb2b0c0..432b5c620b 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -43,11 +43,8 @@ class AllocationBuilder final {
effect_ = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect_);
allocation_ =
- graph()->NewNode(simplified()->Allocate(pretenure),
+ graph()->NewNode(simplified()->Allocate(type, pretenure),
jsgraph()->Constant(size), effect_, control_);
- // TODO(turbofan): Maybe we should put the Type* onto the Allocate operator
- // at some point, or maybe we should have a completely differnt story.
- NodeProperties::SetType(allocation_, type);
effect_ = allocation_;
}
@@ -120,6 +117,7 @@ Node* GetArgumentsFrameState(Node* frame_state) {
bool IsAllocationInlineable(Handle<JSFunction> target,
Handle<JSFunction> new_target) {
return new_target->has_initial_map() &&
+ !new_target->initial_map()->is_dictionary_map() &&
new_target->initial_map()->constructor_or_backpointer() == *target;
}
@@ -197,9 +195,13 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
}
// Maximum depth and total number of elements and properties for literal
-// graphs to be considered for fast deep-copying.
+// graphs to be considered for fast deep-copying. The limit is chosen to
+// match the maximum number of inobject properties, to ensure that the
+// performance of using object literals is not worse than using constructor
+// functions, see crbug.com/v8/6211 for details.
const int kMaxFastLiteralDepth = 3;
-const int kMaxFastLiteralProperties = 8;
+const int kMaxFastLiteralProperties =
+ (JSObject::kMaxInstanceSize - JSObject::kHeaderSize) >> kPointerSizeLog2;
} // namespace
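// Worked example of the formula above (editorial; the byte sizes here are
// illustrative assumptions, not the real V8 constants): with 8-byte pointers
// (kPointerSizeLog2 == 3), a maximum instance size of 2048 bytes and a
// 24-byte header would yield (2048 - 24) >> 3 == 253 in-object property
// slots as the fast-literal budget.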
@@ -310,12 +312,14 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
if (shared_info->internal_formal_parameter_count() == 0) {
Node* const callee = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* const arguments_frame =
+ graph()->NewNode(simplified()->ArgumentsFrame());
+ Node* const arguments_length = graph()->NewNode(
+ simplified()->ArgumentsLength(0, false), arguments_frame);
// Allocate the elements backing store.
- Node* const elements = effect = graph()->NewNode(
- simplified()->NewUnmappedArgumentsElements(0), effect);
- Node* const length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
+ Node* const elements = effect =
+ graph()->NewNode(simplified()->NewUnmappedArgumentsElements(),
+ arguments_frame, arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map = jsgraph()->HeapConstant(
handle(native_context()->sloppy_arguments_map(), isolate()));
@@ -327,7 +331,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
a.Store(AccessBuilder::ForMap(), arguments_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForArgumentsLength(), length);
+ a.Store(AccessBuilder::ForArgumentsLength(), arguments_length);
a.Store(AccessBuilder::ForArgumentsCallee(), callee);
RelaxControls(node);
a.FinishAndChange(node);
@@ -351,14 +355,16 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Handle<SharedFunctionInfo> shared_info;
if (state_info.shared_info().ToHandle(&shared_info)) {
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* const arguments_frame =
+ graph()->NewNode(simplified()->ArgumentsFrame());
+ Node* const arguments_length = graph()->NewNode(
+ simplified()->ArgumentsLength(
+ shared_info->internal_formal_parameter_count(), false),
+ arguments_frame);
// Allocate the elements backing store.
- Node* const elements = effect = graph()->NewNode(
- simplified()->NewUnmappedArgumentsElements(
- shared_info->internal_formal_parameter_count()),
- effect);
- Node* const length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
+ Node* const elements = effect =
+ graph()->NewNode(simplified()->NewUnmappedArgumentsElements(),
+ arguments_frame, arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map = jsgraph()->HeapConstant(
handle(native_context()->strict_arguments_map(), isolate()));
@@ -370,7 +376,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
a.Store(AccessBuilder::ForMap(), arguments_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForArgumentsLength(), length);
+ a.Store(AccessBuilder::ForArgumentsLength(), arguments_length);
RelaxControls(node);
a.FinishAndChange(node);
} else {
@@ -390,14 +396,19 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Handle<SharedFunctionInfo> shared_info;
if (state_info.shared_info().ToHandle(&shared_info)) {
Node* effect = NodeProperties::GetEffectInput(node);
- // Allocate the elements backing store.
- Node* const elements = effect = graph()->NewNode(
- simplified()->NewRestParameterElements(
- shared_info->internal_formal_parameter_count()),
- effect);
- Node* const length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
+ Node* const arguments_frame =
+ graph()->NewNode(simplified()->ArgumentsFrame());
+ int formal_parameter_count =
+ shared_info->internal_formal_parameter_count();
+ Node* const rest_length = graph()->NewNode(
+ simplified()->ArgumentsLength(formal_parameter_count, true),
+ arguments_frame);
+ // Allocate the elements backing store. Since
+ // NewUnmappedArgumentsElements copies from the end of the arguments
+ // adapter frame, this is a suffix of the actual arguments.
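// Worked example (editorial): with 5 actual arguments and
// internal_formal_parameter_count() == 2, ArgumentsLength(2, true) yields 3,
// and the elements store receives the trailing suffix -- arguments 2, 3 and 4.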
+ Node* const elements = effect =
+ graph()->NewNode(simplified()->NewUnmappedArgumentsElements(),
+ arguments_frame, rest_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->HeapConstant(handle(
native_context()->js_array_fast_elements_map_index(), isolate()));
@@ -409,7 +420,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
a.Store(AccessBuilder::ForMap(), jsarray_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), length);
+ a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), rest_length);
RelaxControls(node);
a.FinishAndChange(node);
} else {
@@ -636,8 +647,6 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
graph()->NewNode(common()->Branch(BranchHint::kFalse), equal, control);
Node* call_holey;
Node* call_packed;
- Node* if_success_packed;
- Node* if_success_holey;
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* if_equal = graph()->NewNode(common()->IfTrue(), branch);
@@ -661,7 +670,6 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
call_holey =
graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
- if_success_holey = graph()->NewNode(common()->IfSuccess(), call_holey);
}
Node* if_not_equal = graph()->NewNode(common()->IfFalse(), branch);
{
@@ -685,10 +693,8 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
call_packed =
graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
- if_success_packed = graph()->NewNode(common()->IfSuccess(), call_packed);
}
- Node* merge = graph()->NewNode(common()->Merge(2), if_success_holey,
- if_success_packed);
+ Node* merge = graph()->NewNode(common()->Merge(2), call_holey, call_packed);
Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), call_holey,
call_packed, merge);
Node* phi =
@@ -1097,17 +1103,7 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
ElementAccess access = IsFastDoubleElementsKind(elements_kind)
? AccessBuilder::ForFixedDoubleArrayElement()
: AccessBuilder::ForFixedArrayElement();
- Node* value;
- if (IsFastDoubleElementsKind(elements_kind)) {
- // Load the hole NaN pattern from the canonical location.
- value = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForExternalDoubleValue()),
- jsgraph()->ExternalConstant(
- ExternalReference::address_of_the_hole_nan()),
- effect, control);
- } else {
- value = jsgraph()->TheHoleConstant();
- }
+ Node* value = jsgraph()->TheHoleConstant();
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), effect, control);
@@ -1138,11 +1134,6 @@ Node* JSCreateLowering::AllocateFastLiteral(
// Setup the properties backing store.
Node* properties = jsgraph()->EmptyFixedArrayConstant();
- // Setup the elements backing store.
- Node* elements = AllocateFastLiteralElements(effect, control, boilerplate,
- pretenure, site_context);
- if (elements->op()->EffectOutputCount() > 0) effect = elements;
-
// Compute the in-object properties to store first (might have effects).
Handle<Map> boilerplate_map(boilerplate->map(), isolate());
ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
@@ -1207,6 +1198,11 @@ Node* JSCreateLowering::AllocateFastLiteral(
inobject_fields.push_back(std::make_pair(access, value));
}
+ // Setup the elements backing store.
+ Node* elements = AllocateFastLiteralElements(effect, control, boilerplate,
+ pretenure, site_context);
+ if (elements->op()->EffectOutputCount() > 0) effect = elements;
+
// Actually allocate and initialize the object.
AllocationBuilder builder(jsgraph(), effect, control);
builder.Allocate(boilerplate_map->instance_size(), pretenure,
@@ -1255,18 +1251,9 @@ Node* JSCreateLowering::AllocateFastLiteralElements(
if (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
Handle<FixedDoubleArray> elements =
Handle<FixedDoubleArray>::cast(boilerplate_elements);
- Node* the_hole_value = nullptr;
for (int i = 0; i < elements_length; ++i) {
if (elements->is_the_hole(i)) {
- if (the_hole_value == nullptr) {
- // Load the hole NaN pattern from the canonical location.
- the_hole_value = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForExternalDoubleValue()),
- jsgraph()->ExternalConstant(
- ExternalReference::address_of_the_hole_nan()),
- effect, control);
- }
- elements_values[i] = the_hole_value;
+ elements_values[i] = jsgraph()->TheHoleConstant();
} else {
elements_values[i] = jsgraph()->Constant(elements->get_scalar(i));
}
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 79a3377462..2b333c06c5 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -73,7 +73,6 @@ REPLACE_STUB_CALL(GreaterThan)
REPLACE_STUB_CALL(GreaterThanOrEqual)
REPLACE_STUB_CALL(HasProperty)
REPLACE_STUB_CALL(Equal)
-REPLACE_STUB_CALL(NotEqual)
REPLACE_STUB_CALL(ToInteger)
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
@@ -127,15 +126,6 @@ void JSGenericLowering::LowerJSStrictEqual(Node* node) {
Operator::kEliminatable);
}
-void JSGenericLowering::LowerJSStrictNotEqual(Node* node) {
- // The !== operator doesn't need the current context.
- NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = CodeFactory::StrictNotEqual(isolate());
- node->RemoveInput(4); // control
- ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
- Operator::kEliminatable);
-}
-
void JSGenericLowering::LowerJSToBoolean(Node* node) {
// The ToBoolean conversion doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
@@ -167,107 +157,122 @@ void JSGenericLowering::LowerJSTypeOf(Node* node) {
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const PropertyAccess& p = PropertyAccessOf(node->op());
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
- Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->InsertInput(zone(), 3, vector);
- ReplaceWithStubCall(node, callable, flags);
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Callable callable = CodeFactory::KeyedLoadIC(isolate());
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(zone(), 3, vector);
+ ReplaceWithStubCall(node, callable, flags);
+ }
}
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
- Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
- Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->InsertInput(zone(), 3, vector);
- ReplaceWithStubCall(node, callable, flags);
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Callable callable = CodeFactory::LoadIC(isolate());
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(zone(), 3, vector);
+ ReplaceWithStubCall(node, callable, flags);
+ }
}
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
- Callable callable =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
- Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
- node->InsertInput(zone(), 2, vector);
- ReplaceWithStubCall(node, callable, flags);
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Callable callable = CodeFactory::LoadGlobalIC(isolate(), p.typeof_mode());
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ Callable callable =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(zone(), 2, vector);
+ ReplaceWithStubCall(node, callable, flags);
+ }
}
-
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* key = NodeProperties::GetValueInput(node, 1);
- Node* value = NodeProperties::GetValueInput(node, 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
PropertyAccess const& p = PropertyAccessOf(node->op());
- Callable callable =
- CodeFactory::KeyedStoreICInOptimizedCode(isolate(), p.language_mode());
- Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
- typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 2);
- node->ReplaceInput(Descriptor::kReceiver, receiver);
- node->ReplaceInput(Descriptor::kName, key);
- node->ReplaceInput(Descriptor::kValue, value);
- node->ReplaceInput(Descriptor::kSlot,
- jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(Descriptor::kVector, vector);
- ReplaceWithStubCall(node, callable, flags);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Callable callable = CodeFactory::KeyedStoreIC(isolate(), p.language_mode());
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ Callable callable =
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate(), p.language_mode());
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(zone(), 4, vector);
+ ReplaceWithStubCall(node, callable, flags);
+ }
}
-
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* value = NodeProperties::GetValueInput(node, 1);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
- Callable callable =
- CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
- Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
- typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 3);
- node->ReplaceInput(Descriptor::kReceiver, receiver);
- node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
- node->ReplaceInput(Descriptor::kValue, value);
- node->ReplaceInput(Descriptor::kSlot,
- jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(Descriptor::kVector, vector);
- ReplaceWithStubCall(node, callable, flags);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Callable callable = CodeFactory::StoreIC(isolate(), p.language_mode());
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ Callable callable =
+ CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(zone(), 4, vector);
+ ReplaceWithStubCall(node, callable, flags);
+ }
}
void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* value = NodeProperties::GetValueInput(node, 1);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
- Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
- Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
- typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 3);
- node->ReplaceInput(Descriptor::kReceiver, receiver);
- node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
- node->ReplaceInput(Descriptor::kValue, value);
- node->ReplaceInput(Descriptor::kSlot,
- jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(Descriptor::kVector, vector);
- ReplaceWithStubCall(node, callable, flags);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Callable callable = CodeFactory::StoreOwnIC(isolate());
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(zone(), 4, vector);
+ ReplaceWithStubCall(node, callable, flags);
+ }
}
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
- Callable callable =
- CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
- Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
// Load global object from the context.
Node* native_context = effect =
graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
@@ -278,20 +283,25 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
machine()->Load(MachineType::AnyTagged()), native_context,
jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
effect, control);
- typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 4);
- node->ReplaceInput(Descriptor::kReceiver, global);
- node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
- node->ReplaceInput(Descriptor::kValue, value);
- node->ReplaceInput(Descriptor::kSlot,
- jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(Descriptor::kVector, vector);
- node->ReplaceInput(7, effect);
- ReplaceWithStubCall(node, callable, flags);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ node->InsertInput(zone(), 0, global);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Callable callable =
+ CodeFactory::StoreGlobalIC(isolate(), p.language_mode());
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ Callable callable =
+ CodeFactory::StoreGlobalICInOptimizedCode(isolate(), p.language_mode());
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(zone(), 4, vector);
+ ReplaceWithStubCall(node, callable, flags);
+ }
}
void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
- DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
node->InsertInputs(zone(), 4, 2);
node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector()));
node->ReplaceInput(5, jsgraph()->SmiConstant(p.feedback().index()));
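Each of the load/store lowerings above now follows the same shape: peek at the frame state's outer state to see whether the access sits inside an inlined frame, then pick between the plain IC stub and the vector-taking *InOptimizedCode variant. A minimal sketch of that dispatch, with hypothetical enum and stub names standing in for IrOpcode and CodeFactory:

#include <iostream>

enum class Opcode { kFrameState, kOther };

struct FrameState { Opcode outer; };

// Mirrors the check in the lowerings above: an outer frame state that is
// itself a FrameState means the node was inlined, so the feedback vector
// must be passed explicitly to the optimized-code IC variant.
const char* PickLoadStub(const FrameState& fs) {
  return fs.outer != Opcode::kFrameState
             ? "LoadIC"
             : "LoadICInOptimizedCode + explicit vector input";
}

int main() {
  std::cout << PickLoadStub({Opcode::kOther}) << "\n";       // outermost frame
  std::cout << PickLoadStub({Opcode::kFrameState}) << "\n";  // inlined frame
}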
@@ -402,8 +412,7 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
ScopeType scope_type = parameters.scope_type();
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- if (slot_count <=
- ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ if (slot_count <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
Callable callable =
CodeFactory::FastNewFunctionContext(isolate(), scope_type);
node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
@@ -432,8 +441,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
// Use the FastCloneShallowArray builtin only for shallow boilerplates without
// properties up to the number of elements that the stubs can handle.
if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
- p.length() <
- ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements) {
+ p.length() < ConstructorBuiltins::kMaximumClonedShallowArrayElements) {
Callable callable = CodeFactory::FastCloneShallowArray(
isolate(), DONT_TRACK_ALLOCATION_SITE);
ReplaceWithStubCall(node, callable, flags);
@@ -455,7 +463,7 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
// without elements up to the number of properties that the stubs can handle.
if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
p.length() <=
- ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties) {
+ ConstructorBuiltins::kMaximumClonedShallowObjectProperties) {
Callable callable =
CodeFactory::FastCloneShallowObject(isolate(), p.length());
ReplaceWithStubCall(node, callable, flags);
@@ -528,8 +536,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
}
void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
- ConstructWithSpreadParameters const& p =
- ConstructWithSpreadParametersOf(node->op());
+ SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructWithSpread(isolate());
@@ -582,7 +589,7 @@ void JSGenericLowering::LowerJSCall(Node* node) {
}
void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
- CallWithSpreadParameters const& p = CallWithSpreadParametersOf(node->op());
+ SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
Callable callable = CodeFactory::CallWithSpread(isolate());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -665,21 +672,29 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
NodeProperties::ReplaceControlInput(node, if_false);
- Node* efalse = node;
+ Node* efalse = if_false = node;
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
// Wire the new diamond into the graph, {node} can still throw.
- NodeProperties::ReplaceUses(node, node, ephi, node, node);
+ NodeProperties::ReplaceUses(node, node, ephi, merge, merge);
+ NodeProperties::ReplaceControlInput(merge, if_false, 1);
NodeProperties::ReplaceEffectInput(ephi, efalse, 1);
- // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
- // the node and places it inside the diamond. Come up with a helper method!
- for (Node* use : node->uses()) {
- if (use->opcode() == IrOpcode::kIfSuccess) {
- use->ReplaceUses(merge);
- merge->ReplaceInput(1, use);
+ // This iteration cuts out potential {IfSuccess} or {IfException} projection
+ // uses of the original node and places them inside the diamond, so that we
+ // can change the original {node} into the slow-path runtime call.
+ for (Edge edge : merge->use_edges()) {
+ if (!NodeProperties::IsControlEdge(edge)) continue;
+ if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ NodeProperties::ReplaceUses(edge.from(), nullptr, nullptr, merge);
+ NodeProperties::ReplaceControlInput(merge, edge.from(), 1);
+ edge.UpdateTo(node);
+ }
+ if (edge.from()->opcode() == IrOpcode::kIfException) {
+ NodeProperties::ReplaceEffectInput(edge.from(), node);
+ edge.UpdateTo(node);
}
}
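The stack-check lowering above builds a branch diamond and moves any {IfSuccess}/{IfException} projections of the original node inside it, so that the node itself can become the slow-path runtime call. A toy graph showing the resulting shape; the Node type here is a simplified stand-in, not the TurboFan class:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string op;
  std::vector<Node*> inputs;
};

std::vector<std::unique_ptr<Node>> arena;

Node* NewNode(std::string op, std::vector<Node*> inputs = {}) {
  arena.push_back(std::make_unique<Node>());
  arena.back()->op = std::move(op);
  arena.back()->inputs = std::move(inputs);
  return arena.back().get();
}

int main() {
  Node* check = NewNode("StackPointerGreaterThan");
  Node* branch = NewNode("Branch", {check});
  Node* if_true = NewNode("IfTrue", {branch});
  Node* if_false = NewNode("IfFalse", {branch});
  Node* runtime_call = NewNode("CallRuntime", {if_false});  // slow path
  // The call's IfSuccess projection is rewired to close the diamond:
  // the merge joins the fast path with the call's success path.
  Node* if_success = NewNode("IfSuccess", {runtime_call});
  Node* merge = NewNode("Merge", {if_true, if_success});
  std::cout << "merge(" << merge->inputs[0]->op << ", "
            << merge->inputs[1]->op << ")\n";
}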
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 6f99fbb183..9774de28e5 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -164,6 +164,10 @@ void JSInliningHeuristic::Finalize() {
auto i = candidates_.begin();
Candidate candidate = *i;
candidates_.erase(i);
+ // Only include candidates that we've successfully called before.
+ // The candidate list is sorted, so we can exit at the first occurrence of
+ // frequency 0 in the list.
+ if (candidate.frequency <= 0.0) return;
// Make sure we don't try to inline dead candidate nodes.
if (!candidate.node->IsDead()) {
Reduction const reduction = InlineCandidate(candidate);
@@ -222,26 +226,21 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
// to the known {target}); the last input is the control dependency.
inputs[0] = target;
inputs[input_count - 1] = if_successes[i];
- calls[i] = graph()->NewNode(node->op(), input_count, inputs);
- if_successes[i] = graph()->NewNode(common()->IfSuccess(), calls[i]);
+ calls[i] = if_successes[i] =
+ graph()->NewNode(node->op(), input_count, inputs);
}
// Check if we have an exception projection for the call {node}.
Node* if_exception = nullptr;
- for (Edge const edge : node->use_edges()) {
- if (NodeProperties::IsControlEdge(edge) &&
- edge.from()->opcode() == IrOpcode::kIfException) {
- if_exception = edge.from();
- break;
- }
- }
- if (if_exception != nullptr) {
- // Morph the {if_exception} projection into a join.
+ if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
Node* if_exceptions[kMaxCallPolymorphism + 1];
for (int i = 0; i < num_calls; ++i) {
+ if_successes[i] = graph()->NewNode(common()->IfSuccess(), calls[i]);
if_exceptions[i] =
graph()->NewNode(common()->IfException(), calls[i], calls[i]);
}
+
+ // Morph the {if_exception} projection into a join.
Node* exception_control =
graph()->NewNode(common()->Merge(num_calls), num_calls, if_exceptions);
if_exceptions[num_calls] = exception_control;
@@ -254,7 +253,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
exception_control);
}
- // Morph the call site into the dispatched call sites.
+ // Morph the original call site into a join of the dispatched call sites.
Node* control =
graph()->NewNode(common()->Merge(num_calls), num_calls, if_successes);
calls[num_calls] = control;
@@ -271,6 +270,9 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
Node* node = calls[i];
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
+ // Killing the call node is not strictly necessary, but it is safer to
+ // make sure we do not resurrect the node.
+ node->Kill();
cumulative_count_ += function->shared()->ast_node_count();
}
}
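The new bailout above relies on the candidate set being ordered by call frequency, so the walk can stop at the first never-called candidate instead of filtering the whole list. A self-contained sketch of that early exit, with illustrative stand-in names:

#include <iostream>
#include <set>

struct Candidate {
  const char* name;
  double frequency;
  bool operator<(const Candidate& other) const {
    return frequency > other.frequency;  // highest frequency first
  }
};

void Inline(const Candidate& c) { std::cout << "inline " << c.name << "\n"; }

int main() {
  std::set<Candidate> candidates = {{"hot", 9.0}, {"warm", 1.5}, {"cold", 0.0}};
  for (const Candidate& c : candidates) {
    // Only consider candidates that have actually been called; since the
    // set is sorted by frequency, everything after the first zero is cold.
    if (c.frequency <= 0.0) break;
    Inline(c);
  }
}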
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index c87be6c236..af24b703d3 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -141,12 +141,15 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
int subcall_count = static_cast<int>(uncaught_subcalls.size());
if (subcall_count > 0) {
TRACE(
- "Inlinee contains %d calls without IfException; "
- "linking to existing IfException\n",
+ "Inlinee contains %d calls without local exception handler; "
+ "linking to surrounding exception handler\n",
subcall_count);
}
NodeVector on_exception_nodes(local_zone_);
for (Node* subcall : uncaught_subcalls) {
+ Node* on_success = graph()->NewNode(common()->IfSuccess(), subcall);
+ NodeProperties::ReplaceUses(subcall, subcall, subcall, on_success);
+ NodeProperties::ReplaceControlInput(on_success, subcall);
Node* on_exception =
graph()->NewNode(common()->IfException(), subcall, subcall);
on_exception_nodes.push_back(on_exception);
@@ -215,7 +218,8 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
ReplaceWithValue(call, value_output, effect_output, control_output);
return Changed(value_output);
} else {
- ReplaceWithValue(call, call, call, jsgraph()->Dead());
+ ReplaceWithValue(call, jsgraph()->Dead(), jsgraph()->Dead(),
+ jsgraph()->Dead());
return Changed(call);
}
}
@@ -529,40 +533,23 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
}
- // Find the IfException node, if any.
+ // Calls surrounded by a local try-block are only inlined if the appropriate
+ // flag is active. We also discover the {IfException} projection this way.
Node* exception_target = nullptr;
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsControlEdge(edge) &&
- edge.from()->opcode() == IrOpcode::kIfException) {
- DCHECK_NULL(exception_target);
- exception_target = edge.from();
- }
- }
-
- NodeVector uncaught_subcalls(local_zone_);
-
- if (exception_target != nullptr) {
- if (!FLAG_inline_into_try) {
- TRACE(
- "Try block surrounds #%d:%s and --no-inline-into-try active, so not "
- "inlining %s into %s.\n",
- exception_target->id(), exception_target->op()->mnemonic(),
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- } else {
- TRACE(
- "Inlining %s into %s regardless of surrounding try-block to catcher "
- "#%d:%s\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get(),
- exception_target->id(), exception_target->op()->mnemonic());
- }
+ if (NodeProperties::IsExceptionalCall(node, &exception_target) &&
+ !FLAG_inline_into_try) {
+ TRACE(
+ "Try block surrounds #%d:%s and --no-inline-into-try active, so not "
+ "inlining %s into %s.\n",
+ exception_target->id(), exception_target->op()->mnemonic(),
+ shared_info->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
}
ParseInfo parse_info(shared_info);
CompilationInfo info(parse_info.zone(), &parse_info,
- Handle<JSFunction>::null());
+ shared_info->GetIsolate(), Handle<JSFunction>::null());
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
info.MarkAsOptimizeFromBytecode();
@@ -586,9 +573,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// After this point, we've made a decision to inline this function.
// We shall not bailout from inlining if we got here.
- TRACE("Inlining %s into %s\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
+ TRACE("Inlining %s into %s%s\n", shared_info->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get(),
+ (exception_target != nullptr) ? " (inside try-block)" : "");
// Determine the targets feedback vector and its context.
Node* context;
@@ -601,9 +588,13 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
{
// Run the BytecodeGraphBuilder to create the subgraph.
Graph::SubgraphScope scope(graph());
+ JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags;
+ if (info_->is_bailout_on_uninitialized()) {
+ flags |= JSTypeHintLowering::kBailoutOnUninitialized;
+ }
BytecodeGraphBuilder graph_builder(
parse_info.zone(), shared_info, feedback_vector, BailoutId::None(),
- jsgraph(), call.frequency(), source_positions_, inlining_id);
+ jsgraph(), call.frequency(), source_positions_, inlining_id, flags);
graph_builder.CreateGraph(false);
// Extract the inlinee start/end nodes.
@@ -611,23 +602,18 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
end = graph()->end();
}
+ // If we are inlining into a surrounding exception handler, we collect all
+ // potentially throwing nodes within the inlinee that are not handled locally
+ // by the inlinee itself. They are later wired into the surrounding handler.
+ NodeVector uncaught_subcalls(local_zone_);
if (exception_target != nullptr) {
// Find all uncaught 'calls' in the inlinee.
AllNodes inlined_nodes(local_zone_, end, graph());
for (Node* subnode : inlined_nodes.reachable) {
- // Every possibly throwing node with an IfSuccess should get an
- // IfException.
- if (subnode->op()->HasProperty(Operator::kNoThrow)) {
- continue;
- }
- bool hasIfException = false;
- for (Node* use : subnode->uses()) {
- if (use->opcode() == IrOpcode::kIfException) {
- hasIfException = true;
- break;
- }
- }
- if (!hasIfException) {
+ // Every possibly throwing node should get {IfSuccess} and {IfException}
+ // projections, unless there already is local exception handling.
+ if (subnode->op()->HasProperty(Operator::kNoThrow)) continue;
+ if (!NodeProperties::IsExceptionalCall(subnode)) {
DCHECK_EQ(2, subnode->op()->ControlOutputCount());
uncaught_subcalls.push_back(subnode);
}
@@ -666,9 +652,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Node* create =
graph()->NewNode(javascript()->Create(), call.target(), new_target,
context, frame_state_inside, effect, control);
- Node* success = graph()->NewNode(common()->IfSuccess(), create);
- uncaught_subcalls.push_back(create); // Adds {IfException}.
- NodeProperties::ReplaceControlInput(node, success);
+ uncaught_subcalls.push_back(create); // Adds {IfSuccess} & {IfException}.
+ NodeProperties::ReplaceControlInput(node, create);
NodeProperties::ReplaceEffectInput(node, create);
// Insert a check of the return value to determine whether the return
// value or the implicit receiver should be selected as a result of the
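After the graph builder runs, the inliner above collects every possibly-throwing node in the inlinee that lacks a local handler and gives it fresh {IfSuccess}/{IfException} projections, so the exceptional paths can later be merged into the caller's handler. A toy version of that collection pass, again with simplified stand-in types rather than the V8 classes:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string op;
  std::vector<Node*> inputs;
  bool no_throw = false;
  bool has_local_handler = false;
};

std::vector<std::unique_ptr<Node>> arena;

Node* NewNode(std::string op, std::vector<Node*> inputs = {}) {
  arena.push_back(std::make_unique<Node>());
  arena.back()->op = std::move(op);
  arena.back()->inputs = std::move(inputs);
  return arena.back().get();
}

int main() {
  Node* pure = NewNode("LoadField");
  pure->no_throw = true;                  // kNoThrow: skip entirely
  Node* handled = NewNode("Call");
  handled->has_local_handler = true;      // already has local handling
  Node* uncaught = NewNode("Call");       // throwing, no local handler

  std::vector<Node*> on_exception_nodes;
  for (Node* n : {pure, handled, uncaught}) {
    if (n->no_throw) continue;
    if (n->has_local_handler) continue;   // like NodeProperties::IsExceptionalCall
    NewNode("IfSuccess", {n});            // keeps the normal control path intact
    on_exception_nodes.push_back(NewNode("IfException", {n, n}));
  }
  std::cout << on_exception_nodes.size()
            << " uncaught path(s) to merge into the caller's handler\n";
}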
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 8a866eeec4..a18551c642 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -40,6 +40,12 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceGeneratorClose(node);
case Runtime::kInlineGeneratorGetInputOrDebugPos:
return ReduceGeneratorGetInputOrDebugPos(node);
+ case Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos:
+ return ReduceAsyncGeneratorGetAwaitInputOrDebugPos(node);
+ case Runtime::kInlineAsyncGeneratorReject:
+ return ReduceAsyncGeneratorReject(node);
+ case Runtime::kInlineAsyncGeneratorResolve:
+ return ReduceAsyncGeneratorResolve(node);
case Runtime::kInlineGeneratorGetResumeMode:
return ReduceGeneratorGetResumeMode(node);
case Runtime::kInlineGeneratorGetContext:
@@ -163,6 +169,25 @@ Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
return Change(node, op, generator, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceAsyncGeneratorGetAwaitInputOrDebugPos(
+ Node* node) {
+ Node* const generator = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op = simplified()->LoadField(
+ AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos());
+
+ return Change(node, op, generator, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncGeneratorReject(Node* node) {
+ return Change(node, CodeFactory::AsyncGeneratorReject(isolate()), 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncGeneratorResolve(Node* node) {
+ return Change(node, CodeFactory::AsyncGeneratorResolve(isolate()), 0);
+}
+
Reduction JSIntrinsicLowering::ReduceGeneratorGetContext(Node* node) {
Node* const generator = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -298,6 +323,12 @@ Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
+ // ToString is unnecessary if the input is a string.
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
+ if (m.HasValue() && m.Value()->IsString()) {
+ ReplaceWithValue(node, m.node());
+ return Replace(m.node());
+ }
NodeProperties::ChangeOp(node, javascript()->ToString());
return Changed(node);
}
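The change above folds %_ToString away when its operand is already a known string constant, replacing the node with its input. A tiny analogue using std::variant in place of the compiler's HeapObjectMatcher:

#include <iostream>
#include <string>
#include <variant>

using Value = std::variant<std::string, double>;

// If the input is already a string, the conversion is a no-op; otherwise a
// real conversion (the JSToString operator in the compiler) is emitted.
Value ReduceToString(const Value& input) {
  if (std::holds_alternative<std::string>(input)) return input;
  return std::to_string(std::get<double>(input));
}

int main() {
  std::cout << std::get<std::string>(ReduceToString(Value{std::string("abc")})) << "\n";
  std::cout << std::get<std::string>(ReduceToString(Value{1.5})) << "\n";
}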
@@ -313,14 +344,8 @@ Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
}
Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
- Node* active_function = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* active_function_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- active_function, effect, control);
- return Change(node, simplified()->LoadField(AccessBuilder::ForMapPrototype()),
- active_function_map, effect, control);
+ NodeProperties::ChangeOp(node, javascript()->GetSuperConstructor());
+ return Changed(node);
}
Reduction JSIntrinsicLowering::ReduceArrayBufferViewField(
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index f3e3e2ab3b..2a2baf0930 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -46,6 +46,10 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceGeneratorClose(Node* node);
Reduction ReduceGeneratorGetContext(Node* node);
Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
+ Reduction ReduceAsyncGeneratorGetAwaitInputOrDebugPos(Node* node);
+ Reduction ReduceAsyncGeneratorReject(Node* node);
+ Reduction ReduceAsyncGeneratorResolve(Node* node);
+ Reduction ReduceGeneratorSaveInputForAwait(Node* node);
Reduction ReduceGeneratorGetResumeMode(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsJSReceiver(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index c32ee269a0..66013b85ca 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -138,9 +138,6 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
DCHECK_EQ(IrOpcode::kJSGetSuperConstructor, node->opcode());
Node* constructor = NodeProperties::GetValueInput(node, 0);
- // If deoptimization is disabled, we cannot optimize.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
// Check if the input is a known JSFunction.
HeapObjectMatcher m(constructor);
if (!m.HasValue()) return NoChange();
@@ -176,9 +173,6 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // If deoptimization is disabled, we cannot optimize.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
// Check if the right hand side is a known {receiver}.
HeapObjectMatcher m(constructor);
if (!m.HasValue() || !m.Value()->IsJSObject()) return NoChange();
@@ -217,21 +211,37 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
Reduction const reduction = ReduceJSOrdinaryHasInstance(node);
return reduction.Changed() ? reduction : Changed(node);
}
- } else if (access_info.IsDataConstant()) {
- DCHECK(access_info.constant()->IsCallable());
-
+ } else if (access_info.IsDataConstant() ||
+ access_info.IsDataConstantField()) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
AssumePrototypesStable(access_info.receiver_maps(), holder);
+ } else {
+ holder = receiver;
+ }
+
+ Handle<Object> constant;
+ if (access_info.IsDataConstant()) {
+ DCHECK(!FLAG_track_constant_fields);
+ constant = access_info.constant();
+ } else {
+ DCHECK(FLAG_track_constant_fields);
+ DCHECK(access_info.IsDataConstantField());
+ // The value must be callable therefore tagged.
+ DCHECK(CanBeTaggedPointer(access_info.field_representation()));
+ FieldIndex field_index = access_info.field_index();
+ constant = JSObject::FastPropertyAt(holder, Representation::Tagged(),
+ field_index);
}
+ DCHECK(constant->IsCallable());
// Monomorphic property access.
effect = BuildCheckMaps(constructor, effect, control,
access_info.receiver_maps());
// Call the @@hasInstance handler.
- Node* target = jsgraph()->Constant(access_info.constant());
+ Node* target = jsgraph()->Constant(constant);
node->InsertInput(graph()->zone(), 0, target);
node->ReplaceInput(1, constructor);
node->ReplaceInput(2, object);
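With constant-field tracking, the callable @@hasInstance handler above is no longer recorded as a DataConstant on the map; instead the reducer loads it out of the holder's field. A rough sketch of that two-way choice, with illustrative stand-in types (GetHandler, AccessInfo, Holder are not V8 names):

#include <iostream>
#include <string>

struct AccessInfo {
  bool is_data_constant;  // old world: constant tracked directly
  std::string constant;   // used when is_data_constant
  int field_index;        // used for the constant-field case
};

struct Holder {
  std::string fields[4];
};

std::string GetHandler(const AccessInfo& info, const Holder& holder) {
  if (info.is_data_constant) return info.constant;
  // Constant-field case: read the value from the holder's property slot,
  // like JSObject::FastPropertyAt in the hunk above.
  return holder.fields[info.field_index];
}

int main() {
  Holder h{{"", "Function.prototype[Symbol.hasInstance]", "", ""}};
  std::cout << GetHandler({false, "", 1}, h) << "\n";
  std::cout << GetHandler({true, "cached handler", 0}, h) << "\n";
}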
@@ -513,9 +523,6 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
return Replace(value);
}
- // Not much we can do if deoptimization support is disabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
// Lookup the {name} on the global object instead.
return ReduceGlobalAccess(node, nullptr, nullptr, name, AccessMode::kLoad);
}
@@ -539,9 +546,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
return Replace(value);
}
- // Not much we can do if deoptimization support is disabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
// Lookup the {name} on the global object instead.
return ReduceGlobalAccess(node, nullptr, value, name, AccessMode::kStore);
}
@@ -549,7 +553,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
- Handle<FeedbackVector> vector, FeedbackSlot slot, Node* index) {
+ Node* index) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -561,9 +565,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Not much we can do if deoptimization support is disabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
// Check if we have an access o.x or o.x=v where o is the current
// native contexts' global proxy, and turn that into a direct access
// to the current native contexts' global object instead.
@@ -598,13 +599,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if (is_exceptional || !(flags() & kAccessorInliningEnabled)) {
return NoChange();
}
- } else if (access_info.IsGeneric()) {
- // We do not handle generic calls in try blocks.
- if (is_exceptional) return NoChange();
- // We only handle the generic store IC case.
- if (!vector->IsStoreIC(slot)) {
- return NoChange();
- }
}
}
@@ -643,7 +637,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
receiver, value, context, frame_state, effect, control, name,
- access_info, access_mode, language_mode, vector, slot);
+ access_info, access_mode, language_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -746,10 +740,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Generate the actual property access.
- ValueEffectControl continuation =
- BuildPropertyAccess(this_receiver, this_value, context, frame_state,
- this_effect, this_control, name, access_info,
- access_mode, language_mode, vector, slot);
+ ValueEffectControl continuation = BuildPropertyAccess(
+ this_receiver, this_value, context, frame_state, this_effect,
+ this_control, name, access_info, access_mode, language_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -790,19 +783,16 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
Node* const receiver = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
- if (flags() & kDeoptimizationEnabled) {
- // Check if we are accessing the current native contexts' global proxy.
- HeapObjectMatcher m(receiver);
- if (m.HasValue() && m.Value().is_identical_to(global_proxy())) {
- // Optimize accesses to the current native contexts' global proxy.
- return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
- }
+ // Check if we are accessing the current native contexts' global proxy.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue() && m.Value().is_identical_to(global_proxy())) {
+ // Optimize accesses to the current native contexts' global proxy.
+ return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
}
// Check if the {nexus} reports type feedback for the IC.
if (nexus.IsUninitialized()) {
- if ((flags() & kDeoptimizationEnabled) &&
- (flags() & kBailoutOnUninitialized)) {
+ if (flags() & kBailoutOnUninitialized) {
return ReduceSoftDeoptimize(
node,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
@@ -815,8 +805,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
return NoChange();
} else if (receiver_maps.length() == 0) {
- if ((flags() & kDeoptimizationEnabled) &&
- (flags() & kBailoutOnUninitialized)) {
+ if (flags() & kBailoutOnUninitialized) {
return ReduceSoftDeoptimize(
node,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
@@ -826,7 +815,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
- language_mode, nexus.vector_handle(), nexus.slot());
+ language_mode);
}
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
@@ -847,14 +836,12 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
// {function} in order to be notified about changes to the
// "prototype" of {function}, so it doesn't make sense to
// continue unless deoptimization is enabled.
- if (flags() & kDeoptimizationEnabled) {
- Handle<Map> initial_map(function->initial_map(), isolate());
- dependencies()->AssumeInitialMapCantChange(initial_map);
- Handle<Object> prototype(initial_map->prototype(), isolate());
- Node* value = jsgraph()->Constant(prototype);
- ReplaceWithValue(node, value);
- return Replace(value);
- }
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ Handle<Object> prototype(function->prototype(), isolate());
+ Node* value = jsgraph()->Constant(prototype);
+ ReplaceWithValue(node, value);
+ return Replace(value);
}
} else if (m.Value()->IsString() &&
p.name().is_identical_to(factory()->length_string())) {
@@ -915,9 +902,6 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- // Not much we can do if deoptimization support is disabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
// Check for keyed access to strings.
if (HasOnlyStringMaps(receiver_maps)) {
// Strings are immutable in JavaScript.
@@ -1163,41 +1147,40 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
if (mreceiver.HasValue() && mreceiver.Value()->IsString()) {
Handle<String> string = Handle<String>::cast(mreceiver.Value());
+ // Strings are immutable in JavaScript.
+ if (access_mode == AccessMode::kStore) return NoChange();
+
+ // Properly deal with constant {index}.
+ NumberMatcher mindex(index);
+ if (mindex.IsInteger() && mindex.IsInRange(0.0, string->length() - 1)) {
+ // Constant-fold the {index} access to {string}.
+ Node* value = jsgraph()->HeapConstant(
+ factory()->LookupSingleCharacterStringFromCode(
+ string->Get(static_cast<int>(mindex.Value()))));
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
// We can only assume that the {index} is a valid array index if the IC
// is in element access mode and not MEGAMORPHIC, otherwise there's no
// guard for the bounds check below.
if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
- // Strings are immutable in JavaScript.
- if (access_mode == AccessMode::kStore) return NoChange();
-
- // Properly deal with constant {index}.
- NumberMatcher mindex(index);
- if (mindex.IsInteger() && mindex.IsInRange(0.0, string->length() - 1)) {
- // Constant-fold the {index} access to {string}.
- Node* value = jsgraph()->HeapConstant(
- factory()->LookupSingleCharacterStringFromCode(
- string->Get(static_cast<int>(mindex.Value()))));
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- } else if (flags() & kDeoptimizationEnabled) {
- // Ensure that {index} is less than {receiver} length.
- Node* length = jsgraph()->Constant(string->length());
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
-
- // Return the character from the {receiver} as single character string.
- value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
- control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
+ // Ensure that {index} is less than {receiver} length.
+ Node* length = jsgraph()->Constant(string->length());
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ length, effect, control);
+
+ // Return the character from the {receiver} as single character string.
+ value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
+ control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
}
// Check if the {nexus} reports type feedback for the IC.
if (nexus.IsUninitialized()) {
- if ((flags() & kDeoptimizationEnabled) &&
- (flags() & kBailoutOnUninitialized)) {
+ if (flags() & kBailoutOnUninitialized) {
return ReduceSoftDeoptimize(
node,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
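The reordering above hoists the store rejection and the constant-index fold out of the feedback-dependent branch: a known string receiver with a known in-range index folds to a single-character constant regardless of IC state, while only the bounds-checked StringCharAt path still requires ELEMENT feedback. A rough standalone analogue:

#include <iostream>
#include <optional>
#include <string>

// Constant-folds s[i] when receiver and index are both known and in range;
// otherwise signals that a bounds-checked runtime load must be emitted.
std::optional<std::string> FoldStringIndex(const std::string& s, double index) {
  if (index == static_cast<int>(index) && index >= 0 && index < s.size()) {
    return std::string(1, s[static_cast<int>(index)]);
  }
  return std::nullopt;  // fall back to CheckBounds + StringCharAt
}

int main() {
  std::cout << FoldStringIndex("abc", 1).value_or("<runtime load>") << "\n";
  std::cout << FoldStringIndex("abc", 7).value_or("<runtime load>") << "\n";
}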
@@ -1210,8 +1193,7 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
return NoChange();
} else if (receiver_maps.length() == 0) {
- if ((flags() & kDeoptimizationEnabled) &&
- (flags() & kBailoutOnUninitialized)) {
+ if (flags() & kBailoutOnUninitialized) {
return ReduceSoftDeoptimize(
node,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
@@ -1235,17 +1217,16 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
} else {
name = factory()->InternalizeName(name);
return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
- language_mode, nexus.vector_handle(),
- nexus.slot());
+ language_mode);
}
}
}
// Check if we have feedback for a named access.
if (Name* name = nexus.FindFirstName()) {
- return ReduceNamedAccess(
- node, value, receiver_maps, handle(name, isolate()), access_mode,
- language_mode, nexus.vector_handle(), nexus.slot(), index);
+ return ReduceNamedAccess(node, value, receiver_maps,
+ handle(name, isolate()), access_mode,
+ language_mode, index);
} else if (nexus.GetKeyType() != ELEMENT) {
// The KeyedLoad/StoreIC has seen non-element accesses, so we cannot assume
// that the {index} is a valid array index, thus we just let the IC continue
@@ -1319,8 +1300,7 @@ JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyAccess(
Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
Node* control, Handle<Name> name, PropertyAccessInfo const& access_info,
- AccessMode access_mode, LanguageMode language_mode,
- Handle<FeedbackVector> vector, FeedbackSlot slot) {
+ AccessMode access_mode, LanguageMode language_mode) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
@@ -1333,6 +1313,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
DCHECK_EQ(AccessMode::kLoad, access_mode);
value = jsgraph()->UndefinedConstant();
} else if (access_info.IsDataConstant()) {
+ DCHECK(!FLAG_track_constant_fields);
Node* constant_value = jsgraph()->Constant(access_info.constant());
if (access_mode == AccessMode::kStore) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
@@ -1365,11 +1346,10 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Introduce the call to the getter function.
if (access_info.constant()->IsJSFunction()) {
- value = effect = graph()->NewNode(
+ value = effect = control = graph()->NewNode(
javascript()->Call(2, 0.0f, VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state0, effect, control);
- control = graph()->NewNode(common()->IfSuccess(), value);
} else {
DCHECK(access_info.constant()->IsFunctionTemplateInfo());
Handle<FunctionTemplateInfo> function_template_info(
@@ -1402,11 +1382,10 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Introduce the call to the setter function.
if (access_info.constant()->IsJSFunction()) {
- effect = graph()->NewNode(
+ effect = control = graph()->NewNode(
javascript()->Call(3, 0.0f, VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, value, context, frame_state0, effect, control);
- control = graph()->NewNode(common()->IfSuccess(), effect);
} else {
DCHECK(access_info.constant()->IsFunctionTemplateInfo());
Handle<FunctionTemplateInfo> function_template_info(
@@ -1422,7 +1401,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
break;
}
}
- } else if (access_info.IsDataField() || access_info.IsDataConstantField()) {
+ } else {
+ DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
FieldIndex const field_index = access_info.field_index();
Type* const field_type = access_info.field_type();
MachineRepresentation const field_representation =
@@ -1531,7 +1511,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
common()->BeginRegion(RegionObservability::kNotObservable),
effect);
Node* box = effect = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
+ simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
jsgraph()->Constant(HeapNumber::kSize), effect, control);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForMap()), box,
@@ -1634,44 +1614,44 @@ JSNativeContextSpecialization::BuildPropertyAccess(
UNREACHABLE();
break;
}
+ // Check if we need to perform a transitioning store.
Handle<Map> transition_map;
if (access_info.transition_map().ToHandle(&transition_map)) {
+ // Check if we need to grow the properties backing store
+ // with this transitioning store.
+ Handle<Map> original_map(Map::cast(transition_map->GetBackPointer()),
+ isolate());
+ if (original_map->unused_property_fields() == 0) {
+ DCHECK(!field_index.is_inobject());
+
+ // Reallocate the properties {storage}.
+ storage = effect = BuildExtendPropertiesBackingStore(
+ original_map, storage, effect, control);
+
+ // Perform the actual store.
+ effect = graph()->NewNode(simplified()->StoreField(field_access),
+ storage, value, effect, control);
+
+ // Atomically switch to the new properties below.
+ field_access = AccessBuilder::ForJSObjectProperties();
+ value = storage;
+ storage = receiver;
+ }
effect = graph()->NewNode(
common()->BeginRegion(RegionObservability::kObservable), effect);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForMap()), receiver,
jsgraph()->Constant(transition_map), effect, control);
- }
- effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
- value, effect, control);
- if (access_info.HasTransitionMap()) {
+ effect = graph()->NewNode(simplified()->StoreField(field_access),
+ storage, value, effect, control);
effect = graph()->NewNode(common()->FinishRegion(),
jsgraph()->UndefinedConstant(), effect);
+ } else {
+ // Regular non-transitioning field store.
+ effect = graph()->NewNode(simplified()->StoreField(field_access),
+ storage, value, effect, control);
}
}
- } else {
- DCHECK(access_info.IsGeneric());
- DCHECK_EQ(AccessMode::kStore, access_mode);
- DCHECK(vector->IsStoreIC(slot));
- DCHECK_EQ(vector->GetLanguageMode(slot), language_mode);
- Callable callable =
- CodeFactory::StoreICInOptimizedCode(isolate(), language_mode);
- const CallInterfaceDescriptor& descriptor = callable.descriptor();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), descriptor,
- descriptor.GetStackParameterCount(), CallDescriptor::kNeedsFrameState,
- Operator::kNoProperties);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* name_node = jsgraph()->HeapConstant(name);
- Node* slot_node = jsgraph()->Constant(vector->GetIndex(slot));
- Node* vector_node = jsgraph()->HeapConstant(vector);
-
- Node* inputs[] = {stub_code, receiver, name_node, value, slot_node,
- vector_node, context, frame_state, effect, control};
-
- value = effect = control =
- graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
- control = graph()->NewNode(common()->IfSuccess(), control);
}
return ValueEffectControl(value, effect, control);
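The rewritten transitioning-store path above makes the ordering explicit: when the backing store must grow, the value is written into the freshly allocated array first, and the map change plus the properties-pointer switch are then published together inside one observable region. A trace of the emitted effect chain (operator names follow the diff; the program only prints the sequence):

#include <iostream>

int main() {
  bool needs_grow = true;  // transition target has no unused property fields
  if (needs_grow) {
    std::cout << "storage = ExtendPropertiesBackingStore(old)\n";
    std::cout << "StoreField(storage[field], value)  // into the fresh array\n";
    // The grown array is published by storing it as the receiver's
    // properties pointer inside the observable region below.
  }
  std::cout << "BeginRegion(kObservable)\n";
  std::cout << "StoreField(receiver.map, transition_map)\n";
  std::cout << (needs_grow ? "StoreField(receiver.properties, storage)\n"
                           : "StoreField(storage[field], value)\n");
  std::cout << "FinishRegion\n";
}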
@@ -1681,10 +1661,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreDataPropertyInLiteral, node->opcode());
- // If deoptimization is disabled, we cannot optimize.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
- DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
if (!p.feedback().IsValid()) return NoChange();
@@ -1719,10 +1696,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
return NoChange();
}
- if (access_info.IsGeneric()) {
- return NoChange();
- }
-
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1746,8 +1719,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
receiver, value, context, frame_state_lazy, effect, control, cached_name,
- access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY,
- p.feedback().vector(), p.feedback().slot());
+ access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -1990,10 +1962,12 @@ JSNativeContextSpecialization::BuildElementAccess(
if (access_mode == AccessMode::kLoad) {
// Compute the real element access type, which includes the hole in case
// of holey backing stores.
- if (elements_kind == FAST_HOLEY_ELEMENTS ||
- elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ if (IsHoleyElementsKind(elements_kind)) {
element_access.type =
Type::Union(element_type, Type::Hole(), graph()->zone());
+ }
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
element_access.machine_type = MachineType::AnyTagged();
}
// Perform the actual backing store access.
@@ -2117,10 +2091,10 @@ JSNativeContextSpecialization::InlineApiCall(
inputs[6] = value;
}
+ Node* control0;
Node* effect0;
- Node* value0 = effect0 =
+ Node* value0 = effect0 = control0 =
graph()->NewNode(common()->Call(call_descriptor), index, inputs);
- Node* control0 = graph()->NewNode(common()->IfSuccess(), value0);
return ValueEffectControl(value0, effect0, control0);
}
@@ -2178,6 +2152,45 @@ Node* JSNativeContextSpecialization::BuildCheckMaps(
effect, control);
}
+Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
+ Handle<Map> map, Node* properties, Node* effect, Node* control) {
+ DCHECK_EQ(0, map->unused_property_fields());
+ // Compute the length of the old {properties} and the new properties.
+ int length = map->NextFreePropertyIndex() - map->GetInObjectProperties();
+ int new_length = length + JSObject::kFieldsAdded;
+ // Collect the field values from the {properties}.
+ ZoneVector<Node*> values(zone());
+ values.reserve(new_length);
+ for (int i = 0; i < length; ++i) {
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArraySlot(i)),
+ properties, effect, control);
+ values.push_back(value);
+ }
+ // Initialize the new fields to undefined.
+ for (int i = 0; i < JSObject::kFieldsAdded; ++i) {
+ values.push_back(jsgraph()->UndefinedConstant());
+ }
+ // Allocate and initialize the new properties.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* new_properties = effect = graph()->NewNode(
+ simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
+ jsgraph()->Constant(FixedArray::SizeFor(new_length)), effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ new_properties, jsgraph()->FixedArrayMapConstant(),
+ effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArrayLength()),
+ new_properties, jsgraph()->Constant(new_length), effect, control);
+ for (int i = 0; i < new_length; ++i) {
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)),
+ new_properties, values[i], effect, control);
+ }
+ return graph()->NewNode(common()->FinishRegion(), new_properties, effect);
+}
+
void JSNativeContextSpecialization::AssumePrototypesStable(
std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
// Determine actual holder and perform prototype chain checks.
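The new BuildExtendPropertiesBackingStore above grows a full properties array by a fixed increment: copy the existing fields, pad the new slots with undefined, then allocate and initialize the larger array. A plain-C++ sketch of that shape (std::vector in place of FixedArray, no maps or effect chains; kFieldsAdded is assumed to be 3 here):

#include <iostream>
#include <string>
#include <vector>

constexpr int kFieldsAdded = 3;  // assumed stand-in for JSObject::kFieldsAdded

std::vector<std::string> ExtendBackingStore(const std::vector<std::string>& old) {
  std::vector<std::string> grown;
  grown.reserve(old.size() + kFieldsAdded);
  // Collect the existing field values.
  grown.insert(grown.end(), old.begin(), old.end());
  // Initialize the new fields to undefined.
  for (int i = 0; i < kFieldsAdded; ++i) grown.push_back("undefined");
  return grown;
}

int main() {
  std::vector<std::string> props = {"a", "b"};
  for (const std::string& p : ExtendBackingStore(props)) std::cout << p << " ";
  std::cout << "\n";  // a b undefined undefined undefined
}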
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 249c52d4e3..cd1b3349ad 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -40,8 +40,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
enum Flag {
kNoFlags = 0u,
kAccessorInliningEnabled = 1u << 0,
- kBailoutOnUninitialized = 1u << 1,
- kDeoptimizationEnabled = 1u << 2,
+ kBailoutOnUninitialized = 1u << 1
};
typedef base::Flags<Flag> Flags;
@@ -86,7 +85,6 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode,
LanguageMode language_mode,
- Handle<FeedbackVector> vector, FeedbackSlot slot,
Node* index = nullptr);
Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
Handle<Name> name, AccessMode access_mode,
@@ -111,12 +109,13 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
};
// Construct the appropriate subgraph for property access.
- ValueEffectControl BuildPropertyAccess(
- Node* receiver, Node* value, Node* context, Node* frame_state,
- Node* effect, Node* control, Handle<Name> name,
- PropertyAccessInfo const& access_info, AccessMode access_mode,
- LanguageMode language_mode, Handle<FeedbackVector> vector,
- FeedbackSlot slot);
+ ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
+ Node* context, Node* frame_state,
+ Node* effect, Node* control,
+ Handle<Name> name,
+ PropertyAccessInfo const& access_info,
+ AccessMode access_mode,
+ LanguageMode language_mode);
// Construct the appropriate subgraph for element access.
ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
@@ -133,6 +132,10 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
std::vector<Handle<Map>> const& maps);
+ // Construct appropriate subgraph to extend properties backing store.
+ Node* BuildExtendPropertiesBackingStore(Handle<Map> map, Node* properties,
+ Node* effect, Node* control);
+
// Adds stability dependencies on all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
void AssumePrototypesStable(std::vector<Handle<Map>> const& receiver_maps,
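The js-operator.cc diff that follows mostly renames and adds operator-parameter classes, each implementing the same small protocol: equality, hash_value, and stream printing. A compact standalone stand-in modeled on the new GeneratorStoreParameters (register count plus suspend type); the hash mixing is a simplification of base::hash_combine:

#include <cstddef>
#include <functional>
#include <iostream>

enum class SuspendType { kYield, kAwait };

struct GeneratorStoreParameters {
  int register_count;
  SuspendType suspend_type;
};

bool operator==(const GeneratorStoreParameters& a,
                const GeneratorStoreParameters& b) {
  return a.register_count == b.register_count &&
         a.suspend_type == b.suspend_type;
}

std::size_t hash_value(const GeneratorStoreParameters& p) {
  // Simplified stand-in for base::hash_combine over both fields.
  return std::hash<int>()(p.register_count) * 31 +
         static_cast<std::size_t>(p.suspend_type);
}

std::ostream& operator<<(std::ostream& os, const GeneratorStoreParameters& p) {
  return os << p.register_count
            << (p.suspend_type == SuspendType::kAwait ? " (await)" : " (yield)");
}

int main() {
  GeneratorStoreParameters p{2, SuspendType::kAwait};
  std::cout << p << ", hash=" << hash_value(p) << "\n";
}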
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index a8f5692d54..74156b086d 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -76,29 +76,28 @@ ConstructParameters const& ConstructParametersOf(Operator const* op) {
return OpParameter<ConstructParameters>(op);
}
-bool operator==(ConstructWithSpreadParameters const& lhs,
- ConstructWithSpreadParameters const& rhs) {
+bool operator==(SpreadWithArityParameter const& lhs,
+ SpreadWithArityParameter const& rhs) {
return lhs.arity() == rhs.arity();
}
-bool operator!=(ConstructWithSpreadParameters const& lhs,
- ConstructWithSpreadParameters const& rhs) {
+bool operator!=(SpreadWithArityParameter const& lhs,
+ SpreadWithArityParameter const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(ConstructWithSpreadParameters const& p) {
+size_t hash_value(SpreadWithArityParameter const& p) {
return base::hash_combine(p.arity());
}
-std::ostream& operator<<(std::ostream& os,
- ConstructWithSpreadParameters const& p) {
+std::ostream& operator<<(std::ostream& os, SpreadWithArityParameter const& p) {
return os << p.arity();
}
-ConstructWithSpreadParameters const& ConstructWithSpreadParametersOf(
- Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSConstructWithSpread, op->opcode());
- return OpParameter<ConstructWithSpreadParameters>(op);
+SpreadWithArityParameter const& SpreadWithArityParameterOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSConstructWithSpread ||
+ op->opcode() == IrOpcode::kJSCallWithSpread);
+ return OpParameter<SpreadWithArityParameter>(op);
}
std::ostream& operator<<(std::ostream& os, CallParameters const& p) {
@@ -123,28 +122,6 @@ CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
return OpParameter<CallForwardVarargsParameters>(op);
}
-bool operator==(CallWithSpreadParameters const& lhs,
- CallWithSpreadParameters const& rhs) {
- return lhs.arity() == rhs.arity();
-}
-
-bool operator!=(CallWithSpreadParameters const& lhs,
- CallWithSpreadParameters const& rhs) {
- return !(lhs == rhs);
-}
-
-size_t hash_value(CallWithSpreadParameters const& p) {
- return base::hash_combine(p.arity());
-}
-
-std::ostream& operator<<(std::ostream& os, CallWithSpreadParameters const& p) {
- return os << p.arity();
-}
-
-CallWithSpreadParameters const& CallWithSpreadParametersOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSCallWithSpread, op->opcode());
- return OpParameter<CallWithSpreadParameters>(op);
-}
bool operator==(CallRuntimeParameters const& lhs,
CallRuntimeParameters const& rhs) {
@@ -298,27 +275,25 @@ StoreNamedOwnParameters const& StoreNamedOwnParametersOf(const Operator* op) {
return OpParameter<StoreNamedOwnParameters>(op);
}
-bool operator==(DataPropertyParameters const& lhs,
- DataPropertyParameters const& rhs) {
+bool operator==(FeedbackParameter const& lhs, FeedbackParameter const& rhs) {
return lhs.feedback() == rhs.feedback();
}
-bool operator!=(DataPropertyParameters const& lhs,
- DataPropertyParameters const& rhs) {
+bool operator!=(FeedbackParameter const& lhs, FeedbackParameter const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(DataPropertyParameters const& p) {
+size_t hash_value(FeedbackParameter const& p) {
return base::hash_combine(p.feedback());
}
-std::ostream& operator<<(std::ostream& os, DataPropertyParameters const& p) {
+std::ostream& operator<<(std::ostream& os, FeedbackParameter const& p) {
return os;
}
-DataPropertyParameters const& DataPropertyParametersOf(const Operator* op) {
+FeedbackParameter const& FeedbackParameterOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
- return OpParameter<DataPropertyParameters>(op);
+ return OpParameter<FeedbackParameter>(op);
}
bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
@@ -541,6 +516,31 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
return OpParameter<CreateLiteralParameters>(op);
}
+bool operator==(GeneratorStoreParameters const& lhs,
+ GeneratorStoreParameters const& rhs) {
+ return lhs.register_count() == rhs.register_count() &&
+ lhs.suspend_type() == rhs.suspend_type();
+}
+bool operator!=(GeneratorStoreParameters const& lhs,
+ GeneratorStoreParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(GeneratorStoreParameters const& p) {
+ return base::hash_combine(p.register_count(),
+ static_cast<int>(p.suspend_type()));
+}
+
+std::ostream& operator<<(std::ostream& os, GeneratorStoreParameters const& p) {
+ const char* suspend_type = SuspendTypeFor(p.suspend_type());
+ return os << p.register_count() << " (" << suspend_type << ")";
+}
+
+const GeneratorStoreParameters& GeneratorStoreParametersOf(const Operator* op) {
+ DCHECK_EQ(op->opcode(), IrOpcode::kJSGeneratorStore);
+ return OpParameter<GeneratorStoreParameters>(op);
+}
+
BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSAdd, op->opcode());
return OpParameter<BinaryOperationHint>(op);
@@ -548,9 +548,7 @@ BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
CompareOperationHint CompareOperationHintOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSEqual ||
- op->opcode() == IrOpcode::kJSNotEqual ||
op->opcode() == IrOpcode::kJSStrictEqual ||
- op->opcode() == IrOpcode::kJSStrictNotEqual ||
op->opcode() == IrOpcode::kJSLessThan ||
op->opcode() == IrOpcode::kJSGreaterThan ||
op->opcode() == IrOpcode::kJSLessThanOrEqual ||
@@ -596,9 +594,7 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
#define COMPARE_OP_LIST(V) \
V(Equal, Operator::kNoProperties) \
- V(NotEqual, Operator::kNoProperties) \
V(StrictEqual, Operator::kPure) \
- V(StrictNotEqual, Operator::kPure) \
V(LessThan, Operator::kNoProperties) \
V(GreaterThan, Operator::kNoProperties) \
V(LessThanOrEqual, Operator::kNoProperties) \
@@ -723,8 +719,8 @@ COMPARE_OP_LIST(COMPARE_OP)
const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
const VectorSlotPair& feedback) {
- DataPropertyParameters parameters(feedback);
- return new (zone()) Operator1<DataPropertyParameters>( // --
+ FeedbackParameter parameters(feedback);
+ return new (zone()) Operator1<FeedbackParameter>( // --
IrOpcode::kJSStoreDataPropertyInLiteral,
Operator::kNoThrow, // opcode
"JSStoreDataPropertyInLiteral", // name
@@ -765,8 +761,8 @@ const Operator* JSOperatorBuilder::Call(size_t arity, float frequency,
}
const Operator* JSOperatorBuilder::CallWithSpread(uint32_t arity) {
- CallWithSpreadParameters parameters(arity);
- return new (zone()) Operator1<CallWithSpreadParameters>( // --
+ SpreadWithArityParameter parameters(arity);
+ return new (zone()) Operator1<SpreadWithArityParameter>( // --
IrOpcode::kJSCallWithSpread, Operator::kNoProperties, // opcode
"JSCallWithSpread", // name
parameters.arity(), 1, 1, 1, 1, 2, // counts
@@ -808,8 +804,8 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity, float frequency,
}
const Operator* JSOperatorBuilder::ConstructWithSpread(uint32_t arity) {
- ConstructWithSpreadParameters parameters(arity);
- return new (zone()) Operator1<ConstructWithSpreadParameters>( // --
+ SpreadWithArityParameter parameters(arity);
+ return new (zone()) Operator1<SpreadWithArityParameter>( // --
IrOpcode::kJSConstructWithSpread, Operator::kNoProperties, // opcode
"JSConstructWithSpread", // name
parameters.arity(), 1, 1, 1, 1, 2, // counts
@@ -845,12 +841,14 @@ const Operator* JSOperatorBuilder::LoadProperty(
access); // parameter
}
-const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
- return new (zone()) Operator1<int>( // --
- IrOpcode::kJSGeneratorStore, Operator::kNoThrow, // opcode
- "JSGeneratorStore", // name
- 3 + register_count, 1, 1, 0, 1, 0, // counts
- register_count); // parameter
+const Operator* JSOperatorBuilder::GeneratorStore(int register_count,
+ SuspendFlags suspend_flags) {
+ GeneratorStoreParameters parameters(register_count, suspend_flags);
+ return new (zone()) Operator1<GeneratorStoreParameters>( // --
+ IrOpcode::kJSGeneratorStore, Operator::kNoThrow, // opcode
+ "JSGeneratorStore", // name
+ 3 + register_count, 1, 1, 0, 1, 0, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::GeneratorRestoreRegister(int index) {
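
With CallWithSpreadParameters and ConstructWithSpreadParameters folded into a single SpreadWithArityParameter, one accessor now serves both spread opcodes. A simplified standalone sketch of that dispatch (stand-in enum and structs, not the real compiler types):

#include <cassert>
#include <cstdint>

enum class IrOpcode { kJSCallWithSpread, kJSConstructWithSpread, kOther };

class SpreadWithArityParameter {
 public:
  explicit SpreadWithArityParameter(uint32_t arity) : arity_(arity) {}
  uint32_t arity() const { return arity_; }
 private:
  uint32_t arity_;
};

struct Operator {
  IrOpcode opcode;
  SpreadWithArityParameter parameter;
};

// One accessor covers both spread operators, mirroring the relaxed DCHECK
// in SpreadWithArityParameterOf above.
const SpreadWithArityParameter& SpreadWithArityParameterOf(const Operator& op) {
  assert(op.opcode == IrOpcode::kJSCallWithSpread ||
         op.opcode == IrOpcode::kJSConstructWithSpread);
  return op.parameter;
}

int main() {
  Operator op{IrOpcode::kJSCallWithSpread, SpreadWithArityParameter(3)};
  return SpreadWithArityParameterOf(op).arity() == 3 ? 0 : 1;
}
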
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 730b4b9551..d7b0dfab9b 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -87,12 +87,12 @@ std::ostream& operator<<(std::ostream&, ConstructParameters const&);
ConstructParameters const& ConstructParametersOf(Operator const*);
-// Defines the arity for a JavaScript constructor call with a spread as the last
-// parameters. This is used as a parameter by JSConstructWithSpread
-// operators.
-class ConstructWithSpreadParameters final {
+// Defines the arity for JavaScript calls with a spread as the last
+// parameter. This is used as a parameter by JSConstructWithSpread and
+// JSCallWithSpread operators.
+class SpreadWithArityParameter final {
public:
- explicit ConstructWithSpreadParameters(uint32_t arity) : arity_(arity) {}
+ explicit SpreadWithArityParameter(uint32_t arity) : arity_(arity) {}
uint32_t arity() const { return arity_; }
@@ -100,17 +100,16 @@ class ConstructWithSpreadParameters final {
uint32_t const arity_;
};
-bool operator==(ConstructWithSpreadParameters const&,
- ConstructWithSpreadParameters const&);
-bool operator!=(ConstructWithSpreadParameters const&,
- ConstructWithSpreadParameters const&);
+bool operator==(SpreadWithArityParameter const&,
+ SpreadWithArityParameter const&);
+bool operator!=(SpreadWithArityParameter const&,
+ SpreadWithArityParameter const&);
-size_t hash_value(ConstructWithSpreadParameters const&);
+size_t hash_value(SpreadWithArityParameter const&);
-std::ostream& operator<<(std::ostream&, ConstructWithSpreadParameters const&);
+std::ostream& operator<<(std::ostream&, SpreadWithArityParameter const&);
-ConstructWithSpreadParameters const& ConstructWithSpreadParametersOf(
- Operator const*);
+SpreadWithArityParameter const& SpreadWithArityParameterOf(Operator const*);
// Defines the flags for a JavaScript call forwarding parameters. This
// is used as parameter by JSCallForwardVarargs operators.
@@ -198,29 +197,6 @@ std::ostream& operator<<(std::ostream&, CallParameters const&);
const CallParameters& CallParametersOf(const Operator* op);
-// Defines the arity for a JavaScript constructor call with a spread as the last
-// parameters. This is used as a parameter by JSConstructWithSpread
-// operators.
-class CallWithSpreadParameters final {
- public:
- explicit CallWithSpreadParameters(uint32_t arity) : arity_(arity) {}
-
- uint32_t arity() const { return arity_; }
-
- private:
- uint32_t const arity_;
-};
-
-bool operator==(CallWithSpreadParameters const&,
- CallWithSpreadParameters const&);
-bool operator!=(CallWithSpreadParameters const&,
- CallWithSpreadParameters const&);
-
-size_t hash_value(CallWithSpreadParameters const&);
-
-std::ostream& operator<<(std::ostream&, CallWithSpreadParameters const&);
-
-CallWithSpreadParameters const& CallWithSpreadParametersOf(Operator const*);
// Defines the arity and the ID for a runtime function call. This is used as a
// parameter by JSCallRuntime operators.
@@ -356,9 +332,9 @@ const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op);
// Defines the feedback, i.e., vector and index, for storing a data property in
// an object literal. This is
// used as a parameter by the JSStoreDataPropertyInLiteral operator.
-class DataPropertyParameters final {
+class FeedbackParameter final {
public:
- explicit DataPropertyParameters(VectorSlotPair const& feedback)
+ explicit FeedbackParameter(VectorSlotPair const& feedback)
: feedback_(feedback) {}
VectorSlotPair const& feedback() const { return feedback_; }
@@ -367,14 +343,14 @@ class DataPropertyParameters final {
VectorSlotPair const feedback_;
};
-bool operator==(DataPropertyParameters const&, DataPropertyParameters const&);
-bool operator!=(DataPropertyParameters const&, DataPropertyParameters const&);
+bool operator==(FeedbackParameter const&, FeedbackParameter const&);
+bool operator!=(FeedbackParameter const&, FeedbackParameter const&);
-size_t hash_value(DataPropertyParameters const&);
+size_t hash_value(FeedbackParameter const&);
-std::ostream& operator<<(std::ostream&, DataPropertyParameters const&);
+std::ostream& operator<<(std::ostream&, FeedbackParameter const&);
-const DataPropertyParameters& DataPropertyParametersOf(const Operator* op);
+const FeedbackParameter& FeedbackParameterOf(const Operator* op);
// Defines the property of an object for a named access. This is
// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
@@ -574,6 +550,33 @@ std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
+class GeneratorStoreParameters final {
+ public:
+ GeneratorStoreParameters(int register_count, SuspendFlags flags)
+ : register_count_(register_count), suspend_flags_(flags) {}
+
+ int register_count() const { return register_count_; }
+ SuspendFlags suspend_flags() const { return suspend_flags_; }
+ SuspendFlags suspend_type() const {
+ return suspend_flags_ & SuspendFlags::kSuspendTypeMask;
+ }
+
+ private:
+ int register_count_;
+ SuspendFlags suspend_flags_;
+};
+
+bool operator==(GeneratorStoreParameters const&,
+ GeneratorStoreParameters const&);
+bool operator!=(GeneratorStoreParameters const&,
+ GeneratorStoreParameters const&);
+
+size_t hash_value(GeneratorStoreParameters const&);
+
+std::ostream& operator<<(std::ostream&, GeneratorStoreParameters const&);
+
+const GeneratorStoreParameters& GeneratorStoreParametersOf(const Operator* op);
+
BinaryOperationHint BinaryOperationHintOf(const Operator* op);
CompareOperationHint CompareOperationHintOf(const Operator* op);
@@ -587,9 +590,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
explicit JSOperatorBuilder(Zone* zone);
const Operator* Equal(CompareOperationHint hint);
- const Operator* NotEqual(CompareOperationHint hint);
const Operator* StrictEqual(CompareOperationHint hint);
- const Operator* StrictNotEqual(CompareOperationHint hint);
const Operator* LessThan(CompareOperationHint hint);
const Operator* GreaterThan(CompareOperationHint hint);
const Operator* LessThanOrEqual(CompareOperationHint hint);
@@ -692,7 +693,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StoreMessage();
// Used to implement Ignition's SuspendGenerator bytecode.
- const Operator* GeneratorStore(int register_count);
+ const Operator* GeneratorStore(int register_count,
+ SuspendFlags suspend_flags);
// Used to implement Ignition's ResumeGenerator bytecode.
const Operator* GeneratorRestoreContinuation();
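
GeneratorStoreParameters pairs the register count with SuspendFlags, and suspend_type() masks out just the suspend-type bits. A hedged sketch of the masking (the concrete bit values below are illustrative assumptions only; the real constants live with the SuspendFlags definition):

#include <cstdint>

// Illustrative bit layout: low bits encode the suspend type, a higher bit
// marks the async-generator flavour.
enum class SuspendFlags : uint32_t {
  kYield = 0,
  kAwait = 1,
  kSuspendTypeMask = 0x3,
  kAsyncGeneratorAwait = 0x5,  // kAwait | an async-generator bit
};

constexpr SuspendFlags operator&(SuspendFlags lhs, SuspendFlags rhs) {
  return static_cast<SuspendFlags>(static_cast<uint32_t>(lhs) &
                                   static_cast<uint32_t>(rhs));
}

struct GeneratorStoreParameters {
  int register_count;
  SuspendFlags suspend_flags;
  SuspendFlags suspend_type() const {
    return suspend_flags & SuspendFlags::kSuspendTypeMask;
  }
};

int main() {
  GeneratorStoreParameters p{2, SuspendFlags::kAsyncGeneratorAwait};
  // Only the type bits survive the mask, so the async-generator await is
  // still an await as far as suspend_type() is concerned.
  return p.suspend_type() == SuspendFlags::kAwait ? 0 : 1;
}
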
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index e30e016c79..7c70b1ea11 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -14,11 +14,35 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
+bool BinaryOperationHintToNumberOperationHint(
+ BinaryOperationHint binop_hint, NumberOperationHint* number_hint) {
+ switch (binop_hint) {
+ case BinaryOperationHint::kSignedSmall:
+ *number_hint = NumberOperationHint::kSignedSmall;
+ return true;
+ case BinaryOperationHint::kSigned32:
+ *number_hint = NumberOperationHint::kSigned32;
+ return true;
+ case BinaryOperationHint::kNumberOrOddball:
+ *number_hint = NumberOperationHint::kNumberOrOddball;
+ return true;
+ case BinaryOperationHint::kAny:
+ case BinaryOperationHint::kNone:
+ case BinaryOperationHint::kString:
+ break;
+ }
+ return false;
+}
+
+} // namespace
+
class JSSpeculativeBinopBuilder final {
public:
- JSSpeculativeBinopBuilder(JSTypeHintLowering* lowering, const Operator* op,
- Node* left, Node* right, Node* effect,
- Node* control, FeedbackSlot slot)
+ JSSpeculativeBinopBuilder(const JSTypeHintLowering* lowering,
+ const Operator* op, Node* left, Node* right,
+ Node* effect, Node* control, FeedbackSlot slot)
: lowering_(lowering),
op_(op),
left_(left),
@@ -33,20 +57,33 @@ class JSSpeculativeBinopBuilder final {
return nexus.GetBinaryOperationFeedback();
}
+ CompareOperationHint GetCompareOperationHint() {
+ DCHECK_EQ(FeedbackSlotKind::kCompareOp, feedback_vector()->GetKind(slot_));
+ CompareICNexus nexus(feedback_vector(), slot_);
+ return nexus.GetCompareOperationFeedback();
+ }
+
bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
- switch (GetBinaryOperationHint()) {
- case BinaryOperationHint::kSignedSmall:
+ return BinaryOperationHintToNumberOperationHint(GetBinaryOperationHint(),
+ hint);
+ }
+
+ bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
+ switch (GetCompareOperationHint()) {
+ case CompareOperationHint::kSignedSmall:
*hint = NumberOperationHint::kSignedSmall;
return true;
- case BinaryOperationHint::kSigned32:
- *hint = NumberOperationHint::kSigned32;
+ case CompareOperationHint::kNumber:
+ *hint = NumberOperationHint::kNumber;
return true;
- case BinaryOperationHint::kNumberOrOddball:
+ case CompareOperationHint::kNumberOrOddball:
*hint = NumberOperationHint::kNumberOrOddball;
return true;
- case BinaryOperationHint::kAny:
- case BinaryOperationHint::kNone:
- case BinaryOperationHint::kString:
+ case CompareOperationHint::kAny:
+ case CompareOperationHint::kNone:
+ case CompareOperationHint::kString:
+ case CompareOperationHint::kReceiver:
+ case CompareOperationHint::kInternalizedString:
break;
}
return false;
@@ -83,7 +120,28 @@ class JSSpeculativeBinopBuilder final {
return nullptr;
}
- Node* BuildSpeculativeOperator(const Operator* op) {
+ const Operator* SpeculativeCompareOp(NumberOperationHint hint) {
+ switch (op_->opcode()) {
+ case IrOpcode::kJSEqual:
+ return simplified()->SpeculativeNumberEqual(hint);
+ case IrOpcode::kJSLessThan:
+ return simplified()->SpeculativeNumberLessThan(hint);
+ case IrOpcode::kJSGreaterThan:
+ std::swap(left_, right_); // a > b => b < a
+ return simplified()->SpeculativeNumberLessThan(hint);
+ case IrOpcode::kJSLessThanOrEqual:
+ return simplified()->SpeculativeNumberLessThanOrEqual(hint);
+ case IrOpcode::kJSGreaterThanOrEqual:
+ std::swap(left_, right_); // a >= b => b <= a
+ return simplified()->SpeculativeNumberLessThanOrEqual(hint);
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ Node* BuildSpeculativeOperation(const Operator* op) {
DCHECK_EQ(2, op->ValueInputCount());
DCHECK_EQ(1, op->EffectInputCount());
DCHECK_EQ(1, op->ControlInputCount());
@@ -94,6 +152,26 @@ class JSSpeculativeBinopBuilder final {
return graph()->NewNode(op, left_, right_, effect_, control_);
}
+ Node* TryBuildNumberBinop() {
+ NumberOperationHint hint;
+ if (GetBinaryNumberOperationHint(&hint)) {
+ const Operator* op = SpeculativeNumberOp(hint);
+ Node* node = BuildSpeculativeOperation(op);
+ return node;
+ }
+ return nullptr;
+ }
+
+ Node* TryBuildNumberCompare() {
+ NumberOperationHint hint;
+ if (GetCompareNumberOperationHint(&hint)) {
+ const Operator* op = SpeculativeCompareOp(hint);
+ Node* node = BuildSpeculativeOperation(op);
+ return node;
+ }
+ return nullptr;
+ }
+
JSGraph* jsgraph() const { return lowering_->jsgraph(); }
Graph* graph() const { return jsgraph()->graph(); }
JSOperatorBuilder* javascript() { return jsgraph()->javascript(); }
@@ -104,7 +182,7 @@ class JSSpeculativeBinopBuilder final {
}
private:
- JSTypeHintLowering* lowering_;
+ const JSTypeHintLowering* lowering_;
const Operator* op_;
Node* left_;
Node* right_;
@@ -114,14 +192,28 @@ class JSSpeculativeBinopBuilder final {
};
JSTypeHintLowering::JSTypeHintLowering(JSGraph* jsgraph,
- Handle<FeedbackVector> feedback_vector)
- : jsgraph_(jsgraph), feedback_vector_(feedback_vector) {}
+ Handle<FeedbackVector> feedback_vector,
+ Flags flags)
+ : jsgraph_(jsgraph), flags_(flags), feedback_vector_(feedback_vector) {}
Reduction JSTypeHintLowering::ReduceBinaryOperation(const Operator* op,
Node* left, Node* right,
Node* effect, Node* control,
- FeedbackSlot slot) {
+ FeedbackSlot slot) const {
switch (op->opcode()) {
+ case IrOpcode::kJSStrictEqual:
+ break;
+ case IrOpcode::kJSEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kJSGreaterThanOrEqual: {
+ JSSpeculativeBinopBuilder b(this, op, left, right, effect, control, slot);
+ if (Node* node = b.TryBuildNumberCompare()) {
+ return Reduction(node);
+ }
+ break;
+ }
case IrOpcode::kJSBitwiseOr:
case IrOpcode::kJSBitwiseXor:
case IrOpcode::kJSBitwiseAnd:
@@ -134,9 +226,7 @@ Reduction JSTypeHintLowering::ReduceBinaryOperation(const Operator* op,
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus: {
JSSpeculativeBinopBuilder b(this, op, left, right, effect, control, slot);
- NumberOperationHint hint;
- if (b.GetBinaryNumberOperationHint(&hint)) {
- Node* node = b.BuildSpeculativeOperator(b.SpeculativeNumberOp(hint));
+ if (Node* node = b.TryBuildNumberBinop()) {
return Reduction(node);
}
break;
@@ -148,6 +238,93 @@ Reduction JSTypeHintLowering::ReduceBinaryOperation(const Operator* op,
return Reduction();
}
+Reduction JSTypeHintLowering::ReduceToNumberOperation(Node* input, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const {
+ DCHECK(!slot.IsInvalid());
+ BinaryOpICNexus nexus(feedback_vector(), slot);
+ NumberOperationHint hint;
+ if (BinaryOperationHintToNumberOperationHint(
+ nexus.GetBinaryOperationFeedback(), &hint)) {
+ Node* node = jsgraph()->graph()->NewNode(
+ jsgraph()->simplified()->SpeculativeToNumber(hint), input, effect,
+ control);
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
+Reduction JSTypeHintLowering::ReduceLoadNamedOperation(
+ const Operator* op, Node* obj, Node* effect, Node* control,
+ FeedbackSlot slot) const {
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
+ DCHECK(!slot.IsInvalid());
+ LoadICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
+Reduction JSTypeHintLowering::ReduceLoadKeyedOperation(
+ const Operator* op, Node* obj, Node* key, Node* effect, Node* control,
+ FeedbackSlot slot) const {
+ DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
+ DCHECK(!slot.IsInvalid());
+ KeyedLoadICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
+Reduction JSTypeHintLowering::ReduceStoreNamedOperation(
+ const Operator* op, Node* obj, Node* val, Node* effect, Node* control,
+ FeedbackSlot slot) const {
+ DCHECK(op->opcode() == IrOpcode::kJSStoreNamed ||
+ op->opcode() == IrOpcode::kJSStoreNamedOwn);
+ DCHECK(!slot.IsInvalid());
+ StoreICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
+Reduction JSTypeHintLowering::ReduceStoreKeyedOperation(
+ const Operator* op, Node* obj, Node* key, Node* val, Node* effect,
+ Node* control, FeedbackSlot slot) const {
+ DCHECK_EQ(IrOpcode::kJSStoreProperty, op->opcode());
+ DCHECK(!slot.IsInvalid());
+ KeyedStoreICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
+Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect,
+ Node* control,
+ DeoptimizeReason reason) const {
+ if ((flags() & kBailoutOnUninitialized) && nexus.IsUninitialized()) {
+ Node* deoptimize = jsgraph()->graph()->NewNode(
+ jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason),
+ jsgraph()->Dead(), effect, control);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(deoptimize);
+ deoptimize->ReplaceInput(0, frame_state);
+ return deoptimize;
+ }
+ return nullptr;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
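
All of the new load/store reductions funnel through TryBuildSoftDeopt: when the feedback slot has never been exercised and the lowering runs with kBailoutOnUninitialized, it prefers a soft deoptimization over emitting a generic access. A minimal standalone sketch of just that gate (simplified types; the real helper additionally builds the Deoptimize node and rewires its frame state):

#include <cstdint>

enum Flag : uint32_t { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 1 };

struct FeedbackNexus {
  bool uninitialized;
  bool IsUninitialized() const { return uninitialized; }
};

// Emit a soft deopt instead of a generic access only when the mode asks for
// it and the feedback slot has never been exercised.
bool ShouldSoftDeopt(uint32_t flags, const FeedbackNexus& nexus) {
  return (flags & kBailoutOnUninitialized) != 0 && nexus.IsUninitialized();
}

int main() {
  FeedbackNexus cold{true};
  return ShouldSoftDeopt(kBailoutOnUninitialized, cold) ? 0 : 1;
}

Because the Deoptimize node replaces the access entirely, the reduction still honours the header's invariant that the one returned node carries the whole effect.
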
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index d1dd1a86d6..7bd237814d 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -5,43 +5,87 @@
#ifndef V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
#define V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
+#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
+#include "src/deoptimize-reason.h"
#include "src/handles.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class FeedbackNexus;
+class FeedbackSlot;
+
namespace compiler {
// Forward declarations.
class JSGraph;
+class Node;
+class Operator;
-// The type-hint lowering consumes feedback about data operations (i.e. unary
-// and binary operations) to emit nodes using speculative simplified operators
-// in favor of the generic JavaScript operators.
+// The type-hint lowering consumes feedback about high-level operations in order
+// to potentially emit nodes using speculative simplified operators in favor of
+// the generic JavaScript operators.
//
// This lowering is implemented as an early reduction and can be applied before
// nodes are placed into the initial graph. It provides the ability to shortcut
// the JavaScript-level operators and directly emit simplified-level operators
// even during initial graph building. This is the reason this lowering doesn't
// follow the interface of the reducer framework used after graph construction.
+//
+// Also note that reductions returned by this lowering will never produce a
+// control output, but they might very well produce an effect output. The one
+// node returned as a replacement must fully describe the effect (i.e. produce
+// the effect and carry {Operator::Property} for the entire lowering).
+// Use-sites rely on this invariant; if it ever changes, we need to switch the
+// interface away from using the {Reduction} class.
class JSTypeHintLowering {
public:
- JSTypeHintLowering(JSGraph* jsgraph, Handle<FeedbackVector> feedback_vector);
+ // Flags that control the mode of operation.
+ enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 1 };
+ typedef base::Flags<Flag> Flags;
- // Potential reduction of binary (arithmetic, logical and shift) operations.
+ JSTypeHintLowering(JSGraph* jsgraph, Handle<FeedbackVector> feedback_vector,
+ Flags flags);
+
+ // Potential reduction of binary (arithmetic, logical, shift and relational
+ // comparison) operations.
Reduction ReduceBinaryOperation(const Operator* op, Node* left, Node* right,
Node* effect, Node* control,
- FeedbackSlot slot);
+ FeedbackSlot slot) const;
+
+  // Potential reduction of ToNumber operations.
+ Reduction ReduceToNumberOperation(Node* value, Node* effect, Node* control,
+ FeedbackSlot slot) const;
+
+ // Potential reduction of property access operations.
+ Reduction ReduceLoadNamedOperation(const Operator* op, Node* obj,
+ Node* effect, Node* control,
+ FeedbackSlot slot) const;
+ Reduction ReduceLoadKeyedOperation(const Operator* op, Node* obj, Node* key,
+ Node* effect, Node* control,
+ FeedbackSlot slot) const;
+ Reduction ReduceStoreNamedOperation(const Operator* op, Node* obj, Node* val,
+ Node* effect, Node* control,
+ FeedbackSlot slot) const;
+ Reduction ReduceStoreKeyedOperation(const Operator* op, Node* obj, Node* key,
+ Node* val, Node* effect, Node* control,
+ FeedbackSlot slot) const;
private:
friend class JSSpeculativeBinopBuilder;
+ Node* TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect, Node* control,
+                          DeoptimizeReason reason) const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Flags flags() const { return flags_; }
const Handle<FeedbackVector>& feedback_vector() const {
return feedback_vector_;
}
JSGraph* jsgraph_;
+ Flags const flags_;
Handle<FeedbackVector> feedback_vector_;
DISALLOW_COPY_AND_ASSIGN(JSTypeHintLowering);
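
ReduceBinaryOperation now also covers the relational comparisons, and SpeculativeCompareOp normalizes them to less-than forms by swapping operands. A tiny standalone sketch of the normalization, with doubles standing in for graph nodes:

#include <utility>

// a > b is rewritten as b < a, and a >= b as b <= a, so only the less-than
// flavours of the speculative comparison operators are ever emitted.
bool GreaterThanViaLessThan(double left, double right) {
  std::swap(left, right);  // a > b  =>  b < a
  return left < right;
}

int main() { return GreaterThanViaLessThan(2.0, 1.0) ? 0 : 1; }
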
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 31accbd86c..420e68cdf5 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -221,9 +221,8 @@ class JSBinopReduction final {
}
// Remove all effect and control inputs and outputs to this node and change
- // to the pure operator {op}, possibly inserting a boolean inversion.
- Reduction ChangeToPureOperator(const Operator* op, bool invert = false,
- Type* type = Type::Any()) {
+ // to the pure operator {op}.
+ Reduction ChangeToPureOperator(const Operator* op, Type* type = Type::Any()) {
DCHECK_EQ(0, op->EffectInputCount());
DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
DCHECK_EQ(0, op->ControlInputCount());
@@ -243,19 +242,10 @@ class JSBinopReduction final {
Type* node_type = NodeProperties::GetType(node_);
NodeProperties::SetType(node_, Type::Intersect(node_type, type, zone()));
- if (invert) {
- // Insert an boolean not to invert the value.
- Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
- node_->ReplaceUses(value);
- // Note: ReplaceUses() smashes all uses, so smash it back here.
- value->ReplaceInput(0, node_);
- return lowering_->Replace(value);
- }
return lowering_->Changed(node_);
}
- Reduction ChangeToSpeculativeOperator(const Operator* op, bool invert,
- Type* upper_bound) {
+ Reduction ChangeToSpeculativeOperator(const Operator* op, Type* upper_bound) {
DCHECK_EQ(1, op->EffectInputCount());
DCHECK_EQ(1, op->EffectOutputCount());
DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
@@ -271,19 +261,7 @@ class JSBinopReduction final {
// Reconnect the control output to bypass the IfSuccess node and
// possibly disconnect from the IfException node.
- for (Edge edge : node_->use_edges()) {
- Node* const user = edge.from();
- DCHECK(!user->IsDead());
- if (NodeProperties::IsControlEdge(edge)) {
- if (user->opcode() == IrOpcode::kIfSuccess) {
- user->ReplaceUses(NodeProperties::GetControlInput(node_));
- user->Kill();
- } else {
- DCHECK_EQ(user->opcode(), IrOpcode::kIfException);
- edge.UpdateTo(jsgraph()->Dead());
- }
- }
- }
+ lowering_->RelaxControls(node_);
// Remove the frame state and the context.
if (OperatorProperties::HasFrameStateInput(node_->op())) {
@@ -298,25 +276,9 @@ class JSBinopReduction final {
NodeProperties::SetType(node_,
Type::Intersect(node_type, upper_bound, zone()));
- if (invert) {
- // Insert an boolean not to invert the value.
- Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
- node_->ReplaceUses(value);
- // Note: ReplaceUses() smashes all uses, so smash it back here.
- value->ReplaceInput(0, node_);
- return lowering_->Replace(value);
- }
return lowering_->Changed(node_);
}
- Reduction ChangeToPureOperator(const Operator* op, Type* type) {
- return ChangeToPureOperator(op, false, type);
- }
-
- Reduction ChangeToSpeculativeOperator(const Operator* op, Type* type) {
- return ChangeToSpeculativeOperator(op, false, type);
- }
-
const Operator* NumberOp() {
switch (node_->opcode()) {
case IrOpcode::kJSAdd:
@@ -350,6 +312,12 @@ class JSBinopReduction final {
const Operator* NumberOpFromSpeculativeNumberOp() {
switch (node_->opcode()) {
+ case IrOpcode::kSpeculativeNumberEqual:
+ return simplified()->NumberEqual();
+ case IrOpcode::kSpeculativeNumberLessThan:
+ return simplified()->NumberLessThan();
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ return simplified()->NumberLessThanOrEqual();
case IrOpcode::kSpeculativeNumberAdd:
return simplified()->NumberAdd();
case IrOpcode::kSpeculativeNumberSubtract:
@@ -436,9 +404,7 @@ class JSBinopReduction final {
DCHECK(!NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
Node* const n = graph()->NewNode(javascript()->ToNumber(), node, context(),
frame_state, effect(), control());
- Node* const if_success = graph()->NewNode(common()->IfSuccess(), n);
- NodeProperties::ReplaceControlInput(node_, if_success);
- NodeProperties::ReplaceUses(node_, node_, node_, node_, n);
+ NodeProperties::ReplaceControlInput(node_, n);
update_effect(n);
return n;
}
@@ -710,25 +676,27 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
Node* efalse = effect;
{
// Throw a RangeError in case of overflow.
- Node* vfalse = efalse = graph()->NewNode(
+ Node* vfalse = efalse = if_false = graph()->NewNode(
javascript()->CallRuntime(Runtime::kThrowInvalidStringLength),
context, frame_state, efalse, if_false);
- if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
- if_false = graph()->NewNode(common()->Throw(), vfalse, efalse, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), if_false);
- Revisit(graph()->end());
// Update potential {IfException} uses of {node} to point to the
// %ThrowInvalidStringLength runtime call node instead.
- for (Edge edge : node->use_edges()) {
- if (edge.from()->opcode() == IrOpcode::kIfException) {
- DCHECK(NodeProperties::IsControlEdge(edge) ||
- NodeProperties::IsEffectEdge(edge));
- edge.UpdateTo(vfalse);
- Revisit(edge.from());
- }
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, vfalse);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse);
+ if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+ Revisit(on_exception);
}
+
+      // The above %ThrowInvalidStringLength runtime call is an unconditional
+      // throw, making it impossible to return a successful completion in this
+      // case. We simply connect the throwing control to the graph end.
+ if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+ Revisit(graph()->end());
}
control = graph()->NewNode(common()->IfTrue(), branch);
}
@@ -744,9 +712,8 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
effect = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect);
Node* value = effect =
- graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ graph()->NewNode(simplified()->Allocate(Type::OtherString(), NOT_TENURED),
jsgraph()->Constant(ConsString::kSize), effect, control);
- NodeProperties::SetType(value, Type::OtherString());
effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
value, value_map, effect, control);
effect = graph()->NewNode(
@@ -771,6 +738,15 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
return Changed(node);
}
+Reduction JSTypedLowering::ReduceSpeculativeNumberComparison(Node* node) {
+ JSBinopReduction r(this, node);
+ if (r.BothInputsAre(Type::Signed32()) ||
+ r.BothInputsAre(Type::Unsigned32())) {
+ return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp());
+ }
+ return Changed(node);
+}
+
Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::String())) {
@@ -798,16 +774,12 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
return Changed(node);
}
- NumberOperationHint hint;
const Operator* less_than;
const Operator* less_than_or_equal;
if (r.BothInputsAre(Type::Signed32()) ||
r.BothInputsAre(Type::Unsigned32())) {
less_than = simplified()->NumberLessThan();
less_than_or_equal = simplified()->NumberLessThanOrEqual();
- } else if (r.GetCompareNumberOperationHint(&hint)) {
- less_than = simplified()->SpeculativeNumberLessThan(hint);
- less_than_or_equal = simplified()->SpeculativeNumberLessThanOrEqual(hint);
} else if (r.OneInputCannotBe(Type::StringOrReceiver()) &&
(r.BothInputsAre(Type::PlainPrimitive()) ||
!(flags() & kDeoptimizationEnabled))) {
@@ -840,11 +812,7 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
default:
return NoChange();
}
- if (comparison->EffectInputCount() > 0) {
- return r.ChangeToSpeculativeOperator(comparison, Type::Boolean());
- } else {
- return r.ChangeToPureOperator(comparison);
- }
+ return r.ChangeToPureOperator(comparison);
}
Reduction JSTypedLowering::ReduceJSTypeOf(Node* node) {
@@ -873,173 +841,101 @@ Reduction JSTypedLowering::ReduceJSTypeOf(Node* node) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
- Node* input;
- Handle<String> type;
- HeapObjectBinopMatcher m(node);
- if (m.left().IsJSTypeOf() && m.right().HasValue() &&
- m.right().Value()->IsString()) {
- input = m.left().InputAt(0);
- type = Handle<String>::cast(m.right().Value());
- } else if (m.right().IsJSTypeOf() && m.left().HasValue() &&
- m.left().Value()->IsString()) {
- input = m.right().InputAt(0);
- type = Handle<String>::cast(m.left().Value());
- } else {
- return NoChange();
- }
- Node* value;
- if (String::Equals(type, factory()->boolean_string())) {
- value =
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->ReferenceEqual(), input,
- jsgraph()->TrueConstant()),
- jsgraph()->TrueConstant(),
- graph()->NewNode(simplified()->ReferenceEqual(), input,
- jsgraph()->FalseConstant()));
- } else if (String::Equals(type, factory()->function_string())) {
- value = graph()->NewNode(simplified()->ObjectIsDetectableCallable(), input);
- } else if (String::Equals(type, factory()->number_string())) {
- value = graph()->NewNode(simplified()->ObjectIsNumber(), input);
- } else if (String::Equals(type, factory()->object_string())) {
- value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->ObjectIsNonCallable(), input),
- jsgraph()->TrueConstant(),
- graph()->NewNode(simplified()->ReferenceEqual(), input,
- jsgraph()->NullConstant()));
- } else if (String::Equals(type, factory()->string_string())) {
- value = graph()->NewNode(simplified()->ObjectIsString(), input);
- } else if (String::Equals(type, factory()->undefined_string())) {
- value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->ReferenceEqual(), input,
- jsgraph()->NullConstant()),
- jsgraph()->FalseConstant(),
- graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
- } else {
- return NoChange();
- }
- if (invert) {
- value = graph()->NewNode(simplified()->BooleanNot(), value);
- }
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
-Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
- Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
- if (reduction.Changed()) return reduction;
-
+Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::UniqueName())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
if (r.IsInternalizedStringCompareOperation()) {
r.CheckInputsToInternalizedString();
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->StringEqual());
}
if (r.BothInputsAre(Type::Boolean())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
if (r.BothInputsAre(Type::Receiver())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
if (r.OneInputIs(Type::Undetectable())) {
RelaxEffectsAndControls(node);
node->RemoveInput(r.LeftInputIs(Type::Undetectable()) ? 0 : 1);
node->TrimInputCount(1);
NodeProperties::ChangeOp(node, simplified()->ObjectIsUndetectable());
- if (invert) {
- // Insert an boolean not to invert the value.
- Node* value = graph()->NewNode(simplified()->BooleanNot(), node);
- node->ReplaceUses(value);
- // Note: ReplaceUses() smashes all uses, so smash it back here.
- value->ReplaceInput(0, node);
- return Replace(value);
- }
return Changed(node);
}
- NumberOperationHint hint;
if (r.BothInputsAre(Type::Signed32()) ||
r.BothInputsAre(Type::Unsigned32())) {
- return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
- } else if (r.GetCompareNumberOperationHint(&hint)) {
- return r.ChangeToSpeculativeOperator(
- simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
+ return r.ChangeToPureOperator(simplified()->NumberEqual());
} else if (r.BothInputsAre(Type::Number())) {
- return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->NumberEqual());
} else if (r.IsReceiverCompareOperation()) {
r.CheckInputsToReceiver();
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
- return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->StringEqual());
}
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
+Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
JSBinopReduction r(this, node);
if (r.left() == r.right()) {
    // x === x is always true unless x is NaN
- if (!r.left_type()->Maybe(Type::NaN())) {
- Node* replacement = jsgraph()->BooleanConstant(!invert);
- ReplaceWithValue(node, replacement);
- return Replace(replacement);
- }
+ Node* replacement = graph()->NewNode(
+ simplified()->BooleanNot(),
+ graph()->NewNode(simplified()->ObjectIsNaN(), r.left()));
+ ReplaceWithValue(node, replacement);
+ return Replace(replacement);
}
if (r.OneInputCannotBe(Type::NumberOrString())) {
// For values with canonical representation (i.e. neither String, nor
// Number) an empty type intersection means the values cannot be strictly
// equal.
if (!r.left_type()->Maybe(r.right_type())) {
- Node* replacement = jsgraph()->BooleanConstant(invert);
+ Node* replacement = jsgraph()->FalseConstant();
ReplaceWithValue(node, replacement);
return Replace(replacement);
}
}
- Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
- if (reduction.Changed()) return reduction;
-
if (r.BothInputsAre(Type::Unique())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
if (r.OneInputIs(pointer_comparable_type_)) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
if (r.IsInternalizedStringCompareOperation()) {
r.CheckInputsToInternalizedString();
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->StringEqual());
}
NumberOperationHint hint;
if (r.BothInputsAre(Type::Signed32()) ||
r.BothInputsAre(Type::Unsigned32())) {
- return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->NumberEqual());
} else if (r.GetCompareNumberOperationHint(&hint)) {
return r.ChangeToSpeculativeOperator(
- simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
+ simplified()->SpeculativeNumberEqual(hint), Type::Boolean());
} else if (r.BothInputsAre(Type::Number())) {
- return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->NumberEqual());
} else if (r.IsReceiverCompareOperation()) {
// For strict equality, it's enough to know that one input is a Receiver,
// as a strict equality comparison with a Receiver can only yield true if
    // both sides refer to the same Receiver.
r.CheckLeftInputToReceiver();
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
- return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ return r.ChangeToPureOperator(simplified()->StringEqual());
}
return NoChange();
}
@@ -1263,10 +1159,9 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, node->op()->properties());
- rfalse = efalse = graph()->NewNode(
+ rfalse = efalse = if_false = graph()->NewNode(
common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
receiver, context, frame_state, efalse, if_false);
- if_false = graph()->NewNode(common()->IfSuccess(), rfalse);
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
@@ -1310,7 +1205,8 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
Handle<JSTypedArray>::cast(mbase.Value());
- if (!array->GetBuffer()->was_neutered()) {
+ if (!array->GetBuffer()->was_neutered() &&
+ !array->GetBuffer()->is_wasm_buffer()) {
array->GetBuffer()->set_is_neuterable(false);
BufferAccess const access(array->type());
size_t const k =
@@ -1362,7 +1258,8 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
Handle<JSTypedArray>::cast(mbase.Value());
- if (!array->GetBuffer()->was_neutered()) {
+ if (!array->GetBuffer()->was_neutered() &&
+ !array->GetBuffer()->is_wasm_buffer()) {
array->GetBuffer()->set_is_neuterable(false);
BufferAccess const access(array->type());
size_t const k =
@@ -1536,18 +1433,18 @@ Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
Node* vfalse1;
{
// Slow path, need to call the %HasInPrototypeChain runtime function.
- vfalse1 = efalse1 = graph()->NewNode(
+ vfalse1 = efalse1 = if_false1 = graph()->NewNode(
javascript()->CallRuntime(Runtime::kHasInPrototypeChain), object,
prototype, context, frame_state, efalse1, if_false1);
- if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
- // Replace any potential IfException on {node} to catch exceptions
+ // Replace any potential {IfException} uses of {node} to catch exceptions
// from this %HasInPrototypeChain runtime call instead.
- for (Edge edge : node->use_edges()) {
- if (edge.from()->opcode() == IrOpcode::kIfException) {
- edge.UpdateTo(vfalse1);
- Revisit(edge.from());
- }
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, vfalse1);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse1);
+ if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
+ Revisit(on_exception);
}
}
@@ -2196,9 +2093,6 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // We don't support lowering JSForInNext inside try blocks.
- if (NodeProperties::IsExceptionalCall(node)) return NoChange();
-
// We know that the {index} is in Unsigned32 range here, otherwise executing
// the JSForInNext wouldn't be valid. Unfortunately due to OSR and generators
// this is not always reflected in the types, hence we might need to rename
@@ -2244,15 +2138,26 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState);
- vfalse0 = efalse0 = graph()->NewNode(
+ vfalse0 = efalse0 = if_false0 = graph()->NewNode(
common()->Call(desc), jsgraph()->HeapConstant(callable.code()), key,
receiver, context, frame_state, effect, if_false0);
- if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
+
+      // Update potential {IfException} uses of {node} to point to the above
+ // ForInFilter stub call node instead.
+ Node* if_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
+ if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
+ NodeProperties::ReplaceControlInput(if_exception, vfalse0);
+ NodeProperties::ReplaceEffectInput(if_exception, efalse0);
+ Revisit(if_exception);
+ }
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
ReplaceWithValue(node, node, effect, control);
+
+ // Morph the {node} into a Phi.
node->ReplaceInput(0, vtrue0);
node->ReplaceInput(1, vfalse0);
node->ReplaceInput(2, control);
@@ -2292,19 +2197,21 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- int register_count = OpParameter<int>(node);
+ const GeneratorStoreParameters& p = GeneratorStoreParametersOf(node->op());
FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
FieldAccess continuation_field =
AccessBuilder::ForJSGeneratorObjectContinuation();
FieldAccess input_or_debug_pos_field =
- AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
+ p.suspend_flags() == SuspendFlags::kAsyncGeneratorAwait
+ ? AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos()
+ : AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
generator, effect, control);
- for (int i = 0; i < register_count; ++i) {
+ for (int i = 0; i < p.register_count(); ++i) {
Node* value = NodeProperties::GetValueInput(node, 3 + i);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
@@ -2366,13 +2273,9 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
Reduction JSTypedLowering::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSEqual:
- return ReduceJSEqual(node, false);
- case IrOpcode::kJSNotEqual:
- return ReduceJSEqual(node, true);
+ return ReduceJSEqual(node);
case IrOpcode::kJSStrictEqual:
- return ReduceJSStrictEqual(node, false);
- case IrOpcode::kJSStrictNotEqual:
- return ReduceJSStrictEqual(node, true);
+ return ReduceJSStrictEqual(node);
case IrOpcode::kJSLessThan: // fall through
case IrOpcode::kJSGreaterThan: // fall through
case IrOpcode::kJSLessThanOrEqual: // fall through
@@ -2455,6 +2358,10 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kSpeculativeNumberDivide:
case IrOpcode::kSpeculativeNumberModulus:
return ReduceSpeculativeNumberBinop(node);
+ case IrOpcode::kSpeculativeNumberEqual:
+ case IrOpcode::kSpeculativeNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ return ReduceSpeculativeNumberComparison(node);
default:
break;
}
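
A notable semantic detail above: ReduceJSStrictEqual no longer folds x === x to true when x may be NaN; it lowers the comparison to BooleanNot(ObjectIsNaN(x)), because NaN is the only value that is not strictly equal to itself. A standalone sketch of the semantics:

#include <cmath>

// x === x is equivalent to !isNaN(x): it holds for every value except NaN.
bool StrictEqualSelf(double x) { return !std::isnan(x); }

int main() {
  bool ok = StrictEqualSelf(1.5) && !StrictEqualSelf(std::nan(""));
  return ok ? 0 : 1;
}
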
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 35195ec09d..98d71c3ed9 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -57,9 +57,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSStoreContext(Node* node);
Reduction ReduceJSLoadModule(Node* node);
Reduction ReduceJSStoreModule(Node* node);
- Reduction ReduceJSEqualTypeOf(Node* node, bool invert);
- Reduction ReduceJSEqual(Node* node, bool invert);
- Reduction ReduceJSStrictEqual(Node* node, bool invert);
+ Reduction ReduceJSEqual(Node* node);
+ Reduction ReduceJSStrictEqual(Node* node);
Reduction ReduceJSToBoolean(Node* node);
Reduction ReduceJSToInteger(Node* node);
Reduction ReduceJSToLength(Node* node);
@@ -85,7 +84,9 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
Reduction ReduceCreateConsString(Node* node);
Reduction ReduceSpeculativeNumberAdd(Node* node);
+ Reduction ReduceSpeculativeNumberMultiply(Node* node);
Reduction ReduceSpeculativeNumberBinop(Node* node);
+ Reduction ReduceSpeculativeNumberComparison(Node* node);
Factory* factory() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 06f967a6a4..81c90d011f 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -4,7 +4,7 @@
#include "src/compiler/linkage.h"
-#include "src/ast/scopes.h"
+#include "src/assembler-inl.h"
#include "src/code-stubs.h"
#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 10140e1406..2c688a1cb5 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -37,6 +37,7 @@ Aliasing QueryAlias(Node* a, Node* b) {
break;
}
case IrOpcode::kFinishRegion:
+ case IrOpcode::kTypeGuard:
return QueryAlias(a, b->InputAt(0));
default:
break;
@@ -53,6 +54,7 @@ Aliasing QueryAlias(Node* a, Node* b) {
break;
}
case IrOpcode::kFinishRegion:
+ case IrOpcode::kTypeGuard:
return QueryAlias(a->InputAt(0), b);
default:
break;
@@ -798,23 +800,48 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
Node* const control = NodeProperties::GetControlInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- if (Node* replacement = state->LookupElement(object, index)) {
- // Make sure we don't resurrect dead {replacement} nodes.
- if (!replacement->IsDead()) {
- // We might need to guard the {replacement} if the type of the
- // {node} is more precise than the type of the {replacement}.
- Type* const node_type = NodeProperties::GetType(node);
- if (!NodeProperties::GetType(replacement)->Is(node_type)) {
- replacement = graph()->NewNode(common()->TypeGuard(node_type),
- replacement, control);
- NodeProperties::SetType(replacement, node_type);
+
+ // Only handle loads that do not require truncations.
+ ElementAccess const& access = ElementAccessOf(node->op());
+ switch (access.machine_type.representation()) {
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
+ case MachineRepresentation::kBit:
+ UNREACHABLE();
+ break;
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ // TODO(turbofan): Add support for doing the truncations.
+ break;
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTagged:
+ if (Node* replacement = state->LookupElement(object, index)) {
+ // Make sure we don't resurrect dead {replacement} nodes.
+ if (!replacement->IsDead()) {
+ // We might need to guard the {replacement} if the type of the
+ // {node} is more precise than the type of the {replacement}.
+ Type* const node_type = NodeProperties::GetType(node);
+ if (!NodeProperties::GetType(replacement)->Is(node_type)) {
+ replacement = graph()->NewNode(common()->TypeGuard(node_type),
+ replacement, control);
+ NodeProperties::SetType(replacement, node_type);
+ }
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
+ }
}
- ReplaceWithValue(node, replacement, effect);
- return Replace(replacement);
- }
+ state = state->AddElement(object, index, node, zone());
+ return UpdateState(node, state);
}
- state = state->AddElement(object, index, node, zone());
- return UpdateState(node, state);
+ return NoChange();
}
Reduction LoadElimination::ReduceStoreElement(Node* node) {
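
ReduceLoadElement now refuses to forward stored values into loads whose representation would truncate them, since forwarding would skip the rounding that the memory round-trip performs. A standalone sketch of the float32 hazard:

int main() {
  double stored = 0.1;                        // value written as float64
  float loaded = static_cast<float>(stored);  // what a kFloat32 load yields
  // Forwarding `stored` directly would observe precision that the real load
  // would have dropped, which is why such loads are skipped for now.
  return static_cast<double>(loaded) == stored ? 1 : 0;  // returns 0
}
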
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 9bade2732a..d5c37860f6 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -234,12 +234,14 @@ void LoopVariableOptimizer::VisitIf(Node* node, bool polarity) {
// Normalize to less than comparison.
switch (cond->opcode()) {
case IrOpcode::kJSLessThan:
+ case IrOpcode::kSpeculativeNumberLessThan:
AddCmpToLimits(limits, cond, InductionVariable::kStrict, polarity);
break;
case IrOpcode::kJSGreaterThan:
AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, !polarity);
break;
case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, polarity);
break;
case IrOpcode::kJSGreaterThanOrEqual:
@@ -315,7 +317,8 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
// TODO(jarin) Support both sides.
if (arith->InputAt(0) != phi) {
- if (arith->InputAt(0)->opcode() != IrOpcode::kJSToNumber ||
+ if ((arith->InputAt(0)->opcode() != IrOpcode::kJSToNumber &&
+ arith->InputAt(0)->opcode() != IrOpcode::kSpeculativeToNumber) ||
arith->InputAt(0)->InputAt(0) != phi) {
return nullptr;
}
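
TryGetInductionVariable now looks through both JSToNumber and the new SpeculativeToNumber wrapper when matching the increment's input back to the loop phi. A simplified sketch of that match (a stand-in node type, not the real IR):

struct Node {
  enum class Op { kPhi, kJSToNumber, kSpeculativeToNumber, kOther } op;
  const Node* input0 = nullptr;
};

// The increment's first input must be the loop phi itself, or the phi behind
// a (possibly speculative) ToNumber conversion.
bool MatchesInductionPhi(const Node* arith_input, const Node* phi) {
  if (arith_input == phi) return true;
  return (arith_input->op == Node::Op::kJSToNumber ||
          arith_input->op == Node::Op::kSpeculativeToNumber) &&
         arith_input->input0 == phi;
}

int main() {
  Node phi{Node::Op::kPhi};
  Node conv{Node::Op::kSpeculativeToNumber, &phi};
  return MatchesInductionPhi(&conv, &phi) ? 0 : 1;
}
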
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 2d5fce5f0a..6ac7a163e1 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -57,7 +57,6 @@ class MachineRepresentationInferrer {
case IrOpcode::kTryTruncateFloat32ToInt64:
case IrOpcode::kTryTruncateFloat64ToInt64:
case IrOpcode::kTryTruncateFloat32ToUint64:
- case IrOpcode::kTryTruncateFloat64ToUint64:
CHECK_LE(index, static_cast<size_t>(1));
return index == 0 ? MachineRepresentation::kWord64
: MachineRepresentation::kBit;
@@ -85,6 +84,7 @@ class MachineRepresentationInferrer {
void Run() {
auto blocks = schedule_->all_blocks();
for (BasicBlock* block : *blocks) {
+ current_block_ = block;
for (size_t i = 0; i <= block->NodeCount(); ++i) {
Node const* node =
i < block->NodeCount() ? block->NodeAt(i) : block->control_input();
@@ -148,6 +148,16 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] =
PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
break;
+ case IrOpcode::kAtomicExchange:
+ case IrOpcode::kAtomicCompareExchange:
+ case IrOpcode::kAtomicAdd:
+ case IrOpcode::kAtomicSub:
+ case IrOpcode::kAtomicAnd:
+ case IrOpcode::kAtomicOr:
+ case IrOpcode::kAtomicXor:
+ representation_vector_[node->id()] = PromoteRepresentation(
+ AtomicOpRepresentationOf(node->op()).representation());
+ break;
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
representation_vector_[node->id()] = PromoteRepresentation(
@@ -207,9 +217,9 @@ class MachineRepresentationInferrer {
case IrOpcode::kTruncateFloat32ToInt32:
case IrOpcode::kTruncateFloat32ToUint32:
case IrOpcode::kBitcastFloat32ToInt32:
- case IrOpcode::kInt32x4ExtractLane:
- case IrOpcode::kInt16x8ExtractLane:
- case IrOpcode::kInt8x16ExtractLane:
+ case IrOpcode::kI32x4ExtractLane:
+ case IrOpcode::kI16x8ExtractLane:
+ case IrOpcode::kI8x16ExtractLane:
case IrOpcode::kInt32Constant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kTruncateFloat64ToWord32:
@@ -230,6 +240,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kInt64Constant:
case IrOpcode::kRelocatableInt64Constant:
case IrOpcode::kBitcastFloat64ToInt64:
+ case IrOpcode::kChangeFloat64ToUint64:
MACHINE_BINOP_64_LIST(LABEL) {
representation_vector_[node->id()] =
MachineRepresentation::kWord64;
@@ -271,6 +282,7 @@ class MachineRepresentationInferrer {
Schedule const* const schedule_;
Linkage const* const linkage_;
ZoneVector<MachineRepresentation> representation_vector_;
+ BasicBlock* current_block_;
};
class MachineRepresentationChecker {
@@ -282,11 +294,13 @@ class MachineRepresentationChecker {
: schedule_(schedule),
inferrer_(inferrer),
is_stub_(is_stub),
- name_(name) {}
+ name_(name),
+ current_block_(nullptr) {}
void Run() {
BasicBlockVector const* blocks = schedule_->all_blocks();
for (BasicBlock* block : *blocks) {
+ current_block_ = block;
for (size_t i = 0; i <= block->NodeCount(); ++i) {
Node const* node =
i < block->NodeCount() ? block->NodeAt(i) : block->control_input();
@@ -331,6 +345,7 @@ class MachineRepresentationChecker {
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
case IrOpcode::kBitcastFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
CheckValueInputForFloat64Op(node, 0);
break;
case IrOpcode::kWord64Equal:
@@ -353,9 +368,9 @@ class MachineRepresentationChecker {
CheckValueInputForInt64Op(node, 0);
CheckValueInputForInt64Op(node, 1);
break;
- case IrOpcode::kInt32x4ExtractLane:
- case IrOpcode::kInt16x8ExtractLane:
- case IrOpcode::kInt8x16ExtractLane:
+ case IrOpcode::kI32x4ExtractLane:
+ case IrOpcode::kI16x8ExtractLane:
+ case IrOpcode::kI8x16ExtractLane:
CheckValueInputRepresentationIs(node, 0,
MachineRepresentation::kSimd128);
break;
@@ -423,6 +438,7 @@ class MachineRepresentationChecker {
}
break;
case IrOpcode::kFloat64SilenceNaN:
+ case IrOpcode::kChangeFloat64ToUint64:
MACHINE_FLOAT64_UNOP_LIST(LABEL) {
CheckValueInputForFloat64Op(node, 0);
}
@@ -438,6 +454,13 @@ class MachineRepresentationChecker {
node, 1, MachineType::PointerRepresentation());
break;
case IrOpcode::kStore:
+ case IrOpcode::kAtomicStore:
+ case IrOpcode::kAtomicExchange:
+ case IrOpcode::kAtomicAdd:
+ case IrOpcode::kAtomicSub:
+ case IrOpcode::kAtomicAnd:
+ case IrOpcode::kAtomicOr:
+ case IrOpcode::kAtomicXor:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -452,7 +475,7 @@ class MachineRepresentationChecker {
node, 2, inferrer_->GetRepresentation(node));
}
break;
- case IrOpcode::kAtomicStore:
+ case IrOpcode::kAtomicCompareExchange:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -461,10 +484,13 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
CheckValueInputIsTagged(node, 2);
+ CheckValueInputIsTagged(node, 3);
break;
default:
CheckValueInputRepresentationIs(
node, 2, inferrer_->GetRepresentation(node));
+ CheckValueInputRepresentationIs(
+ node, 3, inferrer_->GetRepresentation(node));
}
break;
case IrOpcode::kPhi:
@@ -521,8 +547,6 @@ class MachineRepresentationChecker {
break;
}
case IrOpcode::kThrow:
- CheckValueInputIsTagged(node, 0);
- break;
case IrOpcode::kTypedStateValues:
case IrOpcode::kFrameState:
break;
@@ -792,7 +816,8 @@ class MachineRepresentationChecker {
void PrintDebugHelp(std::ostream& out, Node const* node) {
if (DEBUG_BOOL) {
- out << "\n#\n# Specify option --csa-trap-on-node=" << name_ << ","
+ out << "\n# Current block: " << *current_block_;
+ out << "\n#\n# Specify option --csa-trap-on-node=" << name_ << ","
<< node->id() << " for debugging.";
}
}
@@ -801,6 +826,7 @@ class MachineRepresentationChecker {
MachineRepresentationInferrer const* const inferrer_;
bool is_stub_;
const char* name_;
+ BasicBlock* current_block_;
};
} // namespace
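With current_block_ threaded through the inferrer and checker, a failed representation check now also reports the basic block containing the offending node. For a hypothetical node 123 in a stub named Foo, the debug help would print roughly:

    # Current block: B7
    #
    # Specify option --csa-trap-on-node=Foo,123 for debugging.

(--csa-trap-on-node is the existing flag named in PrintDebugHelp; the block, node, and stub values here are invented for illustration.)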
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 854c22e15e..2e66b17a9d 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -80,6 +80,10 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
return OpParameter<MachineRepresentation>(op);
}
+MachineType AtomicOpRepresentationOf(Operator const* op) {
+ return OpParameter<MachineType>(op);
+}
+
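AtomicOpRepresentationOf just reads back the parameter that Operator1<MachineType> stores on the new atomic RMW operators, so consumers can recover the access type. A hypothetical call site (machine() is an assumed MachineOperatorBuilder accessor):

    const Operator* op = machine()->AtomicAdd(MachineType::Int32());
    MachineType type = AtomicOpRepresentationOf(op);     // MachineType::Int32()
    MachineRepresentation rep = type.representation();   // kWord32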
#define PURE_BINARY_OP_LIST_32(V) \
V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -124,223 +128,222 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)
-#define PURE_OP_LIST(V) \
- PURE_BINARY_OP_LIST_32(V) \
- PURE_BINARY_OP_LIST_64(V) \
- V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
- V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1) \
- V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
- V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
- V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
- V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
- V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1) \
- V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Add, Operator::kCommutative, 2, 0, 1) \
- V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
- V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Float32Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Acos, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Acosh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Asin, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Asinh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Atan, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Atan2, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Atanh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Cos, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Cosh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Exp, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Expm1, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Log, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Log1p, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Log2, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Log10, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Float64Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Float64Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Add, Operator::kCommutative, 2, 0, 1) \
- V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
- V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Pow, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Sin, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Sinh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Tan, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Tanh, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Equal, Operator::kCommutative, 2, 0, 1) \
- V(Float32LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
- V(Float64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1) \
- V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
- V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
- V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
- V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
- V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
- V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
- V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
- V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
- V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
- V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
- V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
- V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
- V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1) \
- V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4Add, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4Mul, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4Div, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4Min, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4Equal, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1) \
- V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1) \
- V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Int32x4Add, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4Mul, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4Min, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4Equal, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
- V(Uint32x4Min, Operator::kCommutative, 2, 0, 1) \
- V(Uint32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
- V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1) \
- V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1) \
- V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Int16x8Add, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8Mul, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8Min, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8Max, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8Equal, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
- V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8Min, Operator::kCommutative, 2, 0, 1) \
- V(Uint16x8Max, Operator::kCommutative, 2, 0, 1) \
- V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1) \
- V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1) \
- V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Int8x16Add, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16Mul, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16Min, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16Max, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16Equal, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
- V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16Min, Operator::kCommutative, 2, 0, 1) \
- V(Uint8x16Max, Operator::kCommutative, 2, 0, 1) \
- V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1) \
- V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Simd128Load, Operator::kNoProperties, 2, 0, 1) \
- V(Simd128Load1, Operator::kNoProperties, 2, 0, 1) \
- V(Simd128Load2, Operator::kNoProperties, 2, 0, 1) \
- V(Simd128Load3, Operator::kNoProperties, 2, 0, 1) \
- V(Simd128Store, Operator::kNoProperties, 3, 0, 1) \
- V(Simd128Store1, Operator::kNoProperties, 3, 0, 1) \
- V(Simd128Store2, Operator::kNoProperties, 3, 0, 1) \
- V(Simd128Store3, Operator::kNoProperties, 3, 0, 1) \
- V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Simd128Not, Operator::kNoProperties, 1, 0, 1) \
- V(Simd32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(Simd16x8Select, Operator::kNoProperties, 3, 0, 1) \
- V(Simd8x16Select, Operator::kNoProperties, 3, 0, 1)
+#define PURE_OP_LIST(V) \
+ PURE_BINARY_OP_LIST_32(V) \
+ PURE_BINARY_OP_LIST_64(V) \
+ V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat64ToUint64, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
+ V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float32Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Acos, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Acosh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Asin, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Asinh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Atan, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Atan2, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Atanh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cos, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cosh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Exp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Expm1, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log1p, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log2, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log10, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float64Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float64Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Pow, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Sin, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Sinh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Tan, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Tanh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float32LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float64LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
+ V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
+ V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
+ V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
+ V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
+ V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
+ V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4Mul, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4Div, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4MinNum, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4MaxNum, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4RecipRefine, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4RecipSqrtRefine, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4Eq, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4SConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4SConvertI16x8High, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4Mul, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4MinS, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4MaxS, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4Eq, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4Ne, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4LtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4LeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4UConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4UConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4UConvertI16x8High, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4MinU, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4MaxU, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4LtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4LeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8SConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8Add, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8AddSaturateS, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8SubSaturateS, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8Mul, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8MinS, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8MaxS, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8Eq, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8Ne, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8LtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8LeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8UConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8UConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8UConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8AddSaturateU, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8MinU, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8MaxU, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8LtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8LeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(I8x16Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(I8x16SConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16Add, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16AddSaturateS, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16SubSaturateS, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16Mul, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16MinS, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16MaxS, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16Eq, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16Ne, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16LtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16LeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16UConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16AddSaturateU, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16MinU, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16MaxU, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16LtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16LeU, Operator::kNoProperties, 2, 0, 1) \
+ V(S128Load, Operator::kNoProperties, 2, 0, 1) \
+ V(S128Store, Operator::kNoProperties, 3, 0, 1) \
+ V(S128Zero, Operator::kNoProperties, 0, 0, 1) \
+ V(S128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S128Not, Operator::kNoProperties, 1, 0, 1) \
+ V(S32x4Select, Operator::kNoProperties, 3, 0, 1) \
+ V(S16x8Select, Operator::kNoProperties, 3, 0, 1) \
+ V(S8x16Select, Operator::kNoProperties, 3, 0, 1) \
+ V(S1x4Zero, Operator::kNoProperties, 0, 0, 1) \
+ V(S1x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S1x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S1x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S1x4Not, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x8Zero, Operator::kNoProperties, 0, 0, 1) \
+ V(S1x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S1x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S1x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S1x8Not, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x16Zero, Operator::kNoProperties, 0, 0, 1) \
+ V(S1x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S1x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S1x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(S1x16Not, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1)
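PURE_OP_LIST is an X-macro: each row V(Name, properties, value_input_count, control_input_count, output_count) is expanded by whatever V the includer supplies. A simplified sketch of the expansion pattern (the real PURE macro in this file also defines a cached operator struct per row):

    #define PURE(Name, properties, value_input_count, control_input_count, \
                 output_count)                                              \
      const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
    PURE_OP_LIST(PURE)
    #undef PURE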
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
@@ -349,6 +352,8 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
+ V(Int32AbsWithOverflow, Operator::kNoProperties, 1, 0, 1) \
+ V(Int64AbsWithOverflow, Operator::kNoProperties, 1, 0, 1) \
V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(Float32RoundDown, Operator::kNoProperties, 1, 0, 1) \
@@ -411,10 +416,10 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(kWord32)
#define SIMD_LANE_OP_LIST(V) \
- V(Float32x4, 4) \
- V(Int32x4, 4) \
- V(Int16x8, 8) \
- V(Int8x16, 16)
+ V(F32x4, 4) \
+ V(I32x4, 4) \
+ V(I16x8, 8) \
+ V(I8x16, 16)
#define SIMD_FORMAT_LIST(V) \
V(32x4, 32) \
@@ -597,6 +602,38 @@ struct MachineOperatorGlobalCache {
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef STORE
+#define ATOMIC_OP(op, type) \
+ struct op##type##Operator : public Operator1<MachineType> { \
+ op##type##Operator() \
+ : Operator1<MachineType>(IrOpcode::k##op, \
+ Operator::kNoDeopt | Operator::kNoThrow, #op, \
+ 3, 1, 1, 1, 1, 0, MachineType::type()) {} \
+ }; \
+ op##type##Operator k##op##type;
+#define ATOMIC_OP_LIST(type) \
+ ATOMIC_OP(AtomicExchange, type) \
+ ATOMIC_OP(AtomicAdd, type) \
+ ATOMIC_OP(AtomicSub, type) \
+ ATOMIC_OP(AtomicAnd, type) \
+ ATOMIC_OP(AtomicOr, type) \
+ ATOMIC_OP(AtomicXor, type)
+ ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
+#undef ATOMIC_OP_LIST
+#undef ATOMIC_OP
+
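Expanding ATOMIC_OP(AtomicAdd, Int32), for example, yields the cached operator below: 3 value inputs (base, index, value), 1 effect and 1 control input, and 1 value plus 1 effect output, parameterized by the access type:

    struct AtomicAddInt32Operator : public Operator1<MachineType> {
      AtomicAddInt32Operator()
          : Operator1<MachineType>(IrOpcode::kAtomicAdd,
                                   Operator::kNoDeopt | Operator::kNoThrow,
                                   "AtomicAdd", 3, 1, 1, 1, 1, 0,
                                   MachineType::Int32()) {}
    };
    AtomicAddInt32Operator kAtomicAddInt32;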
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct AtomicCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ AtomicCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kAtomicCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "AtomicCompareExchange", 4, 1, 1, 1, 1, 0, \
+ MachineType::Type()) {} \
+ }; \
+ AtomicCompareExchange##Type##Operator kAtomicCompareExchange##Type;
+ ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+#undef ATOMIC_COMPARE_EXCHANGE
+
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
@@ -853,6 +890,83 @@ const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
return nullptr;
}
+const Operator* MachineOperatorBuilder::AtomicExchange(MachineType rep) {
+#define EXCHANGE(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kAtomicExchange##kRep; \
+ }
+ ATOMIC_TYPE_LIST(EXCHANGE)
+#undef EXCHANGE
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicCompareExchange(MachineType rep) {
+#define COMPARE_EXCHANGE(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kAtomicCompareExchange##kRep; \
+ }
+ ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
+#undef COMPARE_EXCHANGE
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicAdd(MachineType rep) {
+#define ADD(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kAtomicAdd##kRep; \
+ }
+ ATOMIC_TYPE_LIST(ADD)
+#undef ADD
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicSub(MachineType rep) {
+#define SUB(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kAtomicSub##kRep; \
+ }
+ ATOMIC_TYPE_LIST(SUB)
+#undef SUB
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicAnd(MachineType rep) {
+#define AND(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kAtomicAnd##kRep; \
+ }
+ ATOMIC_TYPE_LIST(AND)
+#undef AND
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicOr(MachineType rep) {
+#define OR(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kAtomicOr##kRep; \
+ }
+ ATOMIC_TYPE_LIST(OR)
+#undef OR
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
+#define XOR(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kAtomicXor##kRep; \
+ }
+ ATOMIC_TYPE_LIST(XOR)
+#undef XOR
+ UNREACHABLE();
+ return nullptr;
+}
+
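A hypothetical call site for the new builders (graph(), machine(), and the operand nodes are assumed): a compare-exchange node takes four value inputs plus effect and control, matching the 4, 1, 1 input counts above.

    Node* cas = graph()->NewNode(
        machine()->AtomicCompareExchange(MachineType::Int32()),
        base, index, expected, replacement, effect, control);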
#define SIMD_LANE_OPS(Type, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane( \
int32_t lane_index) { \
@@ -871,38 +985,35 @@ const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
#undef SIMD_LANE_OPS
-#define SIMD_SHIFT_OPS(format, bits) \
- const Operator* MachineOperatorBuilder::Int##format##ShiftLeftByScalar( \
- int32_t shift) { \
- DCHECK(0 <= shift && shift < bits); \
- return new (zone_) Operator1<int32_t>( \
- IrOpcode::kInt##format##ShiftLeftByScalar, Operator::kPure, \
- "Shift left", 1, 0, 0, 1, 0, 0, shift); \
- } \
- const Operator* MachineOperatorBuilder::Int##format##ShiftRightByScalar( \
- int32_t shift) { \
- DCHECK(0 < shift && shift <= bits); \
- return new (zone_) Operator1<int32_t>( \
- IrOpcode::kInt##format##ShiftRightByScalar, Operator::kPure, \
- "Arithmetic shift right", 1, 0, 0, 1, 0, 0, shift); \
- } \
- const Operator* MachineOperatorBuilder::Uint##format##ShiftRightByScalar( \
- int32_t shift) { \
- DCHECK(0 <= shift && shift < bits); \
- return new (zone_) Operator1<int32_t>( \
- IrOpcode::kUint##format##ShiftRightByScalar, Operator::kPure, \
- "Shift right", 1, 0, 0, 1, 0, 0, shift); \
+#define SIMD_SHIFT_OPS(format, bits) \
+ const Operator* MachineOperatorBuilder::I##format##Shl(int32_t shift) { \
+ DCHECK(0 <= shift && shift < bits); \
+ return new (zone_) \
+ Operator1<int32_t>(IrOpcode::kI##format##Shl, Operator::kPure, \
+ "Shift left", 1, 0, 0, 1, 0, 0, shift); \
+ } \
+ const Operator* MachineOperatorBuilder::I##format##ShrS(int32_t shift) { \
+ DCHECK(0 < shift && shift <= bits); \
+ return new (zone_) \
+ Operator1<int32_t>(IrOpcode::kI##format##ShrS, Operator::kPure, \
+ "Arithmetic shift right", 1, 0, 0, 1, 0, 0, shift); \
+ } \
+ const Operator* MachineOperatorBuilder::I##format##ShrU(int32_t shift) { \
+ DCHECK(0 <= shift && shift < bits); \
+ return new (zone_) \
+ Operator1<int32_t>(IrOpcode::kI##format##ShrU, Operator::kPure, \
+ "Shift right", 1, 0, 0, 1, 0, 0, shift); \
}
SIMD_FORMAT_LIST(SIMD_SHIFT_OPS)
#undef SIMD_SHIFT_OPS
// TODO(bbudge) Add Shuffle, DCHECKs based on format.
-#define SIMD_PERMUTE_OPS(format, bits) \
- const Operator* MachineOperatorBuilder::Simd##format##Swizzle( \
- uint32_t swizzle) { \
- return new (zone_) \
- Operator1<uint32_t>(IrOpcode::kSimd##format##Swizzle, Operator::kPure, \
- "Swizzle", 2, 0, 0, 1, 0, 0, swizzle); \
+#define SIMD_PERMUTE_OPS(format, bits) \
+ const Operator* MachineOperatorBuilder::S##format##Swizzle( \
+ uint32_t swizzle) { \
+ return new (zone_) \
+ Operator1<uint32_t>(IrOpcode::kS##format##Swizzle, Operator::kPure, \
+ "Swizzle", 2, 0, 0, 1, 0, 0, swizzle); \
}
SIMD_FORMAT_LIST(SIMD_PERMUTE_OPS)
#undef SIMD_PERMUTE_OPS
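Note that the SIMD shift amount is baked into the operator as an Operator1<int32_t> parameter rather than passed as a value input, and the DCHECK bounds differ by shift kind, e.g. (illustrative; machine() assumed):

    const Operator* shl  = machine()->I32x4Shl(3);   // DCHECK(0 <= 3 && 3 < 32)
    const Operator* sar  = machine()->I16x8ShrS(1);  // DCHECK(0 < 1 && 1 <= 16)
    const Operator* shru = machine()->I8x16ShrU(7);  // DCHECK(0 <= 7 && 7 < 8)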
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 0558279183..9ffb355362 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -97,6 +97,8 @@ int StackSlotSizeOf(Operator const* op);
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
+MachineType AtomicOpRepresentationOf(Operator const* op);
+
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -127,13 +129,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kWord64ReverseBits = 1u << 17,
kWord32ReverseBytes = 1u << 18,
kWord64ReverseBytes = 1u << 19,
- kAllOptionalOps = kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
- kFloat64RoundUp | kFloat32RoundTruncate |
- kFloat64RoundTruncate | kFloat64RoundTiesAway |
- kFloat32RoundTiesEven | kFloat64RoundTiesEven |
- kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
- kWord32ReverseBits | kWord64ReverseBits |
- kWord32ReverseBytes | kWord64ReverseBytes
+ kInt32AbsWithOverflow = 1u << 20,
+ kInt64AbsWithOverflow = 1u << 21,
+ kAllOptionalOps =
+ kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
+ kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
+ kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
+ kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
+ kWord32ReverseBits | kWord64ReverseBits | kWord32ReverseBytes |
+ kWord64ReverseBytes | kInt32AbsWithOverflow | kInt64AbsWithOverflow
};
typedef base::Flags<Flag, unsigned> Flags;
@@ -180,6 +184,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
bool IsUnalignedSupported(const Vector<MachineType>& unsupported,
const MachineType& machineType,
uint8_t alignment) const {
+ // All accesses of bytes in memory are aligned.
+ DCHECK_NE(machineType.representation(), MachineRepresentation::kWord8);
if (unalignedSupport_ == kFullSupport) {
return true;
} else if (unalignedSupport_ == kNoSupport) {
@@ -226,6 +232,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const OptionalOperator Word64ReverseBits();
const OptionalOperator Word32ReverseBytes();
const OptionalOperator Word64ReverseBytes();
+ const OptionalOperator Int32AbsWithOverflow();
+ const OptionalOperator Int64AbsWithOverflow();
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
const Operator* Word64And();
@@ -299,6 +307,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* ChangeFloat32ToFloat64();
const Operator* ChangeFloat64ToInt32(); // narrowing
const Operator* ChangeFloat64ToUint32(); // narrowing
+ const Operator* ChangeFloat64ToUint64();
const Operator* TruncateFloat64ToUint32();
const Operator* TruncateFloat32ToInt32();
const Operator* TruncateFloat32ToUint32();
@@ -425,161 +434,161 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Float64SilenceNaN();
// SIMD operators.
- const Operator* CreateFloat32x4();
- const Operator* Float32x4ExtractLane(int32_t);
- const Operator* Float32x4ReplaceLane(int32_t);
- const Operator* Float32x4Abs();
- const Operator* Float32x4Neg();
- const Operator* Float32x4Sqrt();
- const Operator* Float32x4RecipApprox();
- const Operator* Float32x4RecipSqrtApprox();
- const Operator* Float32x4Add();
- const Operator* Float32x4Sub();
- const Operator* Float32x4Mul();
- const Operator* Float32x4Div();
- const Operator* Float32x4Min();
- const Operator* Float32x4Max();
- const Operator* Float32x4MinNum();
- const Operator* Float32x4MaxNum();
- const Operator* Float32x4Equal();
- const Operator* Float32x4NotEqual();
- const Operator* Float32x4LessThan();
- const Operator* Float32x4LessThanOrEqual();
- const Operator* Float32x4GreaterThan();
- const Operator* Float32x4GreaterThanOrEqual();
- const Operator* Float32x4FromInt32x4();
- const Operator* Float32x4FromUint32x4();
-
- const Operator* CreateInt32x4();
- const Operator* Int32x4ExtractLane(int32_t);
- const Operator* Int32x4ReplaceLane(int32_t);
- const Operator* Int32x4Neg();
- const Operator* Int32x4Add();
- const Operator* Int32x4Sub();
- const Operator* Int32x4Mul();
- const Operator* Int32x4Min();
- const Operator* Int32x4Max();
- const Operator* Int32x4ShiftLeftByScalar(int32_t);
- const Operator* Int32x4ShiftRightByScalar(int32_t);
- const Operator* Int32x4Equal();
- const Operator* Int32x4NotEqual();
- const Operator* Int32x4LessThan();
- const Operator* Int32x4LessThanOrEqual();
- const Operator* Int32x4GreaterThan();
- const Operator* Int32x4GreaterThanOrEqual();
- const Operator* Int32x4FromFloat32x4();
-
- const Operator* Uint32x4Min();
- const Operator* Uint32x4Max();
- const Operator* Uint32x4ShiftRightByScalar(int32_t);
- const Operator* Uint32x4LessThan();
- const Operator* Uint32x4LessThanOrEqual();
- const Operator* Uint32x4GreaterThan();
- const Operator* Uint32x4GreaterThanOrEqual();
- const Operator* Uint32x4FromFloat32x4();
-
- const Operator* Bool32x4And();
- const Operator* Bool32x4Or();
- const Operator* Bool32x4Xor();
- const Operator* Bool32x4Not();
- const Operator* Bool32x4AnyTrue();
- const Operator* Bool32x4AllTrue();
-
- const Operator* CreateInt16x8();
- const Operator* Int16x8ExtractLane(int32_t);
- const Operator* Int16x8ReplaceLane(int32_t);
- const Operator* Int16x8Neg();
- const Operator* Int16x8Add();
- const Operator* Int16x8AddSaturate();
- const Operator* Int16x8Sub();
- const Operator* Int16x8SubSaturate();
- const Operator* Int16x8Mul();
- const Operator* Int16x8Min();
- const Operator* Int16x8Max();
- const Operator* Int16x8ShiftLeftByScalar(int32_t);
- const Operator* Int16x8ShiftRightByScalar(int32_t);
- const Operator* Int16x8Equal();
- const Operator* Int16x8NotEqual();
- const Operator* Int16x8LessThan();
- const Operator* Int16x8LessThanOrEqual();
- const Operator* Int16x8GreaterThan();
- const Operator* Int16x8GreaterThanOrEqual();
-
- const Operator* Uint16x8AddSaturate();
- const Operator* Uint16x8SubSaturate();
- const Operator* Uint16x8Min();
- const Operator* Uint16x8Max();
- const Operator* Uint16x8ShiftRightByScalar(int32_t);
- const Operator* Uint16x8LessThan();
- const Operator* Uint16x8LessThanOrEqual();
- const Operator* Uint16x8GreaterThan();
- const Operator* Uint16x8GreaterThanOrEqual();
-
- const Operator* Bool16x8And();
- const Operator* Bool16x8Or();
- const Operator* Bool16x8Xor();
- const Operator* Bool16x8Not();
- const Operator* Bool16x8AnyTrue();
- const Operator* Bool16x8AllTrue();
-
- const Operator* CreateInt8x16();
- const Operator* Int8x16ExtractLane(int32_t);
- const Operator* Int8x16ReplaceLane(int32_t);
- const Operator* Int8x16Neg();
- const Operator* Int8x16Add();
- const Operator* Int8x16AddSaturate();
- const Operator* Int8x16Sub();
- const Operator* Int8x16SubSaturate();
- const Operator* Int8x16Mul();
- const Operator* Int8x16Min();
- const Operator* Int8x16Max();
- const Operator* Int8x16ShiftLeftByScalar(int32_t);
- const Operator* Int8x16ShiftRightByScalar(int32_t);
- const Operator* Int8x16Equal();
- const Operator* Int8x16NotEqual();
- const Operator* Int8x16LessThan();
- const Operator* Int8x16LessThanOrEqual();
- const Operator* Int8x16GreaterThan();
- const Operator* Int8x16GreaterThanOrEqual();
-
- const Operator* Uint8x16AddSaturate();
- const Operator* Uint8x16SubSaturate();
- const Operator* Uint8x16Min();
- const Operator* Uint8x16Max();
- const Operator* Uint8x16ShiftRightByScalar(int32_t);
- const Operator* Uint8x16LessThan();
- const Operator* Uint8x16LessThanOrEqual();
- const Operator* Uint8x16GreaterThan();
- const Operator* Uint8x16GreaterThanOrEqual();
-
- const Operator* Bool8x16And();
- const Operator* Bool8x16Or();
- const Operator* Bool8x16Xor();
- const Operator* Bool8x16Not();
- const Operator* Bool8x16AnyTrue();
- const Operator* Bool8x16AllTrue();
-
- const Operator* Simd128Load();
- const Operator* Simd128Load1();
- const Operator* Simd128Load2();
- const Operator* Simd128Load3();
- const Operator* Simd128Store();
- const Operator* Simd128Store1();
- const Operator* Simd128Store2();
- const Operator* Simd128Store3();
- const Operator* Simd128And();
- const Operator* Simd128Or();
- const Operator* Simd128Xor();
- const Operator* Simd128Not();
- const Operator* Simd32x4Select();
- const Operator* Simd32x4Swizzle(uint32_t);
- const Operator* Simd32x4Shuffle();
- const Operator* Simd16x8Select();
- const Operator* Simd16x8Swizzle(uint32_t);
- const Operator* Simd16x8Shuffle();
- const Operator* Simd8x16Select();
- const Operator* Simd8x16Swizzle(uint32_t);
- const Operator* Simd8x16Shuffle();
+ const Operator* F32x4Splat();
+ const Operator* F32x4ExtractLane(int32_t);
+ const Operator* F32x4ReplaceLane(int32_t);
+ const Operator* F32x4SConvertI32x4();
+ const Operator* F32x4UConvertI32x4();
+ const Operator* F32x4Abs();
+ const Operator* F32x4Neg();
+ const Operator* F32x4Sqrt();
+ const Operator* F32x4RecipApprox();
+ const Operator* F32x4RecipSqrtApprox();
+ const Operator* F32x4Add();
+ const Operator* F32x4Sub();
+ const Operator* F32x4Mul();
+ const Operator* F32x4Div();
+ const Operator* F32x4Min();
+ const Operator* F32x4Max();
+ const Operator* F32x4MinNum();
+ const Operator* F32x4MaxNum();
+ const Operator* F32x4RecipRefine();
+ const Operator* F32x4RecipSqrtRefine();
+ const Operator* F32x4Eq();
+ const Operator* F32x4Ne();
+ const Operator* F32x4Lt();
+ const Operator* F32x4Le();
+
+ const Operator* I32x4Splat();
+ const Operator* I32x4ExtractLane(int32_t);
+ const Operator* I32x4ReplaceLane(int32_t);
+ const Operator* I32x4SConvertF32x4();
+ const Operator* I32x4SConvertI16x8Low();
+ const Operator* I32x4SConvertI16x8High();
+ const Operator* I32x4Neg();
+ const Operator* I32x4Shl(int32_t);
+ const Operator* I32x4ShrS(int32_t);
+ const Operator* I32x4Add();
+ const Operator* I32x4Sub();
+ const Operator* I32x4Mul();
+ const Operator* I32x4MinS();
+ const Operator* I32x4MaxS();
+ const Operator* I32x4Eq();
+ const Operator* I32x4Ne();
+ const Operator* I32x4LtS();
+ const Operator* I32x4LeS();
+
+ const Operator* I32x4UConvertF32x4();
+ const Operator* I32x4UConvertI16x8Low();
+ const Operator* I32x4UConvertI16x8High();
+ const Operator* I32x4ShrU(int32_t);
+ const Operator* I32x4MinU();
+ const Operator* I32x4MaxU();
+ const Operator* I32x4LtU();
+ const Operator* I32x4LeU();
+
+ const Operator* I16x8Splat();
+ const Operator* I16x8ExtractLane(int32_t);
+ const Operator* I16x8ReplaceLane(int32_t);
+ const Operator* I16x8SConvertI8x16Low();
+ const Operator* I16x8SConvertI8x16High();
+ const Operator* I16x8Neg();
+ const Operator* I16x8Shl(int32_t);
+ const Operator* I16x8ShrS(int32_t);
+ const Operator* I16x8SConvertI32x4();
+ const Operator* I16x8Add();
+ const Operator* I16x8AddSaturateS();
+ const Operator* I16x8Sub();
+ const Operator* I16x8SubSaturateS();
+ const Operator* I16x8Mul();
+ const Operator* I16x8MinS();
+ const Operator* I16x8MaxS();
+ const Operator* I16x8Eq();
+ const Operator* I16x8Ne();
+ const Operator* I16x8LtS();
+ const Operator* I16x8LeS();
+
+ const Operator* I16x8UConvertI8x16Low();
+ const Operator* I16x8UConvertI8x16High();
+ const Operator* I16x8ShrU(int32_t);
+ const Operator* I16x8UConvertI32x4();
+ const Operator* I16x8AddSaturateU();
+ const Operator* I16x8SubSaturateU();
+ const Operator* I16x8MinU();
+ const Operator* I16x8MaxU();
+ const Operator* I16x8LtU();
+ const Operator* I16x8LeU();
+
+ const Operator* I8x16Splat();
+ const Operator* I8x16ExtractLane(int32_t);
+ const Operator* I8x16ReplaceLane(int32_t);
+ const Operator* I8x16Neg();
+ const Operator* I8x16Shl(int32_t);
+ const Operator* I8x16ShrS(int32_t);
+ const Operator* I8x16SConvertI16x8();
+ const Operator* I8x16Add();
+ const Operator* I8x16AddSaturateS();
+ const Operator* I8x16Sub();
+ const Operator* I8x16SubSaturateS();
+ const Operator* I8x16Mul();
+ const Operator* I8x16MinS();
+ const Operator* I8x16MaxS();
+ const Operator* I8x16Eq();
+ const Operator* I8x16Ne();
+ const Operator* I8x16LtS();
+ const Operator* I8x16LeS();
+
+ const Operator* I8x16ShrU(int32_t);
+ const Operator* I8x16UConvertI16x8();
+ const Operator* I8x16AddSaturateU();
+ const Operator* I8x16SubSaturateU();
+ const Operator* I8x16MinU();
+ const Operator* I8x16MaxU();
+ const Operator* I8x16LtU();
+ const Operator* I8x16LeU();
+
+ const Operator* S128Load();
+ const Operator* S128Store();
+
+ const Operator* S128Zero();
+ const Operator* S128And();
+ const Operator* S128Or();
+ const Operator* S128Xor();
+ const Operator* S128Not();
+
+ const Operator* S32x4Select();
+ const Operator* S32x4Swizzle(uint32_t);
+ const Operator* S32x4Shuffle();
+ const Operator* S16x8Select();
+ const Operator* S16x8Swizzle(uint32_t);
+ const Operator* S16x8Shuffle();
+ const Operator* S8x16Select();
+ const Operator* S8x16Swizzle(uint32_t);
+ const Operator* S8x16Shuffle();
+
+ const Operator* S1x4Zero();
+ const Operator* S1x4And();
+ const Operator* S1x4Or();
+ const Operator* S1x4Xor();
+ const Operator* S1x4Not();
+ const Operator* S1x4AnyTrue();
+ const Operator* S1x4AllTrue();
+
+ const Operator* S1x8Zero();
+ const Operator* S1x8And();
+ const Operator* S1x8Or();
+ const Operator* S1x8Xor();
+ const Operator* S1x8Not();
+ const Operator* S1x8AnyTrue();
+ const Operator* S1x8AllTrue();
+
+ const Operator* S1x16Zero();
+ const Operator* S1x16And();
+ const Operator* S1x16Or();
+ const Operator* S1x16Xor();
+ const Operator* S1x16Not();
+ const Operator* S1x16AnyTrue();
+ const Operator* S1x16AllTrue();
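The S1xN groups model per-lane boolean masks (4, 8, or 16 one-bit lanes), produced by the lane-wise comparisons and consumed by the select and any-true/all-true operators. One plausible pipeline, with assumed accessors and operand nodes:

    Node* mask = graph()->NewNode(machine()->F32x4Lt(), a, b);           // S1x4-shaped mask
    Node* sel  = graph()->NewNode(machine()->S32x4Select(), mask, a, b);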
// load [base + index]
const Operator* Load(LoadRepresentation rep);
@@ -612,6 +621,20 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value
const Operator* AtomicStore(MachineRepresentation rep);
+ // atomic-exchange [base + index], value
+ const Operator* AtomicExchange(MachineType rep);
+ // atomic-compare-exchange [base + index], old_value, new_value
+ const Operator* AtomicCompareExchange(MachineType rep);
+ // atomic-add [base + index], value
+ const Operator* AtomicAdd(MachineType rep);
+ // atomic-sub [base + index], value
+ const Operator* AtomicSub(MachineType rep);
+ // atomic-and [base + index], value
+ const Operator* AtomicAnd(MachineType rep);
+ // atomic-or [base + index], value
+ const Operator* AtomicOr(MachineType rep);
+ // atomic-xor [base + index], value
+ const Operator* AtomicXor(MachineType rep);
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == MachineRepresentation::kWord32; }
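Note the deliberate asymmetry above: AtomicStore keeps taking only a MachineRepresentation (a store produces no value), while the read-modify-write builders take a full MachineType, presumably because the returned prior value must be sign- or zero-extended per the access type (compare the separate kAtomicAddInt8/kAtomicAddUint8 instruction codes). Illustrative:

    machine()->AtomicStore(MachineRepresentation::kWord32);  // no value result
    machine()->AtomicAdd(MachineType::Int16());              // old value sign-extended
    machine()->AtomicAdd(MachineType::Uint16());             // old value zero-extended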
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index db4b5293a4..628c79025e 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -1472,7 +1472,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsLdc1:
- __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kMipsUldc1:
__ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
@@ -1482,7 +1482,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ sdc1(ft, i.MemoryOperand());
+ __ Sdc1(ft, i.MemoryOperand());
break;
}
case kMipsUsdc1: {
@@ -1495,7 +1495,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
- __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
@@ -1512,7 +1512,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
- __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1545,7 +1545,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Double, Ldc1);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(sb);
@@ -1560,7 +1560,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
break;
case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
+ ASSEMBLE_CHECKED_STORE_FLOAT(Double, Sdc1);
break;
case kCheckedLoadWord64:
case kCheckedStoreWord64:
@@ -1590,6 +1590,183 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8:
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16:
+ case kAtomicExchangeWord32:
+ case kAtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeWord32:
+ case kAtomicAddInt8:
+ case kAtomicAddUint8:
+ case kAtomicAddInt16:
+ case kAtomicAddUint16:
+ case kAtomicAddWord32:
+ case kAtomicSubInt8:
+ case kAtomicSubUint8:
+ case kAtomicSubInt16:
+ case kAtomicSubUint16:
+ case kAtomicSubWord32:
+ case kAtomicAndInt8:
+ case kAtomicAndUint8:
+ case kAtomicAndInt16:
+ case kAtomicAndUint16:
+ case kAtomicAndWord32:
+ case kAtomicOrInt8:
+ case kAtomicOrUint8:
+ case kAtomicOrInt16:
+ case kAtomicOrUint16:
+ case kAtomicOrWord32:
+ case kAtomicXorInt8:
+ case kAtomicXorUint8:
+ case kAtomicXorInt16:
+ case kAtomicXorUint16:
+ case kAtomicXorWord32:
+ UNREACHABLE();
+ break;
+ case kMipsS128Zero: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kMipsI32x4Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMipsI32x4ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMipsI32x4ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMipsI32x4Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ FmoveLow(kScratchReg, i.InputSingleRegister(0));
+ __ fill_w(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
+ case kMipsF32x4ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
+ __ FmoveLow(i.OutputSingleRegister(), kScratchReg);
+ break;
+ }
+ case kMipsF32x4ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
+ __ insert_w(dst, i.InputInt8(1), kScratchReg);
+ break;
+ }
+ case kMipsF32x4SConvertI32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsF32x4UConvertI32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4MaxS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4MinS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
+ case kMipsI32x4Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ break;
+ }
+ case kMipsI32x4ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ break;
+ }
+ case kMipsI32x4ShrU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ break;
+ }
+ case kMipsI32x4MaxU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4MinU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsS32x4Select: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
+ __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
+ i.InputSimd128Register(1));
+ break;
+ }
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1996,7 +2173,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ }
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2123,6 +2302,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
}
+void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2208,7 +2388,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
if (destination->IsFPStackSlot()) {
- __ sdc1(dst, g.ToMemOperand(destination));
+ __ Sdc1(dst, g.ToMemOperand(destination));
}
}
} else if (source->IsFPRegister()) {
@@ -2221,7 +2401,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
- __ sdc1(src, g.ToMemOperand(destination));
+ __ Sdc1(src, g.ToMemOperand(destination));
} else if (rep == MachineRepresentation::kFloat32) {
__ swc1(src, g.ToMemOperand(destination));
} else {
@@ -2235,7 +2415,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat64) {
- __ ldc1(g.ToDoubleRegister(destination), src);
+ __ Ldc1(g.ToDoubleRegister(destination), src);
} else if (rep == MachineRepresentation::kFloat32) {
__ lwc1(g.ToDoubleRegister(destination), src);
} else {
@@ -2245,8 +2425,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
FPURegister temp = kScratchDoubleReg;
if (rep == MachineRepresentation::kFloat64) {
- __ ldc1(temp, src);
- __ sdc1(temp, g.ToMemOperand(destination));
+ __ Ldc1(temp, src);
+ __ Sdc1(temp, g.ToMemOperand(destination));
} else if (rep == MachineRepresentation::kFloat32) {
__ lwc1(temp, src);
__ swc1(temp, g.ToMemOperand(destination));
@@ -2307,8 +2487,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
__ Move(temp, src);
- __ ldc1(src, dst);
- __ sdc1(temp, dst);
+ __ Ldc1(src, dst);
+ __ Sdc1(temp, dst);
} else if (rep == MachineRepresentation::kFloat32) {
__ Move(temp, src);
__ lwc1(src, dst);
@@ -2328,12 +2508,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
if (rep == MachineRepresentation::kFloat64) {
MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
- __ ldc1(temp_1, dst0); // Save destination in temp_1.
+ __ Ldc1(temp_1, dst0); // Save destination in temp_1.
__ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
__ sw(temp_0, dst0);
__ lw(temp_0, src1);
__ sw(temp_0, dst1);
- __ sdc1(temp_1, src0);
+ __ Sdc1(temp_1, src0);
} else if (rep == MachineRepresentation::kFloat32) {
__ lwc1(temp_1, dst0); // Save destination in temp_1.
__ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
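The float64 stack-slot swap above moves one operand through an FPU temp while copying the other as two 32-bit words, since MIPS32 has no single 64-bit integer move. A standalone sketch of the same dance over plain memory (illustrative types, not the V8 assembler):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Swap two 8-byte slots the way the generated code does: stash one side in a
// temporary (the Ldc1 into temp_1), copy the other side over word by word
// (the lw/sw pairs), then store the temporary back (the Sdc1).
void SwapFloat64Slots(uint32_t* src, uint32_t* dst) {
  uint64_t temp_1;
  std::memcpy(&temp_1, dst, sizeof temp_1);  // Ldc1 temp_1, dst0
  dst[0] = src[0];                           // lw/sw of the low word
  dst[1] = src[1];                           // lw/sw of the high word
  std::memcpy(src, &temp_1, sizeof temp_1);  // Sdc1 temp_1, src0
}

int main() {
  uint32_t a[2] = {0x11111111u, 0x22222222u};
  uint32_t b[2] = {0x33333333u, 0x44444444u};
  SwapFloat64Slots(a, b);
  std::printf("%x %x | %x %x\n", a[0], a[1], b[0], b[1]);
  // 33333333 44444444 | 11111111 22222222
  return 0;
}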
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index edff56f72b..7d0e755617 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -132,7 +132,29 @@ namespace compiler {
V(MipsByteSwap32) \
V(MipsStackClaim) \
V(MipsSeb) \
- V(MipsSeh)
+ V(MipsSeh) \
+ V(MipsS128Zero) \
+ V(MipsI32x4Splat) \
+ V(MipsI32x4ExtractLane) \
+ V(MipsI32x4ReplaceLane) \
+ V(MipsI32x4Add) \
+ V(MipsI32x4Sub) \
+ V(MipsF32x4Splat) \
+ V(MipsF32x4ExtractLane) \
+ V(MipsF32x4ReplaceLane) \
+ V(MipsF32x4SConvertI32x4) \
+ V(MipsF32x4UConvertI32x4) \
+ V(MipsI32x4Mul) \
+ V(MipsI32x4MaxS) \
+ V(MipsI32x4MinS) \
+ V(MipsI32x4Eq) \
+ V(MipsI32x4Ne) \
+ V(MipsI32x4Shl) \
+ V(MipsI32x4ShrS) \
+ V(MipsI32x4ShrU) \
+ V(MipsI32x4MaxU) \
+ V(MipsI32x4MinU) \
+ V(MipsS32x4Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
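For context on how these V-lists are consumed, here is a sketch of the usual X-macro pattern, assuming the standard expansion into the backend's opcode enum rather than quoting the real header:

#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(MipsS128Zero)           \
  V(MipsI32x4Splat)         \
  V(MipsI32x4ExtractLane)

// Each V(Name) entry becomes one enumerator, so appending the SIMD opcodes
// to the list above is all that is needed for them to exist as kMips...
// values throughout the backend.
#define DECLARE_OPCODE(Name) k##Name,
enum DemoArchOpcode { DEMO_OPCODE_LIST(DECLARE_OPCODE) kDemoOpcodeCount };
#undef DECLARE_OPCODE

int main() {
  std::printf("%d demo opcodes\n", static_cast<int>(kDemoOpcodeCount));  // 3
  return 0;
}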
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index d0ceac12b9..c99be67dd7 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -128,6 +128,12 @@ static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(1)));
}
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ MipsOperandGenerator g(selector);
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
@@ -136,6 +142,22 @@ static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(0)));
}
+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ MipsOperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ MipsOperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
@@ -498,7 +520,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
if (m.left().IsWord32And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x1f;
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
@@ -1885,6 +1907,134 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
}
}
+void InstructionSelector::VisitAtomicExchange(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitI32x4Splat(Node* node) {
+ VisitRR(this, kMipsI32x4Splat, node);
+}
+
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
+ VisitRRI(this, kMipsI32x4ExtractLane, node);
+}
+
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
+ VisitRRIR(this, kMipsI32x4ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI32x4Add(Node* node) {
+ VisitRRR(this, kMipsI32x4Add, node);
+}
+
+void InstructionSelector::VisitI32x4Sub(Node* node) {
+ VisitRRR(this, kMipsI32x4Sub, node);
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
+}
+
+void InstructionSelector::VisitS1x4Zero(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
+}
+
+void InstructionSelector::VisitS1x8Zero(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
+}
+
+void InstructionSelector::VisitS1x16Zero(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
+}
+
+void InstructionSelector::VisitF32x4Splat(Node* node) {
+ VisitRR(this, kMipsF32x4Splat, node);
+}
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
+ VisitRRI(this, kMipsF32x4ExtractLane, node);
+}
+
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
+ VisitRRIR(this, kMipsF32x4ReplaceLane, node);
+}
+
+void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
+ VisitRR(this, kMipsF32x4SConvertI32x4, node);
+}
+
+void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
+ VisitRR(this, kMipsF32x4UConvertI32x4, node);
+}
+
+void InstructionSelector::VisitI32x4Mul(Node* node) {
+ VisitRRR(this, kMipsI32x4Mul, node);
+}
+
+void InstructionSelector::VisitI32x4MaxS(Node* node) {
+ VisitRRR(this, kMipsI32x4MaxS, node);
+}
+
+void InstructionSelector::VisitI32x4MinS(Node* node) {
+ VisitRRR(this, kMipsI32x4MinS, node);
+}
+
+void InstructionSelector::VisitI32x4Eq(Node* node) {
+ VisitRRR(this, kMipsI32x4Eq, node);
+}
+
+void InstructionSelector::VisitI32x4Ne(Node* node) {
+ VisitRRR(this, kMipsI32x4Ne, node);
+}
+
+void InstructionSelector::VisitI32x4Shl(Node* node) {
+ VisitRRI(this, kMipsI32x4Shl, node);
+}
+
+void InstructionSelector::VisitI32x4ShrS(Node* node) {
+ VisitRRI(this, kMipsI32x4ShrS, node);
+}
+
+void InstructionSelector::VisitI32x4ShrU(Node* node) {
+ VisitRRI(this, kMipsI32x4ShrU, node);
+}
+
+void InstructionSelector::VisitI32x4MaxU(Node* node) {
+ VisitRRR(this, kMipsI32x4MaxU, node);
+}
+
+void InstructionSelector::VisitI32x4MinU(Node* node) {
+ VisitRRR(this, kMipsI32x4MinU, node);
+}
+
+void InstructionSelector::VisitS32x4Select(Node* node) {
+ VisitRRRR(this, kMipsS32x4Select, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
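The `mleft.right().Value() != 0` guard added to VisitWord32Shr (and to its Word64 twin in the mips64 selector below) is easy to verify in isolation. A hedged standalone model of the selection predicate, in plain bit arithmetic rather than the actual matcher code:

#include <cstdint>
#include <cstdio>

// Ext(x, pos, size) extracts a contiguous bit field, so Shr(And(x, mask), lsb)
// only qualifies when the mask bits surviving the shift form a non-empty
// contiguous run starting at bit lsb; a zero mask must be rejected, which is
// what the added check does.
bool QualifiesForExt(uint32_t mask, uint32_t lsb) {
  if (mask == 0) return false;  // the guard added by this patch
  uint32_t field = ((mask >> lsb) << lsb) >> lsb;
  // Contiguous run starting at bit 0 <=> field + 1 is a power of two.
  return field != 0 && ((field + 1) & field) == 0;
}

int main() {
  std::printf("%d %d %d\n",
              QualifiesForExt(0x0ff0, 4),   // 1: Ext(x, 4, 8)
              QualifiesForExt(0x0000, 4),   // 0: rejected by the new check
              QualifiesForExt(0x0a00, 4));  // 0: field is not contiguous
  return 0;
}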
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 3ab85e03b7..f1831adf63 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -1221,7 +1221,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (size > 0 && size <= 32 && pos >= 0 && pos < 32) {
__ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
- } else if (size > 32 && size <= 64 && pos > 0 && pos < 32) {
+ } else if (size > 32 && size <= 64 && pos >= 0 && pos < 32) {
__ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
} else {
@@ -1916,10 +1916,187 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8:
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16:
+ case kAtomicExchangeWord32:
+ case kAtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeWord32:
+ case kAtomicAddInt8:
+ case kAtomicAddUint8:
+ case kAtomicAddInt16:
+ case kAtomicAddUint16:
+ case kAtomicAddWord32:
+ case kAtomicSubInt8:
+ case kAtomicSubUint8:
+ case kAtomicSubInt16:
+ case kAtomicSubUint16:
+ case kAtomicSubWord32:
+ case kAtomicAndInt8:
+ case kAtomicAndUint8:
+ case kAtomicAndInt16:
+ case kAtomicAndUint16:
+ case kAtomicAndWord32:
+ case kAtomicOrInt8:
+ case kAtomicOrUint8:
+ case kAtomicOrInt16:
+ case kAtomicOrUint16:
+ case kAtomicOrWord32:
+ case kAtomicXorInt8:
+ case kAtomicXorUint8:
+ case kAtomicXorInt16:
+ case kAtomicXorUint16:
+ case kAtomicXorWord32:
+ UNREACHABLE();
+ break;
case kMips64AssertEqual:
__ Assert(eq, static_cast<BailoutReason>(i.InputOperand(2).immediate()),
i.InputRegister(0), Operand(i.InputRegister(1)));
break;
+ case kMips64S128Zero: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kMips64I32x4Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMips64I32x4ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMips64I32x4ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMips64I32x4Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ FmoveLow(kScratchReg, i.InputSingleRegister(0));
+ __ fill_w(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
+ case kMips64F32x4ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
+ __ FmoveLow(i.OutputSingleRegister(), kScratchReg);
+ break;
+ }
+ case kMips64F32x4ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
+ __ insert_w(dst, i.InputInt8(1), kScratchReg);
+ break;
+ }
+ case kMips64F32x4SConvertI32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64F32x4UConvertI32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I32x4Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4MaxS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4MinS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
+ case kMips64I32x4Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ break;
+ }
+ case kMips64I32x4ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ break;
+ }
+ case kMips64I32x4ShrU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ break;
+ }
+ case kMips64I32x4MaxU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4MinU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64S32x4Select: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
+ __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
+ i.InputSimd128Register(1));
+ break;
+ }
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2341,7 +2518,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ }
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2465,6 +2644,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
}
+void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
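One subtle fix in this file is the Dext/Dextm dispatch above, where `pos > 0` becomes `pos >= 0`. A standalone model of the extraction semantics, using plain shifts instead of the V8 emitter (the helper name and bounds are illustrative):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Dext covers field sizes 1..32, Dextm sizes 33..64, both with pos in 0..31;
// before the fix, pos == 0 combined with size > 32 failed the old condition
// and missed the Dextm path entirely.
uint64_t ExtractBits(uint64_t x, int pos, int size) {
  assert(pos >= 0 && pos < 32);
  assert(size > 0 && size <= 64 && pos + size <= 64);
  uint64_t mask = (size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
  return (x >> pos) & mask;
}

int main() {
  // A 40-bit field at pos 0: exactly the case the corrected condition accepts.
  std::printf("%llx\n", static_cast<unsigned long long>(
                            ExtractBits(0xFFFFFFFFFFull, 0, 40)));
  // prints: ffffffffff
  return 0;
}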
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 0c0e1aa61e..5d22bc1eba 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -166,7 +166,29 @@ namespace compiler {
V(Mips64StackClaim) \
V(Mips64Seb) \
V(Mips64Seh) \
- V(Mips64AssertEqual)
+ V(Mips64AssertEqual) \
+ V(Mips64S128Zero) \
+ V(Mips64I32x4Splat) \
+ V(Mips64I32x4ExtractLane) \
+ V(Mips64I32x4ReplaceLane) \
+ V(Mips64I32x4Add) \
+ V(Mips64I32x4Sub) \
+ V(Mips64F32x4Splat) \
+ V(Mips64F32x4ExtractLane) \
+ V(Mips64F32x4ReplaceLane) \
+ V(Mips64F32x4SConvertI32x4) \
+ V(Mips64F32x4UConvertI32x4) \
+ V(Mips64I32x4Mul) \
+ V(Mips64I32x4MaxS) \
+ V(Mips64I32x4MinS) \
+ V(Mips64I32x4Eq) \
+ V(Mips64I32x4Ne) \
+ V(Mips64I32x4Shl) \
+ V(Mips64I32x4ShrS) \
+ V(Mips64I32x4ShrU) \
+ V(Mips64I32x4MaxU) \
+ V(Mips64I32x4MinU) \
+ V(Mips64S32x4Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 4f19a17a30..4e5c4e847e 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -141,6 +141,22 @@ static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(0)));
}
+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Mips64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Mips64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
@@ -150,6 +166,12 @@ static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(1)));
}
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ Mips64OperandGenerator g(selector);
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
@@ -686,7 +708,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
if (m.left().IsWord32And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x1f;
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
@@ -779,7 +801,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
if (m.left().IsWord64And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x3f;
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Dext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
@@ -1231,6 +1253,10 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kMips64TruncUwD, node);
}
+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
+ VisitRR(this, kMips64TruncUlD, node);
+}
+
void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
VisitRR(this, kMips64TruncUwD, node);
}
@@ -2632,6 +2658,134 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
}
}
+void InstructionSelector::VisitAtomicExchange(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitI32x4Splat(Node* node) {
+ VisitRR(this, kMips64I32x4Splat, node);
+}
+
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
+ VisitRRI(this, kMips64I32x4ExtractLane, node);
+}
+
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
+ VisitRRIR(this, kMips64I32x4ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI32x4Add(Node* node) {
+ VisitRRR(this, kMips64I32x4Add, node);
+}
+
+void InstructionSelector::VisitI32x4Sub(Node* node) {
+ VisitRRR(this, kMips64I32x4Sub, node);
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
+}
+
+void InstructionSelector::VisitS1x4Zero(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
+}
+
+void InstructionSelector::VisitS1x8Zero(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
+}
+
+void InstructionSelector::VisitS1x16Zero(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
+}
+
+void InstructionSelector::VisitF32x4Splat(Node* node) {
+ VisitRR(this, kMips64F32x4Splat, node);
+}
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
+ VisitRRI(this, kMips64F32x4ExtractLane, node);
+}
+
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
+ VisitRRIR(this, kMips64F32x4ReplaceLane, node);
+}
+
+void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
+ VisitRR(this, kMips64F32x4SConvertI32x4, node);
+}
+
+void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
+ VisitRR(this, kMips64F32x4UConvertI32x4, node);
+}
+
+void InstructionSelector::VisitI32x4Mul(Node* node) {
+ VisitRRR(this, kMips64I32x4Mul, node);
+}
+
+void InstructionSelector::VisitI32x4MaxS(Node* node) {
+ VisitRRR(this, kMips64I32x4MaxS, node);
+}
+
+void InstructionSelector::VisitI32x4MinS(Node* node) {
+ VisitRRR(this, kMips64I32x4MinS, node);
+}
+
+void InstructionSelector::VisitI32x4Eq(Node* node) {
+ VisitRRR(this, kMips64I32x4Eq, node);
+}
+
+void InstructionSelector::VisitI32x4Ne(Node* node) {
+ VisitRRR(this, kMips64I32x4Ne, node);
+}
+
+void InstructionSelector::VisitI32x4Shl(Node* node) {
+ VisitRRI(this, kMips64I32x4Shl, node);
+}
+
+void InstructionSelector::VisitI32x4ShrS(Node* node) {
+ VisitRRI(this, kMips64I32x4ShrS, node);
+}
+
+void InstructionSelector::VisitI32x4ShrU(Node* node) {
+ VisitRRI(this, kMips64I32x4ShrU, node);
+}
+
+void InstructionSelector::VisitI32x4MaxU(Node* node) {
+ VisitRRR(this, kMips64I32x4MaxU, node);
+}
+
+void InstructionSelector::VisitI32x4MinU(Node* node) {
+ VisitRRR(this, kMips64I32x4MinU, node);
+}
+
+void InstructionSelector::VisitS32x4Select(Node* node) {
+ VisitRRRR(this, kMips64S32x4Select, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index d2bdb8bff5..550317d248 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -252,6 +252,9 @@ struct BinopMatcher : public NodeMatcher {
protected:
void SwapInputs() {
std::swap(left_, right_);
+ // TODO(tebbi): This modification should notify the reducers using
+ // BinopMatcher. Alternatively, all reducers (especially value numbering)
+ // could ignore the ordering for commutative binops.
node()->ReplaceInput(0, left().node());
node()->ReplaceInput(1, right().node());
}
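The TODO above is easiest to see with a toy hash: once SwapInputs reorders a commutative node in place, any order-sensitive fingerprint computed earlier is stale. A plain-C++ illustration (the hash functions are made up for the example, not V8's):

#include <cstdint>
#include <cstdio>
#include <utility>

struct Binop {
  uint32_t left, right;
};

uint32_t OrderSensitiveHash(const Binop& n) { return n.left * 31u + n.right; }

// Order-insensitive alternative: identical for (a, b) and (b, a), in the
// spirit of the TODO's suggestion that value numbering ignore input order
// for commutative binops.
uint32_t CommutativeHash(const Binop& n) {
  return (n.left ^ n.right) * 31u + (n.left + n.right);
}

int main() {
  Binop n{2, 7};
  uint32_t cached = OrderSensitiveHash(n);
  std::swap(n.left, n.right);  // what BinopMatcher::SwapInputs does in place
  std::printf("stale=%d stable=%d\n",
              cached != OrderSensitiveHash(n),                      // 1
              CommutativeHash({2, 7}) == CommutativeHash({7, 2}));  // 1
  return 0;
}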
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 9243a08583..a45f7f7a79 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -126,11 +126,14 @@ bool NodeProperties::IsControlEdge(Edge edge) {
// static
-bool NodeProperties::IsExceptionalCall(Node* node) {
+bool NodeProperties::IsExceptionalCall(Node* node, Node** out_exception) {
if (node->op()->HasProperty(Operator::kNoThrow)) return false;
for (Edge const edge : node->use_edges()) {
if (!NodeProperties::IsControlEdge(edge)) continue;
- if (edge.from()->opcode() == IrOpcode::kIfException) return true;
+ if (edge.from()->opcode() == IrOpcode::kIfException) {
+ if (out_exception != nullptr) *out_exception = edge.from();
+ return true;
+ }
}
return false;
}
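A compact standalone model of the revised IsExceptionalCall contract, using plain structs instead of V8's Node/Edge types (the names here are illustrative only):

#include <cstdio>
#include <vector>

enum Opcode { kCall, kIfSuccess, kIfException };

struct Node {
  Opcode opcode;
  std::vector<Node*> control_uses;
};

// Same shape as the patched function: the yes/no scan now also hands back
// the matching IfException projection when the caller asks for it.
bool IsExceptionalCall(Node* node, Node** out_exception = nullptr) {
  for (Node* use : node->control_uses) {
    if (use->opcode == kIfException) {
      if (out_exception != nullptr) *out_exception = use;
      return true;
    }
  }
  return false;
}

int main() {
  Node handler{kIfException, {}};
  Node ok{kIfSuccess, {}};
  Node call{kCall, {&ok, &handler}};
  Node* exception = nullptr;
  std::printf("%d %d\n", IsExceptionalCall(&call, &exception),
              exception == &handler);  // 1 1
  return 0;
}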
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 5ed85402d1..aa35ea84e0 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -75,8 +75,9 @@ class V8_EXPORT_PRIVATE NodeProperties final {
}
// Determines whether exceptions thrown by the given node are handled locally
- // within the graph (i.e. an IfException projection is present).
- static bool IsExceptionalCall(Node* node);
+  // within the graph (i.e. an IfException projection is present). If so, the
+  // IfException projection can optionally be returned via {out_exception}.
+ static bool IsExceptionalCall(Node* node, Node** out_exception = nullptr);
// ---------------------------------------------------------------------------
// Miscellaneous mutators.
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index b50754c235..18736a1f56 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -48,29 +48,30 @@
V(RelocatableInt32Constant) \
V(RelocatableInt64Constant)
-#define INNER_OP_LIST(V) \
- V(Select) \
- V(Phi) \
- V(EffectPhi) \
- V(InductionVariablePhi) \
- V(Checkpoint) \
- V(BeginRegion) \
- V(FinishRegion) \
- V(FrameState) \
- V(StateValues) \
- V(TypedStateValues) \
- V(ArgumentsObjectState) \
- V(ObjectState) \
- V(TypedObjectState) \
- V(Call) \
- V(Parameter) \
- V(OsrValue) \
- V(OsrGuard) \
- V(LoopExit) \
- V(LoopExitValue) \
- V(LoopExitEffect) \
- V(Projection) \
- V(Retain) \
+#define INNER_OP_LIST(V) \
+ V(Select) \
+ V(Phi) \
+ V(EffectPhi) \
+ V(InductionVariablePhi) \
+ V(Checkpoint) \
+ V(BeginRegion) \
+ V(FinishRegion) \
+ V(FrameState) \
+ V(StateValues) \
+ V(TypedStateValues) \
+ V(ArgumentsElementsState) \
+ V(ArgumentsLengthState) \
+ V(ObjectState) \
+ V(TypedObjectState) \
+ V(Call) \
+ V(Parameter) \
+ V(OsrValue) \
+ V(OsrGuard) \
+ V(LoopExit) \
+ V(LoopExitValue) \
+ V(LoopExitEffect) \
+ V(Projection) \
+ V(Retain) \
V(TypeGuard)
#define COMMON_OP_LIST(V) \
@@ -81,9 +82,7 @@
// Opcodes for JavaScript operators.
#define JS_COMPARE_BINOP_LIST(V) \
V(JSEqual) \
- V(JSNotEqual) \
V(JSStrictEqual) \
- V(JSStrictNotEqual) \
V(JSLessThan) \
V(JSGreaterThan) \
V(JSLessThanOrEqual) \
@@ -202,7 +201,8 @@
V(ChangeBitToTagged) \
V(TruncateTaggedToWord32) \
V(TruncateTaggedToFloat64) \
- V(TruncateTaggedToBit)
+ V(TruncateTaggedToBit) \
+ V(TruncateTaggedPointerToBit)
#define SIMPLIFIED_CHECKED_OP_LIST(V) \
V(CheckedInt32Add) \
@@ -301,6 +301,8 @@
V(NumberToUint8Clamped) \
V(NumberSilenceNaN)
+#define SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) V(SpeculativeToNumber)
+
#define SIMPLIFIED_OTHER_OP_LIST(V) \
V(PlainPrimitiveToNumber) \
V(PlainPrimitiveToWord32) \
@@ -333,13 +335,16 @@
V(StoreElement) \
V(StoreTypedElement) \
V(ObjectIsDetectableCallable) \
+ V(ObjectIsNaN) \
V(ObjectIsNonCallable) \
V(ObjectIsNumber) \
V(ObjectIsReceiver) \
V(ObjectIsSmi) \
V(ObjectIsString) \
+ V(ObjectIsSymbol) \
V(ObjectIsUndetectable) \
- V(NewRestParameterElements) \
+ V(ArgumentsFrame) \
+ V(ArgumentsLength) \
V(NewUnmappedArgumentsElements) \
V(ArrayBufferWasNeutered) \
V(EnsureWritableFastElements) \
@@ -353,6 +358,7 @@
SIMPLIFIED_NUMBER_BINOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
SIMPLIFIED_NUMBER_UNOP_LIST(V) \
+ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) \
SIMPLIFIED_OTHER_OP_LIST(V)
// Opcodes for Machine-level operators.
@@ -377,6 +383,7 @@
#define MACHINE_UNOP_32_LIST(V) \
V(Word32Clz) \
V(Word32Ctz) \
+ V(Int32AbsWithOverflow) \
V(Word32ReverseBits) \
V(Word32ReverseBytes)
@@ -496,6 +503,7 @@
V(Word64Ctz) \
V(Word64ReverseBits) \
V(Word64ReverseBytes) \
+ V(Int64AbsWithOverflow) \
V(BitcastTaggedToWord) \
V(BitcastWordToTagged) \
V(BitcastWordToTaggedSigned) \
@@ -503,6 +511,7 @@
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
+ V(ChangeFloat64ToUint64) \
V(Float64SilenceNaN) \
V(TruncateFloat64ToUint32) \
V(TruncateFloat32ToInt32) \
@@ -549,169 +558,173 @@
V(ProtectedStore) \
V(AtomicLoad) \
V(AtomicStore) \
+ V(AtomicExchange) \
+ V(AtomicCompareExchange) \
+ V(AtomicAdd) \
+ V(AtomicSub) \
+ V(AtomicAnd) \
+ V(AtomicOr) \
+ V(AtomicXor) \
V(UnsafePointerAdd)
-#define MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
- V(CreateFloat32x4) \
- V(Float32x4ReplaceLane) \
- V(Float32x4Abs) \
- V(Float32x4Neg) \
- V(Float32x4Sqrt) \
- V(Float32x4RecipApprox) \
- V(Float32x4RecipSqrtApprox) \
- V(Float32x4Add) \
- V(Float32x4Sub) \
- V(Float32x4Mul) \
- V(Float32x4Div) \
- V(Float32x4Min) \
- V(Float32x4Max) \
- V(Float32x4MinNum) \
- V(Float32x4MaxNum) \
- V(Float32x4Equal) \
- V(Float32x4NotEqual) \
- V(Float32x4LessThan) \
- V(Float32x4LessThanOrEqual) \
- V(Float32x4GreaterThan) \
- V(Float32x4GreaterThanOrEqual) \
- V(Float32x4FromInt32x4) \
- V(Float32x4FromUint32x4) \
- V(CreateInt32x4) \
- V(Int32x4ReplaceLane) \
- V(Int32x4Neg) \
- V(Int32x4Add) \
- V(Int32x4Sub) \
- V(Int32x4Mul) \
- V(Int32x4Min) \
- V(Int32x4Max) \
- V(Int32x4ShiftLeftByScalar) \
- V(Int32x4ShiftRightByScalar) \
- V(Int32x4Equal) \
- V(Int32x4NotEqual) \
- V(Int32x4LessThan) \
- V(Int32x4LessThanOrEqual) \
- V(Int32x4GreaterThan) \
- V(Int32x4GreaterThanOrEqual) \
- V(Int32x4FromFloat32x4) \
- V(Uint32x4Min) \
- V(Uint32x4Max) \
- V(Uint32x4ShiftLeftByScalar) \
- V(Uint32x4ShiftRightByScalar) \
- V(Uint32x4LessThan) \
- V(Uint32x4LessThanOrEqual) \
- V(Uint32x4GreaterThan) \
- V(Uint32x4GreaterThanOrEqual) \
- V(Uint32x4FromFloat32x4) \
- V(Bool32x4And) \
- V(Bool32x4Or) \
- V(Bool32x4Xor) \
- V(Bool32x4Not) \
- V(CreateInt16x8) \
- V(Int16x8ReplaceLane) \
- V(Int16x8Neg) \
- V(Int16x8Add) \
- V(Int16x8AddSaturate) \
- V(Int16x8Sub) \
- V(Int16x8SubSaturate) \
- V(Int16x8Mul) \
- V(Int16x8Min) \
- V(Int16x8Max) \
- V(Int16x8ShiftLeftByScalar) \
- V(Int16x8ShiftRightByScalar) \
- V(Int16x8Equal) \
- V(Int16x8NotEqual) \
- V(Int16x8LessThan) \
- V(Int16x8LessThanOrEqual) \
- V(Int16x8GreaterThan) \
- V(Int16x8GreaterThanOrEqual) \
- V(Uint16x8AddSaturate) \
- V(Uint16x8SubSaturate) \
- V(Uint16x8Min) \
- V(Uint16x8Max) \
- V(Uint16x8ShiftLeftByScalar) \
- V(Uint16x8ShiftRightByScalar) \
- V(Uint16x8LessThan) \
- V(Uint16x8LessThanOrEqual) \
- V(Uint16x8GreaterThan) \
- V(Uint16x8GreaterThanOrEqual) \
- V(Bool16x8And) \
- V(Bool16x8Or) \
- V(Bool16x8Xor) \
- V(Bool16x8Not) \
- V(CreateInt8x16) \
- V(Int8x16ReplaceLane) \
- V(Int8x16Neg) \
- V(Int8x16Add) \
- V(Int8x16AddSaturate) \
- V(Int8x16Sub) \
- V(Int8x16SubSaturate) \
- V(Int8x16Mul) \
- V(Int8x16Min) \
- V(Int8x16Max) \
- V(Int8x16ShiftLeftByScalar) \
- V(Int8x16ShiftRightByScalar) \
- V(Int8x16Equal) \
- V(Int8x16NotEqual) \
- V(Int8x16LessThan) \
- V(Int8x16LessThanOrEqual) \
- V(Int8x16GreaterThan) \
- V(Int8x16GreaterThanOrEqual) \
- V(Uint8x16AddSaturate) \
- V(Uint8x16SubSaturate) \
- V(Uint8x16Min) \
- V(Uint8x16Max) \
- V(Uint8x16ShiftLeftByScalar) \
- V(Uint8x16ShiftRightByScalar) \
- V(Uint8x16LessThan) \
- V(Uint8x16LessThanOrEqual) \
- V(Uint8x16GreaterThan) \
- V(Uint8x16GreaterThanOrEqual) \
- V(Bool8x16And) \
- V(Bool8x16Or) \
- V(Bool8x16Xor) \
- V(Bool8x16Not) \
- V(Simd128And) \
- V(Simd128Or) \
- V(Simd128Xor) \
- V(Simd128Not) \
- V(Simd32x4Select) \
- V(Simd32x4Swizzle) \
- V(Simd32x4Shuffle) \
- V(Simd16x8Select) \
- V(Simd16x8Swizzle) \
- V(Simd16x8Shuffle) \
- V(Simd8x16Select) \
- V(Simd8x16Swizzle) \
- V(Simd8x16Shuffle)
-
-#define MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
- V(Float32x4ExtractLane) \
- V(Int32x4ExtractLane) \
- V(Int16x8ExtractLane) \
- V(Int8x16ExtractLane)
-
-#define MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
- V(Bool32x4AnyTrue) \
- V(Bool32x4AllTrue) \
- V(Bool16x8AnyTrue) \
- V(Bool16x8AllTrue) \
- V(Bool8x16AnyTrue) \
- V(Bool8x16AllTrue)
-
-#define MACHINE_SIMD_GENERIC_OP_LIST(V) \
- V(Simd128Load) \
- V(Simd128Load1) \
- V(Simd128Load2) \
- V(Simd128Load3) \
- V(Simd128Store) \
- V(Simd128Store1) \
- V(Simd128Store2) \
- V(Simd128Store3)
-
-#define MACHINE_SIMD_OP_LIST(V) \
- MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
- MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
- MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
- MACHINE_SIMD_GENERIC_OP_LIST(V)
+#define MACHINE_SIMD_OP_LIST(V) \
+ V(F32x4Splat) \
+ V(F32x4ExtractLane) \
+ V(F32x4ReplaceLane) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4Sqrt) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Div) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4MinNum) \
+ V(F32x4MaxNum) \
+ V(F32x4RecipRefine) \
+ V(F32x4RecipSqrtRefine) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(F32x4Gt) \
+ V(F32x4Ge) \
+ V(I32x4Splat) \
+ V(I32x4ExtractLane) \
+ V(I32x4ReplaceLane) \
+ V(I32x4SConvertF32x4) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4Neg) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4LtS) \
+ V(I32x4LeS) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4UConvertF32x4) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I32x4ShrU) \
+ V(I32x4MinU) \
+ V(I32x4MaxU) \
+ V(I32x4LtU) \
+ V(I32x4LeU) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
+ V(I16x8Splat) \
+ V(I16x8ExtractLane) \
+ V(I16x8ReplaceLane) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8Neg) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8SConvertI32x4) \
+ V(I16x8Add) \
+ V(I16x8AddSaturateS) \
+ V(I16x8Sub) \
+ V(I16x8SubSaturateS) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8LtS) \
+ V(I16x8LeS) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
+ V(I16x8ShrU) \
+ V(I16x8UConvertI32x4) \
+ V(I16x8AddSaturateU) \
+ V(I16x8SubSaturateU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I16x8LtU) \
+ V(I16x8LeU) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
+ V(I8x16Splat) \
+ V(I8x16ExtractLane) \
+ V(I8x16ReplaceLane) \
+ V(I8x16SConvertI16x8) \
+ V(I8x16Neg) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16Add) \
+ V(I8x16AddSaturateS) \
+ V(I8x16Sub) \
+ V(I8x16SubSaturateS) \
+ V(I8x16Mul) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16LtS) \
+ V(I8x16LeS) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16UConvertI16x8) \
+ V(I8x16AddSaturateU) \
+ V(I8x16SubSaturateU) \
+ V(I8x16ShrU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU) \
+ V(I8x16LtU) \
+ V(I8x16LeU) \
+ V(I8x16GtU) \
+ V(I8x16GeU) \
+ V(S128Load) \
+ V(S128Store) \
+ V(S128Zero) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor) \
+ V(S128Not) \
+ V(S32x4Select) \
+ V(S32x4Swizzle) \
+ V(S32x4Shuffle) \
+ V(S16x8Select) \
+ V(S16x8Swizzle) \
+ V(S16x8Shuffle) \
+ V(S8x16Select) \
+ V(S8x16Swizzle) \
+ V(S8x16Shuffle) \
+ V(S1x4Zero) \
+ V(S1x4And) \
+ V(S1x4Or) \
+ V(S1x4Xor) \
+ V(S1x4Not) \
+ V(S1x4AnyTrue) \
+ V(S1x4AllTrue) \
+ V(S1x8Zero) \
+ V(S1x8And) \
+ V(S1x8Or) \
+ V(S1x8Xor) \
+ V(S1x8Not) \
+ V(S1x8AnyTrue) \
+ V(S1x8AllTrue) \
+ V(S1x16Zero) \
+ V(S1x16And) \
+ V(S1x16Or) \
+ V(S1x16Xor) \
+ V(S1x16Not) \
+ V(S1x16AnyTrue) \
+ V(S1x16AllTrue)
#define VALUE_OP_LIST(V) \
COMMON_OP_LIST(V) \
@@ -778,6 +791,13 @@ class V8_EXPORT_PRIVATE IrOpcode {
return kIfTrue <= value && value <= kIfDefault;
}
+ // Returns true if opcode terminates control flow in a graph (i.e. respective
+  // nodes are expected to have control uses by the graph's {End} node only).
+ static bool IsGraphTerminator(Value value) {
+ return value == kDeoptimize || value == kReturn || value == kTailCall ||
+ value == kTerminate || value == kThrow;
+ }
+
// Returns true if opcode can be inlined.
static bool IsInlineeOpcode(Value value) {
return value == kJSConstruct || value == kJSCall;
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index dfd4c4b604..a3948d73ab 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -625,13 +625,20 @@ Type* OperationTyper::NumberDivide(Type* lhs, Type* rhs) {
}
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+
// Division is tricky, so all we do is try ruling out -0 and NaN.
- bool maybe_minuszero = !lhs->Is(cache_.kPositiveIntegerOrNaN) ||
- !rhs->Is(cache_.kPositiveIntegerOrNaN);
bool maybe_nan =
lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
(rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
+ lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
+ rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
+
+ // Try to rule out -0.
+ bool maybe_minuszero =
+ !lhs->Is(cache_.kInteger) ||
+ (lhs->Maybe(cache_.kZeroish) && rhs->Min() < 0.0) ||
+ (rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY);
// Take into account the -0 and NaN information computed earlier.
Type* type = Type::PlainNumber();
@@ -905,6 +912,9 @@ Type* OperationTyper::NumberImul(Type* lhs, Type* rhs) {
Type* OperationTyper::NumberMax(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+ return Type::None();
+ }
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) {
return Type::NaN();
}
@@ -928,6 +938,9 @@ Type* OperationTyper::NumberMax(Type* lhs, Type* rhs) {
Type* OperationTyper::NumberMin(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+ return Type::None();
+ }
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) {
return Type::NaN();
}
@@ -955,11 +968,11 @@ Type* OperationTyper::NumberPow(Type* lhs, Type* rhs) {
return Type::Number();
}
-#define SPECULATIVE_NUMBER_BINOP(Name) \
- Type* OperationTyper::Speculative##Name(Type* lhs, Type* rhs) { \
- lhs = ToNumber(Type::Intersect(lhs, Type::NumberOrOddball(), zone())); \
- rhs = ToNumber(Type::Intersect(rhs, Type::NumberOrOddball(), zone())); \
- return Name(lhs, rhs); \
+#define SPECULATIVE_NUMBER_BINOP(Name) \
+ Type* OperationTyper::Speculative##Name(Type* lhs, Type* rhs) { \
+ lhs = SpeculativeToNumber(lhs); \
+ rhs = SpeculativeToNumber(rhs); \
+ return Name(lhs, rhs); \
}
SPECULATIVE_NUMBER_BINOP(NumberAdd)
SPECULATIVE_NUMBER_BINOP(NumberSubtract)
@@ -974,6 +987,10 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRight)
SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
#undef SPECULATIVE_NUMBER_BINOP
+Type* OperationTyper::SpeculativeToNumber(Type* type) {
+ return ToNumber(Type::Intersect(type, Type::NumberOrOddball(), zone()));
+}
+
Type* OperationTyper::ToPrimitive(Type* type) {
if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
return type;
@@ -1009,6 +1026,19 @@ Type* OperationTyper::FalsifyUndefined(ComparisonOutcome outcome) {
return singleton_true();
}
+Type* OperationTyper::CheckFloat64Hole(Type* type) {
+ if (type->Maybe(Type::Hole())) {
+ // Turn "the hole" into undefined.
+ type = Type::Intersect(type, Type::Number(), zone());
+ type = Type::Union(type, Type::Undefined(), zone());
+ }
+ return type;
+}
+
+Type* OperationTyper::CheckNumber(Type* type) {
+ return Type::Intersect(type, Type::Number(), zone());
+}
+
Type* OperationTyper::TypeTypeGuard(const Operator* sigma_op, Type* input) {
return Type::Intersect(input, TypeGuardTypeOf(sigma_op), zone());
}
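The reworked -0 reasoning in NumberDivide above is grounded in ordinary IEEE-754 behavior, which a few doubles demonstrate directly (plain C++, same arithmetic rules as JS numbers for these cases):

#include <cmath>
#include <cstdio>

int main() {
  // lhs maybe zero and rhs->Min() < 0: a zero divided by a negative is -0.
  double a = 0.0 / -5.0;
  // rhs with infinite extremes: a finite lhs over -Infinity is also -0.
  double b = 1.0 / -INFINITY;
  // Integer lhs, positive finite rhs: no way to produce -0.
  double c = 4.0 / 2.0;
  std::printf("%d %d %d\n", std::signbit(a), std::signbit(b),
              std::signbit(c));  // 1 1 0
  return 0;
}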
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index 09f063c14e..d2d10fab84 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -23,7 +23,7 @@ class Operator;
class Type;
class TypeCache;
-class OperationTyper {
+class V8_EXPORT_PRIVATE OperationTyper {
public:
OperationTyper(Isolate* isolate, Zone* zone);
@@ -39,6 +39,7 @@ class OperationTyper {
// Number unary operators.
#define DECLARE_METHOD(Name) Type* Name(Type* type);
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
// Number binary operators.
@@ -47,6 +48,10 @@ class OperationTyper {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
+ // Check operators.
+ Type* CheckFloat64Hole(Type* type);
+ Type* CheckNumber(Type* type);
+
Type* TypeTypeGuard(const Operator* sigma_op, Type* input);
enum ComparisonOutcomeFlags {
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 0d488d8514..0c0a3d803a 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -32,7 +32,6 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
// Strict equality cannot lazily deoptimize.
case IrOpcode::kJSStrictEqual:
- case IrOpcode::kJSStrictNotEqual:
return false;
// Binary operations
@@ -54,7 +53,6 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
// Compare operations
case IrOpcode::kJSEqual:
- case IrOpcode::kJSNotEqual:
case IrOpcode::kJSGreaterThan:
case IrOpcode::kJSGreaterThanOrEqual:
case IrOpcode::kJSLessThan:
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index 687424b66f..ebf2c421b5 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -107,9 +107,6 @@ void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
tmp_inputs.push_back(mapping->at(input->id()));
}
copy = graph->NewNode(orig->op(), orig->InputCount(), &tmp_inputs[0]);
- if (NodeProperties::IsTyped(orig)) {
- NodeProperties::SetType(copy, NodeProperties::GetType(orig));
- }
mapping->at(orig->id()) = copy;
TRACE(" copy #%d:%s -> #%d\n", orig->id(), orig->op()->mnemonic(),
copy->id());
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 330b0960ec..585923fa69 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -26,7 +26,6 @@
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/frame-elider.h"
-#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/instruction-selector.h"
@@ -115,6 +114,7 @@ class PipelineData {
// For WASM compile entry point.
PipelineData(ZoneStats* zone_stats, CompilationInfo* info, JSGraph* jsgraph,
+ PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
ZoneVector<trap_handler::ProtectedInstructionData>*
protected_instructions)
@@ -122,6 +122,7 @@ class PipelineData {
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
+ pipeline_statistics_(pipeline_statistics),
graph_zone_scope_(zone_stats_, ZONE_NAME),
graph_(jsgraph->graph()),
source_positions_(source_positions),
@@ -519,14 +520,14 @@ PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
if (FLAG_trace_turbo) {
TurboJsonFile json_of(info, std::ios_base::trunc);
- Handle<Script> script = info->script();
std::unique_ptr<char[]> function_name = info->GetDebugName();
- int pos = info->shared_info()->start_position();
+ int pos = info->parse_info() ? info->shared_info()->start_position() : 0;
json_of << "{\"function\":\"" << function_name.get()
<< "\", \"sourcePosition\":" << pos << ", \"source\":\"";
Isolate* isolate = info->isolate();
- if (!script->IsUndefined(isolate) &&
- !script->source()->IsUndefined(isolate)) {
+ Handle<Script> script =
+ info->parse_info() ? info->script() : Handle<Script>::null();
+ if (!script.is_null() && !script->source()->IsUndefined(isolate)) {
DisallowHeapAllocation no_allocation;
int start = info->shared_info()->start_position();
int len = info->shared_info()->end_position() - start;
@@ -548,10 +549,11 @@ class PipelineCompilationJob final : public CompilationJob {
PipelineCompilationJob(ParseInfo* parse_info, Handle<JSFunction> function)
// Note that the CompilationInfo is not initialized at the time we pass it
// to the CompilationJob constructor, but it is not dereferenced there.
- : CompilationJob(parse_info->isolate(), &info_, "TurboFan"),
+ : CompilationJob(function->GetIsolate(), &info_, "TurboFan"),
parse_info_(parse_info),
- zone_stats_(parse_info->isolate()->allocator()),
- info_(parse_info_.get()->zone(), parse_info_.get(), function),
+ zone_stats_(function->GetIsolate()->allocator()),
+ info_(parse_info_.get()->zone(), parse_info_.get(),
+ function->GetIsolate(), function),
pipeline_statistics_(CreatePipelineStatistics(info(), &zone_stats_)),
data_(&zone_stats_, info(), pipeline_statistics_.get()),
pipeline_(&data_),
@@ -648,7 +650,9 @@ class PipelineWasmCompilationJob final : public CompilationJob {
: CompilationJob(info->isolate(), info, "TurboFan",
State::kReadyToExecute),
zone_stats_(info->isolate()->allocator()),
- data_(&zone_stats_, info, jsgraph, source_positions, protected_insts),
+ pipeline_statistics_(CreatePipelineStatistics(info, &zone_stats_)),
+ data_(&zone_stats_, info, jsgraph, pipeline_statistics_.get(),
+ source_positions, protected_insts),
pipeline_(&data_),
linkage_(descriptor),
allow_signalling_nan_(allow_signalling_nan) {}
@@ -660,6 +664,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
private:
ZoneStats zone_stats_;
+ std::unique_ptr<PipelineStatistics> pipeline_statistics_;
PipelineData data_;
PipelineImpl pipeline_;
Linkage linkage_;
@@ -693,9 +698,9 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
pipeline_.RunPrintAndVerify("Optimized Machine", true);
}
@@ -753,11 +758,15 @@ struct GraphBuilderPhase {
if (data->info()->is_optimizing_from_bytecode()) {
// Bytecode graph builder assumes deoptimization is enabled.
DCHECK(data->info()->is_deoptimization_enabled());
+ JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags;
+ if (data->info()->is_bailout_on_uninitialized()) {
+ flags |= JSTypeHintLowering::kBailoutOnUninitialized;
+ }
BytecodeGraphBuilder graph_builder(
temp_zone, data->info()->shared_info(),
handle(data->info()->closure()->feedback_vector()),
data->info()->osr_ast_id(), data->jsgraph(), 1.0f,
- data->source_positions());
+ data->source_positions(), SourcePosition::kNotInlined, flags);
succeeded = graph_builder.CreateGraph();
} else {
AstGraphBuilderWithPositions graph_builder(
@@ -783,18 +792,17 @@ struct InliningPhase {
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
- JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
- if (data->info()->is_deoptimization_enabled()) {
- call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
- }
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
- call_reducer_flags, data->native_context(),
+ data->native_context(),
data->info()->dependencies());
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(),
data->info()->is_function_context_specializing()
? handle(data->info()->context())
- : MaybeHandle<Context>());
+ : MaybeHandle<Context>(),
+ data->info()->is_function_context_specializing()
+ ? data->info()->closure()
+ : MaybeHandle<JSFunction>());
JSFrameSpecialization frame_specialization(
&graph_reducer, data->info()->osr_frame(), data->jsgraph());
JSNativeContextSpecialization::Flags flags =
@@ -805,9 +813,6 @@ struct InliningPhase {
if (data->info()->is_bailout_on_uninitialized()) {
flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
}
- if (data->info()->is_deoptimization_enabled()) {
- flags |= JSNativeContextSpecialization::kDeoptimizationEnabled;
- }
JSNativeContextSpecialization native_context_specialization(
&graph_reducer, data->jsgraph(), flags, data->native_context(),
data->info()->dependencies(), temp_zone);
@@ -827,10 +832,14 @@ struct InliningPhase {
if (data->info()->is_frame_specializing()) {
AddReducer(data, &graph_reducer, &frame_specialization);
}
- AddReducer(data, &graph_reducer, &native_context_specialization);
+ if (data->info()->is_deoptimization_enabled()) {
+ AddReducer(data, &graph_reducer, &native_context_specialization);
+ }
AddReducer(data, &graph_reducer, &context_specialization);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
- AddReducer(data, &graph_reducer, &call_reducer);
+ if (data->info()->is_deoptimization_enabled()) {
+ AddReducer(data, &graph_reducer, &call_reducer);
+ }
AddReducer(data, &graph_reducer, &inlining);
graph_reducer.ReduceGraph();
}
@@ -998,9 +1007,7 @@ struct LoopExitEliminationPhase {
};
struct ConcurrentOptimizationPrepPhase {
- static const char* phase_name() {
- return "concurrent optimization preparation";
- }
+ static const char* phase_name() { return "concurrency preparation"; }
void Run(PipelineData* data, Zone* temp_zone) {
// Make sure we cache these code stubs.
@@ -1041,9 +1048,9 @@ struct EarlyOptimizationPhase {
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &redundancy_elimination);
- AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
}
};
@@ -1075,7 +1082,7 @@ struct EffectControlLinearizationPhase {
// effects (such as changing representation to tagged or
// 'floating' allocation regions.)
Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kNoFlags);
+ Scheduler::kTempSchedule);
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
TraceSchedule(data->info(), schedule);
@@ -1134,6 +1141,7 @@ struct LoadEliminationPhase {
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
temp_zone);
+ CheckpointElimination checkpoint_elimination(&graph_reducer);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
@@ -1141,8 +1149,9 @@ struct LoadEliminationPhase {
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &redundancy_elimination);
AddReducer(data, &graph_reducer, &load_elimination);
- AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
}
};
@@ -1181,11 +1190,11 @@ struct LateOptimizationPhase {
TailCallOptimization tco(data->common(), data->graph());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &select_lowering);
AddReducer(data, &graph_reducer, &tco);
+ AddReducer(data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
}
};
@@ -1306,9 +1315,7 @@ struct AllocateGeneralRegistersPhase {
template <typename RegAllocator>
struct AllocateFPRegistersPhase {
- static const char* phase_name() {
- return "allocate floating point registers";
- }
+ static const char* phase_name() { return "allocate f.p. registers"; }
void Run(PipelineData* data, Zone* temp_zone) {
RegAllocator allocator(data->register_allocation_data(), FP_REGISTERS,
@@ -1506,8 +1513,6 @@ bool PipelineImpl::CreateGraph() {
// Perform OSR deconstruction.
if (info()->is_osr()) {
Run<OsrDeconstructionPhase>();
-
- Run<UntyperPhase>();
RunPrintAndVerify("OSR deconstruction", true);
}
@@ -1519,11 +1524,6 @@ bool PipelineImpl::CreateGraph() {
Run<EarlyGraphTrimmingPhase>();
RunPrintAndVerify("Early trimmed", true);
- if (FLAG_print_turbo_replay) {
- // Print a replay of the initial graph.
- GraphReplayPrinter::PrintReplay(data->graph());
- }
-
// Run the type-sensitive lowerings and optimizations on the graph.
{
// Determine the Typer operation flags.
@@ -1545,8 +1545,6 @@ bool PipelineImpl::CreateGraph() {
Run<TyperPhase>(&typer);
RunPrintAndVerify("Typed");
- data->BeginPhaseKind("lowering");
-
// Lower JSOperators where we can determine types.
Run<TypedLoweringPhase>();
RunPrintAndVerify("Lowered typed");
@@ -1564,6 +1562,8 @@ bool PipelineImpl::CreateGraph() {
bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
PipelineData* data = this->data_;
+ data->BeginPhaseKind("lowering");
+
if (data->info()->is_loop_peeling_enabled()) {
Run<LoopPeelingPhase>();
RunPrintAndVerify("Loops peeled", true);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 455b0ae97e..2967ad73ed 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -792,6 +792,16 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ sync(); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label exchange; \
+ __ bind(&exchange); \
+ __ load_instr(i.OutputRegister(0), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ store_instr(i.InputRegister(2), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ bne(&exchange, cr0); \
+ } while (0)
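The macro above emits the classic PPC load-reserve/store-conditional loop: lbarx/lharx/lwarx load the old value and set a reservation, the paired stbcx/sthcx/stwcx store succeeds only if the reservation still holds, and the branch on cr0 retries otherwise. A portable C++ sketch of the same shape, using a weak compare-exchange to model the conditional store:

#include <cstdint>

// Sketch only: models lwarx/stwcx retry semantics with GCC/Clang builtins.
uint32_t AtomicExchange32(volatile uint32_t* slot, uint32_t new_value) {
  uint32_t old_value;
  do {
    old_value = *slot;  // like lwarx: load and take the reservation
    // like stwcx: the store succeeds only if nothing intervened; a weak CAS
    // may fail spuriously, exactly like a lost reservation, so we loop.
  } while (!__atomic_compare_exchange_n(slot, &old_value, new_value,
                                        /*weak=*/true, __ATOMIC_SEQ_CST,
                                        __ATOMIC_SEQ_CST));
  return old_value;  // the previous contents, i.e. the exchange result
}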
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
@@ -1579,12 +1589,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_FLOAT_UNOP_RC(fneg, 0);
break;
case kPPC_Cntlz32:
- __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
+ __ cntlzw(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_Cntlz64:
- __ cntlzd_(i.OutputRegister(), i.InputRegister(0));
+ __ cntlzd(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
#endif
@@ -1978,6 +1988,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
break;
+ case kAtomicExchangeInt8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
+ __ extsb(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
+ break;
+ case kAtomicExchangeInt16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
+ __ extsh(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
+ break;
+ case kAtomicExchangeWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
+ break;
default:
UNREACHABLE();
break;
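The extsb/extsh instructions after the Int8/Int16 cases are needed because the exchange loop leaves the old value zero-extended in the output register, while the signed variants must deliver a sign-extended word. In C++ terms:

#include <cstdint>

// The exchange returns the raw low byte zero-extended; Int8 semantics
// require sign extension, which is what extsb does on PPC.
int32_t AsInt8Result(uint32_t raw) {
  return static_cast<int8_t>(raw & 0xff);  // 0x80 -> -128, not 128
}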
@@ -2194,7 +2221,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ }
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2337,6 +2366,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
+void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2423,21 +2453,30 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
double value;
-// bit_cast of snan is converted to qnan on ia32/x64
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- intptr_t valueInt = (src.type() == Constant::kFloat32)
- ? src.ToFloat32AsInt()
- : src.ToFloat64AsInt();
- if (valueInt == ((src.type() == Constant::kFloat32)
- ? 0x7fa00000
- : 0x7fa0000000000000)) {
- value = bit_cast<double, int64_t>(0x7ff4000000000000L);
+ // casting a single precision snan to double precision
+ // converts it to qnan on ia32/x64
+ if (src.type() == Constant::kFloat32) {
+ int32_t val = src.ToFloat32AsInt();
+ if ((val & 0x7f800000) == 0x7f800000) {
+ int64_t dval = static_cast<int64_t>(val);
+ dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29);
+ value = bit_cast<double, int64_t>(dval);
+ } else {
+ value = src.ToFloat32();
+ }
} else {
-#endif
- value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
- : src.ToFloat64();
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ int64_t val = src.ToFloat64AsInt();
+ if ((val & 0x7f80000000000000) == 0x7f80000000000000) {
+ value = bit_cast<double, int64_t>(val);
+ } else {
+ value = src.ToFloat64();
+ }
}
+#else
+ value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+ : src.ToFloat64();
#endif
__ LoadDoubleLiteral(dst, value, kScratchReg);
if (destination->IsFPStackSlot()) {
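Context for the constant-move rewrite above: on ia32/x64 hosts the float-to-double conversion instruction quiets a signaling NaN, so when a Float32 constant has an all-ones exponent the code widens the bit pattern by hand instead of converting through the FPU. A simplified sketch of that widening (the committed code uses a slightly different shift arrangement):

#include <cstdint>
#include <cstring>

// Widen float bits to double bits manually so an sNaN stays signaling:
// copy the sign, force an all-ones exponent, left-align the mantissa.
// Only meaningful for inputs whose exponent bits are already all ones.
double WidenNaNPreservingBits(uint32_t f32_bits) {
  uint64_t sign = static_cast<uint64_t>(f32_bits >> 31) << 63;
  uint64_t exponent = static_cast<uint64_t>(0x7ff) << 52;
  uint64_t payload = static_cast<uint64_t>(f32_bits & 0x007fffff) << 29;
  uint64_t d64_bits = sign | exponent | payload;
  double result;
  std::memcpy(&result, &d64_bits, sizeof result);  // no FPU involved
  return result;
}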
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index c2770b3ce8..449e710389 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -1240,6 +1240,10 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kPPC_Uint32ToUint64, node);
}
+
+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
+ VisitRR(this, kPPC_DoubleToUint64, node);
+}
#endif
@@ -2113,6 +2117,62 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
0, nullptr, input_count, inputs);
}
+void InstructionSelector::VisitAtomicExchange(Node* node) {
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
+void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 0e101770c3..a2cf562115 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -167,8 +167,7 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2,
void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
void RawMachineAssembler::Unreachable() {
- Node* values[] = {UndefinedConstant()}; // Unused.
- Node* ret = MakeNode(common()->Throw(), 1, values);
+ Node* ret = MakeNode(common()->Throw(), 0, nullptr);
schedule()->AddThrow(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -279,7 +278,17 @@ Node* RawMachineAssembler::CallCFunction8(
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
return AddNode(common()->Call(descriptor), arraysize(args), args);
}
+BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
+ label->used_ = true;
+ return EnsureBlock(label);
+}
+BasicBlock* RawMachineAssembler::EnsureBlock(RawMachineLabel* label) {
+ if (label->block_ == nullptr) {
+ label->block_ = schedule()->NewBasicBlock();
+ }
+ return label->block_;
+}
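The Use/EnsureBlock pair moved here implements lazy basic-block creation: a label only materializes its block the first time a branch targets it or it is bound. The pattern in isolation, with hypothetical types:

// Sketch of the lazy-creation pattern behind Use/EnsureBlock.
struct BasicBlock;
struct Label {
  BasicBlock* block = nullptr;
  bool used = false;
};

template <class Factory>
BasicBlock* Ensure(Label* label, Factory new_block) {
  if (label->block == nullptr) label->block = new_block();
  return label->block;
}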
void RawMachineAssembler::Bind(RawMachineLabel* label) {
DCHECK(current_block_ == nullptr);
@@ -289,18 +298,29 @@ void RawMachineAssembler::Bind(RawMachineLabel* label) {
current_block_->set_deferred(label->deferred_);
}
-
-BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
- label->used_ = true;
- return EnsureBlock(label);
+#if DEBUG
+void RawMachineAssembler::Bind(RawMachineLabel* label,
+ AssemblerDebugInfo info) {
+ if (current_block_ != nullptr) {
+ std::stringstream str;
+ str << "Binding label without closing previous block:"
+ << "\n# label: " << info
+ << "\n# previous block: " << *current_block_;
+ FATAL(str.str().c_str());
+ }
+ Bind(label);
+ current_block_->set_debug_info(info);
}
-
-BasicBlock* RawMachineAssembler::EnsureBlock(RawMachineLabel* label) {
- if (label->block_ == nullptr) label->block_ = schedule()->NewBasicBlock();
- return label->block_;
+void RawMachineAssembler::PrintCurrentBlock(std::ostream& os) {
+ os << CurrentBlock();
}
+void RawMachineAssembler::SetInitialDebugInformation(
+ AssemblerDebugInfo debug_info) {
+ CurrentBlock()->set_debug_info(debug_info);
+}
+#endif // DEBUG
BasicBlock* RawMachineAssembler::CurrentBlock() {
DCHECK(current_block_);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index d726217ed4..19a0f3bfd4 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -174,6 +174,23 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* value) {
return AddNode(machine()->AtomicStore(rep), base, index, value);
}
+#define ATOMIC_FUNCTION(name) \
+ Node* Atomic##name(MachineType rep, Node* base, Node* index, Node* value) { \
+ return AddNode(machine()->Atomic##name(rep), base, index, value); \
+ }
+ ATOMIC_FUNCTION(Exchange);
+ ATOMIC_FUNCTION(Add);
+ ATOMIC_FUNCTION(Sub);
+ ATOMIC_FUNCTION(And);
+ ATOMIC_FUNCTION(Or);
+ ATOMIC_FUNCTION(Xor);
+#undef ATOMIC_FUNCTION
+
+ Node* AtomicCompareExchange(MachineType rep, Node* base, Node* index,
+ Node* old_value, Node* new_value) {
+ return AddNode(machine()->AtomicCompareExchange(rep), base, index,
+ old_value, new_value);
+ }
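For readability, here is what one instantiation of the ATOMIC_FUNCTION macro above expands to (modulo whitespace):

Node* AtomicExchange(MachineType rep, Node* base, Node* index, Node* value) {
  return AddNode(machine()->AtomicExchange(rep), base, index, value);
}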
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
@@ -429,6 +446,19 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
#undef UINTPTR_BINOP
+ Node* Int32AbsWithOverflow(Node* a) {
+ return AddNode(machine()->Int32AbsWithOverflow().op(), a);
+ }
+
+ Node* Int64AbsWithOverflow(Node* a) {
+ return AddNode(machine()->Int64AbsWithOverflow().op(), a);
+ }
+
+ Node* IntPtrAbsWithOverflow(Node* a) {
+ return kPointerSize == 8 ? Int64AbsWithOverflow(a)
+ : Int32AbsWithOverflow(a);
+ }
+
Node* Float32Add(Node* a, Node* b) {
return AddNode(machine()->Float32Add(), a, b);
}
@@ -568,6 +598,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* ChangeFloat64ToUint32(Node* a) {
return AddNode(machine()->ChangeFloat64ToUint32(), a);
}
+ Node* ChangeFloat64ToUint64(Node* a) {
+ return AddNode(machine()->ChangeFloat64ToUint64(), a);
+ }
Node* TruncateFloat64ToUint32(Node* a) {
return AddNode(machine()->TruncateFloat64ToUint32(), a);
}
@@ -772,6 +805,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void Unreachable();
void Comment(const char* msg);
+#if DEBUG
+ void Bind(RawMachineLabel* label, AssemblerDebugInfo info);
+ void SetInitialDebugInformation(AssemblerDebugInfo info);
+ void PrintCurrentBlock(std::ostream& os);
+#endif // DEBUG
+
// Adds success / exception successor blocks and ends the current block,
// which ends in a potentially throwing call node.
void Continuations(Node* call, RawMachineLabel* if_success,
@@ -836,6 +875,8 @@ class V8_EXPORT_PRIVATE RawMachineLabel final {
: deferred_(type == kDeferred) {}
~RawMachineLabel();
+ BasicBlock* block() const { return block_; }
+
private:
BasicBlock* block_ = nullptr;
bool used_ = false;
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 403c344aee..f9c076d951 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/register-allocator.h"
+
+#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/register-allocator.h"
#include "src/string-stream.h"
namespace v8 {
@@ -3150,6 +3152,9 @@ bool LinearScanAllocator::TryAllocateFreeReg(
// the range end. Split current at position where it becomes blocked.
LiveRange* tail = SplitRangeAt(current, pos);
AddToUnhandledSorted(tail);
+
+ // Try to allocate preferred register once more.
+ if (TryAllocatePreferredReg(current, free_until_pos)) return true;
}
// Register reg is available at the range start and is free until the range
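The register-allocator change above gives the hint a second chance: after the current range is split at the position where it becomes blocked, the shortened head may now fit entirely inside the window where its preferred register is free. A sketch with hypothetical types:

#include <vector>

// Hypothetical linear-scan fragment; positions are instruction indices.
struct RangeSketch { int start, end, preferred_reg; };

bool TryAllocatePreferred(RangeSketch& r, const std::vector<int>& free_until) {
  // The hinted register works iff it stays free past the (possibly just
  // shortened) range end -- which is why retrying after the split can win.
  return free_until[r.preferred_reg] >= r.end;
}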
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 4b4f8c91c6..0439c536de 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -27,9 +27,19 @@ const char* Truncation::description() const {
case TruncationKind::kWord64:
return "truncate-to-word64";
case TruncationKind::kFloat64:
- return "truncate-to-float64";
+ switch (identify_zeros()) {
+ case kIdentifyZeros:
+ return "truncate-to-float64 (identify zeros)";
+ case kDistinguishZeros:
+ return "truncate-to-float64 (distinguish zeros)";
+ }
case TruncationKind::kAny:
- return "no-truncation";
+ switch (identify_zeros()) {
+ case kIdentifyZeros:
+ return "no-truncation (but identify zeros)";
+ case kDistinguishZeros:
+ return "no-truncation (but distinguish zeros)";
+ }
}
UNREACHABLE();
return nullptr;
@@ -38,10 +48,10 @@ const char* Truncation::description() const {
// Partial order for truncations:
//
-// kWord64 kAny
-// ^ ^
-// \ |
-// \ kFloat64 <--+
+// kWord64 kAny <-------+
+// ^ ^ |
+// \ | |
+// \ kFloat64 |
// \ ^ |
// \ / |
// kWord32 kBool
@@ -52,6 +62,8 @@ const char* Truncation::description() const {
// \ /
// \ /
// kNone
+//
+// TODO(jarin) We might consider making kBool < kFloat64.
// static
Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
@@ -73,6 +85,15 @@ Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
return TruncationKind::kNone;
}
+// static
+IdentifyZeros Truncation::GeneralizeIdentifyZeros(IdentifyZeros i1,
+ IdentifyZeros i2) {
+ if (i1 == i2) {
+ return i1;
+ } else {
+ return kDistinguishZeros;
+ }
+}
// static
bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
@@ -96,6 +117,10 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
return false;
}
+// static
+bool Truncation::LessGeneralIdentifyZeros(IdentifyZeros i1, IdentifyZeros i2) {
+ return i1 == i2 || i1 == kIdentifyZeros;
+}
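A worked example of the two-dimensional generalization defined above, assuming the constructors from representation-change.h:

// Joining uses joins both dimensions independently:
Truncation a = Truncation::Float64(kIdentifyZeros);     // can't tell 0 from -0
Truncation b = Truncation::Float64(kDistinguishZeros);  // can
Truncation j = Truncation::Generalize(a, b);
// j is truncate-to-float64 (distinguish zeros): if any use distinguishes
// the zeros, the combined use must preserve the distinction.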
namespace {
@@ -282,12 +307,16 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
}
- } else if (output_rep == MachineRepresentation::kBit &&
- use_info.type_check() == TypeCheckKind::kSignedSmall) {
- // TODO(turbofan): Consider adding a Bailout operator that just deopts.
- // Also use that for MachineRepresentation::kPointer case above.
- node = InsertChangeBitToTagged(node);
- op = simplified()->CheckedTaggedToTaggedSigned();
+ } else if (output_rep == MachineRepresentation::kBit) {
+ if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ // TODO(turbofan): Consider adding a Bailout operator that just deopts.
+ // Also use that for MachineRepresentation::kPointer case above.
+ node = InsertChangeBitToTagged(node);
+ op = simplified()->CheckedTaggedToTaggedSigned();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -407,7 +436,10 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
} else if (output_rep ==
MachineRepresentation::kFloat32) { // float32 -> float64 -> tagged
node = InsertChangeFloat32ToFloat64(node);
- op = simplified()->ChangeFloat64ToTagged();
+ op = simplified()->ChangeFloat64ToTagged(
+ output_type->Maybe(Type::MinusZero())
+ ? CheckForMinusZeroMode::kCheckForMinusZero
+ : CheckForMinusZeroMode::kDontCheckForMinusZero);
} else if (output_rep == MachineRepresentation::kFloat64) {
if (output_type->Is(Type::Signed31())) { // float64 -> int32 -> tagged
node = InsertChangeFloat64ToInt32(node);
@@ -421,7 +453,10 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
node = InsertChangeFloat64ToUint32(node);
op = simplified()->ChangeUint32ToTagged();
} else {
- op = simplified()->ChangeFloat64ToTagged();
+ op = simplified()->ChangeFloat64ToTagged(
+ output_type->Maybe(Type::MinusZero())
+ ? CheckForMinusZeroMode::kCheckForMinusZero
+ : CheckForMinusZeroMode::kDontCheckForMinusZero);
}
} else {
return TypeError(node, output_rep, output_type,
@@ -469,8 +504,7 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
node = jsgraph()->graph()->NewNode(op, node);
op = machine()->TruncateFloat64ToFloat32();
}
- } else if (output_rep == MachineRepresentation::kTagged ||
- output_rep == MachineRepresentation::kTaggedPointer) {
+ } else if (IsAnyTagged(output_rep)) {
if (output_type->Is(Type::NumberOrOddball())) {
// tagged -> float64 -> float32
if (output_type->Is(Type::Number())) {
@@ -608,6 +642,9 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = machine()->ChangeFloat64ToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
op = machine()->TruncateFloat64ToWord32();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
}
} else if (output_rep == MachineRepresentation::kFloat32) {
node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
@@ -623,6 +660,9 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = machine()->ChangeFloat64ToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
op = machine()->TruncateFloat64ToWord32();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
}
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
if (output_type->Is(Type::Signed32())) {
@@ -633,6 +673,9 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else {
op = simplified()->TruncateTaggedToWord32();
}
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
}
} else if (output_rep == MachineRepresentation::kTagged ||
output_rep == MachineRepresentation::kTaggedPointer) {
@@ -652,7 +695,13 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->TruncateTaggedToWord32();
} else if (use_info.type_check() != TypeCheckKind::kNone) {
op = simplified()->CheckedTruncateTaggedToWord32();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
}
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
}
} else if (output_rep == MachineRepresentation::kWord32) {
// Only the checked case should get here, the non-checked case is
@@ -663,6 +712,9 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
return node;
} else if (output_type->Is(Type::Unsigned32())) {
op = simplified()->CheckedUint32ToInt32();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
}
} else {
DCHECK_EQ(TypeCheckKind::kNumberOrOddball, use_info.type_check());
@@ -725,7 +777,14 @@ Node* RepresentationChanger::GetBitRepresentationFor(
// true is the only trueish Oddball.
op = simplified()->ChangeTaggedToBit();
} else {
- op = simplified()->TruncateTaggedToBit();
+ if (output_rep == MachineRepresentation::kTagged &&
+ output_type->Maybe(Type::SignedSmall())) {
+ op = simplified()->TruncateTaggedToBit();
+ } else {
+ // The {output_type} either doesn't include the Smi range,
+ // or the {output_rep} is known to be TaggedPointer.
+ op = simplified()->TruncateTaggedPointerToBit();
+ }
}
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node,
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 4fa7d917b7..af96f7333f 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -12,18 +12,34 @@ namespace v8 {
namespace internal {
namespace compiler {
+enum IdentifyZeros { kIdentifyZeros, kDistinguishZeros };
+
class Truncation final {
public:
// Constructors.
- static Truncation None() { return Truncation(TruncationKind::kNone); }
- static Truncation Bool() { return Truncation(TruncationKind::kBool); }
- static Truncation Word32() { return Truncation(TruncationKind::kWord32); }
- static Truncation Word64() { return Truncation(TruncationKind::kWord64); }
- static Truncation Float64() { return Truncation(TruncationKind::kFloat64); }
- static Truncation Any() { return Truncation(TruncationKind::kAny); }
+ static Truncation None() {
+ return Truncation(TruncationKind::kNone, kIdentifyZeros);
+ }
+ static Truncation Bool() {
+ return Truncation(TruncationKind::kBool, kIdentifyZeros);
+ }
+ static Truncation Word32() {
+ return Truncation(TruncationKind::kWord32, kIdentifyZeros);
+ }
+ static Truncation Word64() {
+ return Truncation(TruncationKind::kWord64, kIdentifyZeros);
+ }
+ static Truncation Float64(IdentifyZeros identify_zeros = kDistinguishZeros) {
+ return Truncation(TruncationKind::kFloat64, identify_zeros);
+ }
+ static Truncation Any(IdentifyZeros identify_zeros = kDistinguishZeros) {
+ return Truncation(TruncationKind::kAny, identify_zeros);
+ }
static Truncation Generalize(Truncation t1, Truncation t2) {
- return Truncation(Generalize(t1.kind(), t2.kind()));
+ return Truncation(
+ Generalize(t1.kind(), t2.kind()),
+ GeneralizeIdentifyZeros(t1.identify_zeros(), t2.identify_zeros()));
}
// Queries.
@@ -45,17 +61,25 @@ class Truncation final {
return LessGeneral(kind_, TruncationKind::kFloat64) ||
LessGeneral(kind_, TruncationKind::kWord64);
}
+ bool IdentifiesZeroAndMinusZero() const {
+ return identify_zeros() == kIdentifyZeros;
+ }
// Operators.
- bool operator==(Truncation other) const { return kind() == other.kind(); }
+ bool operator==(Truncation other) const {
+ return kind() == other.kind() && identify_zeros() == other.identify_zeros();
+ }
bool operator!=(Truncation other) const { return !(*this == other); }
// Debug utilities.
const char* description() const;
bool IsLessGeneralThan(Truncation other) {
- return LessGeneral(kind(), other.kind());
+ return LessGeneral(kind(), other.kind()) &&
+ LessGeneralIdentifyZeros(identify_zeros(), other.identify_zeros());
}
+ IdentifyZeros identify_zeros() const { return identify_zeros_; }
+
private:
enum class TruncationKind : uint8_t {
kNone,
@@ -66,13 +90,21 @@ class Truncation final {
kAny
};
- explicit Truncation(TruncationKind kind) : kind_(kind) {}
+ explicit Truncation(TruncationKind kind, IdentifyZeros identify_zeros)
+ : kind_(kind), identify_zeros_(identify_zeros) {
+ DCHECK(kind == TruncationKind::kAny || kind == TruncationKind::kFloat64 ||
+ identify_zeros == kIdentifyZeros);
+ }
TruncationKind kind() const { return kind_; }
TruncationKind kind_;
+ IdentifyZeros identify_zeros_;
static TruncationKind Generalize(TruncationKind rep1, TruncationKind rep2);
+ static IdentifyZeros GeneralizeIdentifyZeros(IdentifyZeros i1,
+ IdentifyZeros i2);
static bool LessGeneral(TruncationKind rep1, TruncationKind rep2);
+ static bool LessGeneralIdentifyZeros(IdentifyZeros u1, IdentifyZeros u2);
};
enum class TypeCheckKind : uint8_t {
@@ -119,13 +151,10 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
class UseInfo {
public:
UseInfo(MachineRepresentation representation, Truncation truncation,
- TypeCheckKind type_check = TypeCheckKind::kNone,
- CheckForMinusZeroMode minus_zero_check =
- CheckForMinusZeroMode::kCheckForMinusZero)
+ TypeCheckKind type_check = TypeCheckKind::kNone)
: representation_(representation),
truncation_(truncation),
- type_check_(type_check),
- minus_zero_check_(minus_zero_check) {}
+ type_check_(type_check) {}
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
@@ -163,17 +192,14 @@ class UseInfo {
return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(),
TypeCheckKind::kSignedSmall);
}
- static UseInfo CheckedSignedSmallAsWord32(
- CheckForMinusZeroMode minus_zero_mode =
- CheckForMinusZeroMode::kCheckForMinusZero) {
- return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
- TypeCheckKind::kSignedSmall, minus_zero_mode);
+ static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros) {
+ return UseInfo(MachineRepresentation::kWord32,
+ Truncation::Any(identify_zeros),
+ TypeCheckKind::kSignedSmall);
}
- static UseInfo CheckedSigned32AsWord32(
- CheckForMinusZeroMode minus_zero_mode =
- CheckForMinusZeroMode::kCheckForMinusZero) {
+ static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros) {
return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
- TypeCheckKind::kSigned32, minus_zero_mode);
+ TypeCheckKind::kSigned32);
}
static UseInfo CheckedNumberAsFloat64() {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64(),
@@ -208,14 +234,16 @@ class UseInfo {
MachineRepresentation representation() const { return representation_; }
Truncation truncation() const { return truncation_; }
TypeCheckKind type_check() const { return type_check_; }
- CheckForMinusZeroMode minus_zero_check() const { return minus_zero_check_; }
+ CheckForMinusZeroMode minus_zero_check() const {
+ return truncation().IdentifiesZeroAndMinusZero()
+ ? CheckForMinusZeroMode::kDontCheckForMinusZero
+ : CheckForMinusZeroMode::kCheckForMinusZero;
+ }
private:
MachineRepresentation representation_;
Truncation truncation_;
TypeCheckKind type_check_;
- // TODO(jarin) Integrate with truncations.
- CheckForMinusZeroMode minus_zero_check_;
};
// Contains logic related to changing the representation of values for constants
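Downstream effect of folding the minus-zero mode into the truncation: the check mode is now computed from the truncation rather than stored separately. For instance:

UseInfo use = UseInfo::CheckedSignedSmallAsWord32(kIdentifyZeros);
// use.minus_zero_check() == CheckForMinusZeroMode::kDontCheckForMinusZero,
// because a truncation that identifies 0 and -0 makes the deopt check pure
// overhead; passing kDistinguishZeros turns the check back on.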
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 8e9db3dcb0..f46740c9ae 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -135,26 +135,28 @@ static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
}
-static inline bool HasRegisterInput(Instruction* instr, int index) {
- return instr->InputAt(index)->IsRegister();
-}
-
static inline bool HasFPRegisterInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsFPRegister();
}
-static inline bool HasImmediateInput(Instruction* instr, size_t index) {
- return instr->InputAt(index)->IsImmediate();
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsRegister() ||
+ HasFPRegisterInput(instr, index);
}
-static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
- return instr->InputAt(index)->IsStackSlot();
+static inline bool HasImmediateInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsImmediate();
}
static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsFPStackSlot();
}
+static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsStackSlot() ||
+ HasFPStackSlotInput(instr, index);
+}
+
namespace {
class OutOfLineLoadNAN32 final : public OutOfLineCode {
@@ -307,6 +309,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kS390_Add64:
case kS390_Sub32:
case kS390_Sub64:
+ case kS390_Abs64:
+ case kS390_Abs32:
+ case kS390_Mul32:
return overflow;
default:
break;
@@ -318,6 +323,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kS390_Add64:
case kS390_Sub32:
case kS390_Sub64:
+ case kS390_Abs64:
+ case kS390_Abs32:
+ case kS390_Mul32:
return nooverflow;
default:
break;
@@ -330,175 +338,182 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
return kNoCondition;
}
-typedef void (MacroAssembler::*RRTypeInstr)(Register, Register);
-typedef void (MacroAssembler::*RMTypeInstr)(Register, const MemOperand&);
-typedef void (MacroAssembler::*RITypeInstr)(Register, const Operand&);
-typedef void (MacroAssembler::*RRRTypeInstr)(Register, Register, Register);
-typedef void (MacroAssembler::*RRMTypeInstr)(Register, Register,
- const MemOperand&);
-typedef void (MacroAssembler::*RRITypeInstr)(Register, Register,
- const Operand&);
-
-#define CHECK_AND_ZERO_EXT_OUTPUT(num) \
- { \
- CHECK(HasImmediateInput(instr, (num))); \
- int doZeroExt = i.InputInt32(num); \
- if (doZeroExt) masm->LoadlW(i.OutputRegister(), i.OutputRegister()); \
- }
-
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRTypeInstr rr_instr,
- RMTypeInstr rm_instr, RITypeInstr ri_instr) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- int zeroExtIndex = 2;
- if (mode != kMode_None) {
- size_t first_index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &first_index);
- zeroExtIndex = first_index;
- CHECK(rm_instr != NULL);
- (masm->*rm_instr)(i.OutputRegister(), operand);
- } else if (HasRegisterInput(instr, 1)) {
- (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
- } else {
- UNREACHABLE();
+#define GET_MEMOPERAND32(ret, fi) \
+ ([&](int& ret) { \
+ AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+ MemOperand mem(r0); \
+ if (mode != kMode_None) { \
+ size_t first_index = (fi); \
+ mem = i.MemoryOperand(&mode, &first_index); \
+ ret = first_index; \
+ } else { \
+ mem = i.InputStackSlot32(fi); \
+ } \
+ return mem; \
+ })(ret)
+
+#define GET_MEMOPERAND(ret, fi) \
+ ([&](int& ret) { \
+ AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+ MemOperand mem(r0); \
+ if (mode != kMode_None) { \
+ size_t first_index = (fi); \
+ mem = i.MemoryOperand(&mode, &first_index); \
+ ret = first_index; \
+ } else { \
+ mem = i.InputStackSlot(fi); \
+ } \
+ return mem; \
+ })(ret)
+
+#define RRInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputRegister().is(i.InputRegister(0))); \
+ __ instr(i.OutputRegister(), i.InputRegister(1)); \
+ return 2; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
-}
+#define RIInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputRegister().is(i.InputRegister(0))); \
+ __ instr(i.OutputRegister(), i.InputImmediate(1)); \
+ return 2; \
+ }
+#define RMInstr(instr, GETMEM) \
+ [&]() { \
+ DCHECK(i.OutputRegister().is(i.InputRegister(0))); \
+ int ret = 2; \
+ __ instr(i.OutputRegister(), GETMEM(ret, 1)); \
+ return ret; \
+ }
+#define RM32Instr(instr) RMInstr(instr, GET_MEMOPERAND32)
+#define RM64Instr(instr) RMInstr(instr, GET_MEMOPERAND)
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRRTypeInstr rrr_instr,
- RMTypeInstr rm_instr, RITypeInstr ri_instr) {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- int zeroExtIndex = 2;
- if (mode != kMode_None) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- size_t first_index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &first_index);
- zeroExtIndex = first_index;
- CHECK(rm_instr != NULL);
- (masm->*rm_instr)(i.OutputRegister(), operand);
- } else if (HasRegisterInput(instr, 1)) {
- (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
- } else {
- UNREACHABLE();
+#define RRRInstr(instr) \
+ [&]() { \
+ __ instr(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); \
+ return 2; \
+ }
+#define RRIInstr(instr) \
+ [&]() { \
+ __ instr(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1)); \
+ return 2; \
+ }
+#define RRMInstr(instr, GETMEM) \
+ [&]() { \
+ int ret = 2; \
+ __ instr(i.OutputRegister(), i.InputRegister(0), GETMEM(ret, 1)); \
+ return ret; \
+ }
+#define RRM32Instr(instr) RRMInstr(instr, GET_MEMOPERAND32)
+#define RRM64Instr(instr) RRMInstr(instr, GET_MEMOPERAND)
+
+#define DDInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputDoubleRegister().is(i.InputDoubleRegister(0))); \
+ __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
+ return 2; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
-}
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRRTypeInstr rrr_instr,
- RMTypeInstr rm_instr, RRITypeInstr rri_instr) {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- int zeroExtIndex = 2;
- if (mode != kMode_None) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- size_t first_index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &first_index);
- zeroExtIndex = first_index;
- CHECK(rm_instr != NULL);
- (masm->*rm_instr)(i.OutputRegister(), operand);
- } else if (HasRegisterInput(instr, 1)) {
- (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
- } else {
- UNREACHABLE();
+#define DMInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputDoubleRegister().is(i.InputDoubleRegister(0))); \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1)); \
+ return ret; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
-}
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRRTypeInstr rrr_instr,
- RRMTypeInstr rrm_instr, RRITypeInstr rri_instr) {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- int zeroExtIndex = 2;
- if (mode != kMode_None) {
- size_t first_index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &first_index);
- zeroExtIndex = first_index;
- CHECK(rrm_instr != NULL);
- (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0), operand);
- } else if (HasRegisterInput(instr, 1)) {
- (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputStackSlot32(1));
- } else {
- UNREACHABLE();
+#define DMTInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputDoubleRegister().is(i.InputDoubleRegister(0))); \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1), \
+ kScratchDoubleReg); \
+ return ret; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
-}
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRRTypeInstr rrr_instr,
- RRITypeInstr rri_instr) {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- CHECK(mode == kMode_None);
- int zeroExtIndex = 2;
- if (HasRegisterInput(instr, 1)) {
- (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputImmediate(1));
- } else {
- UNREACHABLE();
+#define R_MInstr(instr) \
+ [&]() { \
+ int ret = 2; \
+ __ instr(i.OutputRegister(), GET_MEMOPERAND(ret, 0)); \
+ return ret; \
+ }
+
+#define R_DInstr(instr) \
+ [&]() { \
+ __ instr(i.OutputRegister(), i.InputDoubleRegister(0)); \
+ return 2; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+
+#define D_DInstr(instr) \
+ [&]() { \
+ __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ return 2; \
+ }
+
+#define D_MInstr(instr) \
+ [&]() { \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0)); \
+ return ret; \
+ }
+
+#define D_MTInstr(instr) \
+ [&]() { \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0), \
+ kScratchDoubleReg); \
+ return ret; \
+ }
+
+static int nullInstr() {
+ UNREACHABLE();
+ return -1;
}
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRTypeInstr rr_instr,
- RITypeInstr ri_instr) {
+template <int numOfOperand, class RType, class MType, class IType>
+static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
- CHECK(mode == kMode_None);
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- int zeroExtIndex = 2;
- if (HasRegisterInput(instr, 1)) {
- (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+ if (mode != kMode_None || HasStackSlotInput(instr, numOfOperand - 1)) {
+ return m();
+ } else if (HasRegisterInput(instr, numOfOperand - 1)) {
+ return r();
+ } else if (HasImmediateInput(instr, numOfOperand - 1)) {
+ return i();
} else {
UNREACHABLE();
+ return -1;
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
}
-#define ASSEMBLE_BIN_OP(instr1, instr2, instr3) \
- AssembleBinOp(i, masm(), instr, &MacroAssembler::instr1, \
- &MacroAssembler::instr2, &MacroAssembler::instr3)
+template <class _RR, class _RM, class _RI>
+static inline int AssembleBinOp(Instruction* instr, _RR _rr, _RM _rm, _RI _ri) {
+ return AssembleOp<2>(instr, _rr, _rm, _ri);
+}
-#undef CHECK_AND_ZERO_EXT_OUTPUT
+template <class _R, class _M, class _I>
+static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
+ return AssembleOp<1>(instr, _r, _m, _i);
+}
-} // namespace
+#define ASSEMBLE_BIN_OP(_rr, _rm, _ri) AssembleBinOp(instr, _rr, _rm, _ri)
+#define ASSEMBLE_UNARY_OP(_r, _m, _i) AssembleUnaryOp(instr, _r, _m, _i)
+#ifdef V8_TARGET_ARCH_S390X
#define CHECK_AND_ZERO_EXT_OUTPUT(num) \
- { \
- CHECK(HasImmediateInput(instr, (num))); \
- int doZeroExt = i.InputInt32(num); \
+ ([&](int index) { \
+ DCHECK(HasImmediateInput(instr, (index))); \
+ int doZeroExt = i.InputInt32(index); \
if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
- }
+ })(num)
+
+#define ASSEMBLE_BIN32_OP(_rr, _rm, _ri) \
+ { CHECK_AND_ZERO_EXT_OUTPUT(AssembleBinOp(instr, _rr, _rm, _ri)); }
+#else
+#define ASSEMBLE_BIN32_OP ASSEMBLE_BIN_OP
+#define CHECK_AND_ZERO_EXT_OUTPUT(num)
+#endif
+
+} // namespace
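The rewrite above replaces the five overloaded AssembleBinOp helpers and their member-function-pointer typedefs with per-form lambdas: each *Instr macro captures one operand shape (register, memory/stack, or immediate; 32- or 64-bit memory access) and returns the index of the trailing zero-extension flag, while AssembleOp merely picks a lambda by operand kind. A condensed sketch of that shape, with hypothetical emit callbacks in the comments:

// Condensed sketch: the real macros build these lambdas inline at each
// opcode's case label and dispatch on the instruction's operand kind.
template <class RR, class RM, class RI>
int AssembleBinOpSketch(char operand_kind, RR rr, RM rm, RI ri) {
  switch (operand_kind) {
    case 'm': return rm();  // memory or stack-slot second operand
    case 'r': return rr();  // register second operand
    default:  return ri();  // immediate second operand
  }
}

// e.g. for kS390_And32 with the distinct-operands facility:
//   AssembleBinOpSketch(kind,
//       [&] { /* nrk  r1,r2,r3 */ return 2; },
//       [&] { /* n    r1,mem   */ return 2; },
//       [&] { /* nilf r1,imm   */ return 2; });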
#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
do { \
@@ -511,19 +526,6 @@ void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
i.InputDoubleRegister(1)); \
} while (0)
-#define ASSEMBLE_BINOP(asm_instr) \
- do { \
- if (HasRegisterInput(instr, 1)) { \
- __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
- i.InputRegister(1)); \
- } else if (HasImmediateInput(instr, 1)) { \
- __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
- i.InputImmediate(1)); \
- } else { \
- UNIMPLEMENTED(); \
- } \
- } while (0)
-
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
do { \
AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
@@ -1351,78 +1353,91 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
+ case kS390_Abs32:
+ // TODO(john.yan): zero-ext
+ __ lpr(i.OutputRegister(0), i.InputRegister(0));
+ break;
+ case kS390_Abs64:
+ __ lpgr(i.OutputRegister(0), i.InputRegister(0));
+ break;
case kS390_And32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(nrk, And, nilf);
+ ASSEMBLE_BIN32_OP(RRRInstr(nrk), RM32Instr(And), RIInstr(nilf));
} else {
- ASSEMBLE_BIN_OP(nr, And, nilf);
+ ASSEMBLE_BIN32_OP(RRInstr(nr), RM32Instr(And), RIInstr(nilf));
}
break;
case kS390_And64:
- ASSEMBLE_BINOP(AndP);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(ngrk), RM64Instr(ng), nullInstr);
+ } else {
+ ASSEMBLE_BIN_OP(RRInstr(ngr), RM64Instr(ng), nullInstr);
+ }
break;
case kS390_Or32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(ork, Or, oilf);
+ ASSEMBLE_BIN32_OP(RRRInstr(ork), RM32Instr(Or), RIInstr(oilf));
} else {
- ASSEMBLE_BIN_OP(or_z, Or, oilf);
+ ASSEMBLE_BIN32_OP(RRInstr(or_z), RM32Instr(Or), RIInstr(oilf));
}
break;
case kS390_Or64:
- ASSEMBLE_BINOP(OrP);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(ogrk), RM64Instr(og), nullInstr);
+ } else {
+ ASSEMBLE_BIN_OP(RRInstr(ogr), RM64Instr(og), nullInstr);
+ }
break;
case kS390_Xor32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(xrk, Xor, xilf);
+ ASSEMBLE_BIN32_OP(RRRInstr(xrk), RM32Instr(Xor), RIInstr(xilf));
} else {
- ASSEMBLE_BIN_OP(xr, Xor, xilf);
+ ASSEMBLE_BIN32_OP(RRInstr(xr), RM32Instr(Xor), RIInstr(xilf));
}
break;
case kS390_Xor64:
- ASSEMBLE_BINOP(XorP);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(xgrk), RM64Instr(xg), nullInstr);
+ } else {
+ ASSEMBLE_BIN_OP(RRInstr(xgr), RM64Instr(xg), nullInstr);
+ }
break;
case kS390_ShiftLeft32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::ShiftLeft,
- &MacroAssembler::ShiftLeft);
+ ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeft), nullInstr, RRIInstr(ShiftLeft));
} else {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::sll,
- &MacroAssembler::sll);
+ ASSEMBLE_BIN32_OP(RRInstr(sll), nullInstr, RIInstr(sll));
}
break;
-#if V8_TARGET_ARCH_S390X
case kS390_ShiftLeft64:
- ASSEMBLE_BINOP(sllg);
+ ASSEMBLE_BIN_OP(RRRInstr(sllg), nullInstr, RRIInstr(sllg));
break;
-#endif
case kS390_ShiftRight32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::srlk,
- &MacroAssembler::srlk);
+ ASSEMBLE_BIN32_OP(RRRInstr(srlk), nullInstr, RRIInstr(srlk));
} else {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::srl,
- &MacroAssembler::srl);
+ ASSEMBLE_BIN32_OP(RRInstr(srl), nullInstr, RIInstr(srl));
}
break;
-#if V8_TARGET_ARCH_S390X
case kS390_ShiftRight64:
- ASSEMBLE_BINOP(srlg);
+ ASSEMBLE_BIN_OP(RRRInstr(srlg), nullInstr, RRIInstr(srlg));
break;
-#endif
case kS390_ShiftRightArith32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::srak,
- &MacroAssembler::srak);
+ ASSEMBLE_BIN32_OP(RRRInstr(srak), nullInstr, RRIInstr(srak));
} else {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::sra,
- &MacroAssembler::sra);
+ ASSEMBLE_BIN32_OP(RRInstr(sra), nullInstr, RIInstr(sra));
}
break;
-#if V8_TARGET_ARCH_S390X
case kS390_ShiftRightArith64:
- ASSEMBLE_BINOP(srag);
+ ASSEMBLE_BIN_OP(RRRInstr(srag), nullInstr, RRIInstr(srag));
break;
-#endif
#if !V8_TARGET_ARCH_S390X
case kS390_AddPair:
// i.InputRegister(0) ... left low word.
@@ -1499,6 +1514,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#endif
case kS390_RotRight32: {
+ // zero-ext
if (HasRegisterInput(instr, 1)) {
__ LoadComplementRR(kScratchReg, i.InputRegister(1));
__ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
@@ -1509,16 +1525,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK_AND_ZERO_EXT_OUTPUT(2);
break;
}
-#if V8_TARGET_ARCH_S390X
case kS390_RotRight64:
if (HasRegisterInput(instr, 1)) {
- __ LoadComplementRR(kScratchReg, i.InputRegister(1));
+ __ lcgr(kScratchReg, i.InputRegister(1));
__ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else {
+ DCHECK(HasImmediateInput(instr, 1));
__ rllg(i.OutputRegister(), i.InputRegister(0),
Operand(64 - i.InputInt32(1)));
}
break;
+ // TODO(john.yan): clean up kS390_RotLeftAnd...
case kS390_RotLeftAndClear64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = i.InputInt32(1);
@@ -1566,191 +1583,126 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
}
break;
-#endif
case kS390_Add32: {
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(ark, Add32, Add32_RRI);
+ ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(Add32), RRIInstr(Add32));
} else {
- ASSEMBLE_BIN_OP(ar, Add32, Add32_RI);
+ ASSEMBLE_BIN32_OP(RRInstr(ar), RM32Instr(Add32), RIInstr(Add32));
}
break;
}
case kS390_Add64:
- ASSEMBLE_BINOP(AddP);
- break;
- case kS390_AddFloat:
- // Ensure we don't clobber right/InputReg(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- ASSEMBLE_FLOAT_UNOP(aebr);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(agrk), RM64Instr(ag), RRIInstr(AddP));
} else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ ASSEMBLE_BIN_OP(RRInstr(agr), RM64Instr(ag), RIInstr(agfi));
}
break;
+ case kS390_AddFloat:
+ ASSEMBLE_BIN_OP(DDInstr(aebr), DMTInstr(AddFloat32), nullInstr);
+ break;
case kS390_AddDouble:
- // Ensure we don't clobber right/InputReg(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- ASSEMBLE_FLOAT_UNOP(adbr);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(adbr), DMTInstr(AddFloat64), nullInstr);
break;
case kS390_Sub32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(srk, Sub32, Sub32_RRI);
+ ASSEMBLE_BIN32_OP(RRRInstr(srk), RM32Instr(Sub32), RRIInstr(Sub32));
} else {
- ASSEMBLE_BIN_OP(sr, Sub32, Sub32_RI);
+ ASSEMBLE_BIN32_OP(RRInstr(sr), RM32Instr(Sub32), RIInstr(Sub32));
}
break;
case kS390_Sub64:
- ASSEMBLE_BINOP(SubP);
- break;
- case kS390_SubFloat:
- // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(sgrk), RM64Instr(sg), RRIInstr(SubP));
} else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- }
- __ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ ASSEMBLE_BIN_OP(RRInstr(sgr), RM64Instr(sg), RIInstr(SubP));
}
break;
+ case kS390_SubFloat:
+ ASSEMBLE_BIN_OP(DDInstr(sebr), DMTInstr(SubFloat32), nullInstr);
+ break;
case kS390_SubDouble:
- // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- }
- __ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(sdbr), DMTInstr(SubFloat64), nullInstr);
break;
case kS390_Mul32:
- ASSEMBLE_BIN_OP(Mul32, Mul32, Mul32);
+ // zero-ext
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ ASSEMBLE_BIN32_OP(RRRInstr(msrkc), RM32Instr(msc), RIInstr(Mul32));
+ } else {
+ ASSEMBLE_BIN32_OP(RRInstr(Mul32), RM32Instr(Mul32), RIInstr(Mul32));
+ }
break;
case kS390_Mul32WithOverflow:
- ASSEMBLE_BIN_OP(Mul32WithOverflowIfCCUnequal,
- Mul32WithOverflowIfCCUnequal,
- Mul32WithOverflowIfCCUnequal);
+ // zero-ext
+ ASSEMBLE_BIN32_OP(RRRInstr(Mul32WithOverflowIfCCUnequal),
+ RRM32Instr(Mul32WithOverflowIfCCUnequal),
+ RRIInstr(Mul32WithOverflowIfCCUnequal));
break;
case kS390_Mul64:
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- if (HasRegisterInput(instr, 1)) {
- __ Mul64(i.InputRegister(0), i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- __ Mul64(i.InputRegister(0), i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- __ Mul64(i.InputRegister(0), i.InputStackSlot(1));
- } else {
- UNIMPLEMENTED();
- }
+ ASSEMBLE_BIN_OP(RRInstr(Mul64), RM64Instr(Mul64), RIInstr(Mul64));
break;
case kS390_MulHigh32:
- ASSEMBLE_BIN_OP(MulHigh32, MulHigh32, MulHigh32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(MulHigh32), RRM32Instr(MulHigh32),
+ RRIInstr(MulHigh32));
break;
case kS390_MulHighU32:
- ASSEMBLE_BIN_OP(MulHighU32, MulHighU32, MulHighU32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(MulHighU32), RRM32Instr(MulHighU32),
+ RRIInstr(MulHighU32));
break;
case kS390_MulFloat:
- // Ensure we don't clobber right
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- ASSEMBLE_FLOAT_UNOP(meebr);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(meebr), DMTInstr(MulFloat32), nullInstr);
break;
case kS390_MulDouble:
- // Ensure we don't clobber right
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- ASSEMBLE_FLOAT_UNOP(mdbr);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(mdbr), DMTInstr(MulFloat64), nullInstr);
break;
-#if V8_TARGET_ARCH_S390X
case kS390_Div64:
- __ LoadRR(r1, i.InputRegister(0));
- __ dsgr(r0, i.InputRegister(1)); // R1: Dividend
- __ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
+ ASSEMBLE_BIN_OP(RRRInstr(Div64), RRM64Instr(Div64), nullInstr);
break;
-#endif
case kS390_Div32: {
- ASSEMBLE_BIN_OP(Div32, Div32, Div32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(Div32), RRM32Instr(Div32), nullInstr);
break;
}
-#if V8_TARGET_ARCH_S390X
case kS390_DivU64:
- __ LoadRR(r1, i.InputRegister(0));
- __ LoadImmP(r0, Operand::Zero());
- __ dlgr(r0, i.InputRegister(1)); // R0:R1: Dividend
- __ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
+ ASSEMBLE_BIN_OP(RRRInstr(DivU64), RRM64Instr(DivU64), nullInstr);
break;
-#endif
case kS390_DivU32: {
- ASSEMBLE_BIN_OP(DivU32, DivU32, DivU32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(DivU32), RRM32Instr(DivU32), nullInstr);
break;
}
case kS390_DivFloat:
- // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(debr), DMTInstr(DivFloat32), nullInstr);
break;
case kS390_DivDouble:
- // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(ddbr), DMTInstr(DivFloat64), nullInstr);
break;
case kS390_Mod32:
- ASSEMBLE_BIN_OP(Mod32, Mod32, Mod32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(Mod32), RRM32Instr(Mod32), nullInstr);
break;
case kS390_ModU32:
- ASSEMBLE_BIN_OP(ModU32, ModU32, ModU32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(ModU32), RRM32Instr(ModU32), nullInstr);
break;
-#if V8_TARGET_ARCH_S390X
case kS390_Mod64:
- __ LoadRR(r1, i.InputRegister(0));
- __ dsgr(r0, i.InputRegister(1)); // R1: Dividend
- __ ltgr(i.OutputRegister(), r0); // Copy R0: Remainder to output
+ ASSEMBLE_BIN_OP(RRRInstr(Mod64), RRM64Instr(Mod64), nullInstr);
break;
case kS390_ModU64:
- __ LoadRR(r1, i.InputRegister(0));
- __ LoadImmP(r0, Operand::Zero());
- __ dlgr(r0, i.InputRegister(1)); // R0:R1: Dividend
- __ ltgr(i.OutputRegister(), r0); // Copy R0: Remainder to output
+ ASSEMBLE_BIN_OP(RRRInstr(ModU64), RRM64Instr(ModU64), nullInstr);
break;
-#endif
case kS390_AbsFloat:
__ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_SqrtFloat:
- ASSEMBLE_FLOAT_UNOP(sqebr);
+ ASSEMBLE_UNARY_OP(D_DInstr(sqebr), nullInstr, nullInstr);
+ break;
+ case kS390_SqrtDouble:
+ ASSEMBLE_UNARY_OP(D_DInstr(sqdbr), nullInstr, nullInstr);
break;
case kS390_FloorFloat:
__ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1856,9 +1808,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_AbsDouble:
__ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kS390_SqrtDouble:
- ASSEMBLE_FLOAT_UNOP(sqdbr);
- break;
case kS390_FloorDouble:
__ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
@@ -1876,10 +1825,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
break;
case kS390_NegFloat:
- ASSEMBLE_FLOAT_UNOP(lcebr);
+ ASSEMBLE_UNARY_OP(D_DInstr(lcebr), nullInstr, nullInstr);
break;
case kS390_NegDouble:
- ASSEMBLE_FLOAT_UNOP(lcdbr);
+ ASSEMBLE_UNARY_OP(D_DInstr(lcdbr), nullInstr, nullInstr);
break;
case kS390_Cntlz32: {
__ llgfr(i.OutputRegister(), i.InputRegister(0));
@@ -1923,6 +1872,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 1)) {
__ And(r0, i.InputRegister(0), i.InputRegister(1));
} else {
+ // detect tmlh/tmhl/tmhh case
Operand opnd = i.InputImmediate(1);
if (is_uint16(opnd.immediate())) {
__ tmll(i.InputRegister(0), opnd);
@@ -2002,7 +1952,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lhr(i.OutputRegister(), i.InputRegister(0));
CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
-#if V8_TARGET_ARCH_S390X
case kS390_ExtendSignWord32:
__ lgfr(i.OutputRegister(), i.InputRegister(0));
break;
@@ -2014,140 +1963,139 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// sign extend
__ lgfr(i.OutputRegister(), i.InputRegister(0));
break;
+ // Convert Fixed to Floating Point
case kS390_Int64ToFloat32:
- __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ __ ConvertInt64ToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
break;
case kS390_Int64ToDouble:
- __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ __ ConvertInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
break;
case kS390_Uint64ToFloat32:
- __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
- i.OutputDoubleRegister());
+ __ ConvertUnsignedInt64ToFloat(i.OutputDoubleRegister(),
+ i.InputRegister(0));
break;
case kS390_Uint64ToDouble:
- __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
- i.OutputDoubleRegister());
+ __ ConvertUnsignedInt64ToDouble(i.OutputDoubleRegister(),
+ i.InputRegister(0));
break;
-#endif
case kS390_Int32ToFloat32:
- __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ __ ConvertIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
break;
case kS390_Int32ToDouble:
- __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ __ ConvertIntToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
break;
case kS390_Uint32ToFloat32:
- __ ConvertUnsignedIntToFloat(i.InputRegister(0),
- i.OutputDoubleRegister());
+ __ ConvertUnsignedIntToFloat(i.OutputDoubleRegister(),
+ i.InputRegister(0));
break;
case kS390_Uint32ToDouble:
- __ ConvertUnsignedIntToDouble(i.InputRegister(0),
- i.OutputDoubleRegister());
+ __ ConvertUnsignedIntToDouble(i.OutputDoubleRegister(),
+ i.InputRegister(0));
+ break;
+ case kS390_DoubleToInt32: {
+ Label done;
+ __ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
+ kRoundToNearest);
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ __ bind(&done);
+ break;
+ }
+ case kS390_DoubleToUint32: {
+ Label done;
+ __ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
+ i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ __ bind(&done);
break;
- case kS390_DoubleToInt32:
- case kS390_DoubleToUint32:
+ }
case kS390_DoubleToInt64: {
-#if V8_TARGET_ARCH_S390X
- bool check_conversion =
- (opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
-#endif
- __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
-#if !V8_TARGET_ARCH_S390X
- kScratchReg,
-#endif
- i.OutputRegister(0), kScratchDoubleReg);
-#if V8_TARGET_ARCH_S390X
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ Label done;
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand(1));
}
-#endif
+ __ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand::Zero());
+ } else {
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ }
+ __ bind(&done);
break;
}
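
The new conversion sequences all follow one pattern: preset the success flag, convert, branch on condition 0xe for the normal case, and patch up otherwise. A rough behavioral model of kS390_DoubleToInt64 in plain C++ (a sketch assuming condition 0xe means the conversion hit no NaN/overflow special case; not V8 API):

#include <cmath>
#include <cstdint>

int64_t DoubleToInt64(double d, bool* success /* nullptr if single output */) {
  // 2^63 is exactly representable as a double, so these bounds are exact.
  bool ok = !std::isnan(d) && d >= -9223372036854775808.0 &&
            d < 9223372036854775808.0;
  if (success != nullptr) {
    *success = ok;  // second output: flag preset to 1, cleared on failure
    return ok ? static_cast<int64_t>(d) : 0;
  }
  return ok ? static_cast<int64_t>(d) : 0;  // single output: zeroed on failure
}
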
- case kS390_Float32ToInt32: {
- bool check_conversion = (i.OutputCount() > 1);
- __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
- kScratchDoubleReg, kRoundToZero);
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ case kS390_DoubleToUint64: {
+ Label done;
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand(1));
}
+ __ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
+ i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand::Zero());
+ } else {
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ }
+ __ bind(&done);
+ break;
+ }
+ case kS390_Float32ToInt32: {
+ Label done;
+ __ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
+ kRoundToZero);
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ __ bind(&done);
break;
}
case kS390_Float32ToUint32: {
- bool check_conversion = (i.OutputCount() > 1);
- __ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
- i.OutputRegister(0), kScratchDoubleReg);
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
- }
+ Label done;
+ __ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
+ i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ __ bind(&done);
break;
}
-#if V8_TARGET_ARCH_S390X
case kS390_Float32ToUint64: {
- bool check_conversion = (i.OutputCount() > 1);
- __ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
- i.OutputRegister(0), kScratchDoubleReg);
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ Label done;
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand(1));
+ }
+ __ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
+ i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand::Zero());
+ } else {
+ __ lghi(i.OutputRegister(0), Operand::Zero());
}
+ __ bind(&done);
break;
}
-#endif
case kS390_Float32ToInt64: {
-#if V8_TARGET_ARCH_S390X
- bool check_conversion =
- (opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
-#endif
- __ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
-#if !V8_TARGET_ARCH_S390X
- kScratchReg,
-#endif
- i.OutputRegister(0), kScratchDoubleReg);
-#if V8_TARGET_ARCH_S390X
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ Label done;
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand(1));
}
-#endif
- break;
- }
-#if V8_TARGET_ARCH_S390X
- case kS390_DoubleToUint64: {
- bool check_conversion = (i.OutputCount() > 1);
- __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
- i.OutputRegister(0), kScratchDoubleReg);
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ __ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand::Zero());
+ } else {
+ __ lghi(i.OutputRegister(0), Operand::Zero());
}
+ __ bind(&done);
break;
}
-#endif
case kS390_DoubleToFloat32:
- __ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ ASSEMBLE_UNARY_OP(D_DInstr(ledbr), nullInstr, nullInstr);
break;
case kS390_Float32ToDouble:
- __ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ ASSEMBLE_UNARY_OP(D_DInstr(ldebr), D_MTInstr(LoadFloat32ToDouble),
+ nullInstr);
break;
case kS390_DoubleExtractLowWord32:
__ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -2158,13 +2106,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
break;
case kS390_DoubleInsertLowWord32:
- __ lgdr(kScratchReg, i.OutputDoubleRegister());
+ __ lgdr(kScratchReg, i.InputDoubleRegister(0));
__ lr(kScratchReg, i.InputRegister(1));
__ ldgr(i.OutputDoubleRegister(), kScratchReg);
break;
case kS390_DoubleInsertHighWord32:
__ sllg(kScratchReg, i.InputRegister(1), Operand(32));
- __ lgdr(r0, i.OutputDoubleRegister());
+ __ lgdr(r0, i.InputDoubleRegister(0));
__ lr(kScratchReg, r0);
__ ldgr(i.OutputDoubleRegister(), kScratchReg);
break;
@@ -2176,15 +2124,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ldgr(i.OutputDoubleRegister(), kScratchReg);
break;
case kS390_LoadWordS8:
- ASSEMBLE_LOAD_INTEGER(LoadlB);
-#if V8_TARGET_ARCH_S390X
- __ lgbr(i.OutputRegister(), i.OutputRegister());
-#else
- __ lbr(i.OutputRegister(), i.OutputRegister());
-#endif
+ ASSEMBLE_LOAD_INTEGER(LoadB);
break;
case kS390_BitcastFloat32ToInt32:
- __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadlW), nullInstr);
break;
case kS390_BitcastInt32ToFloat32:
__ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
@@ -2231,11 +2174,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_LoadReverse64RR:
__ lrvgr(i.OutputRegister(), i.InputRegister(0));
break;
-#if V8_TARGET_ARCH_S390X
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
break;
-#endif
case kS390_LoadAndTestWord32: {
ASSEMBLE_LOADANDTEST32(ltr, lt_z);
break;
@@ -2283,12 +2224,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
-#if V8_TARGET_ARCH_S390X
- __ lgbr(i.OutputRegister(), i.OutputRegister());
-#else
- __ lbr(i.OutputRegister(), i.OutputRegister());
-#endif
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadB);
break;
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
@@ -2361,6 +2297,138 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
__ StoreW(i.InputRegister(0), i.MemoryOperand(NULL, 1));
break;
+// 0x aa bb cc dd
+// index = 3..2..1..0
+#define ATOMIC_EXCHANGE(start, end, shift_amount, offset) \
+ { \
+ Label do_cs; \
+ __ LoadlW(output, MemOperand(r1)); \
+ __ bind(&do_cs); \
+ __ llgfr(r0, output); \
+ __ risbg(r0, value, Operand(start), Operand(end), Operand(shift_amount), \
+ false); \
+ __ csy(output, r0, MemOperand(r1, offset)); \
+ __ bne(&do_cs, Label::kNear); \
+ __ srl(output, Operand(shift_amount)); \
+ }
+#ifdef V8_TARGET_BIG_ENDIAN
+#define ATOMIC_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * idx; \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = (3 - idx) * 8; \
+ ATOMIC_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * idx; \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = (1 - idx) * 16; \
+ ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
+ }
+#else
+#define ATOMIC_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * (3 - idx); \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = idx * 8; \
+ ATOMIC_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * (1 - idx); \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = idx * 16; \
+ ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
+ }
+#endif
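
The ATOMIC_EXCHANGE macros implement a subword exchange by splicing the new byte or halfword into a word-sized snapshot (risbg) and retrying the compare-and-swap (csy) until it succeeds. A portable sketch of the same loop, assuming little-endian byte indexing (illustrative, not the emitted code itself):

#include <atomic>
#include <cstdint>

uint8_t AtomicExchangeByte(std::atomic<uint32_t>* word, int byte_index,
                           uint8_t value) {
  const int shift = byte_index * 8;
  uint32_t old_word = word->load(std::memory_order_relaxed);
  uint32_t new_word;
  do {
    // Splice the new byte into the snapshot; risbg does this in one step.
    new_word = (old_word & ~(0xFFu << shift)) |
               (static_cast<uint32_t>(value) << shift);
  } while (!word->compare_exchange_weak(old_word, new_word));
  return static_cast<uint8_t>(old_word >> shift);  // previous byte value
}
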
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label three, two, one, done;
+ __ la(r1, MemOperand(base, index));
+ __ tmll(r1, Operand(3));
+ __ b(Condition(1), &three);
+ __ b(Condition(2), &two);
+ __ b(Condition(4), &one);
+
+ // ending with 0b00
+ ATOMIC_EXCHANGE_BYTE(0);
+ __ b(&done);
+
+ // ending with 0b01
+ __ bind(&one);
+ ATOMIC_EXCHANGE_BYTE(1);
+ __ b(&done);
+
+ // ending with 0b10
+ __ bind(&two);
+ ATOMIC_EXCHANGE_BYTE(2);
+ __ b(&done);
+
+ // ending with 0b11
+ __ bind(&three);
+ ATOMIC_EXCHANGE_BYTE(3);
+
+ __ bind(&done);
+ if (opcode == kAtomicExchangeInt8) {
+ __ lbr(output, output);
+ } else {
+ __ llcr(output, output);
+ }
+ break;
+ }
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label two, unaligned, done;
+ __ la(r1, MemOperand(base, index));
+ __ tmll(r1, Operand(3));
+ __ b(Condition(2), &two);
+
+ // ending with 0b00
+ ATOMIC_EXCHANGE_HALFWORD(0);
+ __ b(&done);
+
+ // ending with 0b10
+ __ bind(&two);
+ ATOMIC_EXCHANGE_HALFWORD(1);
+
+ __ bind(&done);
+ if (opcode == kAtomicExchangeInt16) {
+ __ lhr(output, output);
+ } else {
+ __ llhr(output, output);
+ }
+ break;
+ }
+ case kAtomicExchangeWord32: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label do_cs;
+ __ lay(r1, MemOperand(base, index));
+ __ LoadlW(output, MemOperand(r1));
+ __ bind(&do_cs);
+ __ cs(output, value, MemOperand(r1));
+ __ bne(&do_cs, Label::kNear);
+ break;
+ }
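
The word-sized case needs no splicing; it is the classic cs loop, in which a failed compare-and-swap reloads the observed memory value into the compare register automatically. The portable equivalent collapses to std::atomic::exchange:

#include <atomic>
#include <cstdint>

uint32_t AtomicExchangeWord32(std::atomic<uint32_t>* mem, uint32_t value) {
  uint32_t expected = mem->load(std::memory_order_relaxed);  // LoadlW
  while (!mem->compare_exchange_weak(expected, value)) {
    // cs already refreshed 'expected' with the current memory value.
  }
  return expected;  // same result as mem->exchange(value)
}
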
default:
UNREACHABLE();
break;
@@ -2481,8 +2549,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Overflow checked for add/sub only.
DCHECK((condition != kOverflow && condition != kNotOverflow) ||
- (op == kS390_Add32 || kS390_Add64 || op == kS390_Sub32 ||
- op == kS390_Sub64));
+ (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
+ op == kS390_Sub64 || op == kS390_Mul32));
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
@@ -2495,6 +2563,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
: Operand(1));
__ bunordered(&done);
}
+
+ // TODO(john.yan): use load imm high on condition here
__ LoadImmP(reg, Operand::Zero());
__ LoadImmP(kScratchReg, Operand(1));
// locr is sufficient since reg's upper 32 bits are guaranteed to be 0
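
The locr sequence is a branchless select: both constants are materialized and load-on-condition picks one, so no branch is emitted. A portable sketch:

int MaterializeBool(bool condition) {
  int reg = 0;      // LoadImmP reg, 0
  int scratch = 1;  // LoadImmP kScratchReg, 1
  if (condition) reg = scratch;  // locr: conditional register move
  return reg;
}
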
@@ -2543,7 +2613,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ }
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2669,6 +2741,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
+void CodeGenerator::FinishCode() {}
+
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
S390OperandConverter g(this, nullptr);
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index b99e79f68b..d415de6587 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -12,6 +12,8 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(S390_Abs32) \
+ V(S390_Abs64) \
V(S390_And32) \
V(S390_And64) \
V(S390_Or32) \
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index d6ec3deaab..352e63af07 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -13,6 +13,8 @@ bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
+ case kS390_Abs32:
+ case kS390_Abs64:
case kS390_And32:
case kS390_And64:
case kS390_Or32:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index e591d3caeb..228ec3c0d5 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -43,20 +43,28 @@ OperandModes immediateModeMask =
OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
OperandMode::kUint32Imm | OperandMode::kInt20Imm;
-#define AndOperandMode \
- ((OperandMode::kBitWiseCommonMode | OperandMode::kUint32Imm | \
- OperandMode::kAllowRM | (CpuFeatures::IsSupported(DISTINCT_OPS) \
- ? OperandMode::kAllowRRR \
- : OperandMode::kBitWiseCommonMode)))
-
-#define OrOperandMode AndOperandMode
-#define XorOperandMode AndOperandMode
-
-#define ShiftOperandMode \
- ((OperandMode::kBitWiseCommonMode | OperandMode::kShift64Imm | \
- (CpuFeatures::IsSupported(DISTINCT_OPS) \
- ? OperandMode::kAllowRRR \
- : OperandMode::kBitWiseCommonMode)))
+#define AndCommonMode \
+ ((OperandMode::kAllowRM | \
+ (CpuFeatures::IsSupported(DISTINCT_OPS) ? OperandMode::kAllowRRR \
+ : OperandMode::kNone)))
+#define And64OperandMode AndCommonMode
+#define Or64OperandMode And64OperandMode
+#define Xor64OperandMode And64OperandMode
+
+#define And32OperandMode \
+ (AndCommonMode | OperandMode::kAllowRI | OperandMode::kUint32Imm)
+#define Or32OperandMode And32OperandMode
+#define Xor32OperandMode And32OperandMode
+
+#define Shift32OperandMode \
+ ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
+ (CpuFeatures::IsSupported(DISTINCT_OPS) \
+ ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
+ : OperandMode::kNone)))
+
+#define Shift64OperandMode \
+ ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
+ OperandMode::kAllowRRR | OperandMode::kAllowRRI))
#define AddOperandMode \
((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
@@ -241,8 +249,11 @@ namespace {
bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
switch (opcode) {
+ case kS390_AddFloat:
+ case kS390_AddDouble:
case kS390_CmpFloat:
case kS390_CmpDouble:
+ case kS390_Float32ToDouble:
return true;
default:
return false;
@@ -306,49 +317,51 @@ ArchOpcode SelectLoadOpcode(Node* node) {
return opcode;
}
-bool AutoZeroExtendsWord32ToWord64(Node* node) {
-#if !V8_TARGET_ARCH_S390X
- return true;
-#else
- switch (node->opcode()) {
- case IrOpcode::kInt32Div:
- case IrOpcode::kUint32Div:
- case IrOpcode::kInt32MulHigh:
- case IrOpcode::kUint32MulHigh:
- case IrOpcode::kInt32Mod:
- case IrOpcode::kUint32Mod:
- case IrOpcode::kWord32Clz:
- case IrOpcode::kWord32Popcnt:
- return true;
- default:
- return false;
- }
- return false;
-#endif
-}
-
-bool ZeroExtendsWord32ToWord64(Node* node) {
+#define RESULT_IS_WORD32_LIST(V) \
+ /* Float unary op*/ \
+ V(BitcastFloat32ToInt32) \
+ /* V(TruncateFloat64ToWord32) */ \
+ /* V(RoundFloat64ToInt32) */ \
+ /* V(TruncateFloat32ToInt32) */ \
+ /* V(TruncateFloat32ToUint32) */ \
+ /* V(TruncateFloat64ToUint32) */ \
+ /* V(ChangeFloat64ToInt32) */ \
+ /* V(ChangeFloat64ToUint32) */ \
+ /* Word32 unary op */ \
+ V(Word32Clz) \
+ V(Word32Popcnt) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ /* Word32 bin op */ \
+ V(Int32Add) \
+ V(Int32Sub) \
+ V(Int32Mul) \
+ V(Int32AddWithOverflow) \
+ V(Int32SubWithOverflow) \
+ V(Int32MulWithOverflow) \
+ V(Int32MulHigh) \
+ V(Uint32MulHigh) \
+ V(Int32Div) \
+ V(Uint32Div) \
+ V(Int32Mod) \
+ V(Uint32Mod) \
+ V(Word32Ror) \
+ V(Word32And) \
+ V(Word32Or) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar)
+
+bool ProduceWord32Result(Node* node) {
#if !V8_TARGET_ARCH_S390X
return true;
#else
switch (node->opcode()) {
- case IrOpcode::kInt32Add:
- case IrOpcode::kInt32Sub:
- case IrOpcode::kWord32And:
- case IrOpcode::kWord32Or:
- case IrOpcode::kWord32Xor:
- case IrOpcode::kWord32Shl:
- case IrOpcode::kWord32Shr:
- case IrOpcode::kWord32Sar:
- case IrOpcode::kInt32Mul:
- case IrOpcode::kWord32Ror:
- case IrOpcode::kInt32Div:
- case IrOpcode::kUint32Div:
- case IrOpcode::kInt32MulHigh:
- case IrOpcode::kInt32Mod:
- case IrOpcode::kUint32Mod:
- case IrOpcode::kWord32Popcnt:
- return true;
+#define VISITOR(name) case IrOpcode::k##name:
+ RESULT_IS_WORD32_LIST(VISITOR)
+#undef VISITOR
+ return true;
// TODO(john.yan): consider the following case to be valid
// case IrOpcode::kWord32Equal:
// case IrOpcode::kInt32LessThan:
@@ -376,6 +389,11 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord32:
return true;
+ case MachineRepresentation::kWord8:
+        return !load_rep.IsSigned();
default:
return false;
}
@@ -386,28 +404,20 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
#endif
}
-void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
- S390OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+static inline bool DoZeroExtForResult(Node* node) {
+#if V8_TARGET_ARCH_S390X
+ return ProduceWord32Result(node);
+#else
+ return false;
+#endif
}
-void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
- S390OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
-}
+// TODO(john.yan): Create VisitShift to match dst = src shift (R+I)
+#if 0
+void VisitShift() { }
+#endif
#if V8_TARGET_ARCH_S390X
-void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
- OperandModes operand_mode) {
- S390OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)),
- g.UseOperand(node->InputAt(1), operand_mode));
-}
-
void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
S390OperandGenerator g(selector);
@@ -425,42 +435,153 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
}
#endif
-// Shared routine for multiple binary operations.
-template <typename Matcher>
-void VisitBinop(InstructionSelector* selector, Node* node,
+template <class CanCombineWithLoad>
+void GenerateRightOperands(InstructionSelector* selector, Node* node,
+ Node* right, InstructionCode& opcode,
+ OperandModes& operand_mode,
+ InstructionOperand* inputs, size_t& input_count,
+ CanCombineWithLoad canCombineWithLoad) {
+ S390OperandGenerator g(selector);
+
+ if ((operand_mode & OperandMode::kAllowImmediate) &&
+ g.CanBeImmediate(right, operand_mode)) {
+ inputs[input_count++] = g.UseImmediate(right);
+ // Can only be RI or RRI
+ operand_mode &= OperandMode::kAllowImmediate;
+ } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
+ NodeMatcher mright(right);
+ if (mright.IsLoad() && selector->CanCover(node, right) &&
+ canCombineWithLoad(SelectLoadOpcode(right))) {
+ AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+ right, inputs, &input_count, OpcodeImmMode(opcode));
+ opcode |= AddressingModeField::encode(mode);
+ operand_mode &= ~OperandMode::kAllowImmediate;
+ if (operand_mode & OperandMode::kAllowRM)
+ operand_mode &= ~OperandMode::kAllowDistinctOps;
+ } else if (operand_mode & OperandMode::kAllowRM) {
+ DCHECK(!(operand_mode & OperandMode::kAllowRRM));
+ inputs[input_count++] = g.UseAnyExceptImmediate(right);
+ // Cannot be an immediate
+ operand_mode &=
+ ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
+ } else if (operand_mode & OperandMode::kAllowRRM) {
+ DCHECK(!(operand_mode & OperandMode::kAllowRM));
+ inputs[input_count++] = g.UseAnyExceptImmediate(right);
+ // Cannot be an immediate
+ operand_mode &= ~OperandMode::kAllowImmediate;
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ inputs[input_count++] = g.UseRegister(right);
+ // Can only be RR or RRR
+ operand_mode &= OperandMode::kAllowRRR;
+ }
+}
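
GenerateRightOperands progressively narrows operand_mode: once the right operand is bound as an immediate, a memory operand, or a register, every instruction form incompatible with that binding is masked away. A minimal model of the flag narrowing (the enumerator names are illustrative, not the real OperandMode bits):

#include <cstdint>
#include <cstdio>

enum Mode : uint32_t {
  kRRR = 1u << 0,        // three-register form
  kImmediate = 1u << 1,  // register-immediate form
  kMemory = 1u << 2,     // register-memory form
};

int main() {
  uint32_t mode = kRRR | kImmediate | kMemory;
  bool right_is_immediate = true;  // suppose CanBeImmediate(right) held
  if (right_is_immediate) mode &= kImmediate;  // only RI/RRI remain
  std::printf("remaining modes: %#x\n", mode);  // prints 0x2
}
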
+
+template <class CanCombineWithLoad>
+void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
+ Node* left, Node* right, InstructionCode& opcode,
+ OperandModes& operand_mode,
+ InstructionOperand* inputs, size_t& input_count,
+ CanCombineWithLoad canCombineWithLoad) {
+ S390OperandGenerator g(selector);
+ // left is always a register
+ InstructionOperand const left_input = g.UseRegister(left);
+ inputs[input_count++] = left_input;
+
+ if (left == right) {
+ inputs[input_count++] = left_input;
+ // Can only be RR or RRR
+ operand_mode &= OperandMode::kAllowRRR;
+ } else {
+ GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs,
+ input_count, canCombineWithLoad);
+ }
+}
+
+template <class CanCombineWithLoad>
+void VisitUnaryOp(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, OperandModes operand_mode,
+ FlagsContinuation* cont,
+ CanCombineWithLoad canCombineWithLoad);
+
+template <class CanCombineWithLoad>
+void VisitBinOp(InstructionSelector* selector, Node* node,
InstructionCode opcode, OperandModes operand_mode,
- FlagsContinuation* cont) {
+ FlagsContinuation* cont, CanCombineWithLoad canCombineWithLoad);
+
+// Generate the following variations:
+// VisitWord32UnaryOp, VisitWord32BinOp,
+// VisitWord64UnaryOp, VisitWord64BinOp,
+// VisitFloat32UnaryOp, VisitFloat32BinOp,
+// VisitFloat64UnaryOp, VisitFloat64BinOp
+#define VISIT_OP_LIST_32(V) \
+ V(Word32, Unary, [](ArchOpcode opcode) { \
+ return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
+ }) \
+ V(Word64, Unary, \
+ [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; }) \
+ V(Float32, Unary, \
+ [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
+ V(Float64, Unary, \
+ [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; }) \
+ V(Word32, Bin, [](ArchOpcode opcode) { \
+ return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
+ }) \
+ V(Float32, Bin, \
+ [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
+ V(Float64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })
+
+#if V8_TARGET_ARCH_S390X
+#define VISIT_OP_LIST(V) \
+ VISIT_OP_LIST_32(V) \
+ V(Word64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })
+#else
+#define VISIT_OP_LIST VISIT_OP_LIST_32
+#endif
+
+#define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad) \
+ static inline void Visit##type1##type2##Op( \
+ InstructionSelector* selector, Node* node, InstructionCode opcode, \
+ OperandModes operand_mode, FlagsContinuation* cont) { \
+ Visit##type2##Op(selector, node, opcode, operand_mode, cont, \
+ canCombineWithLoad); \
+ } \
+ static inline void Visit##type1##type2##Op( \
+ InstructionSelector* selector, Node* node, InstructionCode opcode, \
+ OperandModes operand_mode) { \
+ FlagsContinuation cont; \
+ Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont); \
+ }
+VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS);
+#undef DECLARE_VISIT_HELPER_FUNCTIONS
+#undef VISIT_OP_LIST_32
+#undef VISIT_OP_LIST
+
+template <class CanCombineWithLoad>
+void VisitUnaryOp(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, OperandModes operand_mode,
+ FlagsContinuation* cont,
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
- Matcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- InstructionOperand inputs[4];
+ InstructionOperand inputs[8];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
+ Node* input = node->InputAt(0);
- // TODO(turbofan): match complex addressing modes.
- if (left == right) {
- // If both inputs refer to the same operand, enforce allocating a register
- // for both of them to ensure that we don't end up generating code like
- // this:
- //
- // mov rax, [rbp-0x10]
- // add rax, [rbp-0x10]
- // jo label
- InstructionOperand const input = g.UseRegister(left);
- inputs[input_count++] = input;
- inputs[input_count++] = input;
- } else if (g.CanBeImmediate(right, operand_mode)) {
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.UseImmediate(right);
- } else {
- if (node->op()->HasProperty(Operator::kCommutative) &&
- g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.UseRegister(right);
+ GenerateRightOperands(selector, node, input, opcode, operand_mode, inputs,
+ input_count, canCombineWithLoad);
+
+ bool input_is_word32 = ProduceWord32Result(input);
+
+ bool doZeroExt = DoZeroExtForResult(node);
+ bool canEliminateZeroExt = input_is_word32;
+
+ if (doZeroExt) {
+ // Add zero-ext indication
+ inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
}
if (cont->IsBranch()) {
@@ -468,14 +589,20 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- if (cont->IsDeoptimize()) {
- // If we can deoptimize as a result of the binop, we need to make sure that
- // the deopt inputs are not overwritten by the binop result. One way
+ if (!cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure
+ // that the deopt inputs are not overwritten by the binop result. One way
// to achieve that is to declare the output register as same-as-first.
- outputs[output_count++] = g.DefineSameAsFirst(node);
+ if (doZeroExt && canEliminateZeroExt) {
+ // We have to make sure result and left use the same register
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
} else {
- outputs[output_count++] = g.DefineAsRegister(node);
+ outputs[output_count++] = g.DefineSameAsFirst(node);
}
+
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -486,6 +613,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(outputs), output_count);
opcode = cont->Encode(opcode);
+
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->kind(), cont->reason(), cont->frame_state());
@@ -497,17 +625,11 @@ void VisitBinop(InstructionSelector* selector, Node* node,
}
}
-// Shared routine for multiple binary operations.
-template <typename Matcher>
-void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
- OperandModes operand_mode) {
- FlagsContinuation cont;
- VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
-}
-
-void VisitBin32op(InstructionSelector* selector, Node* node,
- InstructionCode opcode, OperandModes operand_mode,
- FlagsContinuation* cont) {
+template <class CanCombineWithLoad>
+void VisitBinOp(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, OperandModes operand_mode,
+ FlagsContinuation* cont,
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
Int32BinopMatcher m(node);
Node* left = m.left().node();
@@ -517,98 +639,41 @@ void VisitBin32op(InstructionSelector* selector, Node* node,
InstructionOperand outputs[2];
size_t output_count = 0;
- // match left of TruncateInt64ToInt32
- if (m.left().IsTruncateInt64ToInt32() && selector->CanCover(node, left)) {
- left = left->InputAt(0);
- }
- // match right of TruncateInt64ToInt32
- if (m.right().IsTruncateInt64ToInt32() && selector->CanCover(node, right)) {
- right = right->InputAt(0);
- }
-
-#if V8_TARGET_ARCH_S390X
- if ((ZeroExtendsWord32ToWord64(right) || g.CanBeBetterLeftOperand(right)) &&
- node->op()->HasProperty(Operator::kCommutative) &&
- !g.CanBeImmediate(right, operand_mode)) {
- std::swap(left, right);
- }
-#else
if (node->op()->HasProperty(Operator::kCommutative) &&
!g.CanBeImmediate(right, operand_mode) &&
(g.CanBeBetterLeftOperand(right))) {
std::swap(left, right);
}
-#endif
- // left is always register
- InstructionOperand const left_input = g.UseRegister(left);
- inputs[input_count++] = left_input;
+ GenerateBinOpOperands(selector, node, left, right, opcode, operand_mode,
+ inputs, input_count, canCombineWithLoad);
- // TODO(turbofan): match complex addressing modes.
- if (left == right) {
- // If both inputs refer to the same operand, enforce allocating a register
- // for both of them to ensure that we don't end up generating code like
- // this:
- //
- // mov rax, [rbp-0x10]
- // add rax, [rbp-0x10]
- // jo label
- inputs[input_count++] = left_input;
- // Can only be RR or RRR
- operand_mode &= OperandMode::kAllowRRR;
- } else if ((operand_mode & OperandMode::kAllowImmediate) &&
- g.CanBeImmediate(right, operand_mode)) {
- inputs[input_count++] = g.UseImmediate(right);
- // Can only be RI or RRI
- operand_mode &= OperandMode::kAllowImmediate;
- } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
- NodeMatcher mright(right);
- if (mright.IsLoad() && selector->CanCover(node, right) &&
- SelectLoadOpcode(right) == kS390_LoadWordU32) {
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
- opcode |= AddressingModeField::encode(mode);
- operand_mode &= ~OperandMode::kAllowImmediate;
- if (operand_mode & OperandMode::kAllowRM)
- operand_mode &= ~OperandMode::kAllowDistinctOps;
- } else if (operand_mode & OperandMode::kAllowRM) {
- DCHECK(!(operand_mode & OperandMode::kAllowRRM));
- inputs[input_count++] = g.Use(right);
- // Can not be Immediate
- operand_mode &=
- ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
- } else if (operand_mode & OperandMode::kAllowRRM) {
- DCHECK(!(operand_mode & OperandMode::kAllowRM));
- inputs[input_count++] = g.Use(right);
- // Can not be Immediate
- operand_mode &= ~OperandMode::kAllowImmediate;
- } else {
- UNREACHABLE();
- }
- } else {
- inputs[input_count++] = g.UseRegister(right);
- // Can only be RR or RRR
- operand_mode &= OperandMode::kAllowRRR;
- }
+ bool left_is_word32 = ProduceWord32Result(left);
- bool doZeroExt =
- AutoZeroExtendsWord32ToWord64(node) || !ZeroExtendsWord32ToWord64(left);
+ bool doZeroExt = DoZeroExtForResult(node);
+ bool canEliminateZeroExt = left_is_word32;
- inputs[input_count++] =
- g.TempImmediate(doZeroExt && (!AutoZeroExtendsWord32ToWord64(node)));
+ if (doZeroExt) {
+ // Add zero-ext indication
+ inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
+ }
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
- if (doZeroExt && (operand_mode & OperandMode::kAllowDistinctOps) &&
+ if ((operand_mode & OperandMode::kAllowDistinctOps) &&
// If we can deoptimize as a result of the binop, we need to make sure
- // that
- // the deopt inputs are not overwritten by the binop result. One way
+ // that the deopt inputs are not overwritten by the binop result. One way
// to achieve that is to declare the output register as same-as-first.
!cont->IsDeoptimize()) {
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (doZeroExt && canEliminateZeroExt) {
+ // We have to make sure result and left use the same register
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
} else {
outputs[output_count++] = g.DefineSameAsFirst(node);
}
@@ -635,12 +700,6 @@ void VisitBin32op(InstructionSelector* selector, Node* node,
}
}
-void VisitBin32op(InstructionSelector* selector, Node* node, ArchOpcode opcode,
- OperandModes operand_mode) {
- FlagsContinuation cont;
- VisitBin32op(selector, node, opcode, operand_mode, &cont);
-}
-
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
@@ -908,10 +967,6 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
}
#endif
-void InstructionSelector::VisitWord32And(Node* node) {
- VisitBin32op(this, node, kS390_And32, AndOperandMode);
-}
-
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64And(Node* node) {
S390OperandGenerator g(this);
@@ -954,46 +1009,16 @@ void InstructionSelector::VisitWord64And(Node* node) {
opcode = kS390_RotLeftAndClear64;
mask = mb;
}
- if (match) {
+ if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
g.TempImmediate(sh), g.TempImmediate(mask));
return;
}
}
}
- VisitBinop<Int64BinopMatcher>(this, node, kS390_And64,
- OperandMode::kUint32Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBin32op(this, node, kS390_Or32, OrOperandMode);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Or(Node* node) {
- Int64BinopMatcher m(node);
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64,
- OperandMode::kUint32Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Xor(Node* node) {
- VisitBin32op(this, node, kS390_Xor32, XorOperandMode);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Xor(Node* node) {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64,
- OperandMode::kUint32Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Shl(Node* node) {
- VisitBin32op(this, node, kS390_ShiftLeft32, ShiftOperandMode);
+ VisitWord64BinOp(this, node, kS390_And64, And64OperandMode);
}
-#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Shl(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1024,7 +1049,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
opcode = kS390_RotLeftAndClear64;
mask = mb;
}
- if (match) {
+ if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
g.TempImmediate(mask));
@@ -1033,15 +1058,9 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
}
}
}
- VisitRRO(this, kS390_ShiftLeft64, node, OperandMode::kShift64Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Shr(Node* node) {
- VisitBin32op(this, node, kS390_ShiftRight32, ShiftOperandMode);
+ VisitWord64BinOp(this, node, kS390_ShiftLeft64, Shift64OperandMode);
}
-#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Shr(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1077,31 +1096,35 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
}
}
}
- VisitRRO(this, kS390_ShiftRight64, node, OperandMode::kShift64Imm);
+ VisitWord64BinOp(this, node, kS390_ShiftRight64, Shift64OperandMode);
}
#endif
-void InstructionSelector::VisitWord32Sar(Node* node) {
- S390OperandGenerator g(this);
+static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
+ InstructionSelector* selector, Node* node) {
+ S390OperandGenerator g(selector);
Int32BinopMatcher m(node);
- // Replace with sign extension for (x << K) >> K where K is 16 or 24.
- if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+ if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(16) && m.right().Is(16)) {
- bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
- Emit(kS390_ExtendSignWord16,
- doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
- return;
+ bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
+ selector->Emit(kS390_ExtendSignWord16,
+ canEliminateZeroExt ? g.DefineSameAsFirst(node)
+ : g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.TempImmediate(!canEliminateZeroExt));
+ return true;
} else if (mleft.right().Is(24) && m.right().Is(24)) {
- bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
- Emit(kS390_ExtendSignWord8,
- doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
- return;
+ bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
+ selector->Emit(kS390_ExtendSignWord8,
+ canEliminateZeroExt ? g.DefineSameAsFirst(node)
+ : g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.TempImmediate(!canEliminateZeroExt));
+ return true;
}
}
- VisitBin32op(this, node, kS390_ShiftRightArith32, ShiftOperandMode);
+ return false;
}
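
TryMatchSignExtInt16OrInt8FromWord32Sar exploits the identity that the arithmetic shift pair (x << K) >> K with K = 16 or 24 sign-extends the low 16 or 8 bits, which lhr/lbr compute in one instruction. A self-checking sketch (assumes the usual arithmetic right shift on signed integers):

#include <cassert>
#include <cstdint>

int32_t Sext16ViaShifts(int32_t x) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) << 16) >> 16;
}
int32_t Sext16Direct(int32_t x) { return static_cast<int16_t>(x); }

int main() {
  for (int32_t v : {0, 1, -1, 0x7FFF, 0x8000, 0x12345678}) {
    assert(Sext16ViaShifts(v) == Sext16Direct(v));
  }
}
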
#if !V8_TARGET_ARCH_S390X
@@ -1212,51 +1235,6 @@ void InstructionSelector::VisitWord32PairSar(Node* node) {
}
#endif
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Sar(Node* node) {
- VisitRRO(this, kS390_ShiftRightArith64, node, OperandMode::kShift64Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Ror(Node* node) {
- // TODO(john): match dst = ror(src1, src2 + imm)
- VisitBin32op(this, node, kS390_RotRight32,
- OperandMode::kAllowRI | OperandMode::kAllowRRR |
- OperandMode::kAllowRRI | OperandMode::kShift32Imm);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Ror(Node* node) {
- VisitRRO(this, kS390_RotRight64, node, OperandMode::kShift64Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
- VisitRR(this, kS390_Cntlz32, node);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Clz(Node* node) {
- S390OperandGenerator g(this);
- Emit(kS390_Cntlz64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-#endif
-
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
- S390OperandGenerator g(this);
- Node* value = node->InputAt(0);
- Emit(kS390_Popcnt32, g.DefineAsRegister(node), g.UseRegister(value));
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Popcnt(Node* node) {
- S390OperandGenerator g(this);
- Emit(kS390_Popcnt64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-#endif
-
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
#if V8_TARGET_ARCH_S390X
@@ -1269,6 +1247,14 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ VisitWord32UnaryOp(this, node, kS390_Abs32, OperandMode::kNone);
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ VisitWord64UnaryOp(this, node, kS390_Abs64, OperandMode::kNone);
+}
+
void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
@@ -1294,204 +1280,376 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBin32op(this, node, kS390_Add32, AddOperandMode);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
- OperandMode::kInt32Imm);
-}
-#endif
-
-void InstructionSelector::VisitInt32Sub(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.left().Is(0)) {
- Node* right = m.right().node();
- bool doZeroExt = ZeroExtendsWord32ToWord64(right);
- Emit(kS390_Neg32, g.DefineAsRegister(node), g.UseRegister(right),
- g.TempImmediate(doZeroExt));
- } else {
- VisitBin32op(this, node, kS390_Sub32, SubOperandMode);
- }
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Sub(Node* node) {
- S390OperandGenerator g(this);
- Int64BinopMatcher m(node);
- if (m.left().Is(0)) {
- Emit(kS390_Neg64, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- } else {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
- OperandMode::kInt32Imm_Negate);
- }
-}
-#endif
-
-namespace {
-
-void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand left, InstructionOperand right,
- FlagsContinuation* cont);
-
-#if V8_TARGET_ARCH_S390X
-void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+template <class Matcher, ArchOpcode neg_opcode>
+static inline bool TryMatchNegFromSub(InstructionSelector* selector,
+ Node* node) {
S390OperandGenerator g(selector);
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- if (g.CanBeImmediate(right, OperandMode::kInt32Imm)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.UseImmediate(right));
- } else {
- if (g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
+ Matcher m(node);
+ static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64,
+ "Provided opcode is not a Neg opcode.");
+ if (m.left().Is(0)) {
+ Node* value = m.right().node();
+ bool doZeroExt = DoZeroExtForResult(node);
+ bool canEliminateZeroExt = ProduceWord32Result(value);
+ if (doZeroExt) {
+ selector->Emit(neg_opcode,
+ canEliminateZeroExt ? g.DefineSameAsFirst(node)
+ : g.DefineAsRegister(node),
+ g.UseRegister(value),
+ g.TempImmediate(!canEliminateZeroExt));
+ } else {
+ selector->Emit(neg_opcode, g.DefineAsRegister(node),
+ g.UseRegister(value));
}
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.Use(right));
- }
-}
-#endif
-
-} // namespace
-
-void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
- return VisitBin32op(this, node, kS390_Mul32WithOverflow,
- OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
- &cont);
+ return true;
}
- VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
-}
-
-void InstructionSelector::VisitInt32Mul(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
- base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
- int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
- bool doZeroExt = !ZeroExtendsWord32ToWord64(left);
- InstructionOperand dst =
- (doZeroExt && CpuFeatures::IsSupported(DISTINCT_OPS))
- ? g.DefineAsRegister(node)
- : g.DefineSameAsFirst(node);
-
- Emit(kS390_ShiftLeft32, dst, g.UseRegister(left), g.UseImmediate(power),
- g.TempImmediate(doZeroExt));
- return;
- }
- VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
+ return false;
}
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Mul(Node* node) {
- S390OperandGenerator g(this);
- Int64BinopMatcher m(node);
+template <class Matcher, ArchOpcode shift_op>
+bool TryMatchShiftFromMul(InstructionSelector* selector, Node* node) {
+ S390OperandGenerator g(selector);
+ Matcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
- Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.UseImmediate(power));
- return;
+ bool doZeroExt = DoZeroExtForResult(node);
+ bool canEliminateZeroExt = ProduceWord32Result(left);
+ InstructionOperand dst = (doZeroExt && !canEliminateZeroExt &&
+ CpuFeatures::IsSupported(DISTINCT_OPS))
+ ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node);
+
+ if (doZeroExt) {
+ selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power),
+ g.TempImmediate(!canEliminateZeroExt));
+ } else {
+ selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power));
+ }
+ return true;
}
- VisitMul(this, node, kS390_Mul64);
-}
-#endif
-
-void InstructionSelector::VisitInt32MulHigh(Node* node) {
- VisitBin32op(this, node, kS390_MulHigh32,
- OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps);
+ return false;
}
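
TryMatchShiftFromMul strength-reduces a multiply by a power-of-two immediate into a left shift; 63 - CountLeadingZeros64(imm) recovers the shift amount. A miniature version (using the GCC/Clang builtin __builtin_clzll in place of base::bits::CountLeadingZeros64):

#include <cassert>
#include <cstdint>

int64_t MulByPowerOfTwo(int64_t x, uint64_t imm) {
  assert(imm != 0 && (imm & (imm - 1)) == 0);  // IsPowerOfTwo64
  int power = 63 - __builtin_clzll(imm);
  // Shift in the unsigned domain; the low 64 bits match x * imm.
  return static_cast<int64_t>(static_cast<uint64_t>(x) << power);
}

int main() { assert(MulByPowerOfTwo(5, 8) == 40); }
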
-void InstructionSelector::VisitUint32MulHigh(Node* node) {
- VisitBin32op(this, node, kS390_MulHighU32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
+template <ArchOpcode opcode>
+static inline bool TryMatchInt32OpWithOverflow(InstructionSelector* selector,
+ Node* node, OperandModes mode) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ VisitWord32BinOp(selector, node, opcode, mode, &cont);
+ return true;
+ }
+ return false;
}
-void InstructionSelector::VisitInt32Div(Node* node) {
- VisitBin32op(this, node, kS390_Div32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
+static inline bool TryMatchInt32AddWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ return TryMatchInt32OpWithOverflow<kS390_Add32>(selector, node,
+ AddOperandMode);
}
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Div(Node* node) {
- VisitRRR(this, kS390_Div64, node);
+static inline bool TryMatchInt32SubWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ return TryMatchInt32OpWithOverflow<kS390_Sub32>(selector, node,
+ SubOperandMode);
}
-#endif
-void InstructionSelector::VisitUint32Div(Node* node) {
- VisitBin32op(this, node, kS390_DivU32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
+static inline bool TryMatchInt32MulWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ TryMatchInt32OpWithOverflow<kS390_Mul32>(
+ selector, node, OperandMode::kAllowRRR | OperandMode::kAllowRM);
+ } else {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+ VisitWord32BinOp(selector, node, kS390_Mul32WithOverflow,
+ OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+ &cont);
+ }
+ return true;
+ }
+ return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(selector,
+ node);
}
#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitUint64Div(Node* node) {
- VisitRRR(this, kS390_DivU64, node);
+template <ArchOpcode opcode>
+static inline bool TryMatchInt64OpWithOverflow(InstructionSelector* selector,
+ Node* node, OperandModes mode) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ VisitWord64BinOp(selector, node, opcode, mode, &cont);
+ return true;
+ }
+ return false;
}
-#endif
-void InstructionSelector::VisitInt32Mod(Node* node) {
- VisitBin32op(this, node, kS390_Mod32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
+static inline bool TryMatchInt64AddWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ return TryMatchInt64OpWithOverflow<kS390_Add64>(selector, node,
+ AddOperandMode);
}
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Mod(Node* node) {
- VisitRRR(this, kS390_Mod64, node);
+static inline bool TryMatchInt64SubWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ return TryMatchInt64OpWithOverflow<kS390_Sub64>(selector, node,
+ SubOperandMode);
}
#endif
-void InstructionSelector::VisitUint32Mod(Node* node) {
- VisitBin32op(this, node, kS390_ModU32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
-}
+static inline bool TryMatchDoubleConstructFromInsert(
+ InstructionSelector* selector, Node* node) {
+ S390OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Node* lo32 = NULL;
+ Node* hi32 = NULL;
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitUint64Mod(Node* node) {
- VisitRRR(this, kS390_ModU64, node);
-}
-#endif
+ if (node->opcode() == IrOpcode::kFloat64InsertLowWord32) {
+ lo32 = right;
+ } else if (node->opcode() == IrOpcode::kFloat64InsertHighWord32) {
+ hi32 = right;
+ } else {
+ return false; // doesn't match
+ }
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- VisitRR(this, kS390_Float32ToDouble, node);
-}
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32) {
+ lo32 = left->InputAt(1);
+ } else if (left->opcode() == IrOpcode::kFloat64InsertHighWord32) {
+ hi32 = left->InputAt(1);
+ } else {
+ return false; // doesn't match
+ }
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
- VisitRR(this, kS390_Int32ToFloat32, node);
-}
+ if (!lo32 || !hi32) return false; // doesn't match
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
- VisitRR(this, kS390_Uint32ToFloat32, node);
+ selector->Emit(kS390_DoubleConstruct, g.DefineAsRegister(node),
+ g.UseRegister(hi32), g.UseRegister(lo32));
+ return true;
}
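
When both halves are supplied by a pair of Float64InsertLowWord32/Float64InsertHighWord32 nodes, the selector emits a single kS390_DoubleConstruct. What that instruction computes, sketched portably (memcpy is the well-defined bit cast):

#include <cstdint>
#include <cstring>

double ConstructDouble(uint32_t hi32, uint32_t lo32) {
  uint64_t bits = (static_cast<uint64_t>(hi32) << 32) | lo32;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}
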
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- VisitRR(this, kS390_Int32ToDouble, node);
-}
+#define null ([]() { return false; })
+// TODO(john.yan): place kAllowRM where available
+#define FLOAT_UNARY_OP_LIST_32(V) \
+ V(Float32, ChangeFloat32ToFloat64, kS390_Float32ToDouble, \
+ OperandMode::kAllowRM, null) \
+ V(Float32, BitcastFloat32ToInt32, kS390_BitcastFloat32ToInt32, \
+ OperandMode::kAllowRM, null) \
+ V(Float64, TruncateFloat64ToFloat32, kS390_DoubleToFloat32, \
+ OperandMode::kNone, null) \
+ V(Float64, TruncateFloat64ToWord32, kArchTruncateDoubleToI, \
+ OperandMode::kNone, null) \
+ V(Float64, RoundFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone, \
+ null) \
+ V(Float32, TruncateFloat32ToInt32, kS390_Float32ToInt32, OperandMode::kNone, \
+ null) \
+ V(Float32, TruncateFloat32ToUint32, kS390_Float32ToUint32, \
+ OperandMode::kNone, null) \
+ V(Float64, TruncateFloat64ToUint32, kS390_DoubleToUint32, \
+ OperandMode::kNone, null) \
+ V(Float64, ChangeFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone, \
+ null) \
+ V(Float64, ChangeFloat64ToUint32, kS390_DoubleToUint32, OperandMode::kNone, \
+ null) \
+ V(Float64, Float64SilenceNaN, kS390_Float64SilenceNaN, OperandMode::kNone, \
+ null) \
+ V(Float32, Float32Abs, kS390_AbsFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Abs, kS390_AbsDouble, OperandMode::kNone, null) \
+ V(Float32, Float32Sqrt, kS390_SqrtFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Sqrt, kS390_SqrtDouble, OperandMode::kNone, null) \
+ V(Float32, Float32RoundDown, kS390_FloorFloat, OperandMode::kNone, null) \
+ V(Float64, Float64RoundDown, kS390_FloorDouble, OperandMode::kNone, null) \
+ V(Float32, Float32RoundUp, kS390_CeilFloat, OperandMode::kNone, null) \
+ V(Float64, Float64RoundUp, kS390_CeilDouble, OperandMode::kNone, null) \
+ V(Float32, Float32RoundTruncate, kS390_TruncateFloat, OperandMode::kNone, \
+ null) \
+ V(Float64, Float64RoundTruncate, kS390_TruncateDouble, OperandMode::kNone, \
+ null) \
+ V(Float64, Float64RoundTiesAway, kS390_RoundDouble, OperandMode::kNone, \
+ null) \
+ V(Float32, Float32Neg, kS390_NegFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Neg, kS390_NegDouble, OperandMode::kNone, null) \
+ /* TODO(john.yan): can use kAllowRM */ \
+ V(Word32, Float64ExtractLowWord32, kS390_DoubleExtractLowWord32, \
+ OperandMode::kNone, null) \
+ V(Word32, Float64ExtractHighWord32, kS390_DoubleExtractHighWord32, \
+ OperandMode::kNone, null)
+
+#define FLOAT_BIN_OP_LIST(V) \
+ V(Float32, Float32Add, kS390_AddFloat, OperandMode::kAllowRM, null) \
+ V(Float64, Float64Add, kS390_AddDouble, OperandMode::kAllowRM, null) \
+ V(Float32, Float32Sub, kS390_SubFloat, OperandMode::kAllowRM, null) \
+ V(Float64, Float64Sub, kS390_SubDouble, OperandMode::kAllowRM, null) \
+ V(Float32, Float32Mul, kS390_MulFloat, OperandMode::kAllowRM, null) \
+ V(Float64, Float64Mul, kS390_MulDouble, OperandMode::kAllowRM, null) \
+ V(Float32, Float32Div, kS390_DivFloat, OperandMode::kAllowRM, null) \
+ V(Float64, Float64Div, kS390_DivDouble, OperandMode::kAllowRM, null) \
+ V(Float32, Float32Max, kS390_MaxFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Max, kS390_MaxDouble, OperandMode::kNone, null) \
+ V(Float32, Float32Min, kS390_MinFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Min, kS390_MinDouble, OperandMode::kNone, null)
+
+#define WORD32_UNARY_OP_LIST_32(V) \
+ V(Word32, Word32Clz, kS390_Cntlz32, OperandMode::kNone, null) \
+ V(Word32, Word32Popcnt, kS390_Popcnt32, OperandMode::kNone, null) \
+ V(Word32, RoundInt32ToFloat32, kS390_Int32ToFloat32, OperandMode::kNone, \
+ null) \
+ V(Word32, RoundUint32ToFloat32, kS390_Uint32ToFloat32, OperandMode::kNone, \
+ null) \
+ V(Word32, ChangeInt32ToFloat64, kS390_Int32ToDouble, OperandMode::kNone, \
+ null) \
+ V(Word32, ChangeUint32ToFloat64, kS390_Uint32ToDouble, OperandMode::kNone, \
+ null) \
+ V(Word32, BitcastInt32ToFloat32, kS390_BitcastInt32ToFloat32, \
+ OperandMode::kNone, null)
+
+#ifdef V8_TARGET_ARCH_S390X
+#define FLOAT_UNARY_OP_LIST(V) \
+ FLOAT_UNARY_OP_LIST_32(V) \
+ V(Float64, ChangeFloat64ToUint64, kS390_DoubleToUint64, OperandMode::kNone, \
+ null) \
+ V(Float64, BitcastFloat64ToInt64, kS390_BitcastDoubleToInt64, \
+ OperandMode::kNone, null)
+#define WORD32_UNARY_OP_LIST(V) \
+ WORD32_UNARY_OP_LIST_32(V) \
+ V(Word32, ChangeInt32ToInt64, kS390_ExtendSignWord32, OperandMode::kNone, \
+ null) \
+ V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone, \
+ [&]() -> bool { \
+ if (ProduceWord32Result(node->InputAt(0))) { \
+ EmitIdentity(node); \
+ return true; \
+ } \
+ return false; \
+ })
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- VisitRR(this, kS390_Uint32ToDouble, node);
-}
+#else
+#define FLOAT_UNARY_OP_LIST(V) FLOAT_UNARY_OP_LIST_32(V)
+#define WORD32_UNARY_OP_LIST(V) WORD32_UNARY_OP_LIST_32(V)
+#endif
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- VisitRR(this, kS390_DoubleToInt32, node);
-}
+#define WORD32_BIN_OP_LIST(V) \
+ V(Word32, Int32Add, kS390_Add32, AddOperandMode, null) \
+ V(Word32, Int32Sub, kS390_Sub32, SubOperandMode, ([&]() { \
+ return TryMatchNegFromSub<Int32BinopMatcher, kS390_Neg32>(this, node); \
+ })) \
+ V(Word32, Int32Mul, kS390_Mul32, MulOperandMode, ([&]() { \
+ return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(this, \
+ node); \
+ })) \
+ V(Word32, Int32AddWithOverflow, kS390_Add32, AddOperandMode, \
+ ([&]() { return TryMatchInt32AddWithOverflow(this, node); })) \
+ V(Word32, Int32SubWithOverflow, kS390_Sub32, SubOperandMode, \
+ ([&]() { return TryMatchInt32SubWithOverflow(this, node); })) \
+ V(Word32, Int32MulWithOverflow, kS390_Mul32, MulOperandMode, \
+ ([&]() { return TryMatchInt32MulWithOverflow(this, node); })) \
+ V(Word32, Int32MulHigh, kS390_MulHigh32, \
+ OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps, null) \
+ V(Word32, Uint32MulHigh, kS390_MulHighU32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Int32Div, kS390_Div32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Uint32Div, kS390_DivU32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Int32Mod, kS390_Mod32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Uint32Mod, kS390_ModU32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Word32Ror, kS390_RotRight32, \
+ OperandMode::kAllowRI | OperandMode::kAllowRRR | OperandMode::kAllowRRI | \
+ OperandMode::kShift32Imm, \
+ null) \
+ V(Word32, Word32And, kS390_And32, And32OperandMode, null) \
+ V(Word32, Word32Or, kS390_Or32, Or32OperandMode, null) \
+ V(Word32, Word32Xor, kS390_Xor32, Xor32OperandMode, null) \
+ V(Word32, Word32Shl, kS390_ShiftLeft32, Shift32OperandMode, null) \
+ V(Word32, Word32Shr, kS390_ShiftRight32, Shift32OperandMode, null) \
+ V(Word32, Word32Sar, kS390_ShiftRightArith32, Shift32OperandMode, \
+ [&]() { return TryMatchSignExtInt16OrInt8FromWord32Sar(this, node); }) \
+ V(Word32, Float64InsertLowWord32, kS390_DoubleInsertLowWord32, \
+ OperandMode::kAllowRRR, \
+ [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); }) \
+ V(Word32, Float64InsertHighWord32, kS390_DoubleInsertHighWord32, \
+ OperandMode::kAllowRRR, \
+ [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); })
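+// Each entry above pairs a machine opcode with its legal operand modes and an
+// optional matcher lambda that tries a cheaper, strength-reduced form first;
+// e.g. Int32Mul by a power of two is turned into a left shift via
+// TryMatchShiftFromMul before the generic multiply is emitted.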
+
+#define WORD64_UNARY_OP_LIST(V) \
+ V(Word64, Word64Popcnt, kS390_Popcnt64, OperandMode::kNone, null) \
+ V(Word64, Word64Clz, kS390_Cntlz64, OperandMode::kNone, null) \
+ V(Word64, TruncateInt64ToInt32, kS390_Int64ToInt32, OperandMode::kNone, \
+ null) \
+ V(Word64, RoundInt64ToFloat32, kS390_Int64ToFloat32, OperandMode::kNone, \
+ null) \
+ V(Word64, RoundInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone, \
+ null) \
+ V(Word64, RoundUint64ToFloat32, kS390_Uint64ToFloat32, OperandMode::kNone, \
+ null) \
+ V(Word64, RoundUint64ToFloat64, kS390_Uint64ToDouble, OperandMode::kNone, \
+ null) \
+ V(Word64, BitcastInt64ToFloat64, kS390_BitcastInt64ToDouble, \
+ OperandMode::kNone, null)
+
+#define WORD64_BIN_OP_LIST(V) \
+ V(Word64, Int64Add, kS390_Add64, AddOperandMode, null) \
+ V(Word64, Int64Sub, kS390_Sub64, SubOperandMode, ([&]() { \
+ return TryMatchNegFromSub<Int64BinopMatcher, kS390_Neg64>(this, node); \
+ })) \
+ V(Word64, Int64AddWithOverflow, kS390_Add64, AddOperandMode, \
+ ([&]() { return TryMatchInt64AddWithOverflow(this, node); })) \
+ V(Word64, Int64SubWithOverflow, kS390_Sub64, SubOperandMode, \
+ ([&]() { return TryMatchInt64SubWithOverflow(this, node); })) \
+ V(Word64, Int64Mul, kS390_Mul64, MulOperandMode, ([&]() { \
+ return TryMatchShiftFromMul<Int64BinopMatcher, kS390_ShiftLeft64>(this, \
+ node); \
+ })) \
+ V(Word64, Int64Div, kS390_Div64, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word64, Uint64Div, kS390_DivU64, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word64, Int64Mod, kS390_Mod64, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word64, Uint64Mod, kS390_ModU64, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word64, Word64Sar, kS390_ShiftRightArith64, Shift64OperandMode, null) \
+ V(Word64, Word64Ror, kS390_RotRight64, Shift64OperandMode, null) \
+ V(Word64, Word64Or, kS390_Or64, Or64OperandMode, null) \
+ V(Word64, Word64Xor, kS390_Xor64, Xor64OperandMode, null)
+
+#define DECLARE_UNARY_OP(type, name, op, mode, try_extra) \
+ void InstructionSelector::Visit##name(Node* node) { \
+ if (std::function<bool()>(try_extra)()) return; \
+ Visit##type##UnaryOp(this, node, op, mode); \
+ }
+
+#define DECLARE_BIN_OP(type, name, op, mode, try_extra) \
+ void InstructionSelector::Visit##name(Node* node) { \
+ if (std::function<bool()>(try_extra)()) return; \
+ Visit##type##BinOp(this, node, op, mode); \
+ }
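+// For illustration only: the Int32Add entry of WORD32_BIN_OP_LIST expands to
+// roughly the following, assuming the `null` macro defined earlier in this
+// file is a no-op lambda that returns false:
+//
+//   void InstructionSelector::VisitInt32Add(Node* node) {
+//     if (std::function<bool()>(null)()) return;  // no extra matcher to try
+//     VisitWord32BinOp(this, node, kS390_Add32, AddOperandMode);
+//   }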
+
+WORD32_BIN_OP_LIST(DECLARE_BIN_OP);
+WORD32_UNARY_OP_LIST(DECLARE_UNARY_OP);
+FLOAT_UNARY_OP_LIST(DECLARE_UNARY_OP);
+FLOAT_BIN_OP_LIST(DECLARE_BIN_OP);
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- VisitRR(this, kS390_DoubleToUint32, node);
-}
+#if V8_TARGET_ARCH_S390X
+WORD64_UNARY_OP_LIST(DECLARE_UNARY_OP)
+WORD64_BIN_OP_LIST(DECLARE_BIN_OP)
+#endif
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
- VisitRR(this, kS390_DoubleToUint32, node);
-}
+#undef DECLARE_BIN_OP
+#undef DECLARE_UNARY_OP
+#undef WORD64_BIN_OP_LIST
+#undef WORD64_UNARY_OP_LIST
+#undef WORD32_BIN_OP_LIST
+#undef WORD32_UNARY_OP_LIST
+#undef FLOAT_UNARY_OP_LIST
+#undef WORD32_UNARY_OP_LIST_32
+#undef FLOAT_BIN_OP_LIST
+#undef FLOAT_UNARY_OP_LIST_32
+#undef null
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
@@ -1510,121 +1668,8 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
}
-void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
- // TODO(mbrandy): inspect input to see if nop is appropriate.
- VisitRR(this, kS390_ExtendSignWord32, node);
-}
-
-void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
- S390OperandGenerator g(this);
- Node* value = node->InputAt(0);
- if (ZeroExtendsWord32ToWord64(value)) {
- // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
- // zero-extension is a no-op.
- return EmitIdentity(node);
- }
- VisitRR(this, kS390_Uint32ToUint64, node);
-}
#endif
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- VisitRR(this, kS390_DoubleToFloat32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
- VisitRR(this, kArchTruncateDoubleToI, node);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
- VisitRR(this, kS390_DoubleToInt32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
- VisitRR(this, kS390_Float32ToInt32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
- VisitRR(this, kS390_Float32ToUint32, node);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
- // TODO(mbrandy): inspect input to see if nop is appropriate.
- VisitRR(this, kS390_Int64ToInt32, node);
-}
-
-void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
- VisitRR(this, kS390_Int64ToFloat32, node);
-}
-
-void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
- VisitRR(this, kS390_Int64ToDouble, node);
-}
-
-void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
- VisitRR(this, kS390_Uint64ToFloat32, node);
-}
-
-void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
- VisitRR(this, kS390_Uint64ToDouble, node);
-}
-#endif
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- VisitRR(this, kS390_BitcastFloat32ToInt32, node);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
- VisitRR(this, kS390_BitcastDoubleToInt64, node);
-}
-#endif
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- VisitRR(this, kS390_BitcastInt32ToFloat32, node);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
- VisitRR(this, kS390_BitcastInt64ToDouble, node);
-}
-#endif
-
-void InstructionSelector::VisitFloat32Add(Node* node) {
- VisitRRR(this, kS390_AddFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
- // TODO(mbrandy): detect multiply-add
- VisitRRR(this, kS390_AddDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
- VisitRRR(this, kS390_SubFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
- // TODO(mbrandy): detect multiply-subtract
- VisitRRR(this, kS390_SubDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
- VisitRRR(this, kS390_MulFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
- // TODO(mbrandy): detect negate
- VisitRRR(this, kS390_MulDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
- VisitRRR(this, kS390_DivFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
- VisitRRR(this, kS390_DivDouble, node);
-}
-
void InstructionSelector::VisitFloat64Mod(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
@@ -1632,38 +1677,6 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
->MarkAsCall();
}
-void InstructionSelector::VisitFloat32Max(Node* node) {
- VisitRRR(this, kS390_MaxFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
- VisitRRR(this, kS390_MaxDouble, node);
-}
-
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
- VisitRR(this, kS390_Float64SilenceNaN, node);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
- VisitRRR(this, kS390_MinFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
- VisitRRR(this, kS390_MinDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
- VisitRR(this, kS390_AbsFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
- VisitRR(this, kS390_AbsDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- VisitRR(this, kS390_SqrtFloat, node);
-}
-
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
InstructionCode opcode) {
S390OperandGenerator g(this);
@@ -1679,38 +1692,6 @@ void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
->MarkAsCall();
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitRR(this, kS390_SqrtDouble, node);
-}
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
- VisitRR(this, kS390_FloorFloat, node);
-}
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- VisitRR(this, kS390_FloorDouble, node);
-}
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
- VisitRR(this, kS390_CeilFloat, node);
-}
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
- VisitRR(this, kS390_CeilDouble, node);
-}
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
- VisitRR(this, kS390_TruncateFloat, node);
-}
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- VisitRR(this, kS390_TruncateDouble, node);
-}
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- VisitRR(this, kS390_RoundDouble, node);
-}
-
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
UNREACHABLE();
}
@@ -1719,58 +1700,6 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
UNREACHABLE();
}
-void InstructionSelector::VisitFloat32Neg(Node* node) {
- VisitRR(this, kS390_NegFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
- VisitRR(this, kS390_NegDouble, node);
-}
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- OperandModes mode = AddOperandMode;
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBin32op(this, node, kS390_Add32, mode, &cont);
- }
- FlagsContinuation cont;
- VisitBin32op(this, node, kS390_Add32, mode, &cont);
-}
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- OperandModes mode = SubOperandMode;
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBin32op(this, node, kS390_Sub32, mode, &cont);
- }
- FlagsContinuation cont;
- VisitBin32op(this, node, kS390_Sub32, mode, &cont);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
- OperandMode::kInt32Imm, &cont);
- }
- FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, OperandMode::kInt32Imm,
- &cont);
-}
-
-void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
- OperandMode::kInt32Imm_Negate, &cont);
- }
- FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
- OperandMode::kInt32Imm_Negate, &cont);
-}
-#endif
-
static bool CompareLogical(FlagsContinuation* cont) {
switch (cont->condition()) {
case kUnsignedLessThan:
@@ -2114,28 +2043,35 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBin32op(selector, node, kS390_Add32, AddOperandMode,
- cont);
+ return VisitWord32BinOp(selector, node, kS390_Add32,
+ AddOperandMode, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBin32op(selector, node, kS390_Sub32, SubOperandMode,
- cont);
+ return VisitWord32BinOp(selector, node, kS390_Sub32,
+ SubOperandMode, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return VisitBin32op(
+ return VisitWord32BinOp(
selector, node, kS390_Mul32WithOverflow,
OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
cont);
+ case IrOpcode::kInt32AbsWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitWord32UnaryOp(selector, node, kS390_Abs32,
+ OperandMode::kNone, cont);
#if V8_TARGET_ARCH_S390X
+ case IrOpcode::kInt64AbsWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitWord64UnaryOp(selector, node, kS390_Abs64,
+ OperandMode::kNone, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(
- selector, node, kS390_Add64, OperandMode::kInt32Imm, cont);
+ return VisitWord64BinOp(selector, node, kS390_Add64,
+ AddOperandMode, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(
- selector, node, kS390_Sub64, OperandMode::kInt32Imm_Negate,
- cont);
+ return VisitWord64BinOp(selector, node, kS390_Sub64,
+ SubOperandMode, cont);
#endif
default:
break;
@@ -2165,9 +2101,15 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// can't handle overflow case.
break;
case IrOpcode::kWord32Or:
- return VisitBin32op(selector, value, kS390_Or32, OrOperandMode, cont);
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord32BinOp(selector, value, kS390_Or32, Or32OperandMode,
+ cont);
+ break;
case IrOpcode::kWord32Xor:
- return VisitBin32op(selector, value, kS390_Xor32, XorOperandMode, cont);
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord32BinOp(selector, value, kS390_Xor32,
+ Xor32OperandMode, cont);
+ break;
case IrOpcode::kWord32Sar:
case IrOpcode::kWord32Shl:
case IrOpcode::kWord32Shr:
@@ -2185,10 +2127,14 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// can't handle overflow case.
break;
case IrOpcode::kWord64Or:
- // TODO(john.yan): need to handle
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord64BinOp(selector, value, kS390_Or64, Or64OperandMode,
+ cont);
break;
case IrOpcode::kWord64Xor:
- // TODO(john.yan): need to handle
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord64BinOp(selector, value, kS390_Xor64,
+ Xor64OperandMode, cont);
break;
case IrOpcode::kWord64Sar:
case IrOpcode::kWord64Shl:
@@ -2424,48 +2370,6 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- S390OperandGenerator g(this);
- Emit(kS390_DoubleExtractLowWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- S390OperandGenerator g(this);
- Emit(kS390_DoubleExtractHighWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
- S390OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
- CanCover(node, left)) {
- left = left->InputAt(1);
- Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
- g.UseRegister(right));
- return;
- }
- Emit(kS390_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
- g.UseRegister(left), g.UseRegister(right));
-}
-
-void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
- S390OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
- CanCover(node, left)) {
- left = left->InputAt(1);
- Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
- g.UseRegister(left));
- return;
- }
- Emit(kS390_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
- g.UseRegister(left), g.UseRegister(right));
-}
-
void InstructionSelector::VisitAtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
S390OperandGenerator g(this);
@@ -2521,6 +2425,54 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
inputs);
}
+void InstructionSelector::VisitAtomicExchange(Node* node) {
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
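+  // All operands and the result live in distinct registers: the exchange is
+  // presumably lowered to a compare-and-swap retry loop, so base, index and
+  // value must stay live (and must not alias) until the loop completes.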
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
+void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2534,6 +2486,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord32ReverseBytes |
MachineOperatorBuilder::kWord64ReverseBytes |
+ MachineOperatorBuilder::kInt32AbsWithOverflow |
+ MachineOperatorBuilder::kInt64AbsWithOverflow |
MachineOperatorBuilder::kWord64Popcnt;
}
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index dcc84b31ed..ea218671ad 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -27,8 +27,11 @@ BasicBlock::BasicBlock(Zone* zone, Id id)
nodes_(zone),
successors_(zone),
predecessors_(zone),
- id_(id) {}
-
+#if DEBUG
+ debug_info_(AssemblerDebugInfo(nullptr, nullptr, -1)),
+#endif
+ id_(id) {
+}
bool BasicBlock::LoopContains(BasicBlock* block) const {
// RPO numbers must be initialized.
@@ -93,6 +96,24 @@ BasicBlock* BasicBlock::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
return b1;
}
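+// Streams a basic block as its id; in DEBUG builds this also appends any
+// assembler debug info plus a short chain of first predecessors, yielding
+// output of the form "B7 <= B5 <= B3 <= B1".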
+std::ostream& operator<<(std::ostream& os, const BasicBlock& block) {
+ os << "B" << block.id();
+#if DEBUG
+ AssemblerDebugInfo info = block.debug_info();
+ if (info.name) os << info;
+ // Print predecessor blocks for better debugging.
+ const int kMaxDisplayedBlocks = 4;
+ int i = 0;
+ const BasicBlock* current_block = &block;
+ while (current_block->PredecessorCount() > 0 && i++ < kMaxDisplayedBlocks) {
+ current_block = current_block->predecessors().front();
+ os << " <= B" << current_block->id();
+ info = current_block->debug_info();
+ if (info.name) os << info;
+ }
+#endif
+ return os;
+}
std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
switch (c) {
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 3f9750cd70..b5e696dc41 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -20,11 +20,9 @@ class BasicBlock;
class BasicBlockInstrumentor;
class Node;
-
typedef ZoneVector<BasicBlock*> BasicBlockVector;
typedef ZoneVector<Node*> NodeVector;
-
// A basic block contains an ordered list of nodes and ends with a control
// node. Note that if a basic block has phis, then all phis must appear as the
// first nodes in the block.
@@ -60,6 +58,12 @@ class V8_EXPORT_PRIVATE BasicBlock final
BasicBlock(Zone* zone, Id id);
Id id() const { return id_; }
+#if DEBUG
+ void set_debug_info(AssemblerDebugInfo debug_info) {
+ debug_info_ = debug_info;
+ }
+ AssemblerDebugInfo debug_info() const { return debug_info_; }
+#endif // DEBUG
// Predecessors.
BasicBlockVector& predecessors() { return predecessors_; }
@@ -167,11 +171,15 @@ class V8_EXPORT_PRIVATE BasicBlock final
BasicBlockVector successors_;
BasicBlockVector predecessors_;
+#if DEBUG
+ AssemblerDebugInfo debug_info_;
+#endif
Id id_;
DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
+std::ostream& operator<<(std::ostream&, const BasicBlock&);
std::ostream& operator<<(std::ostream&, const BasicBlock::Control&);
std::ostream& operator<<(std::ostream&, const BasicBlock::Id&);
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index b4e74d98fe..76889a69cb 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -25,7 +25,8 @@ namespace compiler {
if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
} while (false)
-Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
+ size_t node_count_hint)
: zone_(zone),
graph_(graph),
schedule_(schedule),
@@ -33,13 +34,23 @@ Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
schedule_queue_(zone),
- node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
-
+ node_data_(zone) {
+ node_data_.reserve(node_count_hint);
+ node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
+}
Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
- Schedule* schedule = new (graph->zone())
- Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
- Scheduler scheduler(zone, graph, schedule, flags);
+ Zone* schedule_zone =
+ (flags & Scheduler::kTempSchedule) ? zone : graph->zone();
+
+ // Reserve 10% more space for nodes if node splitting is enabled to try to
+ // avoid resizing the vector since that would triple its zone memory usage.
+ float node_hint_multiplier = (flags & Scheduler::kSplitNodes) ? 1.1 : 1;
+ size_t node_count_hint = node_hint_multiplier * graph->NodeCount();
+
+ Schedule* schedule =
+ new (schedule_zone) Schedule(schedule_zone, node_count_hint);
+ Scheduler scheduler(zone, graph, schedule, flags, node_count_hint);
scheduler.BuildCFG();
scheduler.ComputeSpecialRPONumbering();
@@ -586,7 +597,9 @@ void Scheduler::BuildCFG() {
control_flow_builder_->Run();
// Initialize per-block data.
- scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
+  // Reserve an extra 10% to avoid resizing the vector when fusing floating
+  // control.
+ scheduled_nodes_.reserve(schedule_->BasicBlockCount() * 1.1);
+ scheduled_nodes_.resize(schedule_->BasicBlockCount());
}
@@ -1326,7 +1339,8 @@ void Scheduler::ScheduleEarly() {
class ScheduleLateNodeVisitor {
public:
ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
- : scheduler_(scheduler),
+ : zone_(zone),
+ scheduler_(scheduler),
schedule_(scheduler_->schedule_),
marked_(scheduler->zone_),
marking_queue_(scheduler->zone_) {}
@@ -1619,7 +1633,12 @@ class ScheduleLateNodeVisitor {
void ScheduleNode(BasicBlock* block, Node* node) {
schedule_->PlanNode(block, node);
- scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
+ size_t block_id = block->id().ToSize();
+ if (!scheduler_->scheduled_nodes_[block_id]) {
+ scheduler_->scheduled_nodes_[block_id] =
+ new (zone_->New(sizeof(NodeVector))) NodeVector(zone_);
+ }
+ scheduler_->scheduled_nodes_[block_id]->push_back(node);
scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
}
@@ -1638,6 +1657,7 @@ class ScheduleLateNodeVisitor {
return copy;
}
+ Zone* zone_;
Scheduler* scheduler_;
Schedule* schedule_;
BoolVector marked_;
@@ -1674,11 +1694,13 @@ void Scheduler::SealFinalSchedule() {
// Add collected nodes for basic blocks to their blocks in the right order.
int block_num = 0;
- for (NodeVector& nodes : scheduled_nodes_) {
+ for (NodeVector* nodes : scheduled_nodes_) {
BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
BasicBlock* block = schedule_->GetBlockById(id);
- for (Node* node : base::Reversed(nodes)) {
- schedule_->AddNode(block, node);
+ if (nodes) {
+ for (Node* node : base::Reversed(*nodes)) {
+ schedule_->AddNode(block, node);
+ }
}
}
}
@@ -1728,7 +1750,7 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
// Move previously planned nodes.
// TODO(mstarzinger): Improve that by supporting bulk moves.
- scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
+ scheduled_nodes_.resize(schedule_->BasicBlockCount());
MovePlannedNodes(block, schedule_->block(node));
if (FLAG_trace_turbo_scheduler) {
@@ -1741,12 +1763,20 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
to->id().ToInt());
- NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
- for (Node* const node : *nodes) {
+ NodeVector* from_nodes = scheduled_nodes_[from->id().ToSize()];
+ NodeVector* to_nodes = scheduled_nodes_[to->id().ToSize()];
+ if (!from_nodes) return;
+
+ for (Node* const node : *from_nodes) {
schedule_->SetBlockForNode(to, node);
- scheduled_nodes_[to->id().ToSize()].push_back(node);
}
- nodes->clear();
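+  // If the destination block already has a node list, append the moved nodes
+  // and clear the source; otherwise steal the source vector by swapping the
+  // two pointers, which avoids copying entirely.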
+ if (to_nodes) {
+ to_nodes->insert(to_nodes->end(), from_nodes->begin(), from_nodes->end());
+ from_nodes->clear();
+ } else {
+ std::swap(scheduled_nodes_[from->id().ToSize()],
+ scheduled_nodes_[to->id().ToSize()]);
+ }
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index 1a08e4c019..4d297e1756 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -29,12 +29,12 @@ class SpecialRPONumberer;
class V8_EXPORT_PRIVATE Scheduler {
public:
// Flags that control the mode of operation.
- enum Flag { kNoFlags = 0u, kSplitNodes = 1u << 1 };
+ enum Flag { kNoFlags = 0u, kSplitNodes = 1u << 1, kTempSchedule = 1u << 2 };
typedef base::Flags<Flag> Flags;
// The complete scheduling algorithm. Creates a new schedule and places all
// nodes from the graph into it.
- static Schedule* ComputeSchedule(Zone* zone, Graph* graph, Flags flags);
+ static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags);
// Compute the RPO of blocks in an existing schedule.
static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule);
@@ -65,7 +65,8 @@ class V8_EXPORT_PRIVATE Scheduler {
Graph* graph_;
Schedule* schedule_;
Flags flags_;
- NodeVectorVector scheduled_nodes_; // Per-block list of nodes in reverse.
+ ZoneVector<NodeVector*>
+ scheduled_nodes_; // Per-block list of nodes in reverse.
NodeVector schedule_root_nodes_; // Fixed root nodes seed the worklist.
ZoneQueue<Node*> schedule_queue_; // Worklist of schedulable nodes.
ZoneVector<SchedulerData> node_data_; // Per-node data for all nodes.
@@ -73,7 +74,8 @@ class V8_EXPORT_PRIVATE Scheduler {
SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks.
ControlEquivalence* equivalence_; // Control dependence equivalence.
- Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags);
+ Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
+            size_t node_count_hint);
inline SchedulerData DefaultSchedulerData();
inline SchedulerData* GetData(Node* node);
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 19ffe93775..8f967788db 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -17,24 +17,19 @@ namespace internal {
namespace compiler {
SimdScalarLowering::SimdScalarLowering(
- Graph* graph, MachineOperatorBuilder* machine,
- CommonOperatorBuilder* common, Zone* zone,
- Signature<MachineRepresentation>* signature)
- : zone_(zone),
- graph_(graph),
- machine_(machine),
- common_(common),
- state_(graph, 3),
- stack_(zone),
+ JSGraph* jsgraph, Signature<MachineRepresentation>* signature)
+ : jsgraph_(jsgraph),
+ state_(jsgraph->graph(), 3),
+ stack_(jsgraph_->zone()),
replacements_(nullptr),
signature_(signature),
- placeholder_(
- graph->NewNode(common->Parameter(-2, "placeholder"), graph->start())),
+ placeholder_(graph()->NewNode(common()->Parameter(-2, "placeholder"),
+ graph()->start())),
parameter_count_after_lowering_(-1) {
- DCHECK_NOT_NULL(graph);
- DCHECK_NOT_NULL(graph->end());
- replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
- memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
+ DCHECK_NOT_NULL(graph());
+ DCHECK_NOT_NULL(graph()->end());
+ replacements_ = zone()->NewArray<Replacement>(graph()->NodeCount());
+ memset(replacements_, 0, sizeof(Replacement) * graph()->NodeCount());
}
void SimdScalarLowering::LowerGraph() {
@@ -72,16 +67,58 @@ void SimdScalarLowering::LowerGraph() {
}
#define FOREACH_INT32X4_OPCODE(V) \
- V(Int32x4Add) \
- V(Int32x4ExtractLane) \
- V(CreateInt32x4) \
- V(Int32x4ReplaceLane)
+ V(I32x4Splat) \
+ V(I32x4ExtractLane) \
+ V(I32x4ReplaceLane) \
+ V(I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4) \
+ V(I32x4Neg) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4MinU) \
+ V(I32x4MaxU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor) \
+ V(S128Not)
#define FOREACH_FLOAT32X4_OPCODE(V) \
- V(Float32x4Add) \
- V(Float32x4ExtractLane) \
- V(CreateFloat32x4) \
- V(Float32x4ReplaceLane)
+ V(F32x4Splat) \
+ V(F32x4ExtractLane) \
+ V(F32x4ReplaceLane) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Div) \
+ V(F32x4Min) \
+ V(F32x4Max)
+
+#define FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(V) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(F32x4Gt) \
+ V(F32x4Ge)
+
+#define FOREACH_INT32X4_TO_SIMD1X4OPCODE(V) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4LtS) \
+ V(I32x4LeS) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4LtU) \
+ V(I32x4LeU) \
+ V(I32x4GtU) \
+ V(I32x4GeU)
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
@@ -97,9 +134,35 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
replacements_[node->id()].type = SimdType::kFloat32;
break;
}
+ FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
+ FOREACH_INT32X4_TO_SIMD1X4OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kSimd1x4;
+ break;
+ }
+ default: {
+ switch (output->opcode()) {
+ FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
+ case IrOpcode::kF32x4SConvertI32x4:
+ case IrOpcode::kF32x4UConvertI32x4: {
+ replacements_[node->id()].type = SimdType::kInt32;
+ break;
+ }
+ FOREACH_INT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
+ case IrOpcode::kI32x4SConvertF32x4:
+ case IrOpcode::kI32x4UConvertF32x4: {
+ replacements_[node->id()].type = SimdType::kFloat32;
+ break;
+ }
+ case IrOpcode::kS32x4Select: {
+ replacements_[node->id()].type = SimdType::kSimd1x4;
+ break;
+ }
+ default: {
+ replacements_[node->id()].type = replacements_[output->id()].type;
+ }
+ }
+ }
#undef CASE_STMT
- default:
- replacements_[node->id()].type = replacements_[output->id()].type;
}
}
@@ -219,14 +282,142 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
}
}
-void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType rep_type,
+void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
+ const Operator* op, bool invert_inputs) {
+ DCHECK(node->InputCount() == 2);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; ++i) {
+ if (invert_inputs) {
+ rep_node[i] = graph()->NewNode(op, rep_right[i], rep_left[i]);
+ } else {
+ rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ }
+ }
+ ReplaceNode(node, rep_node);
+}
+
+void SimdScalarLowering::LowerUnaryOp(Node* node, SimdType input_rep_type,
+ const Operator* op) {
+ DCHECK(node->InputCount() == 1);
+ Node** rep = GetReplacementsWithType(node->InputAt(0), input_rep_type);
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; ++i) {
+ rep_node[i] = graph()->NewNode(op, rep[i]);
+ }
+ ReplaceNode(node, rep_node);
+}
+
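+// Lowers I32x4 min/max lane-wise with a branch diamond around the given
+// comparison: for max the right operand is selected when left < right holds,
+// for min the left one.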
+void SimdScalarLowering::LowerIntMinMax(Node* node, const Operator* op,
+ bool is_max) {
+ DCHECK(node->InputCount() == 2);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32);
+ Node** rep_right =
+ GetReplacementsWithType(node->InputAt(1), SimdType::kInt32);
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; ++i) {
+ Diamond d(graph(), common(),
+ graph()->NewNode(op, rep_left[i], rep_right[i]));
+ if (is_max) {
+ rep_node[i] =
+ d.Phi(MachineRepresentation::kWord32, rep_right[i], rep_left[i]);
+ } else {
+ rep_node[i] =
+ d.Phi(MachineRepresentation::kWord32, rep_left[i], rep_right[i]);
+ }
+ }
+ ReplaceNode(node, rep_node);
+}
+
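+// Builds a float64 truncation. If the target supports Float64RoundTruncate
+// the single machine instruction is used; otherwise the wasm_f64_trunc C
+// function is called through a stack slot and the result reloaded from it.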
+Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
+ if (machine()->Float64RoundTruncate().IsSupported()) {
+ return graph()->NewNode(machine()->Float64RoundTruncate().op(), input);
+ } else {
+ ExternalReference ref =
+ ExternalReference::wasm_f64_trunc(jsgraph_->isolate());
+ Node* stack_slot =
+ graph()->NewNode(machine()->StackSlot(MachineRepresentation::kFloat64));
+ const Operator* store_op = machine()->Store(
+ StoreRepresentation(MachineRepresentation::kFloat64, kNoWriteBarrier));
+ Node* effect =
+ graph()->NewNode(store_op, stack_slot, jsgraph_->Int32Constant(0),
+ input, graph()->start(), graph()->start());
+ Node* function = graph()->NewNode(common()->ExternalConstant(ref));
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = function;
+ args[1] = stack_slot;
+ args[2] = effect;
+ args[3] = graph()->start();
+ Signature<MachineType>::Builder sig_builder(zone(), 0, 1);
+ sig_builder.AddParam(MachineType::Pointer());
+ CallDescriptor* desc =
+ Linkage::GetSimplifiedCDescriptor(zone(), sig_builder.Build());
+ Node* call = graph()->NewNode(common()->Call(desc), 4, args);
+ return graph()->NewNode(machine()->Load(LoadRepresentation::Float64()),
+ stack_slot, jsgraph_->Int32Constant(0), call,
+ graph()->start());
+ }
+}
+
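+// Lowers F32x4->I32x4 conversions lane-wise: NaN lanes are mapped to zero,
+// values are clamped to the target integer range, truncated via BuildF64Trunc
+// and finally converted to (u)int32.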
+void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
+ DCHECK(node->InputCount() == 1);
+ Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kFloat32);
+ Node* rep_node[kMaxLanes];
+ Node* double_zero = graph()->NewNode(common()->Float64Constant(0.0));
+ Node* min = graph()->NewNode(
+ common()->Float64Constant(static_cast<double>(is_signed ? kMinInt : 0)));
+ Node* max = graph()->NewNode(common()->Float64Constant(
+ static_cast<double>(is_signed ? kMaxInt : 0xffffffffu)));
+ for (int i = 0; i < kMaxLanes; ++i) {
+ Node* double_rep =
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
+ Diamond nan_d(graph(), common(), graph()->NewNode(machine()->Float64Equal(),
+ double_rep, double_rep));
+ Node* temp =
+ nan_d.Phi(MachineRepresentation::kFloat64, double_rep, double_zero);
+ Diamond min_d(graph(), common(),
+ graph()->NewNode(machine()->Float64LessThan(), temp, min));
+ temp = min_d.Phi(MachineRepresentation::kFloat64, min, temp);
+ Diamond max_d(graph(), common(),
+ graph()->NewNode(machine()->Float64LessThan(), max, temp));
+ temp = max_d.Phi(MachineRepresentation::kFloat64, max, temp);
+ Node* trunc = BuildF64Trunc(temp);
+ if (is_signed) {
+ rep_node[i] = graph()->NewNode(machine()->ChangeFloat64ToInt32(), trunc);
+ } else {
+ rep_node[i] =
+ graph()->NewNode(machine()->TruncateFloat64ToUint32(), trunc);
+ }
+ }
+ ReplaceNode(node, rep_node);
+}
+
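+// Lowers SIMD shifts lane-wise. The constant shift amount is masked to five
+// bits, matching the mod-32 semantics of the scalar 32-bit shift operators.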
+void SimdScalarLowering::LowerShiftOp(Node* node, const Operator* op) {
+ static int32_t shift_mask = 0x1f;
+  static const int32_t shift_mask = 0x1f;
+ int32_t shift_amount = OpParameter<int32_t>(node);
+ Node* shift_node =
+ graph()->NewNode(common()->Int32Constant(shift_amount & shift_mask));
+ Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32);
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; ++i) {
+ rep_node[i] = graph()->NewNode(op, rep[i], shift_node);
+ }
+ ReplaceNode(node, rep_node);
+}
+
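+// Lowers Ne lane-wise as the negation of the corresponding Eq operator: a
+// diamond over Eq selects 0 when the lanes compare equal and 1 otherwise.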
+void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
const Operator* op) {
DCHECK(node->InputCount() == 2);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
- rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ Diamond d(graph(), common(),
+ graph()->NewNode(op, rep_left[i], rep_right[i]));
+ rep_node[i] = d.Phi(MachineRepresentation::kWord32,
+ jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(1));
}
ReplaceNode(node, rep_node);
}
@@ -377,29 +568,120 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
- case IrOpcode::kInt32x4Add: {
- LowerBinaryOp(node, rep_type, machine()->Int32Add());
+#define I32X4_BINOP_CASE(opcode, instruction) \
+ case IrOpcode::opcode: { \
+ LowerBinaryOp(node, rep_type, machine()->instruction()); \
+ break; \
+ }
+ I32X4_BINOP_CASE(kI32x4Add, Int32Add)
+ I32X4_BINOP_CASE(kI32x4Sub, Int32Sub)
+ I32X4_BINOP_CASE(kI32x4Mul, Int32Mul)
+ I32X4_BINOP_CASE(kS128And, Word32And)
+ I32X4_BINOP_CASE(kS128Or, Word32Or)
+ I32X4_BINOP_CASE(kS128Xor, Word32Xor)
+#undef I32X4_BINOP_CASE
+ case IrOpcode::kI32x4MaxS: {
+ LowerIntMinMax(node, machine()->Int32LessThan(), true);
+ break;
+ }
+ case IrOpcode::kI32x4MinS: {
+ LowerIntMinMax(node, machine()->Int32LessThan(), false);
+ break;
+ }
+ case IrOpcode::kI32x4MaxU: {
+ LowerIntMinMax(node, machine()->Uint32LessThan(), true);
+ break;
+ }
+ case IrOpcode::kI32x4MinU: {
+ LowerIntMinMax(node, machine()->Uint32LessThan(), false);
+ break;
+ }
+ case IrOpcode::kI32x4Neg: {
+ DCHECK(node->InputCount() == 1);
+ Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node* rep_node[kMaxLanes];
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ for (int i = 0; i < kMaxLanes; ++i) {
+ rep_node[i] = graph()->NewNode(machine()->Int32Sub(), zero, rep[i]);
+ }
+ ReplaceNode(node, rep_node);
+ break;
+ }
+ case IrOpcode::kS128Not: {
+ DCHECK(node->InputCount() == 1);
+ Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node* rep_node[kMaxLanes];
+ Node* mask = graph()->NewNode(common()->Int32Constant(0xffffffff));
+ for (int i = 0; i < kMaxLanes; ++i) {
+ rep_node[i] = graph()->NewNode(machine()->Word32Xor(), rep[i], mask);
+ }
+ ReplaceNode(node, rep_node);
+ break;
+ }
+ case IrOpcode::kI32x4SConvertF32x4: {
+ LowerConvertFromFloat(node, true);
+ break;
+ }
+ case IrOpcode::kI32x4UConvertF32x4: {
+ LowerConvertFromFloat(node, false);
+ break;
+ }
+ case IrOpcode::kI32x4Shl: {
+ LowerShiftOp(node, machine()->Word32Shl());
+ break;
+ }
+ case IrOpcode::kI32x4ShrS: {
+ LowerShiftOp(node, machine()->Word32Sar());
break;
}
- case IrOpcode::kFloat32x4Add: {
- LowerBinaryOp(node, rep_type, machine()->Float32Add());
+ case IrOpcode::kI32x4ShrU: {
+ LowerShiftOp(node, machine()->Word32Shr());
+ break;
+ }
+#define F32X4_BINOP_CASE(name) \
+ case IrOpcode::kF32x4##name: { \
+ LowerBinaryOp(node, rep_type, machine()->Float32##name()); \
+ break; \
+ }
+ F32X4_BINOP_CASE(Add)
+ F32X4_BINOP_CASE(Sub)
+ F32X4_BINOP_CASE(Mul)
+ F32X4_BINOP_CASE(Div)
+ F32X4_BINOP_CASE(Min)
+ F32X4_BINOP_CASE(Max)
+#undef F32X4_BINOP_CASE
+#define F32X4_UNOP_CASE(name) \
+ case IrOpcode::kF32x4##name: { \
+ LowerUnaryOp(node, rep_type, machine()->Float32##name()); \
+ break; \
+ }
+ F32X4_UNOP_CASE(Abs)
+ F32X4_UNOP_CASE(Neg)
+ F32X4_UNOP_CASE(Sqrt)
+#undef F32X4_UNOP_CASE
+ case IrOpcode::kF32x4SConvertI32x4: {
+ LowerUnaryOp(node, SimdType::kInt32, machine()->RoundInt32ToFloat32());
break;
}
- case IrOpcode::kCreateInt32x4:
- case IrOpcode::kCreateFloat32x4: {
+ case IrOpcode::kF32x4UConvertI32x4: {
+ LowerUnaryOp(node, SimdType::kInt32, machine()->RoundUint32ToFloat32());
+ break;
+ }
+ case IrOpcode::kI32x4Splat:
+ case IrOpcode::kF32x4Splat: {
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
- if (HasReplacement(0, node->InputAt(i))) {
- rep_node[i] = GetReplacements(node->InputAt(i))[0];
+ if (HasReplacement(0, node->InputAt(0))) {
+ rep_node[i] = GetReplacements(node->InputAt(0))[0];
} else {
- rep_node[i] = node->InputAt(i);
+ rep_node[i] = node->InputAt(0);
}
}
ReplaceNode(node, rep_node);
break;
}
- case IrOpcode::kInt32x4ExtractLane:
- case IrOpcode::kFloat32x4ExtractLane: {
+ case IrOpcode::kI32x4ExtractLane:
+ case IrOpcode::kF32x4ExtractLane: {
int32_t lane = OpParameter<int32_t>(node);
Node* rep_node[kMaxLanes] = {
GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
@@ -407,8 +689,8 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node);
break;
}
- case IrOpcode::kInt32x4ReplaceLane:
- case IrOpcode::kFloat32x4ReplaceLane: {
+ case IrOpcode::kI32x4ReplaceLane:
+ case IrOpcode::kF32x4ReplaceLane: {
DCHECK_EQ(2, node->InputCount());
Node* repNode = node->InputAt(1);
int32_t lane = OpParameter<int32_t>(node);
@@ -422,6 +704,58 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node);
break;
}
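+// Comparisons lower to the scalar machine comparison applied per lane. The
+// greater-than variants reuse the less-than operators with inverted
+// (swapped) inputs, so no dedicated machine operators are needed.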
+#define COMPARISON_CASE(type, simd_op, lowering_op, invert) \
+ case IrOpcode::simd_op: { \
+ LowerBinaryOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
+ break; \
+ }
+ COMPARISON_CASE(Float32, kF32x4Eq, Float32Equal, false)
+ COMPARISON_CASE(Float32, kF32x4Lt, Float32LessThan, false)
+ COMPARISON_CASE(Float32, kF32x4Le, Float32LessThanOrEqual, false)
+ COMPARISON_CASE(Float32, kF32x4Gt, Float32LessThan, true)
+ COMPARISON_CASE(Float32, kF32x4Ge, Float32LessThanOrEqual, true)
+ COMPARISON_CASE(Int32, kI32x4Eq, Word32Equal, false)
+ COMPARISON_CASE(Int32, kI32x4LtS, Int32LessThan, false)
+ COMPARISON_CASE(Int32, kI32x4LeS, Int32LessThanOrEqual, false)
+ COMPARISON_CASE(Int32, kI32x4GtS, Int32LessThan, true)
+ COMPARISON_CASE(Int32, kI32x4GeS, Int32LessThanOrEqual, true)
+ COMPARISON_CASE(Int32, kI32x4LtU, Uint32LessThan, false)
+ COMPARISON_CASE(Int32, kI32x4LeU, Uint32LessThanOrEqual, false)
+ COMPARISON_CASE(Int32, kI32x4GtU, Uint32LessThan, true)
+ COMPARISON_CASE(Int32, kI32x4GeU, Uint32LessThanOrEqual, true)
+#undef COMPARISON_CASE
+ case IrOpcode::kF32x4Ne: {
+ LowerNotEqual(node, SimdType::kFloat32, machine()->Float32Equal());
+ break;
+ }
+ case IrOpcode::kI32x4Ne: {
+ LowerNotEqual(node, SimdType::kInt32, machine()->Word32Equal());
+ break;
+ }
+ case IrOpcode::kS32x4Select: {
+ DCHECK(node->InputCount() == 3);
+ DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kSimd1x4);
+ Node** boolean_input = GetReplacements(node->InputAt(0));
+ Node** rep_left = GetReplacementsWithType(node->InputAt(1), rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; ++i) {
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Word32Equal(), boolean_input[i],
+ jsgraph_->Int32Constant(0)));
+        if (rep_type == SimdType::kFloat32) {
+          rep_node[i] =
+              d.Phi(MachineRepresentation::kFloat32, rep_right[i], rep_left[i]);
+        } else if (rep_type == SimdType::kInt32) {
+          rep_node[i] =
+              d.Phi(MachineRepresentation::kWord32, rep_right[i], rep_left[i]);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ ReplaceNode(node, rep_node);
+ break;
+ }
default: { DefaultLowering(node); }
}
}
@@ -483,7 +817,8 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
result[i] = nullptr;
}
}
- } else {
+ } else if (ReplacementType(node) == SimdType::kFloat32 &&
+ type == SimdType::kInt32) {
for (int i = 0; i < kMaxLanes; ++i) {
if (replacements[i] != nullptr) {
result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
@@ -492,6 +827,8 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
result[i] = nullptr;
}
}
+ } else {
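+    // Only Int32<->Float32 bitcast conversions are expected here; any other
+    // representation pair indicates a bug in the lowering.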
+ UNREACHABLE();
}
return result;
}
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index c795c6b88b..70186fdf11 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -7,6 +7,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-marker.h"
#include "src/zone/zone-containers.h"
@@ -17,8 +18,7 @@ namespace compiler {
class SimdScalarLowering {
public:
- SimdScalarLowering(Graph* graph, MachineOperatorBuilder* machine,
- CommonOperatorBuilder* common, Zone* zone,
+ SimdScalarLowering(JSGraph* jsgraph,
Signature<MachineRepresentation>* signature);
void LowerGraph();
@@ -28,7 +28,7 @@ class SimdScalarLowering {
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
- enum class SimdType : uint8_t { kInt32, kFloat32 };
+ enum class SimdType : uint8_t { kInt32, kFloat32, kSimd1x4 };
static const int kMaxLanes = 4;
static const int kLaneWidth = 16 / kMaxLanes;
@@ -38,10 +38,15 @@ class SimdScalarLowering {
SimdType type; // represents what input type is expected
};
- Zone* zone() const { return zone_; }
- Graph* graph() const { return graph_; }
- MachineOperatorBuilder* machine() const { return machine_; }
- CommonOperatorBuilder* common() const { return common_; }
+ struct NodeState {
+ Node* node;
+ int input_index;
+ };
+
+ Zone* zone() const { return jsgraph_->zone(); }
+ Graph* graph() const { return jsgraph_->graph(); }
+ MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
Signature<MachineRepresentation>* signature() const { return signature_; }
void LowerNode(Node* node);
@@ -59,17 +64,16 @@ class SimdScalarLowering {
const Operator* load_op);
void LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op, SimdType rep_type);
- void LowerBinaryOp(Node* node, SimdType rep_type, const Operator* op);
-
- struct NodeState {
- Node* node;
- int input_index;
- };
-
- Zone* zone_;
- Graph* const graph_;
- MachineOperatorBuilder* machine_;
- CommonOperatorBuilder* common_;
+ void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
+ bool invert_inputs = false);
+ void LowerUnaryOp(Node* node, SimdType input_rep_type, const Operator* op);
+ void LowerIntMinMax(Node* node, const Operator* op, bool is_max);
+ void LowerConvertFromFloat(Node* node, bool is_signed);
+ void LowerShiftOp(Node* node, const Operator* op);
+ Node* BuildF64Trunc(Node* input);
+ void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
+
+ JSGraph* const jsgraph_;
NodeMarker<State> state_;
ZoneDeque<NodeState> stack_;
Replacement* replacements_;
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 4acc77f22f..d0f952a9ec 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -88,13 +88,13 @@ MachineRepresentation MachineRepresentationFromArrayType(
}
UseInfo CheckedUseInfoAsWord32FromHint(
- NumberOperationHint hint, CheckForMinusZeroMode minus_zero_mode =
- CheckForMinusZeroMode::kCheckForMinusZero) {
+ NumberOperationHint hint,
+ IdentifyZeros identify_zeros = kDistinguishZeros) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
- return UseInfo::CheckedSignedSmallAsWord32(minus_zero_mode);
+ return UseInfo::CheckedSignedSmallAsWord32(identify_zeros);
case NumberOperationHint::kSigned32:
- return UseInfo::CheckedSigned32AsWord32(minus_zero_mode);
+ return UseInfo::CheckedSigned32AsWord32(identify_zeros);
case NumberOperationHint::kNumber:
return UseInfo::CheckedNumberAsWord32();
case NumberOperationHint::kNumberOrOddball:
@@ -454,10 +454,32 @@ class RepresentationSelector {
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
+#define DECLARE_CASE(Name) \
+ case IrOpcode::k##Name: { \
+ new_type = \
+ Type::Intersect(op_typer_.Name(FeedbackTypeOf(node->InputAt(0))), \
+ info->restriction_type(), graph_zone()); \
+ break; \
+ }
+ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
case IrOpcode::kPlainPrimitiveToNumber:
new_type = op_typer_.ToNumber(FeedbackTypeOf(node->InputAt(0)));
break;
+ case IrOpcode::kCheckFloat64Hole:
+ new_type = Type::Intersect(
+ op_typer_.CheckFloat64Hole(FeedbackTypeOf(node->InputAt(0))),
+ info->restriction_type(), graph_zone());
+ break;
+
+ case IrOpcode::kCheckNumber:
+ new_type = Type::Intersect(
+ op_typer_.CheckNumber(FeedbackTypeOf(node->InputAt(0))),
+ info->restriction_type(), graph_zone());
+ break;
+
case IrOpcode::kPhi: {
new_type = TypePhi(node);
if (type != nullptr) {
@@ -809,6 +831,15 @@ class RepresentationSelector {
if (lower()) Kill(node);
}
+  // Helper for nodes that can be replaced by their input (i.e. no-ops).
+ void VisitNoop(Node* node, Truncation truncation) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ MachineRepresentation representation =
+ GetOutputInfoForPhi(node, TypeOf(node), truncation);
+ VisitUnop(node, UseInfo(representation, truncation), representation);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ }
+
// Helper for binops of the R x L -> O variety.
void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
MachineRepresentation output,
@@ -840,11 +871,12 @@ class RepresentationSelector {
}
// Helper for unops of the I -> O variety.
- void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output) {
+ void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output,
+ Type* restriction_type = Type::Any()) {
DCHECK_EQ(1, node->op()->ValueInputCount());
ProcessInput(node, 0, input_use);
ProcessRemainingInputs(node, 1);
- SetOutput(node, output);
+ SetOutput(node, output, restriction_type);
}
// Helper for leaf nodes.
@@ -1167,7 +1199,7 @@ class RepresentationSelector {
// If one of the inputs is positive and/or truncation is being applied,
// there is no need to return -0.
CheckForMinusZeroMode mz_mode =
- truncation.IsUsedAsWord32() ||
+ truncation.IdentifiesZeroAndMinusZero() ||
(input0_type->Is(Type::OrderedNumber()) &&
input0_type->Min() > 0) ||
(input1_type->Is(Type::OrderedNumber()) &&
@@ -1226,13 +1258,23 @@ class RepresentationSelector {
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32, Type::Signed32());
} else {
- UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
+ // If the output's truncation is identify-zeros, we can pass it
+ // along. Moreover, if the operation is addition and we know the
+ // right-hand side is not minus zero, we do not have to distinguish
+ // between 0 and -0.
+ IdentifyZeros left_identify_zeros = truncation.identify_zeros();
+ if (node->opcode() == IrOpcode::kSpeculativeNumberAdd &&
+ !right_feedback_type->Maybe(Type::MinusZero())) {
+ left_identify_zeros = kIdentifyZeros;
+ }
+ UseInfo left_use =
+ CheckedUseInfoAsWord32FromHint(hint, left_identify_zeros);
// For CheckedInt32Add and CheckedInt32Sub, we don't need to do
// a minus zero check for the right hand side, since we already
// know that the left hand side is a proper Signed32 value,
// potentially guarded by a check.
- UseInfo right_use = CheckedUseInfoAsWord32FromHint(
- hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
+ UseInfo right_use =
+ CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
Type::Signed32());
}
@@ -2061,20 +2103,27 @@ class RepresentationSelector {
return;
}
case IrOpcode::kNumberMax: {
- // TODO(turbofan): We should consider feedback types here as well.
- if (BothInputsAreUnsigned32(node)) {
+ // It is safe to use the feedback types for left and right hand side
+ // here, since we can only narrow those types and thus we can only
+ // promise a more specific truncation.
+ Type* const lhs_type = TypeOf(node->InputAt(0));
+ Type* const rhs_type = TypeOf(node->InputAt(1));
+ if (lhs_type->Is(Type::Unsigned32()) &&
+ rhs_type->Is(Type::Unsigned32())) {
VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMax(node, lowering->machine()->Uint32LessThan(),
MachineRepresentation::kWord32);
}
- } else if (BothInputsAreSigned32(node)) {
+ } else if (lhs_type->Is(Type::Signed32()) &&
+ rhs_type->Is(Type::Signed32())) {
VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMax(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
}
- } else if (BothInputsAre(node, Type::PlainNumber())) {
+ } else if (lhs_type->Is(Type::PlainNumber()) &&
+ rhs_type->Is(Type::PlainNumber())) {
VisitFloat64Binop(node);
if (lower()) {
lowering->DoMax(node, lowering->machine()->Float64LessThan(),
@@ -2087,20 +2136,27 @@ class RepresentationSelector {
return;
}
case IrOpcode::kNumberMin: {
- // TODO(turbofan): We should consider feedback types here as well.
- if (BothInputsAreUnsigned32(node)) {
+ // It is safe to use the feedback types for left and right hand side
+ // here, since we can only narrow those types and thus we can only
+ // promise a more specific truncation.
+ Type* const lhs_type = TypeOf(node->InputAt(0));
+ Type* const rhs_type = TypeOf(node->InputAt(1));
+ if (lhs_type->Is(Type::Unsigned32()) &&
+ rhs_type->Is(Type::Unsigned32())) {
VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMin(node, lowering->machine()->Uint32LessThan(),
MachineRepresentation::kWord32);
}
- } else if (BothInputsAreSigned32(node)) {
+ } else if (lhs_type->Is(Type::Signed32()) &&
+ rhs_type->Is(Type::Signed32())) {
VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMin(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
}
- } else if (BothInputsAre(node, Type::PlainNumber())) {
+ } else if (lhs_type->Is(Type::PlainNumber()) &&
+ rhs_type->Is(Type::PlainNumber())) {
VisitFloat64Binop(node);
if (lower()) {
lowering->DoMin(node, lowering->machine()->Float64LessThan(),
@@ -2271,16 +2327,22 @@ class RepresentationSelector {
case IrOpcode::kCheckBounds: {
Type* index_type = TypeOf(node->InputAt(0));
Type* length_type = TypeOf(node->InputAt(1));
- if (index_type->Is(Type::Unsigned32())) {
+ if (index_type->Is(Type::Integral32OrMinusZero())) {
+ // Map -0 to 0, and the values in the [-2^31,-1] range to the
+ // [2^31,2^32-1] range, which will be considered out-of-bounds
+ // as well, because the {length_type} is limited to Unsigned31.
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
- if (lower() && index_type->Max() < length_type->Min()) {
- // The bounds check is redundant if we already know that
- // the index is within the bounds of [0.0, length[.
- DeferReplacement(node, node->InputAt(0));
+ if (lower()) {
+ if (index_type->Min() >= 0.0 &&
+ index_type->Max() < length_type->Min()) {
+ // The bounds check is redundant if we already know that
+ // the index is within the bounds of [0.0, length[.
+ DeferReplacement(node, node->InputAt(0));
+ }
}
} else {
- VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+ VisitBinop(node, UseInfo::CheckedSigned32AsWord32(kIdentifyZeros),
UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
}
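The key to this lowering is a classic trick: once index and length are truncated to Word32, one unsigned comparison covers both the lower and the upper bound, because a negative index reinterpreted as uint32 lands in [2^31, 2^32-1], above any Unsigned31 length, exactly as the comment above describes. A minimal standalone sketch of the invariant (plain C++, not V8 code; names are illustrative):

#include <cassert>
#include <cstdint>

// One unsigned comparison replaces the signed pair
// (index >= 0 && index < length): casting a negative int32
// index to uint32 maps [-2^31, -1] onto [2^31, 2^32 - 1],
// which is out of bounds for any length in the Unsigned31
// range. -0.0 has already been truncated to 0 at this point.
bool InBounds(int32_t index, uint32_t length) {
  assert(length <= 0x7FFFFFFFu);  // length is Unsigned31
  return static_cast<uint32_t>(index) < length;
}

int main() {
  assert(InBounds(0, 10));
  assert(InBounds(9, 10));
  assert(!InBounds(10, 10));
  assert(!InBounds(-1, 10));  // wraps to 0xFFFFFFFF
}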
@@ -2315,19 +2377,9 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckNumber: {
- if (InputIs(node, Type::Number())) {
- if (truncation.IsUsedAsWord32()) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- } else {
- // TODO(jarin,bmeurer): We need to go to Tagged here, because
- // otherwise we cannot distinguish the hole NaN (which might need to
- // be treated as undefined). We should have a dedicated Type for
- // that at some point, and maybe even a dedicated truncation.
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTagged);
- }
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ Type* const input_type = TypeOf(node->InputAt(0));
+ if (input_type->Is(Type::Number())) {
+ VisitNoop(node, truncation);
} else {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
}
@@ -2346,7 +2398,8 @@ class RepresentationSelector {
}
case IrOpcode::kCheckSmi: {
if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
- VisitUnop(node, UseInfo::CheckedSignedSmallAsWord32(),
+ VisitUnop(node,
+ UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros),
MachineRepresentation::kWord32);
} else {
VisitUnop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
@@ -2541,10 +2594,56 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kSpeculativeToNumber: {
+ NumberOperationHint const hint = NumberOperationHintOf(node->op());
+ switch (hint) {
+ case NumberOperationHint::kSigned32:
+ case NumberOperationHint::kSignedSmall:
+ VisitUnop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ break;
+ case NumberOperationHint::kNumber:
+ case NumberOperationHint::kNumberOrOddball:
+ VisitUnop(node, CheckedUseInfoAsFloat64FromHint(hint),
+ MachineRepresentation::kFloat64);
+ break;
+ }
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ return;
+ }
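The new kSpeculativeToNumber case dispatches purely on the collected feedback hint: integer hints take a checked Word32 path (deoptimizing when the input turns out not to be Signed32), number hints take Float64; the node itself then reduces to its (now converted) input. A tiny model of that dispatch, with hypothetical enums standing in for NumberOperationHint and MachineRepresentation:

#include <cassert>

enum class Hint { kSignedSmall, kSigned32, kNumber, kNumberOrOddball };
enum class Rep { kWord32, kFloat64 };

// Feedback-directed representation choice, as in the
// kSpeculativeToNumber case above: integer hints lower to a
// checked Word32 conversion, number hints to Float64.
Rep RepresentationForHint(Hint hint) {
  switch (hint) {
    case Hint::kSignedSmall:
    case Hint::kSigned32:
      return Rep::kWord32;
    case Hint::kNumber:
    case Hint::kNumberOrOddball:
      return Rep::kFloat64;
  }
  return Rep::kFloat64;  // unreachable
}

int main() {
  assert(RepresentationForHint(Hint::kSigned32) == Rep::kWord32);
  assert(RepresentationForHint(Hint::kNumber) == Rep::kFloat64);
}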
case IrOpcode::kObjectIsDetectableCallable: {
VisitObjectIs(node, Type::DetectableCallable(), lowering);
return;
}
+ case IrOpcode::kObjectIsNaN: {
+ Type* const input_type = GetUpperBound(node->InputAt(0));
+ if (input_type->Is(Type::NaN())) {
+ VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
+ }
+ } else if (!input_type->Maybe(Type::NaN())) {
+ VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
+ }
+ } else if (input_type->Is(Type::Number())) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower()) {
+ // ObjectIsNaN(x:kRepFloat64) => Word32Equal(Float64Equal(x,x),#0)
+ Node* const input = node->InputAt(0);
+ node->ReplaceInput(
+ 0, jsgraph_->graph()->NewNode(
+ lowering->machine()->Float64Equal(), input, input));
+ node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
+ NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
+ }
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ }
+ return;
+ }
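The Number branch above leans on the IEEE-754 guarantee that NaN is the only value unequal to itself, so ObjectIsNaN lowers to Float64Equal(x, x) compared against 0 with no bit-level inspection. A standalone sketch of the same predicate (illustrative only):

#include <cassert>
#include <limits>

// NaN is the only double for which x == x is false, so the
// self-comparison doubles as a NaN test. This mirrors the
// Word32Equal(Float64Equal(x, x), #0) lowering above.
bool IsNaNViaSelfCompare(double x) { return !(x == x); }

int main() {
  assert(IsNaNViaSelfCompare(std::numeric_limits<double>::quiet_NaN()));
  assert(!IsNaNViaSelfCompare(0.0));
  assert(!IsNaNViaSelfCompare(std::numeric_limits<double>::infinity()));
}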
case IrOpcode::kObjectIsNonCallable: {
VisitObjectIs(node, Type::NonCallable(), lowering);
return;
@@ -2566,14 +2665,26 @@ class RepresentationSelector {
VisitObjectIs(node, Type::String(), lowering);
return;
}
+ case IrOpcode::kObjectIsSymbol: {
+ VisitObjectIs(node, Type::Symbol(), lowering);
+ return;
+ }
case IrOpcode::kObjectIsUndetectable: {
VisitObjectIs(node, Type::Undetectable(), lowering);
return;
}
- case IrOpcode::kNewRestParameterElements:
+ case IrOpcode::kArgumentsFrame: {
+ SetOutput(node, MachineType::PointerRepresentation());
+ return;
+ }
+ case IrOpcode::kArgumentsLength: {
+ VisitUnop(node, UseInfo::PointerInt(),
+ MachineRepresentation::kTaggedSigned);
+ return;
+ }
case IrOpcode::kNewUnmappedArgumentsElements: {
- ProcessRemainingInputs(node, 0);
- SetOutput(node, MachineRepresentation::kTaggedPointer);
+ VisitBinop(node, UseInfo::PointerInt(), UseInfo::TaggedSigned(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kArrayBufferWasNeutered: {
@@ -2581,13 +2692,36 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckFloat64Hole: {
- CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
- ProcessInput(node, 0, UseInfo::TruncatingFloat64());
- ProcessRemainingInputs(node, 1);
- SetOutput(node, MachineRepresentation::kFloat64);
- if (truncation.IsUsedAsFloat64() &&
- mode == CheckFloat64HoleMode::kAllowReturnHole) {
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ Type* const input_type = TypeOf(node->InputAt(0));
+ if (input_type->Is(Type::Number())) {
+ VisitNoop(node, truncation);
+ } else {
+ CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
+ switch (mode) {
+ case CheckFloat64HoleMode::kAllowReturnHole:
+ if (truncation.IsUnused()) return VisitUnused(node);
+ if (truncation.IsUsedAsWord32()) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if (truncation.IsUsedAsFloat64()) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(
+ node,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::Any()),
+ MachineRepresentation::kFloat64, Type::Number());
+ }
+ break;
+ case CheckFloat64HoleMode::kNeverReturnHole:
+ VisitUnop(
+ node,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::Any()),
+ MachineRepresentation::kFloat64, Type::Number());
+ break;
+ }
}
return;
}
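The kAllowReturnHole arm may forward the input unchanged for truncating uses because the hole is encoded as a NaN: a Word32-truncating use maps every NaN, hole included, to 0, so the hole can never be observed through that use. A sketch of the JS-style truncation rule that makes this safe (a model, not the V8 implementation):

#include <cassert>
#include <cmath>
#include <cstdint>

// JS-style ToUint32: NaN (and therefore the hole NaN) and the
// infinities all truncate to 0, so a use that only needs the
// Word32 truncation cannot distinguish the hole from any other
// NaN -- the hole check may be skipped.
uint32_t TruncateToWord32(double x) {
  if (std::isnan(x) || std::isinf(x)) return 0;
  // Reduce modulo 2^32 first so the cast below is always in range.
  double m = std::fmod(std::trunc(x), 4294967296.0);
  if (m < 0) m += 4294967296.0;
  return static_cast<uint32_t>(m);
}

int main() {
  assert(TruncateToWord32(std::nan("")) == 0);
  assert(TruncateToWord32(3.7) == 3);
  assert(TruncateToWord32(-1.0) == 0xFFFFFFFFu);
}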
@@ -2687,7 +2821,8 @@ class RepresentationSelector {
case IrOpcode::kBeginRegion:
case IrOpcode::kProjection:
case IrOpcode::kOsrValue:
- case IrOpcode::kArgumentsObjectState:
+ case IrOpcode::kArgumentsElementsState:
+ case IrOpcode::kArgumentsLengthState:
// All JavaScript operators except JSToNumber have uniform handling.
#define OPCODE_CASE(name) case IrOpcode::k##name:
JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
@@ -2842,7 +2977,6 @@ void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
Node* frame_state = node->InputAt(2);
Node* effect = node->InputAt(3);
Node* control = node->InputAt(4);
- Node* throwing;
Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
Node* branch0 =
@@ -2860,10 +2994,18 @@ void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
Node* efalse0 = effect;
Node* vfalse0;
{
- throwing = vfalse0 = efalse0 =
+ vfalse0 = efalse0 = if_false0 =
graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
frame_state, efalse0, if_false0);
- if_false0 = graph()->NewNode(common()->IfSuccess(), throwing);
+
+ // Update potential {IfException} uses of {node} to point to the above
+ // {ToNumber} stub call node instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, vfalse0);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse0);
+ if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
+ }
Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
@@ -2905,10 +3047,9 @@ void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
edge.from()->ReplaceUses(control);
edge.from()->Kill();
- } else if (edge.from()->opcode() == IrOpcode::kIfException) {
- edge.UpdateTo(throwing);
} else {
- UNREACHABLE();
+ DCHECK(edge.from()->opcode() != IrOpcode::kIfException);
+ edge.UpdateTo(control);
}
} else if (NodeProperties::IsEffectEdge(edge)) {
edge.UpdateTo(effect);
@@ -2926,7 +3067,6 @@ void SimplifiedLowering::DoJSToNumberTruncatesToWord32(
Node* frame_state = node->InputAt(2);
Node* effect = node->InputAt(3);
Node* control = node->InputAt(4);
- Node* throwing;
Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
Node* branch0 =
@@ -2941,10 +3081,18 @@ void SimplifiedLowering::DoJSToNumberTruncatesToWord32(
Node* efalse0 = effect;
Node* vfalse0;
{
- throwing = vfalse0 = efalse0 =
+ vfalse0 = efalse0 = if_false0 =
graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
frame_state, efalse0, if_false0);
- if_false0 = graph()->NewNode(common()->IfSuccess(), throwing);
+
+ // Update potential {IfException} uses of {node} to point to the above
+ // {ToNumber} stub call node instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, vfalse0);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse0);
+ if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
+ }
Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
@@ -2982,10 +3130,9 @@ void SimplifiedLowering::DoJSToNumberTruncatesToWord32(
if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
edge.from()->ReplaceUses(control);
edge.from()->Kill();
- } else if (edge.from()->opcode() == IrOpcode::kIfException) {
- edge.UpdateTo(throwing);
} else {
- UNREACHABLE();
+ DCHECK(edge.from()->opcode() != IrOpcode::kIfException);
+ edge.UpdateTo(control);
}
} else if (NodeProperties::IsEffectEdge(edge)) {
edge.UpdateTo(effect);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 90a4e344d8..9fb0fc55bf 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -213,7 +213,8 @@ CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
}
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kCheckedInt32Mul ||
+ DCHECK(op->opcode() == IrOpcode::kChangeFloat64ToTagged ||
+ op->opcode() == IrOpcode::kCheckedInt32Mul ||
op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
op->opcode() == IrOpcode::kCheckedTaggedToInt32);
return OpParameter<CheckForMinusZeroMode>(op);
@@ -371,7 +372,8 @@ size_t hash_value(NumberOperationHint hint) {
}
NumberOperationHint NumberOperationHintOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+ DCHECK(op->opcode() == IrOpcode::kSpeculativeToNumber ||
+ op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
op->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
op->opcode() == IrOpcode::kSpeculativeNumberMultiply ||
op->opcode() == IrOpcode::kSpeculativeNumberDivide ||
@@ -388,15 +390,32 @@ NumberOperationHint NumberOperationHintOf(const Operator* op) {
return OpParameter<NumberOperationHint>(op);
}
-int ParameterCountOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kNewUnmappedArgumentsElements ||
- op->opcode() == IrOpcode::kNewRestParameterElements);
- return OpParameter<int>(op);
+size_t hash_value(AllocateParameters info) {
+ return base::hash_combine(info.type(), info.pretenure());
+}
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ AllocateParameters info) {
+ info.type()->PrintTo(os);
+ return os << ", " << info.pretenure();
+}
+
+bool operator==(AllocateParameters const& lhs, AllocateParameters const& rhs) {
+ return lhs.pretenure() == rhs.pretenure() && lhs.type() == rhs.type();
+}
+
+bool operator!=(AllocateParameters const& lhs, AllocateParameters const& rhs) {
+ return !(lhs == rhs);
}
PretenureFlag PretenureFlagOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
- return OpParameter<PretenureFlag>(op);
+ return OpParameter<AllocateParameters>(op).pretenure();
+}
+
+Type* AllocateTypeOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
+ return OpParameter<AllocateParameters>(op).type();
}
UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
@@ -470,7 +489,6 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeFloat64ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
@@ -478,14 +496,17 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
V(ObjectIsString, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
V(ReferenceEqual, Operator::kCommutative, 2, 0) \
@@ -564,6 +585,34 @@ struct SimplifiedOperatorGlobalCache final {
};
ArrayBufferWasNeuteredOperator kArrayBufferWasNeutered;
+ struct ArgumentsFrameOperator final : public Operator {
+ ArgumentsFrameOperator()
+ : Operator(IrOpcode::kArgumentsFrame, Operator::kPure, "ArgumentsFrame",
+ 0, 0, 0, 1, 0, 0) {}
+ };
+ ArgumentsFrameOperator kArgumentsFrame;
+
+ struct NewUnmappedArgumentsElementsOperator final : public Operator {
+ NewUnmappedArgumentsElementsOperator()
+ : Operator(IrOpcode::kNewUnmappedArgumentsElements,
+ Operator::kEliminatable, "NewUnmappedArgumentsElements", 2,
+ 1, 0, 1, 1, 0) {}
+ };
+ NewUnmappedArgumentsElementsOperator kNewUnmappedArgumentsElements;
+
+ template <CheckForMinusZeroMode kMode>
+ struct ChangeFloat64ToTaggedOperator final
+ : public Operator1<CheckForMinusZeroMode> {
+ ChangeFloat64ToTaggedOperator()
+ : Operator1<CheckForMinusZeroMode>(
+ IrOpcode::kChangeFloat64ToTagged, Operator::kPure,
+ "ChangeFloat64ToTagged", 1, 0, 0, 1, 0, 0, kMode) {}
+ };
+ ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kCheckForMinusZero>
+ kChangeFloat64ToTaggedCheckForMinusZeroOperator;
+ ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+ kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
+
template <CheckForMinusZeroMode kMode>
struct CheckedInt32MulOperator final
: public Operator1<CheckForMinusZeroMode> {
@@ -634,17 +683,6 @@ struct SimplifiedOperatorGlobalCache final {
CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kNeverReturnHole>
kCheckFloat64HoleNeverReturnHoleOperator;
- template <PretenureFlag kPretenure>
- struct AllocateOperator final : public Operator1<PretenureFlag> {
- AllocateOperator()
- : Operator1<PretenureFlag>(
- IrOpcode::kAllocate,
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
- "Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
- };
- AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
- AllocateOperator<TENURED> kAllocateTenuredOperator;
-
struct EnsureWritableFastElementsOperator final : public Operator {
EnsureWritableFastElementsOperator()
: Operator( // --
@@ -672,6 +710,26 @@ struct SimplifiedOperatorGlobalCache final {
SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
#undef SPECULATIVE_NUMBER_BINOP
+ template <NumberOperationHint kHint>
+ struct SpeculativeToNumberOperator final
+ : public Operator1<NumberOperationHint> {
+ SpeculativeToNumberOperator()
+ : Operator1<NumberOperationHint>(
+ IrOpcode::kSpeculativeToNumber, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // flags
+ "SpeculativeToNumber", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ kHint) {} // parameter
+ };
+ SpeculativeToNumberOperator<NumberOperationHint::kSignedSmall>
+ kSpeculativeToNumberSignedSmallOperator;
+ SpeculativeToNumberOperator<NumberOperationHint::kSigned32>
+ kSpeculativeToNumberSigned32Operator;
+ SpeculativeToNumberOperator<NumberOperationHint::kNumber>
+ kSpeculativeToNumberNumberOperator;
+ SpeculativeToNumberOperator<NumberOperationHint::kNumberOrOddball>
+ kSpeculativeToNumberNumberOrOddballOperator;
+
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> { \
LoadBuffer##Type##Operator() \
@@ -708,8 +766,22 @@ SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
PURE_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
GET_FROM_CACHE(ArrayBufferWasNeutered)
+GET_FROM_CACHE(ArgumentsFrame)
+GET_FROM_CACHE(NewUnmappedArgumentsElements)
#undef GET_FROM_CACHE
+const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
+ CheckForMinusZeroMode mode) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kChangeFloat64ToTaggedCheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
CheckForMinusZeroMode mode) {
switch (mode) {
@@ -781,6 +853,22 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
return nullptr;
}
+const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
+ NumberOperationHint hint) {
+ switch (hint) {
+ case NumberOperationHint::kSignedSmall:
+ return &cache_.kSpeculativeToNumberSignedSmallOperator;
+ case NumberOperationHint::kSigned32:
+ return &cache_.kSpeculativeToNumberSigned32Operator;
+ case NumberOperationHint::kNumber:
+ return &cache_.kSpeculativeToNumberNumberOperator;
+ case NumberOperationHint::kNumberOrOddball:
+ return &cache_.kSpeculativeToNumberNumberOrOddballOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
return &cache_.kEnsureWritableFastElements;
}
@@ -805,35 +893,57 @@ const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
transition); // parameter
}
-const Operator* SimplifiedOperatorBuilder::NewUnmappedArgumentsElements(
- int parameter_count) {
- return new (zone()) Operator1<int>( // --
- IrOpcode::kNewUnmappedArgumentsElements, // opcode
- Operator::kEliminatable, // flags
- "NewUnmappedArgumentsElements", // name
- 0, 1, 0, 1, 1, 0, // counts
- parameter_count); // parameter
-}
-
-const Operator* SimplifiedOperatorBuilder::NewRestParameterElements(
- int parameter_count) {
- return new (zone()) Operator1<int>( // --
- IrOpcode::kNewRestParameterElements, // opcode
- Operator::kEliminatable, // flags
- "NewRestParameterElements", // name
- 0, 1, 0, 1, 1, 0, // counts
- parameter_count); // parameter
-}
-
-const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
- switch (pretenure) {
- case NOT_TENURED:
- return &cache_.kAllocateNotTenuredOperator;
- case TENURED:
- return &cache_.kAllocateTenuredOperator;
- }
- UNREACHABLE();
- return nullptr;
+namespace {
+
+struct ArgumentsLengthParameters {
+ int formal_parameter_count;
+ bool is_rest_length;
+};
+
+bool operator==(ArgumentsLengthParameters first,
+ ArgumentsLengthParameters second) {
+ return first.formal_parameter_count == second.formal_parameter_count &&
+ first.is_rest_length == second.is_rest_length;
+}
+
+size_t hash_value(ArgumentsLengthParameters param) {
+ return base::hash_combine(param.formal_parameter_count, param.is_rest_length);
+}
+
+std::ostream& operator<<(std::ostream& os, ArgumentsLengthParameters param) {
+ return os << param.formal_parameter_count << ", "
+ << (param.is_rest_length ? "rest length" : "not rest length");
+}
+
+} // namespace
+
+const Operator* SimplifiedOperatorBuilder::ArgumentsLength(
+ int formal_parameter_count, bool is_rest_length) {
+ return new (zone()) Operator1<ArgumentsLengthParameters>( // --
+ IrOpcode::kArgumentsLength, // opcode
+ Operator::kPure, // flags
+ "ArgumentsLength", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ ArgumentsLengthParameters{formal_parameter_count,
+ is_rest_length}); // parameter
+}
+
+int FormalParameterCountOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kArgumentsLength);
+ return OpParameter<ArgumentsLengthParameters>(op).formal_parameter_count;
+}
+
+bool IsRestLengthOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kArgumentsLength);
+ return OpParameter<ArgumentsLengthParameters>(op).is_rest_length;
+}
+
+const Operator* SimplifiedOperatorBuilder::Allocate(Type* type,
+ PretenureFlag pretenure) {
+ return new (zone()) Operator1<AllocateParameters>(
+ IrOpcode::kAllocate,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, "Allocate",
+ 1, 1, 1, 1, 1, 0, AllocateParameters(type, pretenure));
}
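ArgumentsLengthParameters and AllocateParameters both follow the file's standing recipe for operator parameters: a small value type with ==, hash_value, and operator<< overloads so that parameterized operators can be compared, hashed, and printed generically. A reduced standalone sketch of the pattern (names hypothetical; std::hash stands in for base::hash_combine):

#include <cstddef>
#include <functional>
#include <iostream>

// Parameter object pattern: value semantics plus the three
// hooks (==, hash, <<) that generic operator machinery needs.
struct LengthParams {
  int formal_parameter_count;
  bool is_rest_length;
};

bool operator==(LengthParams a, LengthParams b) {
  return a.formal_parameter_count == b.formal_parameter_count &&
         a.is_rest_length == b.is_rest_length;
}

size_t hash_value(LengthParams p) {
  return std::hash<int>()(p.formal_parameter_count) * 31 +
         std::hash<bool>()(p.is_rest_length);
}

std::ostream& operator<<(std::ostream& os, LengthParams p) {
  return os << p.formal_parameter_count << ", "
            << (p.is_rest_length ? "rest length" : "not rest length");
}

int main() {
  LengthParams a{2, true}, b{2, true};
  std::cout << a << "\n";              // 2, rest length
  std::cout << (a == b) << "\n";       // 1
  std::cout << hash_value(a) << "\n";  // stable hash
}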
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index ff3f60a423..3750861bf0 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -242,10 +242,33 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
NumberOperationHint NumberOperationHintOf(const Operator* op)
WARN_UNUSED_RESULT;
-int ParameterCountOf(const Operator* op) WARN_UNUSED_RESULT;
+int FormalParameterCountOf(const Operator* op) WARN_UNUSED_RESULT;
+bool IsRestLengthOf(const Operator* op) WARN_UNUSED_RESULT;
+
+class AllocateParameters {
+ public:
+ AllocateParameters(Type* type, PretenureFlag pretenure)
+ : type_(type), pretenure_(pretenure) {}
+
+ Type* type() const { return type_; }
+ PretenureFlag pretenure() const { return pretenure_; }
+
+ private:
+ Type* type_;
+ PretenureFlag pretenure_;
+};
+
+size_t hash_value(AllocateParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AllocateParameters);
+
+bool operator==(AllocateParameters const&, AllocateParameters const&);
+bool operator!=(AllocateParameters const&, AllocateParameters const&);
PretenureFlag PretenureFlagOf(const Operator* op) WARN_UNUSED_RESULT;
+Type* AllocateTypeOf(const Operator* op) WARN_UNUSED_RESULT;
+
UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
// Interface for building simplified operators, which represent the
@@ -358,6 +381,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
const Operator* StringIndexOf();
+ const Operator* SpeculativeToNumber(NumberOperationHint hint);
+
const Operator* PlainPrimitiveToNumber();
const Operator* PlainPrimitiveToWord32();
const Operator* PlainPrimitiveToFloat64();
@@ -370,13 +395,14 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
const Operator* ChangeUint32ToTagged();
- const Operator* ChangeFloat64ToTagged();
+ const Operator* ChangeFloat64ToTagged(CheckForMinusZeroMode);
const Operator* ChangeFloat64ToTaggedPointer();
const Operator* ChangeTaggedToBit();
const Operator* ChangeBitToTagged();
const Operator* TruncateTaggedToWord32();
const Operator* TruncateTaggedToFloat64();
const Operator* TruncateTaggedToBit();
+ const Operator* TruncateTaggedPointerToBit();
const Operator* CheckIf();
const Operator* CheckBounds();
@@ -412,18 +438,21 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ConvertTaggedHoleToUndefined();
const Operator* ObjectIsDetectableCallable();
+ const Operator* ObjectIsNaN();
const Operator* ObjectIsNonCallable();
const Operator* ObjectIsNumber();
const Operator* ObjectIsReceiver();
const Operator* ObjectIsSmi();
const Operator* ObjectIsString();
+ const Operator* ObjectIsSymbol();
const Operator* ObjectIsUndetectable();
- // new-rest-parameter-elements
- const Operator* NewRestParameterElements(int parameter_count);
+ const Operator* ArgumentsFrame();
+ const Operator* ArgumentsLength(int formal_parameter_count,
+ bool is_rest_length);
// new-unmapped-arguments-elements
- const Operator* NewUnmappedArgumentsElements(int parameter_count);
+ const Operator* NewUnmappedArgumentsElements();
// array-buffer-was-neutered buffer
const Operator* ArrayBufferWasNeutered();
@@ -437,7 +466,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// transition-elements-kind object, from-map, to-map
const Operator* TransitionElementsKind(ElementsTransition transition);
- const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
+ const Operator* Allocate(Type* type, PretenureFlag pretenure = NOT_TENURED);
const Operator* LoadField(FieldAccess const&);
const Operator* StoreField(FieldAccess const&);
diff --git a/deps/v8/src/compiler/tail-call-optimization.cc b/deps/v8/src/compiler/tail-call-optimization.cc
index 605b0e7282..51299f8c66 100644
--- a/deps/v8/src/compiler/tail-call-optimization.cc
+++ b/deps/v8/src/compiler/tail-call-optimization.cc
@@ -18,67 +18,59 @@ Reduction TailCallOptimization::Reduce(Node* node) {
if (node->opcode() != IrOpcode::kReturn) return NoChange();
// The value which is returned must be the result of a potential tail call,
// there must be no try/catch/finally around the Call, and there must be no
- // other effect between the Call and the Return nodes.
+ // other effect or control between the Call and the Return nodes.
Node* const call = NodeProperties::GetValueInput(node, 1);
if (call->opcode() == IrOpcode::kCall &&
CallDescriptorOf(call->op())->SupportsTailCalls() &&
NodeProperties::GetEffectInput(node) == call &&
- !NodeProperties::IsExceptionalCall(call)) {
- Node* const control = NodeProperties::GetControlInput(node);
+ NodeProperties::GetControlInput(node) == call &&
+ !NodeProperties::IsExceptionalCall(call) && call->UseCount() == 3) {
// Ensure that no additional arguments are being popped other than those in
// the CallDescriptor; otherwise the tail call transformation is invalid.
DCHECK_EQ(0, Int32Matcher(NodeProperties::GetValueInput(node, 0)).Value());
- if (control->opcode() == IrOpcode::kIfSuccess &&
- call->OwnedBy(node, control) && control->OwnedBy(node)) {
- // Furthermore, control has to flow via an IfSuccess from the Call, so
- // the Return node value and effect depends directly on the Call node,
- // and indirectly control depends on the Call via an IfSuccess.
+ // Furthermore, the Return node's value, effect, and control depend
+ // directly on the Call; no other uses of the Call node exist.
+ //
+ // The input graph looks as follows:
- //  Value1 ... ValueN Effect Control
- //     ^          ^      ^       ^
- //     |          |      |       |
- //     |          +--+ +-+       |
- //     +----------+  | |  +------+
- //                \ | | /
- //             Call[Descriptor]
- //                ^ ^ ^
- //                | | |
- //              +-+ | |
- //              |   | |
- //              | +-+ |
- //              | |   IfSuccess
- //              | |      ^
- //              | |      |
- //             Return
- //               ^
- //               |
+ //  Value1 ... ValueN Effect Control
+ //     ^          ^      ^       ^
+ //     |          |      |       |
+ //     |          +--+ +-+       |
+ //     +----------+  | |  +------+
+ //                \ | | /
+ //             Call[Descriptor]
+ //                ^ ^ ^
+ //  Int32(0) <-+  | | |
+ //              \ | | |
+ //               Return
+ //                 ^
+ //                 |
- // The resulting graph looks like this:
+ // The resulting graph looks like this:
- //  Value1 ... ValueN Effect Control
- //     ^          ^      ^       ^
- //     |          |      |       |
- //     |          +--+ +-+       |
- //     +----------+  | |  +------+
- //                \ | | /
- //            TailCall[Descriptor]
- //                  ^
- //                  |
+ //  Value1 ... ValueN Effect Control
+ //     ^          ^      ^       ^
+ //     |          |      |       |
+ //     |          +--+ +-+       |
+ //     +----------+  | |  +------+
+ //                \ | | /
+ //            TailCall[Descriptor]
+ //                  ^
+ //                  |
- DCHECK_EQ(call, NodeProperties::GetControlInput(control, 0));
- DCHECK_EQ(4, node->InputCount());
- node->ReplaceInput(0, NodeProperties::GetEffectInput(call));
- node->ReplaceInput(1, NodeProperties::GetControlInput(call));
- node->RemoveInput(3);
- node->RemoveInput(2);
- for (int index = 0; index < call->op()->ValueInputCount(); ++index) {
- node->InsertInput(graph()->zone(), index,
- NodeProperties::GetValueInput(call, index));
- }
- NodeProperties::ChangeOp(
- node, common()->TailCall(CallDescriptorOf(call->op())));
- return Changed(node);
+ DCHECK_EQ(4, node->InputCount());
+ node->ReplaceInput(0, NodeProperties::GetEffectInput(call));
+ node->ReplaceInput(1, NodeProperties::GetControlInput(call));
+ node->RemoveInput(3);
+ node->RemoveInput(2);
+ for (int index = 0; index < call->op()->ValueInputCount(); ++index) {
+ node->InsertInput(graph()->zone(), index,
+ NodeProperties::GetValueInput(call, index));
}
+ NodeProperties::ChangeOp(node,
+ common()->TailCall(CallDescriptorOf(call->op())));
+ return Changed(node);
}
return NoChange();
}
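After this rewrite the reducer no longer accepts an intervening IfSuccess; the Return must consume the Call's value, effect, and control directly, and those three edges must be the call's only uses (UseCount == 3), so nothing else can observe it. A toy restatement of the precondition, with a hypothetical Node struct:

#include <cassert>

// Toy node: just enough structure to state the new guard.
struct Node {
  Node* value_input = nullptr;
  Node* effect_input = nullptr;
  Node* control_input = nullptr;
  int use_count = 0;
  bool supports_tail_calls = false;
  bool is_exceptional = false;
};

// Mirrors the guard above: value, effect, and control of the
// Return all come straight from the Call, the Call has no
// exception projection, and its only three uses are exactly
// those edges from the Return.
bool CanTurnIntoTailCall(const Node& ret, const Node& call) {
  return call.supports_tail_calls && !call.is_exceptional &&
         ret.value_input == &call && ret.effect_input == &call &&
         ret.control_input == &call && call.use_count == 3;
}

int main() {
  Node call;
  call.supports_tail_calls = true;
  call.use_count = 3;
  Node ret{&call, &call, &call};
  assert(CanTurnIntoTailCall(ret, call));
  call.use_count = 4;  // some other node also uses the call
  assert(!CanTurnIntoTailCall(ret, call));
}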
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index e130a10e4e..b95e22a2e5 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -78,6 +78,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
return ReduceCheckHeapObject(node);
case IrOpcode::kCheckMaps:
return ReduceCheckMaps(node);
+ case IrOpcode::kCheckNumber:
+ return ReduceCheckNumber(node);
case IrOpcode::kCheckString:
return ReduceCheckString(node);
case IrOpcode::kLoadField:
@@ -96,6 +98,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
return ReduceReferenceEqual(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
+ case IrOpcode::kSpeculativeToNumber:
+ return ReduceSpeculativeToNumber(node);
default:
break;
}
@@ -150,6 +154,16 @@ Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceCheckNumber(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Number())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceCheckString(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type* const input_type = NodeProperties::GetType(input);
@@ -195,7 +209,8 @@ Reduction TypedOptimization::ReduceNumberFloor(Node* node) {
return Replace(input);
}
if (input_type->Is(Type::PlainNumber()) &&
- input->opcode() == IrOpcode::kNumberDivide) {
+ (input->opcode() == IrOpcode::kNumberDivide ||
+ input->opcode() == IrOpcode::kSpeculativeNumberDivide)) {
Node* const lhs = NodeProperties::GetValueInput(input, 0);
Type* const lhs_type = NodeProperties::GetType(lhs);
Node* const rhs = NodeProperties::GetValueInput(input, 1);
@@ -310,6 +325,18 @@ Reduction TypedOptimization::ReduceSelect(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceSpeculativeToNumber(Node* node) {
+ DCHECK_EQ(IrOpcode::kSpeculativeToNumber, node->opcode());
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Number())) {
+ // SpeculativeToNumber(x:number) => x
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
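Both ReduceCheckNumber and ReduceSpeculativeToNumber above are instances of the same type-based identity reduction: when the input is statically known to be a Number, the check or conversion is a no-op and collapses to its input. A compact sketch of the rule over a hypothetical node model:

#include <cassert>

enum class Type { kNumber, kAny };

// Just enough of a node to state the rule.
struct Node {
  Type type;
  Node* input;
};

// CheckNumber(x:Number) => x, SpeculativeToNumber(x:Number) => x:
// when the input's static type already satisfies the target,
// the node reduces to its input and the runtime check is dropped.
Node* ReduceToNumber(Node* node) {
  if (node->input->type == Type::kNumber) return node->input;
  return node;  // NoChange
}

int main() {
  Node x{Type::kNumber, nullptr};
  Node conv{Type::kAny, &x};
  assert(ReduceToNumber(&conv) == &x);
}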
Factory* TypedOptimization::factory() const { return isolate()->factory(); }
Graph* TypedOptimization::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index 93de680d4f..c441daf222 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -44,6 +44,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
private:
Reduction ReduceCheckHeapObject(Node* node);
Reduction ReduceCheckMaps(Node* node);
+ Reduction ReduceCheckNumber(Node* node);
Reduction ReduceCheckString(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceNumberFloor(Node* node);
@@ -52,6 +53,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReducePhi(Node* node);
Reduction ReduceReferenceEqual(Node* node);
Reduction ReduceSelect(Node* node);
+ Reduction ReduceSpeculativeToNumber(Node* node);
CompilationDependencies* dependencies() const { return dependencies_; }
Factory* factory() const;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index ed1a04aa3b..94c54ac600 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -108,6 +108,7 @@ class Typer::Visitor : public Reducer {
case IrOpcode::k##x: \
return UpdateType(node, TypeUnaryOp(node, x));
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
@@ -173,6 +174,7 @@ class Typer::Visitor : public Reducer {
case IrOpcode::k##x: \
return TypeUnaryOp(node, x);
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
@@ -259,7 +261,6 @@ class Typer::Visitor : public Reducer {
typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
static ComparisonOutcome Invert(ComparisonOutcome, Typer*);
- static Type* Invert(Type*, Typer*);
static Type* FalsifyUndefined(ComparisonOutcome, Typer*);
static Type* ToPrimitive(Type*, Typer*);
@@ -275,6 +276,7 @@ class Typer::Visitor : public Reducer {
return t->operation_typer_.Name(type); \
}
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
#define DECLARE_METHOD(Name) \
static Type* Name(Type* lhs, Type* rhs, Typer* t) { \
@@ -285,14 +287,17 @@ class Typer::Visitor : public Reducer {
#undef DECLARE_METHOD
static Type* ObjectIsDetectableCallable(Type*, Typer*);
+ static Type* ObjectIsNaN(Type*, Typer*);
static Type* ObjectIsNonCallable(Type*, Typer*);
static Type* ObjectIsNumber(Type*, Typer*);
static Type* ObjectIsReceiver(Type*, Typer*);
static Type* ObjectIsSmi(Type*, Typer*);
static Type* ObjectIsString(Type*, Typer*);
+ static Type* ObjectIsSymbol(Type*, Typer*);
static Type* ObjectIsUndetectable(Type*, Typer*);
static ComparisonOutcome JSCompareTyper(Type*, Type*, Typer*);
+ static ComparisonOutcome NumberCompareTyper(Type*, Type*, Typer*);
#define DECLARE_METHOD(x) static Type* x##Typer(Type*, Type*, Typer*);
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
@@ -300,6 +305,9 @@ class Typer::Visitor : public Reducer {
static Type* JSCallTyper(Type*, Typer*);
+ static Type* NumberEqualTyper(Type*, Type*, Typer*);
+ static Type* NumberLessThanTyper(Type*, Type*, Typer*);
+ static Type* NumberLessThanOrEqualTyper(Type*, Type*, Typer*);
static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
static Type* StringFromCharCodeTyper(Type*, Typer*);
static Type* StringFromCodePointTyper(Type*, Typer*);
@@ -387,15 +395,6 @@ Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
}
-Type* Typer::Visitor::Invert(Type* type, Typer* t) {
- DCHECK(type->Is(Type::Boolean()));
- DCHECK(type->IsInhabited());
- if (type->Is(t->singleton_false_)) return t->singleton_true_;
- if (type->Is(t->singleton_true_)) return t->singleton_false_;
- return type;
-}
-
-
Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
ComparisonOutcome outcome, Typer* t) {
ComparisonOutcome result(0);
@@ -458,9 +457,14 @@ Type* Typer::Visitor::ToLength(Type* type, Typer* t) {
type = ToInteger(type, t);
double min = type->Min();
double max = type->Max();
+ if (max <= 0.0) {
+ return Type::NewConstant(0, t->zone());
+ }
+ if (min >= kMaxSafeInteger) {
+ return Type::NewConstant(kMaxSafeInteger, t->zone());
+ }
if (min <= 0.0) min = 0.0;
- if (max > kMaxSafeInteger) max = kMaxSafeInteger;
- if (max <= min) max = min;
+ if (max >= kMaxSafeInteger) max = kMaxSafeInteger;
return Type::Range(min, max, t->zone());
}
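The refinement over the old clamping code is the two early-outs: a range lying entirely at or below 0, or entirely at or above 2^53-1, now collapses to a singleton constant instead of a degenerate range. A worked standalone version of the clamp (illustrative):

#include <algorithm>
#include <cassert>

constexpr double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1

struct Range { double min, max; };

// Mirrors the typing rule above: ranges entirely <= 0 become
// the constant 0, ranges entirely >= 2^53-1 become that
// constant, everything else is clamped into [0, 2^53-1].
Range ToLengthType(Range r) {
  if (r.max <= 0.0) return {0.0, 0.0};
  if (r.min >= kMaxSafeInteger) return {kMaxSafeInteger, kMaxSafeInteger};
  return {std::max(r.min, 0.0), std::min(r.max, kMaxSafeInteger)};
}

int main() {
  Range a = ToLengthType({-5.0, 10.0});
  assert(a.min == 0.0 && a.max == 10.0);
  Range b = ToLengthType({-10.0, -1.0});
  assert(b.min == 0.0 && b.max == 0.0);  // singleton 0
}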
@@ -509,6 +513,12 @@ Type* Typer::Visitor::ObjectIsDetectableCallable(Type* type, Typer* t) {
return Type::Boolean();
}
+Type* Typer::Visitor::ObjectIsNaN(Type* type, Typer* t) {
+ if (type->Is(Type::NaN())) return t->singleton_true_;
+ if (!type->Maybe(Type::NaN())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::ObjectIsNonCallable(Type* type, Typer* t) {
if (type->Is(Type::NonCallable())) return t->singleton_true_;
if (!type->Maybe(Type::NonCallable())) return t->singleton_false_;
@@ -540,6 +550,12 @@ Type* Typer::Visitor::ObjectIsString(Type* type, Typer* t) {
return Type::Boolean();
}
+Type* Typer::Visitor::ObjectIsSymbol(Type* type, Typer* t) {
+ if (type->Is(Type::Symbol())) return t->singleton_true_;
+ if (!type->Maybe(Type::Symbol())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
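ObjectIsNaN and ObjectIsSymbol instantiate the file's three-outcome template for type predicates: an input contained in the tested set gives singleton true, an input disjoint from it gives singleton false, anything else stays Boolean. A reduced model of that dispatch, using a hypothetical bitset encoding of types:

#include <cassert>

// Model a type as a bitset of primitive kinds, as the typer does.
using Bits = unsigned;
constexpr Bits kNaN = 1u << 0, kSymbol = 1u << 1, kOther = 1u << 2;

enum class Outcome { kTrue, kFalse, kBoolean };

// Is(set)     -> singleton true
// !Maybe(set) -> singleton false
// otherwise   -> Boolean
Outcome PredicateType(Bits input, Bits tested) {
  if ((input & ~tested) == 0 && input != 0) return Outcome::kTrue;
  if ((input & tested) == 0) return Outcome::kFalse;
  return Outcome::kBoolean;
}

int main() {
  assert(PredicateType(kNaN, kNaN) == Outcome::kTrue);
  assert(PredicateType(kOther, kNaN) == Outcome::kFalse);
  assert(PredicateType(kNaN | kOther, kNaN) == Outcome::kBoolean);
}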
Type* Typer::Visitor::ObjectIsUndetectable(Type* type, Typer* t) {
if (type->Is(Type::Undetectable())) return t->singleton_true_;
if (!type->Maybe(Type::Undetectable())) return t->singleton_false_;
@@ -834,7 +850,11 @@ Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
return Type::Internal();
}
-Type* Typer::Visitor::TypeArgumentsObjectState(Node* node) {
+Type* Typer::Visitor::TypeArgumentsElementsState(Node* node) {
+ return Type::Internal();
+}
+
+Type* Typer::Visitor::TypeArgumentsLengthState(Node* node) {
return Type::Internal();
}
@@ -885,11 +905,6 @@ Type* Typer::Visitor::JSEqualTyper(Type* lhs, Type* rhs, Typer* t) {
}
-Type* Typer::Visitor::JSNotEqualTyper(Type* lhs, Type* rhs, Typer* t) {
- return Invert(JSEqualTyper(lhs, rhs, t), t);
-}
-
-
static Type* JSType(Type* type) {
if (type->Is(Type::Boolean())) return Type::Boolean();
if (type->Is(Type::String())) return Type::String();
@@ -921,11 +936,6 @@ Type* Typer::Visitor::JSStrictEqualTyper(Type* lhs, Type* rhs, Typer* t) {
}
-Type* Typer::Visitor::JSStrictNotEqualTyper(Type* lhs, Type* rhs, Typer* t) {
- return Invert(JSStrictEqualTyper(lhs, rhs, t), t);
-}
-
-
// The EcmaScript specification defines the four relational comparison operators
// (<, <=, >=, >) with the help of a single abstract one. It behaves like <
// but returns undefined when the inputs cannot be compared.
@@ -939,9 +949,12 @@ Typer::Visitor::ComparisonOutcome Typer::Visitor::JSCompareTyper(Type* lhs,
return ComparisonOutcome(kComparisonTrue) |
ComparisonOutcome(kComparisonFalse);
}
- lhs = ToNumber(lhs, t);
- rhs = ToNumber(rhs, t);
+ return NumberCompareTyper(ToNumber(lhs, t), ToNumber(rhs, t), t);
+}
+Typer::Visitor::ComparisonOutcome Typer::Visitor::NumberCompareTyper(Type* lhs,
+ Type* rhs,
+ Typer* t) {
// Shortcut for NaNs.
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return kComparisonUndefined;
@@ -1693,24 +1706,46 @@ Type* Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
-Type* Typer::Visitor::TypeNumberEqual(Node* node) { return Type::Boolean(); }
+// static
+Type* Typer::Visitor::NumberEqualTyper(Type* lhs, Type* rhs, Typer* t) {
+ return JSEqualTyper(ToNumber(lhs, t), ToNumber(rhs, t), t);
+}
-Type* Typer::Visitor::TypeNumberLessThan(Node* node) { return Type::Boolean(); }
+// static
+Type* Typer::Visitor::NumberLessThanTyper(Type* lhs, Type* rhs, Typer* t) {
+ return FalsifyUndefined(
+ NumberCompareTyper(ToNumber(lhs, t), ToNumber(rhs, t), t), t);
+}
+
+// static
+Type* Typer::Visitor::NumberLessThanOrEqualTyper(Type* lhs, Type* rhs,
+ Typer* t) {
+ return FalsifyUndefined(
+ Invert(JSCompareTyper(ToNumber(rhs, t), ToNumber(lhs, t), t), t), t);
+}
+
+Type* Typer::Visitor::TypeNumberEqual(Node* node) {
+ return TypeBinaryOp(node, NumberEqualTyper);
+}
+
+Type* Typer::Visitor::TypeNumberLessThan(Node* node) {
+ return TypeBinaryOp(node, NumberLessThanTyper);
+}
Type* Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
- return Type::Boolean();
+ return TypeBinaryOp(node, NumberLessThanOrEqualTyper);
}
Type* Typer::Visitor::TypeSpeculativeNumberEqual(Node* node) {
- return Type::Boolean();
+ return TypeBinaryOp(node, NumberEqualTyper);
}
Type* Typer::Visitor::TypeSpeculativeNumberLessThan(Node* node) {
- return Type::Boolean();
+ return TypeBinaryOp(node, NumberLessThanTyper);
}
Type* Typer::Visitor::TypeSpeculativeNumberLessThanOrEqual(Node* node) {
- return Type::Boolean();
+ return TypeBinaryOp(node, NumberLessThanOrEqualTyper);
}
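Typing a <= b as the inverted comparison !(b < a) is only sound because the outcome lattice has an explicit undefined case for NaN operands: Invert swaps true and false but leaves undefined alone, and FalsifyUndefined then resolves it to false, matching JavaScript, where every ordering comparison involving NaN is false. A sketch of that three-valued pipeline (illustrative):

#include <cassert>
#include <cmath>

// Three-valued comparison outcome, as in JSCompareTyper: NaN
// operands make the ordering undefined rather than false.
enum class Cmp { kTrue, kFalse, kUndefined };

Cmp Less(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return Cmp::kUndefined;
  return a < b ? Cmp::kTrue : Cmp::kFalse;
}

// Invert flips true/false but must NOT touch undefined...
Cmp Invert(Cmp c) {
  if (c == Cmp::kTrue) return Cmp::kFalse;
  if (c == Cmp::kFalse) return Cmp::kTrue;
  return Cmp::kUndefined;
}

// ...and FalsifyUndefined resolves undefined to false, so both
// NaN <= x and x <= NaN correctly come out false.
bool FalsifyUndefined(Cmp c) { return c == Cmp::kTrue; }

bool LessThanOrEqual(double a, double b) {
  return FalsifyUndefined(Invert(Less(b, a)));
}

int main() {
  assert(LessThanOrEqual(1.0, 1.0));
  assert(!LessThanOrEqual(2.0, 1.0));
  assert(!LessThanOrEqual(std::nan(""), 1.0));  // naive !(b < a) would say true
}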
Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
@@ -1775,6 +1810,9 @@ Type* Typer::Visitor::TypeStringIndexOf(Node* node) {
Type* Typer::Visitor::TypeCheckBounds(Node* node) {
Type* index = Operand(node, 0);
Type* length = Operand(node, 1);
+ if (index->Maybe(Type::MinusZero())) {
+ index = Type::Union(index, typer_->cache_.kSingletonZero, zone());
+ }
index = Type::Intersect(index, Type::Integral32(), zone());
if (!index->IsInhabited() || !length->IsInhabited()) return Type::None();
double min = std::max(index->Min(), 0.0);
@@ -1804,8 +1842,7 @@ Type* Typer::Visitor::TypeCheckMaps(Node* node) {
}
Type* Typer::Visitor::TypeCheckNumber(Node* node) {
- Type* arg = Operand(node, 0);
- return Type::Intersect(arg, Type::Number(), zone());
+ return typer_->operation_typer_.CheckNumber(Operand(node, 0));
}
Type* Typer::Visitor::TypeCheckReceiver(Node* node) {
@@ -1824,8 +1861,7 @@ Type* Typer::Visitor::TypeCheckString(Node* node) {
}
Type* Typer::Visitor::TypeCheckFloat64Hole(Node* node) {
- Type* type = Operand(node, 0);
- return type;
+ return typer_->operation_typer_.CheckFloat64Hole(Operand(node, 0));
}
Type* Typer::Visitor::TypeCheckTaggedHole(Node* node) {
@@ -1844,7 +1880,9 @@ Type* Typer::Visitor::TypeConvertTaggedHoleToUndefined(Node* node) {
return type;
}
-Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeAllocate(Node* node) {
+ return AllocateTypeOf(node->op());
+}
Type* Typer::Visitor::TypeLoadField(Node* node) {
return FieldAccessOf(node->op()).type;
@@ -1905,6 +1943,10 @@ Type* Typer::Visitor::TypeObjectIsDetectableCallable(Node* node) {
return TypeUnaryOp(node, ObjectIsDetectableCallable);
}
+Type* Typer::Visitor::TypeObjectIsNaN(Node* node) {
+ return TypeUnaryOp(node, ObjectIsNaN);
+}
+
Type* Typer::Visitor::TypeObjectIsNonCallable(Node* node) {
return TypeUnaryOp(node, ObjectIsNonCallable);
}
@@ -1927,15 +1969,23 @@ Type* Typer::Visitor::TypeObjectIsString(Node* node) {
return TypeUnaryOp(node, ObjectIsString);
}
+Type* Typer::Visitor::TypeObjectIsSymbol(Node* node) {
+ return TypeUnaryOp(node, ObjectIsSymbol);
+}
+
Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
return TypeUnaryOp(node, ObjectIsUndetectable);
}
-Type* Typer::Visitor::TypeNewUnmappedArgumentsElements(Node* node) {
- return Type::OtherInternal();
+Type* Typer::Visitor::TypeArgumentsLength(Node* node) {
+ return TypeCache::Get().kArgumentsLengthType;
+}
+
+Type* Typer::Visitor::TypeArgumentsFrame(Node* node) {
+ return Type::ExternalPointer();
}
-Type* Typer::Visitor::TypeNewRestParameterElements(Node* node) {
+Type* Typer::Visitor::TypeNewUnmappedArgumentsElements(Node* node) {
return Type::OtherInternal();
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index f28a56a43b..e0de4ef97a 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -181,7 +181,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
if (map == heap->boolean_map()) return kBoolean;
if (map == heap->the_hole_map()) return kHole;
DCHECK(map == heap->uninitialized_map() ||
- map == heap->no_interceptor_result_sentinel_map() ||
map == heap->termination_exception_map() ||
map == heap->arguments_marker_map() ||
map == heap->optimized_out_map() ||
@@ -214,6 +213,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_DATE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
@@ -320,6 +320,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case PROMISE_REACTION_JOB_INFO_TYPE:
case DEBUG_INFO_TYPE:
case BREAK_POINT_INFO_TYPE:
+ case STACK_FRAME_INFO_TYPE:
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
@@ -327,6 +328,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
case CONSTANT_ELEMENTS_PAIR_TYPE:
+ case ASYNC_GENERATOR_REQUEST_TYPE:
UNREACHABLE();
return kNone;
}
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 9e55a0bc88..fe0df3300f 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -139,7 +139,8 @@ namespace compiler {
V(Unsigned32OrMinusZero, kUnsigned32 | kMinusZero) \
V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
V(Integral32, kSigned32 | kUnsigned32) \
- V(Integral32OrMinusZeroOrNaN, kIntegral32 | kMinusZero | kNaN) \
+ V(Integral32OrMinusZero, kIntegral32 | kMinusZero) \
+ V(Integral32OrMinusZeroOrNaN, kIntegral32OrMinusZero | kNaN) \
V(PlainNumber, kIntegral32 | kOtherNumber) \
V(OrderedNumber, kPlainNumber | kMinusZero) \
V(MinusZeroOrNaN, kMinusZero | kNaN) \
@@ -155,6 +156,7 @@ namespace compiler {
V(NullOrNumber, kNull | kNumber) \
V(NullOrUndefined, kNull | kUndefined) \
V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
+ V(NumberOrHole, kNumber | kHole) \
V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | \
kHole) \
V(NumberOrString, kNumber | kString) \
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 7f63ceb803..3c79c67fff 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -22,6 +22,7 @@
#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
#include "src/ostreams.h"
namespace v8 {
@@ -151,27 +152,45 @@ void Verifier::Visitor::Check(Node* node) {
"control");
}
- // Verify that nodes that can throw only have IfSuccess/IfException control
- // uses.
+ // Verify that nodes that can throw either have both IfSuccess/IfException
+ // projections as the only control uses or no projections at all.
if (!node->op()->HasProperty(Operator::kNoThrow)) {
- int count_success = 0, count_exception = 0;
+ Node* discovered_if_exception = nullptr;
+ Node* discovered_if_success = nullptr;
+ int total_number_of_control_uses = 0;
for (Edge edge : node->use_edges()) {
if (!NodeProperties::IsControlEdge(edge)) {
continue;
}
+ total_number_of_control_uses++;
Node* control_use = edge.from();
- if (control_use->opcode() != IrOpcode::kIfSuccess &&
- control_use->opcode() != IrOpcode::kIfException) {
- V8_Fatal(__FILE__, __LINE__,
- "#%d:%s should be followed by IfSuccess/IfException, but is "
- "followed by #%d:%s",
- node->id(), node->op()->mnemonic(), control_use->id(),
- control_use->op()->mnemonic());
+ if (control_use->opcode() == IrOpcode::kIfSuccess) {
+ CHECK_NULL(discovered_if_success); // Only one allowed.
+ discovered_if_success = control_use;
+ }
+ if (control_use->opcode() == IrOpcode::kIfException) {
+ CHECK_NULL(discovered_if_exception); // Only one allowed.
+ discovered_if_exception = control_use;
}
- if (control_use->opcode() == IrOpcode::kIfSuccess) ++count_success;
- if (control_use->opcode() == IrOpcode::kIfException) ++count_exception;
- CHECK_LE(count_success, 1);
- CHECK_LE(count_exception, 1);
+ }
+ if (discovered_if_success && !discovered_if_exception) {
+ V8_Fatal(__FILE__, __LINE__,
+ "#%d:%s should be followed by IfSuccess/IfException, but is "
+ "only followed by single #%d:%s",
+ node->id(), node->op()->mnemonic(),
+ discovered_if_success->id(),
+ discovered_if_success->op()->mnemonic());
+ }
+ if (discovered_if_exception && !discovered_if_success) {
+ V8_Fatal(__FILE__, __LINE__,
+ "#%d:%s should be followed by IfSuccess/IfException, but is "
+ "only followed by single #%d:%s",
+ node->id(), node->op()->mnemonic(),
+ discovered_if_exception->id(),
+ discovered_if_exception->op()->mnemonic());
+ }
+ if (discovered_if_success || discovered_if_exception) {
+ CHECK_EQ(2, total_number_of_control_uses);
}
}
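The relaxed rule: a throwing node is valid either with both projections wired up, in which case they are its only two control uses, or with no projections at all; a lone IfSuccess or lone IfException remains fatal. The accepted shapes, restated as a small predicate (illustrative):

#include <cassert>

// Accepted control-use shapes for a node that may throw:
//   - no IfSuccess and no IfException projections, or
//   - exactly one of each, and no other control uses.
bool ControlUsesValid(bool has_success, bool has_exception,
                      int control_use_count) {
  if (!has_success && !has_exception) return true;
  return has_success && has_exception && control_use_count == 2;
}

int main() {
  assert(ControlUsesValid(false, false, 0));
  assert(ControlUsesValid(true, true, 2));
  assert(!ControlUsesValid(true, false, 1));  // lone IfSuccess
  assert(!ControlUsesValid(true, true, 3));   // extra control use
}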
}
@@ -189,6 +208,10 @@ void Verifier::Visitor::Check(Node* node) {
CHECK(node->op()->ValueOutputCount() == 0);
CHECK(node->op()->EffectOutputCount() == 0);
CHECK(node->op()->ControlOutputCount() == 0);
+ // All inputs are graph terminators.
+ for (const Node* input : node->inputs()) {
+ CHECK(IrOpcode::IsGraphTerminator(input->opcode()));
+ }
// Type is empty.
CheckNotTyped(node);
break;
@@ -496,7 +519,8 @@ void Verifier::Visitor::Check(Node* node) {
}
case IrOpcode::kStateValues:
case IrOpcode::kTypedStateValues:
- case IrOpcode::kArgumentsObjectState:
+ case IrOpcode::kArgumentsElementsState:
+ case IrOpcode::kArgumentsLengthState:
case IrOpcode::kObjectState:
case IrOpcode::kTypedObjectState:
// TODO(jarin): what are the constraints on these?
@@ -511,9 +535,7 @@ void Verifier::Visitor::Check(Node* node) {
// JavaScript operators
// --------------------
case IrOpcode::kJSEqual:
- case IrOpcode::kJSNotEqual:
case IrOpcode::kJSStrictEqual:
- case IrOpcode::kJSStrictNotEqual:
case IrOpcode::kJSLessThan:
case IrOpcode::kJSGreaterThan:
case IrOpcode::kJSLessThanOrEqual:
@@ -552,8 +574,7 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::OrderedNumber());
break;
case IrOpcode::kJSToLength:
- // Type is OrderedNumber.
- CheckTypeIs(node, Type::OrderedNumber());
+ CheckTypeIs(node, Type::Range(0, kMaxSafeInteger, zone));
break;
case IrOpcode::kJSToName:
// Type is Name.
@@ -893,6 +914,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kSpeculativeToNumber:
+ // Any -> Number
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::Number());
+ break;
case IrOpcode::kPlainPrimitiveToNumber:
// PlainPrimitive -> Number
CheckValueInputIs(node, 0, Type::PlainPrimitive());
@@ -953,18 +979,29 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kObjectIsDetectableCallable:
+ case IrOpcode::kObjectIsNaN:
case IrOpcode::kObjectIsNonCallable:
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsSmi:
case IrOpcode::kObjectIsString:
+ case IrOpcode::kObjectIsSymbol:
case IrOpcode::kObjectIsUndetectable:
case IrOpcode::kArrayBufferWasNeutered:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
- case IrOpcode::kNewRestParameterElements:
+ case IrOpcode::kArgumentsLength:
+ CheckValueInputIs(node, 0, Type::ExternalPointer());
+ CheckTypeIs(node, TypeCache::Get().kArgumentsLengthType);
+ break;
+ case IrOpcode::kArgumentsFrame:
+ CheckTypeIs(node, Type::ExternalPointer());
+ break;
case IrOpcode::kNewUnmappedArgumentsElements:
+ CheckValueInputIs(node, 0, Type::ExternalPointer());
+ CheckValueInputIs(node, 1, Type::Range(-Code::kMaxArguments,
+ Code::kMaxArguments, zone));
CheckTypeIs(node, Type::OtherInternal());
break;
case IrOpcode::kAllocate:
@@ -1101,6 +1138,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
case IrOpcode::kTruncateTaggedToBit:
+ case IrOpcode::kTruncateTaggedPointerToBit:
break;
case IrOpcode::kCheckBounds:
@@ -1163,8 +1201,8 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kCheckFloat64Hole:
- CheckValueInputIs(node, 0, Type::Number());
- CheckTypeIs(node, Type::Number());
+ CheckValueInputIs(node, 0, Type::NumberOrHole());
+ CheckTypeIs(node, Type::NumberOrUndefined());
break;
case IrOpcode::kCheckTaggedHole:
CheckValueInputIs(node, 0, Type::Any());
@@ -1237,6 +1275,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord32Ctz:
case IrOpcode::kWord32ReverseBits:
case IrOpcode::kWord32ReverseBytes:
+ case IrOpcode::kInt32AbsWithOverflow:
case IrOpcode::kWord32Popcnt:
case IrOpcode::kWord64And:
case IrOpcode::kWord64Or:
@@ -1250,6 +1289,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord64Ctz:
case IrOpcode::kWord64ReverseBits:
case IrOpcode::kWord64ReverseBytes:
+ case IrOpcode::kInt64AbsWithOverflow:
case IrOpcode::kWord64Equal:
case IrOpcode::kInt32Add:
case IrOpcode::kInt32AddWithOverflow:
@@ -1359,6 +1399,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kChangeFloat64ToUint64:
case IrOpcode::kFloat64SilenceNaN:
case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat32ToInt32:
@@ -1386,6 +1427,13 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kCheckedStore:
case IrOpcode::kAtomicLoad:
case IrOpcode::kAtomicStore:
+ case IrOpcode::kAtomicExchange:
+ case IrOpcode::kAtomicCompareExchange:
+ case IrOpcode::kAtomicAdd:
+ case IrOpcode::kAtomicSub:
+ case IrOpcode::kAtomicAnd:
+ case IrOpcode::kAtomicOr:
+ case IrOpcode::kAtomicXor:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 168178e49e..b6b9e3ff05 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -65,12 +65,51 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
}
+Node* BuildModifyThreadInWasmFlag(bool new_value, JSGraph* jsgraph,
+ Node** effect_ptr, Node* control) {
+ // TODO(eholk): generate code to modify the thread-local storage directly,
+ // rather than calling the runtime.
+ if (!trap_handler::UseTrapHandler()) {
+ return control;
+ }
+
+ const Runtime::FunctionId f =
+ new_value ? Runtime::kSetThreadInWasm : Runtime::kClearThreadInWasm;
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ DCHECK_EQ(0, fun->nargs);
+ const CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ // CEntryStubConstant nodes have to be created and cached in the main
+ // thread. At the moment this is only done for CEntryStubConstant(1).
+ DCHECK_EQ(1, fun->result_size);
+ Node* inputs[] = {jsgraph->CEntryStubConstant(fun->result_size),
+ jsgraph->ExternalConstant(
+ ExternalReference(f, jsgraph->isolate())), // ref
+ jsgraph->Int32Constant(fun->nargs), // arity
+ jsgraph->NoContextConstant(),
+ *effect_ptr,
+ control};
+
+ Node* node = jsgraph->graph()->NewNode(jsgraph->common()->Call(desc),
+ arraysize(inputs), inputs);
+ *effect_ptr = node;
+ return node;
+}
+
// Only call this function for code which is not reused across instantiations,
// as we do not patch the embedded context.
Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
Node* context, Node** parameters,
int parameter_count, Node** effect_ptr,
- Node* control) {
+ Node** control) {
+ // Setting and clearing the thread-in-wasm flag should not be done as a normal
+ // runtime call.
+ DCHECK_NE(f, Runtime::kSetThreadInWasm);
+ DCHECK_NE(f, Runtime::kClearThreadInWasm);
+ // We're leaving Wasm code, so clear the flag.
+ *control = BuildModifyThreadInWasmFlag(false, jsgraph, effect_ptr, *control);
+
const Runtime::Function* fun = Runtime::FunctionForId(f);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
@@ -93,17 +132,21 @@ Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
inputs[count++] = jsgraph->Int32Constant(fun->nargs); // arity
inputs[count++] = context; // context
inputs[count++] = *effect_ptr;
- inputs[count++] = control;
+ inputs[count++] = *control;
Node* node =
jsgraph->graph()->NewNode(jsgraph->common()->Call(desc), count, inputs);
*effect_ptr = node;
+
+ // Restore the thread-in-wasm flag, since we have returned to Wasm.
+ *control = BuildModifyThreadInWasmFlag(true, jsgraph, effect_ptr, *control);
+
return node;
}
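The runtime call is now bracketed by flag updates: clear the thread-in-wasm flag before leaving Wasm code, restore it once the call returns, threading effect and control through both helper calls. The same clear-call-restore shape, sketched with a scope guard over a hypothetical thread-local flag (the real code toggles the flag via runtime calls, not RAII):

#include <cassert>

// Hypothetical per-thread flag, standing in for the runtime's
// thread-in-wasm state consulted by the trap handler.
thread_local bool g_thread_in_wasm = true;

// Clear the flag while a non-Wasm (runtime) call is in
// progress, restore it afterwards -- the inverse of a usual
// RAII lock, matching the clear-call-restore sequence above.
class LeaveWasmScope {
 public:
  LeaveWasmScope() { g_thread_in_wasm = false; }
  ~LeaveWasmScope() { g_thread_in_wasm = true; }
};

int CallRuntime() {
  assert(!g_thread_in_wasm);  // runtime must not run as "in wasm"
  return 42;
}

int main() {
  int result;
  {
    LeaveWasmScope scope;
    result = CallRuntime();
  }
  assert(g_thread_in_wasm);  // restored after the call
  return result == 42 ? 0 : 1;
}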
Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
Node** parameters, int parameter_count,
- Node** effect_ptr, Node* control) {
+ Node** effect_ptr, Node** control) {
return BuildCallToRuntimeWithContext(f, jsgraph, jsgraph->NoContextConstant(),
parameters, parameter_count, effect_ptr,
control);
@@ -111,247 +154,6 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
} // namespace
-// TODO(eholk): Support trap handlers on other platforms.
-#if V8_TARGET_ARCH_X64 && V8_OS_LINUX
-const bool kTrapHandlerSupported = true;
-#else
-const bool kTrapHandlerSupported = false;
-#endif
-
-// A helper that handles building graph fragments for trapping.
-// To avoid generating a ton of redundant code that just calls the runtime
-// to trap, we generate a per-trap-reason block of code that all trap sites
-// in this function will branch to.
-class WasmTrapHelper : public ZoneObject {
- public:
- explicit WasmTrapHelper(WasmGraphBuilder* builder)
- : builder_(builder),
- jsgraph_(builder->jsgraph()),
- graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {}
-
- // Make the current control path trap to unreachable.
- void Unreachable(wasm::WasmCodePosition position) {
- ConnectTrap(wasm::kTrapUnreachable, position);
- }
-
- // Always trap with the given reason.
- void TrapAlways(wasm::TrapReason reason, wasm::WasmCodePosition position) {
- ConnectTrap(reason, position);
- }
-
- // Add a check that traps if {node} is equal to {val}.
- Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val,
- wasm::WasmCodePosition position) {
- Int32Matcher m(node);
- if (m.HasValue() && !m.Is(val)) return graph()->start();
- if (val == 0) {
- AddTrapIfFalse(reason, node, position);
- } else {
- AddTrapIfTrue(reason,
- graph()->NewNode(jsgraph()->machine()->Word32Equal(), node,
- jsgraph()->Int32Constant(val)),
- position);
- }
- return builder_->Control();
- }
-
- // Add a check that traps if {node} is zero.
- Node* ZeroCheck32(wasm::TrapReason reason, Node* node,
- wasm::WasmCodePosition position) {
- return TrapIfEq32(reason, node, 0, position);
- }
-
- // Add a check that traps if {node} is equal to {val}.
- Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val,
- wasm::WasmCodePosition position) {
- Int64Matcher m(node);
- if (m.HasValue() && !m.Is(val)) return graph()->start();
- AddTrapIfTrue(reason, graph()->NewNode(jsgraph()->machine()->Word64Equal(),
- node, jsgraph()->Int64Constant(val)),
- position);
- return builder_->Control();
- }
-
- // Add a check that traps if {node} is zero.
- Node* ZeroCheck64(wasm::TrapReason reason, Node* node,
- wasm::WasmCodePosition position) {
- return TrapIfEq64(reason, node, 0, position);
- }
-
- Builtins::Name GetBuiltinIdForTrap(wasm::TrapReason reason) {
- if (builder_->module_ && !builder_->module_->instance->context.is_null()) {
- switch (reason) {
-#define TRAPREASON_TO_MESSAGE(name) \
- case wasm::k##name: \
- return Builtins::kThrowWasm##name;
- FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
-#undef TRAPREASON_TO_MESSAGE
- default:
- UNREACHABLE();
- return Builtins::builtin_count;
- }
- } else {
- // We use Runtime::kNumFunctions as a marker to tell the code generator
- // to generate a call to a testing c-function instead of a runtime
- // function. This code should only be called from a cctest.
- return Builtins::builtin_count;
- }
- }
-
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
- V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_X87
-#define WASM_TRAP_IF_SUPPORTED
-#endif
-
- // Add a trap if {cond} is true.
- void AddTrapIfTrue(wasm::TrapReason reason, Node* cond,
- wasm::WasmCodePosition position) {
-#ifdef WASM_TRAP_IF_SUPPORTED
- if (FLAG_wasm_trap_if) {
- int32_t trap_id = GetBuiltinIdForTrap(reason);
- Node* node = graph()->NewNode(common()->TrapIf(trap_id), cond,
- builder_->Effect(), builder_->Control());
- *builder_->control_ = node;
- builder_->SetSourcePosition(node, position);
- return;
- }
-#endif // WASM_TRAP_IF_SUPPORTED
- BuildTrapIf(reason, cond, true, position);
- }
-
- // Add a trap if {cond} is false.
- void AddTrapIfFalse(wasm::TrapReason reason, Node* cond,
- wasm::WasmCodePosition position) {
-#ifdef WASM_TRAP_IF_SUPPORTED
- if (FLAG_wasm_trap_if) {
- int32_t trap_id = GetBuiltinIdForTrap(reason);
-
- Node* node = graph()->NewNode(common()->TrapUnless(trap_id), cond,
- builder_->Effect(), builder_->Control());
- *builder_->control_ = node;
- builder_->SetSourcePosition(node, position);
- return;
- }
-#endif // WASM_TRAP_IF_SUPPORTED
-
- BuildTrapIf(reason, cond, false, position);
- }
-
- // Add a trap if {cond} is true or false according to {iftrue}.
- void BuildTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
- wasm::WasmCodePosition position) {
- Node** effect_ptr = builder_->effect_;
- Node** control_ptr = builder_->control_;
- Node* before = *effect_ptr;
- BranchHint hint = iftrue ? BranchHint::kFalse : BranchHint::kTrue;
- Node* branch = graph()->NewNode(common()->Branch(hint), cond, *control_ptr);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-
- *control_ptr = iftrue ? if_true : if_false;
- ConnectTrap(reason, position);
- *control_ptr = iftrue ? if_false : if_true;
- *effect_ptr = before;
- }
-
- Node* GetTrapValue(wasm::FunctionSig* sig) {
- if (sig->return_count() > 0) {
- return GetTrapValue(sig->GetReturn());
- } else {
- return jsgraph()->Int32Constant(0xdeadbeef);
- }
- }
-
- Node* GetTrapValue(wasm::ValueType type) {
- switch (type) {
- case wasm::kWasmI32:
- return jsgraph()->Int32Constant(0xdeadbeef);
- case wasm::kWasmI64:
- return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
- case wasm::kWasmF32:
- return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
- case wasm::kWasmF64:
- return jsgraph()->Float64Constant(bit_cast<double>(0xdeadbeefdeadbeef));
- break;
- case wasm::kWasmS128:
- return builder_->CreateS128Value(0xdeadbeef);
- break;
- default:
- UNREACHABLE();
- return nullptr;
- }
- }
-
- private:
- WasmGraphBuilder* builder_;
- JSGraph* jsgraph_;
- Graph* graph_;
- Node* trap_merge_ = nullptr;
- Node* trap_effect_;
- Node* trap_reason_;
- Node* trap_position_;
-
- JSGraph* jsgraph() { return jsgraph_; }
- Graph* graph() { return jsgraph_->graph(); }
- CommonOperatorBuilder* common() { return jsgraph()->common(); }
-
- void ConnectTrap(wasm::TrapReason reason, wasm::WasmCodePosition position) {
- DCHECK(position != wasm::kNoCodePosition);
- Node* reason_node = builder_->Int32Constant(
- wasm::WasmOpcodes::TrapReasonToMessageId(reason));
- Node* position_node = builder_->Int32Constant(position);
- if (trap_merge_ == nullptr) {
- // Create trap code for the first time.
- return BuildTrapCode(reason_node, position_node);
- }
- // Connect the current control and effect to the existing trap code.
- builder_->AppendToMerge(trap_merge_, builder_->Control());
- builder_->AppendToPhi(trap_effect_, builder_->Effect());
- builder_->AppendToPhi(trap_reason_, reason_node);
- builder_->AppendToPhi(trap_position_, position_node);
- }
-
- void BuildTrapCode(Node* reason_node, Node* position_node) {
- Node** control_ptr = builder_->control_;
- Node** effect_ptr = builder_->effect_;
- wasm::ModuleEnv* module = builder_->module_;
- DCHECK(trap_merge_ == NULL);
- *control_ptr = trap_merge_ =
- graph()->NewNode(common()->Merge(1), *control_ptr);
- *effect_ptr = trap_effect_ =
- graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
- trap_reason_ =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 1),
- reason_node, *control_ptr);
- trap_position_ =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 1),
- position_node, *control_ptr);
-
- Node* trap_reason_smi = builder_->BuildChangeInt32ToSmi(trap_reason_);
- Node* trap_position_smi = builder_->BuildChangeInt32ToSmi(trap_position_);
-
- if (module && !module->instance->context.is_null()) {
- Node* parameters[] = {trap_reason_smi, // message id
- trap_position_smi}; // byte position
- BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph(), parameters,
- arraysize(parameters), effect_ptr, *control_ptr);
- }
- if (false) {
- // End the control flow with a throw
- Node* thrw =
- graph()->NewNode(common()->Throw(), jsgraph()->ZeroConstant(),
- *effect_ptr, *control_ptr);
- MergeControlToEnd(jsgraph(), thrw);
- } else {
- // End the control flow with returning 0xdeadbeef
- Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
- builder_->Return(ret_value);
- }
- }
-};
-
WasmGraphBuilder::WasmGraphBuilder(
wasm::ModuleEnv* module_env, Zone* zone, JSGraph* jsgraph,
wasm::FunctionSig* sig,
@@ -364,14 +166,13 @@ WasmGraphBuilder::WasmGraphBuilder(
function_table_sizes_(zone),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
- trap_(new (zone) WasmTrapHelper(this)),
sig_(sig),
source_position_table_(source_position_table) {
- for (size_t i = 0; i < sig->parameter_count(); i++) {
- if (sig->GetParam(i) == wasm::kWasmS128) has_simd_ = true;
+ for (size_t i = sig->parameter_count(); i > 0 && !has_simd_; --i) {
+ if (sig->GetParam(i - 1) == wasm::kWasmS128) has_simd_ = true;
}
- for (size_t i = 0; i < sig->return_count(); i++) {
- if (sig->GetReturn(i) == wasm::kWasmS128) has_simd_ = true;
+ for (size_t i = sig->return_count(); i > 0 && !has_simd_; --i) {
+ if (sig->GetReturn(i - 1) == wasm::kWasmS128) has_simd_ = true;
}
DCHECK_NOT_NULL(jsgraph_);
}
@@ -480,7 +281,8 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
Node** effect, Node** control) {
if (FLAG_wasm_no_stack_checks) return;
// We do not generate stack checks for cctests.
- if (!module_ || module_->instance->context.is_null()) return;
+ if (!module_ || (module_->instance && module_->instance->context.is_null()))
+ return;
if (effect == nullptr) effect = effect_;
if (control == nullptr) control = control_;
@@ -1092,6 +894,95 @@ Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
BranchHint::kFalse);
}
+namespace {
+Builtins::Name GetBuiltinIdForTrap(bool in_cctest, wasm::TrapReason reason) {
+ if (in_cctest) {
+ // We use Builtins::builtin_count as a marker to tell the code generator
+ // to generate a call to a testing c-function instead of a runtime
+ // function. This code should only be called from a cctest.
+ return Builtins::builtin_count;
+ }
+
+ switch (reason) {
+#define TRAPREASON_TO_MESSAGE(name) \
+ case wasm::k##name: \
+ return Builtins::kThrowWasm##name;
+ FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
+#undef TRAPREASON_TO_MESSAGE
+ default:
+ UNREACHABLE();
+ return Builtins::builtin_count;
+ }
+}
+} // namespace
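
FOREACH_WASM_TRAPREASON is an X-macro: the switch above expands TRAPREASON_TO_MESSAGE once per trap reason in the list, stamping out one case each. A self-contained sketch of the same pattern with an invented three-entry list (the real list is longer):

#include <cstdio>

// Invented demo list; each V(name) call stamps out one entry.
#define FOREACH_DEMO_TRAPREASON(V) \
  V(TrapUnreachable)               \
  V(TrapMemOutOfBounds)            \
  V(TrapDivByZero)

enum Reason {
#define DECLARE_ENUM(name) k##name,
  FOREACH_DEMO_TRAPREASON(DECLARE_ENUM)
#undef DECLARE_ENUM
};

// Mirrors the shape of GetBuiltinIdForTrap: one case per list entry.
const char* MessageFor(Reason reason) {
  switch (reason) {
#define REASON_TO_MESSAGE(name) \
  case k##name:                 \
    return "ThrowWasm" #name;
    FOREACH_DEMO_TRAPREASON(REASON_TO_MESSAGE)
#undef REASON_TO_MESSAGE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", MessageFor(kTrapDivByZero));  // ThrowWasmTrapDivByZero
  return 0;
}
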
+
+Node* WasmGraphBuilder::TrapIfTrue(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position) {
+ // TODO(wasm): Introduce a testing flag instead of trying to infer it here.
+ bool in_cctest =
+ !module_ || (module_->instance && module_->instance->context.is_null());
+ int32_t trap_id = GetBuiltinIdForTrap(in_cctest, reason);
+ Node* node = graph()->NewNode(jsgraph()->common()->TrapIf(trap_id), cond,
+ Effect(), Control());
+ *control_ = node;
+ SetSourcePosition(node, position);
+ return node;
+}
+
+Node* WasmGraphBuilder::TrapIfFalse(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position) {
+ // TODO(wasm): Introduce a testing flag instead of trying to infer it here.
+ bool in_cctest =
+ !module_ || (module_->instance && module_->instance->context.is_null());
+ int32_t trap_id = GetBuiltinIdForTrap(in_cctest, reason);
+
+ Node* node = graph()->NewNode(jsgraph()->common()->TrapUnless(trap_id), cond,
+ Effect(), Control());
+ *control_ = node;
+ SetSourcePosition(node, position);
+ return node;
+}
+
+// Add a check that traps if {node} is equal to {val}.
+Node* WasmGraphBuilder::TrapIfEq32(wasm::TrapReason reason, Node* node,
+ int32_t val,
+ wasm::WasmCodePosition position) {
+ Int32Matcher m(node);
+ if (m.HasValue() && !m.Is(val)) return graph()->start();
+ if (val == 0) {
+ return TrapIfFalse(reason, node, position);
+ } else {
+ return TrapIfTrue(reason,
+ graph()->NewNode(jsgraph()->machine()->Word32Equal(),
+ node, jsgraph()->Int32Constant(val)),
+ position);
+ }
+}
+
+// Add a check that traps if {node} is zero.
+Node* WasmGraphBuilder::ZeroCheck32(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position) {
+ return TrapIfEq32(reason, node, 0, position);
+}
+
+// Add a check that traps if {node} is equal to {val}.
+Node* WasmGraphBuilder::TrapIfEq64(wasm::TrapReason reason, Node* node,
+ int64_t val,
+ wasm::WasmCodePosition position) {
+ Int64Matcher m(node);
+ if (m.HasValue() && !m.Is(val)) return graph()->start();
+ return TrapIfTrue(reason,
+ graph()->NewNode(jsgraph()->machine()->Word64Equal(), node,
+ jsgraph()->Int64Constant(val)),
+ position);
+}
+
+// Add a check that traps if {node} is zero.
+Node* WasmGraphBuilder::ZeroCheck64(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position) {
+ return TrapIfEq64(reason, node, 0, position);
+}
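
TrapIfEq32 and TrapIfEq64 fold statically decidable checks: when the operand is a compile-time constant different from {val}, the trap can never fire and no check is emitted (the graph's start node is returned instead). A sketch of that decision using a plain optional in place of V8's Int32Matcher:

#include <cassert>
#include <cstdint>
#include <optional>

// Stand-in for Int32Matcher: carries a value only when the node is constant.
struct ConstMatcher {
  std::optional<int32_t> value;
  bool HasValue() const { return value.has_value(); }
  bool Is(int32_t v) const { return value.has_value() && *value == v; }
};

// Mirrors the early-out in TrapIfEq32: skip the check when it is dead code.
bool NeedsRuntimeCheck(const ConstMatcher& m, int32_t val) {
  if (m.HasValue() && !m.Is(val)) return false;  // constant, and never equal
  return true;  // unknown at compile time (or always trapping): emit check
}

int main() {
  assert(!NeedsRuntimeCheck(ConstMatcher{7}, 0));  // 7 != 0: no check needed
  assert(NeedsRuntimeCheck(ConstMatcher{0}, 0));   // constant 0: always traps
  assert(NeedsRuntimeCheck(ConstMatcher{std::nullopt}, 0));  // unknown node
  return 0;
}
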
+
Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
return graph()->NewNode(jsgraph()->common()->Switch(count), key, *control_);
}
@@ -1131,10 +1022,11 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
return ret;
}
-Node* WasmGraphBuilder::ReturnVoid() { return Return(0, Buffer(0)); }
+Node* WasmGraphBuilder::ReturnVoid() { return Return(0, nullptr); }
Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) {
- trap_->Unreachable(position);
+ TrapIfFalse(wasm::TrapReason::kTrapUnreachable, Int32Constant(0), position);
+ ReturnVoid();
return nullptr;
}
@@ -1372,7 +1264,7 @@ Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF32SConvertI32, result);
Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
+ TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
@@ -1388,7 +1280,7 @@ Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input,
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF64SConvertI32, result);
Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
+ TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
@@ -1404,7 +1296,7 @@ Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input,
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF32UConvertI32, result);
Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
+ TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
@@ -1420,7 +1312,7 @@ Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input,
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF64UConvertI32, result);
Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
+ TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
@@ -1700,7 +1592,7 @@ Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input,
graph()->start());
Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
graph()->start());
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
+ ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
@@ -1718,7 +1610,7 @@ Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input,
graph()->start());
Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
graph()->start());
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
+ ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
@@ -1736,7 +1628,7 @@ Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input,
graph()->start());
Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
graph()->start());
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
+ ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
@@ -1754,7 +1646,7 @@ Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input,
graph()->start());
Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
graph()->start());
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
+ ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
@@ -1778,8 +1670,8 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
sig_builder.AddParam(MachineType::Pointer());
Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
Node* args[] = {function, stack_slot_param, stack_slot_result};
- trap_->ZeroCheck32(wasm::kTrapFloatUnrepresentable,
- BuildCCall(sig_builder.Build(), args), position);
+ ZeroCheck32(wasm::kTrapFloatUnrepresentable,
+ BuildCCall(sig_builder.Build(), args), position);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
Node* load =
graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
@@ -1789,6 +1681,11 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
}
Node* WasmGraphBuilder::GrowMemory(Node* input) {
+  // GrowMemory will not be called from asm.js, so we cannot be in
+  // lazy-compilation mode and the instance will be set.
+ DCHECK_NOT_NULL(module_);
+ DCHECK_NOT_NULL(module_->instance);
+
Diamond check_input_range(
graph(), jsgraph()->common(),
graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(), input,
@@ -1801,13 +1698,13 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
Node* old_effect = *effect_;
Node* call = BuildCallToRuntime(Runtime::kWasmGrowMemory, jsgraph(),
parameters, arraysize(parameters), effect_,
- check_input_range.if_true);
+ &check_input_range.if_true);
Node* result = BuildChangeSmiToInt32(call);
result = check_input_range.Phi(MachineRepresentation::kWord32, result,
jsgraph()->Int32Constant(-1));
- *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), call,
+ *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), *effect_,
old_effect, check_input_range.merge);
*control_ = check_input_range.merge;
return result;
@@ -1832,7 +1729,7 @@ Node* WasmGraphBuilder::Throw(Node* input) {
Node* parameters[] = {lower, upper}; // thrown value
return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), parameters,
- arraysize(parameters), effect_, *control_);
+ arraysize(parameters), effect_, control_);
}
Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
@@ -1841,7 +1738,7 @@ Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
Node* parameters[] = {input}; // caught value
Node* value =
BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue, jsgraph(),
- parameters, arraysize(parameters), effect_, *control_);
+ parameters, arraysize(parameters), effect_, control_);
Node* is_smi;
Node* is_heap;
@@ -1881,7 +1778,7 @@ Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- trap_->ZeroCheck32(wasm::kTrapDivByZero, right, position);
+ ZeroCheck32(wasm::kTrapDivByZero, right, position);
Node* before = *control_;
Node* denom_is_m1;
Node* denom_is_not_m1;
@@ -1889,7 +1786,7 @@ Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
&denom_is_m1, &denom_is_not_m1);
*control_ = denom_is_m1;
- trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt, position);
+ TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt, position);
if (*control_ != denom_is_m1) {
*control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
*control_);
@@ -1903,7 +1800,7 @@ Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- trap_->ZeroCheck32(wasm::kTrapRemByZero, right, position);
+ ZeroCheck32(wasm::kTrapRemByZero, right, position);
Diamond d(
graph(), jsgraph()->common(),
@@ -1918,17 +1815,15 @@ Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right,
wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- return graph()->NewNode(
- m->Uint32Div(), left, right,
- trap_->ZeroCheck32(wasm::kTrapDivByZero, right, position));
+ return graph()->NewNode(m->Uint32Div(), left, right,
+ ZeroCheck32(wasm::kTrapDivByZero, right, position));
}
Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right,
wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- return graph()->NewNode(
- m->Uint32Mod(), left, right,
- trap_->ZeroCheck32(wasm::kTrapRemByZero, right, position));
+ return graph()->NewNode(m->Uint32Mod(), left, right,
+ ZeroCheck32(wasm::kTrapRemByZero, right, position));
}
Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
@@ -2111,7 +2006,7 @@ Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
left, right, ExternalReference::wasm_int64_div(jsgraph()->isolate()),
MachineType::Int64(), wasm::kTrapDivByZero, position);
}
- trap_->ZeroCheck64(wasm::kTrapDivByZero, right, position);
+ ZeroCheck64(wasm::kTrapDivByZero, right, position);
Node* before = *control_;
Node* denom_is_m1;
Node* denom_is_not_m1;
@@ -2119,8 +2014,8 @@ Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
jsgraph()->Int64Constant(-1)),
&denom_is_m1, &denom_is_not_m1);
*control_ = denom_is_m1;
- trap_->TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
- std::numeric_limits<int64_t>::min(), position);
+ TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
+ std::numeric_limits<int64_t>::min(), position);
if (*control_ != denom_is_m1) {
*control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
*control_);
@@ -2138,7 +2033,7 @@ Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
left, right, ExternalReference::wasm_int64_mod(jsgraph()->isolate()),
MachineType::Int64(), wasm::kTrapRemByZero, position);
}
- trap_->ZeroCheck64(wasm::kTrapRemByZero, right, position);
+ ZeroCheck64(wasm::kTrapRemByZero, right, position);
Diamond d(jsgraph()->graph(), jsgraph()->common(),
graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
jsgraph()->Int64Constant(-1)));
@@ -2159,9 +2054,8 @@ Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right,
left, right, ExternalReference::wasm_uint64_div(jsgraph()->isolate()),
MachineType::Int64(), wasm::kTrapDivByZero, position);
}
- return graph()->NewNode(
- jsgraph()->machine()->Uint64Div(), left, right,
- trap_->ZeroCheck64(wasm::kTrapDivByZero, right, position));
+ return graph()->NewNode(jsgraph()->machine()->Uint64Div(), left, right,
+ ZeroCheck64(wasm::kTrapDivByZero, right, position));
}
Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
wasm::WasmCodePosition position) {
@@ -2170,9 +2064,8 @@ Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
left, right, ExternalReference::wasm_uint64_mod(jsgraph()->isolate()),
MachineType::Int64(), wasm::kTrapRemByZero, position);
}
- return graph()->NewNode(
- jsgraph()->machine()->Uint64Mod(), left, right,
- trap_->ZeroCheck64(wasm::kTrapRemByZero, right, position));
+ return graph()->NewNode(jsgraph()->machine()->Uint64Mod(), left, right,
+ ZeroCheck64(wasm::kTrapRemByZero, right, position));
}
Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
@@ -2205,8 +2098,8 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
// TODO(wasm): This can get simpler if we have a specialized runtime call to
// throw WASM exceptions by trap code instead of by string.
- trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call, position);
- trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
+ ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call, position);
+ TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
Node* load =
graph()->NewNode(load_op, stack_slot_dst, jsgraph()->Int32Constant(0),
@@ -2279,7 +2172,10 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
DCHECK_NULL(args[0]);
// Add code object as constant.
- Handle<Code> code = module_->GetFunctionCode(index);
+ // TODO(wasm): Always use the illegal builtin, except for testing.
+ Handle<Code> code = module_->instance
+ ? module_->GetFunctionCode(index)
+ : jsgraph()->isolate()->builtins()->Illegal();
DCHECK(!code.is_null());
args[0] = HeapConstant(code);
wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
@@ -2291,7 +2187,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node*** rets,
wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(args[0]);
- DCHECK(module_ && module_->instance);
+ DCHECK_NOT_NULL(module_);
// Assume only one table for now.
uint32_t table_index = 0;
@@ -2306,7 +2202,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
// Bounds check against the table size.
Node* size = function_table_sizes_[table_index];
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
- trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
+ TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
Node* table = function_tables_[table_index];
Node* signatures = signature_tables_[table_index];
@@ -2328,7 +2224,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node* sig_match = graph()->NewNode(
machine->WordEqual(), load_sig,
jsgraph()->SmiConstant(static_cast<int>(map.FindOrInsert(sig))));
- trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
+ TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
}
// Load code object from the table.
@@ -2525,65 +2421,6 @@ Node* WasmGraphBuilder::BuildChangeTaggedToFloat64(Node* value) {
MachineOperatorBuilder* machine = jsgraph()->machine();
CommonOperatorBuilder* common = jsgraph()->common();
- if (CanCover(value, IrOpcode::kJSToNumber)) {
- // ChangeTaggedToFloat64(JSToNumber(x)) =>
- // if IsSmi(x) then ChangeSmiToFloat64(x)
- // else let y = JSToNumber(x) in
- // if IsSmi(y) then ChangeSmiToFloat64(y)
- // else BuildLoadHeapNumberValue(y)
- Node* object = NodeProperties::GetValueInput(value, 0);
- Node* context = NodeProperties::GetContextInput(value);
- Node* frame_state = NodeProperties::GetFrameStateInput(value);
- Node* effect = NodeProperties::GetEffectInput(value);
- Node* control = NodeProperties::GetControlInput(value);
-
- const Operator* merge_op = common->Merge(2);
- const Operator* ephi_op = common->EffectPhi(2);
- const Operator* phi_op = common->Phi(MachineRepresentation::kFloat64, 2);
-
- Node* check1 = BuildTestNotSmi(object);
- Node* branch1 =
- graph()->NewNode(common->Branch(BranchHint::kFalse), check1, control);
-
- Node* if_true1 = graph()->NewNode(common->IfTrue(), branch1);
- Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
- effect, if_true1);
- Node* etrue1 = vtrue1;
-
- Node* check2 = BuildTestNotSmi(vtrue1);
- Node* branch2 = graph()->NewNode(common->Branch(), check2, if_true1);
-
- Node* if_true2 = graph()->NewNode(common->IfTrue(), branch2);
- Node* vtrue2 = BuildLoadHeapNumberValue(vtrue1, if_true2);
-
- Node* if_false2 = graph()->NewNode(common->IfFalse(), branch2);
- Node* vfalse2 = BuildChangeSmiToFloat64(vtrue1);
-
- if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
- vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
-
- Node* if_false1 = graph()->NewNode(common->IfFalse(), branch1);
- Node* vfalse1 = BuildChangeSmiToFloat64(object);
- Node* efalse1 = effect;
-
- Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
- Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
- Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
-
- // Wire the new diamond into the graph, {JSToNumber} can still throw.
- NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);
-
- // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
- // the node and places it inside the diamond. Come up with a helper method!
- for (Node* use : etrue1->uses()) {
- if (use->opcode() == IrOpcode::kIfSuccess) {
- use->ReplaceUses(merge1);
- NodeProperties::ReplaceControlInput(branch2, use);
- }
- }
- return phi1;
- }
-
Node* check = BuildTestNotSmi(value);
Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), check,
graph()->start());
@@ -2733,24 +2570,6 @@ Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
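
The kValueOffset - kHeapObjectTag above is the usual V8 tagged-pointer arithmetic: heap pointers carry a low tag of 1, so a field offset used against a tagged base is adjusted by subtracting the tag once. A worked sketch with an illustrative (made-up) field offset:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kHeapObjectTag = 1;  // V8 tags heap pointers with 1
  const intptr_t kValueOffset = 8;    // illustrative field offset
  intptr_t object_start = 0x1000;     // untagged object address (made up)
  intptr_t tagged_pointer = object_start + kHeapObjectTag;
  // Adding the tag-adjusted offset to the tagged base lands on the field.
  assert(tagged_pointer + (kValueOffset - kHeapObjectTag) ==
         object_start + kValueOffset);
  return 0;
}
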
-bool IsJSCompatible(wasm::ValueType type) {
- return (type != wasm::kWasmI64) && (type != wasm::kWasmS128);
-}
-
-bool HasJSCompatibleSignature(wasm::FunctionSig* sig) {
- for (size_t i = 0; i < sig->parameter_count(); i++) {
- if (!IsJSCompatible(sig->GetParam(i))) {
- return false;
- }
- }
- for (size_t i = 0; i < sig->return_count(); i++) {
- if (!IsJSCompatible(sig->GetReturn(i))) {
- return false;
- }
- }
- return true;
-}
-
void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
wasm::FunctionSig* sig) {
int wasm_count = static_cast<int>(sig->parameter_count());
@@ -2768,12 +2587,15 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
graph()->start());
- if (!HasJSCompatibleSignature(sig_)) {
+ // Set the ThreadInWasm flag before we do the actual call.
+ BuildModifyThreadInWasmFlag(true, jsgraph(), effect_, *control_);
+
+ if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Use the context of the calling javascript function
// (passed as a parameter), such that the generated code is context
// independent.
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
- context, nullptr, 0, effect_, *control_);
+ context, nullptr, 0, effect_, control_);
// Add a dummy call to the wasm function so that the generated wrapper
// contains a reference to the wrapped wasm function. Without this reference
@@ -2811,6 +2633,10 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
*effect_ = call;
+
+  // Clear the ThreadInWasm flag after the call returns.
+ BuildModifyThreadInWasmFlag(false, jsgraph(), effect_, *control_);
+
Node* retval = call;
Node* jsval = ToJS(
retval, sig->return_count() == 0 ? wasm::kWasmStmt : sig->GetReturn());
@@ -2841,26 +2667,28 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
*effect_ = start;
*control_ = start;
- if (!HasJSCompatibleSignature(sig_)) {
+ if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Embedding the context is ok here, since this code is
// regenerated at instantiation time.
Node* context =
jsgraph()->HeapConstant(jsgraph()->isolate()->native_context());
- Return(BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
- jsgraph(), context, nullptr, 0,
- effect_, *control_));
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
+ context, nullptr, 0, effect_, control_);
+ // We don't need to return a value here, as the runtime call will not return
+ // anyway (the c entry stub will trigger stack unwinding).
+ ReturnVoid();
return;
}
Node** args = Buffer(wasm_count + 7);
- Node* call;
- bool direct_call = false;
+ Node* call = nullptr;
+
+ BuildModifyThreadInWasmFlag(false, jsgraph(), effect_, *control_);
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
if (function->shared()->internal_formal_parameter_count() == wasm_count) {
- direct_call = true;
int pos = 0;
args[pos++] = jsgraph()->Constant(target); // target callable.
// Receiver.
@@ -2890,7 +2718,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
}
// We cannot call the target directly, we have to use the Call builtin.
- if (!direct_call) {
+ if (!call) {
int pos = 0;
Callable callable = CodeFactory::Call(isolate);
args[pos++] = jsgraph()->HeapConstant(callable.code());
@@ -2921,10 +2749,11 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
*effect_ = call;
SetSourcePosition(call, 0);
+ BuildModifyThreadInWasmFlag(true, jsgraph(), effect_, *control_);
+
// Convert the return value back.
- Node* i32_zero = jsgraph()->Int32Constant(0);
Node* val = sig->return_count() == 0
- ? i32_zero
+ ? jsgraph()->Int32Constant(0)
: FromJS(call, HeapConstant(isolate->native_context()),
sig->GetReturn());
Return(val);
@@ -2946,8 +2775,7 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
// Compute size for the argument buffer.
int args_size_bytes = 0;
for (int i = 0; i < wasm_count; i++) {
- args_size_bytes +=
- RoundUpToMultipleOfPowOf2(1 << ElementSizeLog2Of(sig->GetParam(i)), 8);
+ args_size_bytes += 1 << ElementSizeLog2Of(sig->GetParam(i));
}
// The return value is also passed via this buffer:
@@ -2966,12 +2794,11 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
// Now store all our arguments to the buffer.
int param_index = 0;
int offset = 0;
+
for (int i = 0; i < wasm_count; i++) {
Node* param = Param(param_index++);
- bool is_i64_as_two_params =
- jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kWasmI64;
-
- if (is_i64_as_two_params) {
+ if (Int64Lowering::IsI64AsTwoParameters(jsgraph()->machine(),
+ sig->GetParam(i))) {
StoreRepresentation store_rep(wasm::kWasmI32,
WriteBarrierKind::kNoWriteBarrier);
*effect_ =
@@ -2993,10 +2820,8 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
*effect_ =
graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
Int32Constant(offset), param, *effect_, *control_);
- offset += RoundUpToMultipleOfPowOf2(1 << ElementSizeLog2Of(param_rep), 8);
+ offset += 1 << ElementSizeLog2Of(param_rep);
}
-
- DCHECK(IsAligned(offset, 8));
}
DCHECK_EQ(param_count, param_index);
DCHECK_EQ(args_size_bytes, offset);
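
With the per-argument rounding removed above, arguments are packed tightly: each parameter advances the offset by exactly its element size instead of an 8-byte slot. A worked sketch for a hypothetical (i32, i64, f32) signature, assuming the usual 4/8/4-byte element sizes:

#include <cassert>

int main() {
  const int element_sizes[] = {4, 8, 4};  // i32, i64, f32 (assumed sizes)
  int offset = 0;
  int offsets[3];
  for (int i = 0; i < 3; i++) {
    offsets[i] = offset;          // where parameter i is stored
    offset += element_sizes[i];   // packed: no per-argument rounding
  }
  assert(offsets[0] == 0 && offsets[1] == 4 && offsets[2] == 12);
  assert(offset == 16);  // args_size_bytes; the old rounding gave 24
  return 0;
}
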
@@ -3010,95 +2835,80 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
arg_buffer, // argument buffer
};
BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), parameters,
- arraysize(parameters), effect_, *control_);
+ arraysize(parameters), effect_, control_);
// Read back the return value.
- if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
- sig->GetReturn() == wasm::kWasmI64) {
+ if (sig->return_count() == 0) {
+ Return(Int32Constant(0));
+ } else if (Int64Lowering::IsI64AsTwoParameters(jsgraph()->machine(),
+ sig->GetReturn())) {
MachineType load_rep = wasm::WasmOpcodes::MachineTypeFor(wasm::kWasmI32);
Node* lower =
graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
- Int32Constant(0), *effect_, *control_);
+ Int32Constant(kInt64LowerHalfMemoryOffset), *effect_,
+ *control_);
Node* upper =
graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
- Int32Constant(sizeof(int32_t)), *effect_, *control_);
- Return(upper, lower);
+ Int32Constant(kInt64UpperHalfMemoryOffset), lower,
+ *control_);
+ *effect_ = upper;
+ Return(lower, upper);
} else {
- Node* val;
- if (sig->return_count() == 0) {
- val = Int32Constant(0);
- } else {
- MachineType load_rep =
- wasm::WasmOpcodes::MachineTypeFor(sig->GetReturn());
- val = graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
- Int32Constant(0), *effect_, *control_);
- }
+ MachineType load_rep = wasm::WasmOpcodes::MachineTypeFor(sig->GetReturn());
+ Node* val =
+ graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+ Int32Constant(0), *effect_, *control_);
Return(val);
}
}
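
On 32-bit targets an i64 result comes back from the interpreter as two 32-bit loads at kInt64LowerHalfMemoryOffset and kInt64UpperHalfMemoryOffset. A self-contained sketch of that convention, assuming a little-endian layout with the two offsets at 0 and 4 (the real constants are defined per endianness):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint8_t arg_buffer[8];
  const uint64_t result = 0x0123456789abcdefULL;
  std::memcpy(arg_buffer, &result, sizeof(result));  // interpreter writes

  uint32_t lower, upper;  // the wrapper reads the two halves back
  std::memcpy(&lower, arg_buffer + 0, sizeof(lower));  // lower-half offset
  std::memcpy(&upper, arg_buffer + 4, sizeof(upper));  // upper-half offset
  assert(((uint64_t{upper} << 32) | lower) == result);
  return 0;
}
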
Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
- DCHECK(module_ && module_->instance);
+ DCHECK_NOT_NULL(module_);
+ uintptr_t mem_start = reinterpret_cast<uintptr_t>(
+ module_->instance ? module_->instance->mem_start : nullptr);
if (offset == 0) {
if (!mem_buffer_) {
mem_buffer_ = jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<uintptr_t>(module_->instance->mem_start),
- RelocInfo::WASM_MEMORY_REFERENCE);
+ mem_start, RelocInfo::WASM_MEMORY_REFERENCE);
}
return mem_buffer_;
} else {
return jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset),
- RelocInfo::WASM_MEMORY_REFERENCE);
+ mem_start + offset, RelocInfo::WASM_MEMORY_REFERENCE);
}
}
Node* WasmGraphBuilder::CurrentMemoryPages() {
- Runtime::FunctionId function_id = Runtime::kWasmMemorySize;
- const Runtime::Function* function = Runtime::FunctionForId(function_id);
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
- CallDescriptor::kNoFlags);
- wasm::ModuleEnv* module = module_;
- Node* inputs[] = {
- jsgraph()->CEntryStubConstant(function->result_size), // C entry
- jsgraph()->ExternalConstant(
- ExternalReference(function_id, jsgraph()->isolate())), // ref
- jsgraph()->Int32Constant(function->nargs), // arity
- jsgraph()->HeapConstant(module->instance->context), // context
- *effect_,
- *control_};
- Node* call = graph()->NewNode(jsgraph()->common()->Call(desc),
- static_cast<int>(arraysize(inputs)), inputs);
-
+  // CurrentMemoryPages will not be called from asm.js, so we cannot be in
+  // lazy-compilation mode and the instance will be set.
+  DCHECK_NOT_NULL(module_);
+  DCHECK_NOT_NULL(module_->instance);
+  DCHECK_EQ(wasm::kWasmOrigin, module_->module->get_origin());
+ Node* call = BuildCallToRuntime(Runtime::kWasmMemorySize, jsgraph(), nullptr,
+ 0, effect_, control_);
Node* result = BuildChangeSmiToInt32(call);
-
- *effect_ = call;
return result;
}
-Node* WasmGraphBuilder::MemSize(uint32_t offset) {
- DCHECK(module_ && module_->instance);
- uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
- if (offset == 0) {
- if (!mem_size_)
- mem_size_ = jsgraph()->RelocatableInt32Constant(
- size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- return mem_size_;
- } else {
- return jsgraph()->RelocatableInt32Constant(
- size + offset, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- }
+Node* WasmGraphBuilder::MemSize() {
+ DCHECK_NOT_NULL(module_);
+ if (mem_size_) return mem_size_;
+ uint32_t size = module_->instance ? module_->instance->mem_size : 0;
+ mem_size_ = jsgraph()->RelocatableInt32Constant(
+ size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ return mem_size_;
}
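
MemSize now memoizes its node: the relocatable constant is built on first use and reused on every later call. A minimal sketch of that memoization shape, using plain pointers rather than graph nodes and a made-up size:

#include <cassert>

struct SizeProvider {
  const int* mem_size_ = nullptr;
  const int* MemSize() {
    if (mem_size_) return mem_size_;  // already built: reuse the node
    static const int size = 65536;    // hypothetical memory size
    mem_size_ = &size;
    return mem_size_;
  }
};

int main() {
  SizeProvider p;
  assert(p.MemSize() == p.MemSize());  // the same node is returned each time
  return 0;
}
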
void WasmGraphBuilder::EnsureFunctionTableNodes() {
if (function_tables_.size() > 0) return;
- size_t tables_size = module_->instance->function_tables.size();
- DCHECK(tables_size == module_->instance->signature_tables.size());
+ size_t tables_size = module_->module->function_tables.size();
+ if (module_->instance) {
+ DCHECK_EQ(tables_size, module_->instance->function_tables.size());
+ DCHECK_EQ(tables_size, module_->instance->signature_tables.size());
+ }
for (size_t i = 0; i < tables_size; ++i) {
- auto function_handle = module_->instance->function_tables[i];
- auto signature_handle = module_->instance->signature_tables[i];
- DCHECK(!function_handle.is_null() && !signature_handle.is_null());
+ auto function_handle = (*module_->function_tables)[i];
+ auto signature_handle = (*module_->signature_tables)[i];
function_tables_.push_back(HeapConstant(function_handle));
signature_tables_.push_back(HeapConstant(signature_handle));
uint32_t table_size = module_->module->function_tables[i].min_size;
@@ -3111,10 +2921,12 @@ void WasmGraphBuilder::EnsureFunctionTableNodes() {
Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
MachineType mem_type =
wasm::WasmOpcodes::MachineTypeFor(module_->GetGlobalType(index));
+ byte* globals_start =
+ module_->instance ? module_->instance->globals_start : nullptr;
+ uintptr_t global_addr = reinterpret_cast<uintptr_t>(
+ globals_start + module_->module->globals[index].offset);
Node* addr = jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<uintptr_t>(module_->instance->globals_start +
- module_->module->globals[index].offset),
- RelocInfo::WASM_GLOBAL_REFERENCE);
+ global_addr, RelocInfo::WASM_GLOBAL_REFERENCE);
const Operator* op = jsgraph()->machine()->Load(mem_type);
Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
*control_);
@@ -3125,8 +2937,10 @@ Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
MachineType mem_type =
wasm::WasmOpcodes::MachineTypeFor(module_->GetGlobalType(index));
+ byte* globals_start =
+ module_->instance ? module_->instance->globals_start : 0;
Node* addr = jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<uintptr_t>(module_->instance->globals_start +
+ reinterpret_cast<uintptr_t>(globals_start +
module_->module->globals[index].offset),
RelocInfo::WASM_GLOBAL_REFERENCE);
const Operator* op = jsgraph()->machine()->Store(
@@ -3140,9 +2954,9 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
uint32_t offset,
wasm::WasmCodePosition position) {
- DCHECK(module_ && module_->instance);
if (FLAG_wasm_no_bounds_checks) return;
- uint32_t size = module_->instance->mem_size;
+ uint32_t size =
+ module_ && module_->instance ? module_->instance->mem_size : 0;
byte memsize = wasm::WasmOpcodes::MemSize(memtype);
size_t effective_size;
@@ -3156,8 +2970,8 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
if ((std::numeric_limits<uint32_t>::max() - memsize) + 1 < offset) {
// Always trap. Do not use TrapAlways because it does not create a valid
// graph here.
- trap_->TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0),
- 0, position);
+ TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0), 0,
+ position);
return;
}
size_t effective_offset = (offset - 1) + memsize;
@@ -3167,7 +2981,7 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
jsgraph()->RelocatableInt32Constant(
static_cast<uint32_t>(size),
RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
- trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
+ TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
    // For offset > effective size, this relies on the check above to fail;
    // the effective size can be negative and relies on unsigned wrap-around.
effective_size = size - offset - memsize + 1;
@@ -3189,7 +3003,7 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
jsgraph()->RelocatableInt32Constant(
static_cast<uint32_t>(effective_size),
RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
- trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
+ TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
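
The effective_size computed here turns the three-term condition index + offset + memsize <= size into a single unsigned comparison index < effective_size. A worked check of the arithmetic with made-up numbers:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t size = 65536, offset = 100, memsize = 4;
  const uint32_t effective_size = size - offset - memsize + 1;  // 65433
  // The largest index accepted by `index < effective_size`:
  const uint32_t index = effective_size - 1;  // 65432
  assert(index + offset + memsize == size);   // access ends exactly at size
  assert(!(index + 1 < effective_size));      // one past that is rejected
  return 0;
}
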
Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
@@ -3199,15 +3013,13 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
Node* load;
// WASM semantics throw on OOB. Introduce explicit bounds check.
- if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
+ if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
BoundsCheckMem(memtype, index, offset, position);
}
- bool aligned = static_cast<int>(alignment) >=
- ElementSizeLog2Of(memtype.representation());
- if (aligned ||
+ if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
- if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+ if (FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED) {
DCHECK(FLAG_wasm_guard_pages);
Node* position_node = jsgraph()->Int32Constant(position);
load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
@@ -3219,7 +3031,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
}
} else {
// TODO(eholk): Support unaligned loads with trap handlers.
- DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
+ DCHECK(!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED);
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
@@ -3253,21 +3065,17 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
Node* store;
// WASM semantics throw on OOB. Introduce explicit bounds check.
- if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
+ if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
BoundsCheckMem(memtype, index, offset, position);
}
- StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
-
- bool aligned = static_cast<int>(alignment) >=
- ElementSizeLog2Of(memtype.representation());
#if defined(V8_TARGET_BIG_ENDIAN)
val = BuildChangeEndianness(val, memtype);
#endif
- if (aligned ||
+ if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
- if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+ if (FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED) {
Node* position_node = jsgraph()->Int32Constant(position);
store = graph()->NewNode(
jsgraph()->machine()->ProtectedStore(memtype.representation()),
@@ -3280,7 +3088,7 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
- DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
+ DCHECK(!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED);
UnalignedStoreRepresentation rep(memtype.representation());
store =
graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
@@ -3296,8 +3104,8 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
// TODO(turbofan): fold bounds checks for constant asm.js loads.
// asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
const Operator* op = jsgraph()->machine()->CheckedLoad(type);
- Node* load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
- *control_);
+ Node* load =
+ graph()->NewNode(op, MemBuffer(0), index, MemSize(), *effect_, *control_);
*effect_ = load;
return load;
}
@@ -3308,7 +3116,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
// asm.js semantics use CheckedStore (i.e. ignore OOB writes).
const Operator* op =
jsgraph()->machine()->CheckedStore(type.representation());
- Node* store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val,
+ Node* store = graph()->NewNode(op, MemBuffer(0), index, MemSize(), val,
*effect_, *control_);
*effect_ = store;
return val;
@@ -3334,9 +3142,7 @@ void WasmGraphBuilder::Int64LoweringForTesting() {
}
void WasmGraphBuilder::SimdScalarLoweringForTesting() {
- SimdScalarLowering(jsgraph()->graph(), jsgraph()->machine(),
- jsgraph()->common(), jsgraph()->zone(), sig_)
- .LowerGraph();
+ SimdScalarLowering(jsgraph(), sig_).LowerGraph();
}
void WasmGraphBuilder::SetSourcePosition(Node* node,
@@ -3346,13 +3152,24 @@ void WasmGraphBuilder::SetSourcePosition(Node* node,
source_position_table_->SetSourcePosition(node, SourcePosition(position));
}
-Node* WasmGraphBuilder::CreateS128Value(int32_t value) {
- // TODO(gdeepti): Introduce Simd128Constant to common-operator.h and use
- // instead of creating a SIMD Value.
+Node* WasmGraphBuilder::S128Zero() {
+ has_simd_ = true;
+ return graph()->NewNode(jsgraph()->machine()->S128Zero());
+}
+
+Node* WasmGraphBuilder::S1x4Zero() {
+ has_simd_ = true;
+ return graph()->NewNode(jsgraph()->machine()->S1x4Zero());
+}
+
+Node* WasmGraphBuilder::S1x8Zero() {
+ has_simd_ = true;
+ return graph()->NewNode(jsgraph()->machine()->S1x8Zero());
+}
+
+Node* WasmGraphBuilder::S1x16Zero() {
has_simd_ = true;
- return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
- Int32Constant(value), Int32Constant(value),
- Int32Constant(value), Int32Constant(value));
+ return graph()->NewNode(jsgraph()->machine()->S1x16Zero());
}
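
The hunks that follow replace the old n-ary Create nodes, which took the same input once per lane, with single-input splat operators. A minimal sketch of the lane semantics, using plain arrays rather than SIMD nodes:

#include <array>
#include <cassert>
#include <cstdint>

// What I32x4Splat encodes with one input: the scalar copied into all lanes.
std::array<int32_t, 4> I32x4SplatDemo(int32_t x) { return {x, x, x, x}; }

int main() {
  for (int32_t lane : I32x4SplatDemo(7)) assert(lane == 7);
  return 0;
}
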
Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
@@ -3360,258 +3177,363 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
has_simd_ = true;
switch (opcode) {
case wasm::kExprF32x4Splat:
- return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
- inputs[0], inputs[0], inputs[0], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
- return graph()->NewNode(jsgraph()->machine()->Float32x4FromInt32x4(),
+ return graph()->NewNode(jsgraph()->machine()->F32x4SConvertI32x4(),
inputs[0]);
case wasm::kExprF32x4UConvertI32x4:
- return graph()->NewNode(jsgraph()->machine()->Float32x4FromUint32x4(),
+ return graph()->NewNode(jsgraph()->machine()->F32x4UConvertI32x4(),
inputs[0]);
case wasm::kExprF32x4Abs:
- return graph()->NewNode(jsgraph()->machine()->Float32x4Abs(), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->F32x4Abs(), inputs[0]);
case wasm::kExprF32x4Neg:
- return graph()->NewNode(jsgraph()->machine()->Float32x4Neg(), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->F32x4Neg(), inputs[0]);
+ case wasm::kExprF32x4Sqrt:
+ return graph()->NewNode(jsgraph()->machine()->F32x4Sqrt(), inputs[0]);
+ case wasm::kExprF32x4RecipApprox:
+ return graph()->NewNode(jsgraph()->machine()->F32x4RecipApprox(),
+ inputs[0]);
+ case wasm::kExprF32x4RecipSqrtApprox:
+ return graph()->NewNode(jsgraph()->machine()->F32x4RecipSqrtApprox(),
+ inputs[0]);
case wasm::kExprF32x4Add:
- return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->F32x4Add(), inputs[0],
inputs[1]);
case wasm::kExprF32x4Sub:
- return graph()->NewNode(jsgraph()->machine()->Float32x4Sub(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->F32x4Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Mul:
+ return graph()->NewNode(jsgraph()->machine()->F32x4Mul(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Div:
+ return graph()->NewNode(jsgraph()->machine()->F32x4Div(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Min:
+ return graph()->NewNode(jsgraph()->machine()->F32x4Min(), inputs[0],
inputs[1]);
+ case wasm::kExprF32x4Max:
+ return graph()->NewNode(jsgraph()->machine()->F32x4Max(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4RecipRefine:
+ return graph()->NewNode(jsgraph()->machine()->F32x4RecipRefine(),
+ inputs[0], inputs[1]);
+ case wasm::kExprF32x4RecipSqrtRefine:
+ return graph()->NewNode(jsgraph()->machine()->F32x4RecipSqrtRefine(),
+ inputs[0], inputs[1]);
case wasm::kExprF32x4Eq:
- return graph()->NewNode(jsgraph()->machine()->Float32x4Equal(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->F32x4Eq(), inputs[0],
inputs[1]);
case wasm::kExprF32x4Ne:
- return graph()->NewNode(jsgraph()->machine()->Float32x4NotEqual(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->F32x4Ne(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Lt:
+ return graph()->NewNode(jsgraph()->machine()->F32x4Lt(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Le:
+ return graph()->NewNode(jsgraph()->machine()->F32x4Le(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Gt:
+ return graph()->NewNode(jsgraph()->machine()->F32x4Lt(), inputs[1],
+ inputs[0]);
+ case wasm::kExprF32x4Ge:
+ return graph()->NewNode(jsgraph()->machine()->F32x4Le(), inputs[1],
+ inputs[0]);
case wasm::kExprI32x4Splat:
- return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
- inputs[0], inputs[0], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4Splat(), inputs[0]);
case wasm::kExprI32x4SConvertF32x4:
- return graph()->NewNode(jsgraph()->machine()->Int32x4FromFloat32x4(),
+ return graph()->NewNode(jsgraph()->machine()->I32x4SConvertF32x4(),
inputs[0]);
case wasm::kExprI32x4UConvertF32x4:
- return graph()->NewNode(jsgraph()->machine()->Uint32x4FromFloat32x4(),
+ return graph()->NewNode(jsgraph()->machine()->I32x4UConvertF32x4(),
+ inputs[0]);
+ case wasm::kExprI32x4SConvertI16x8Low:
+ return graph()->NewNode(jsgraph()->machine()->I32x4SConvertI16x8Low(),
+ inputs[0]);
+ case wasm::kExprI32x4SConvertI16x8High:
+ return graph()->NewNode(jsgraph()->machine()->I32x4SConvertI16x8High(),
inputs[0]);
case wasm::kExprI32x4Neg:
- return graph()->NewNode(jsgraph()->machine()->Int32x4Neg(), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4Neg(), inputs[0]);
case wasm::kExprI32x4Add:
- return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I32x4Add(), inputs[0],
inputs[1]);
case wasm::kExprI32x4Sub:
- return graph()->NewNode(jsgraph()->machine()->Int32x4Sub(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I32x4Sub(), inputs[0],
inputs[1]);
case wasm::kExprI32x4Mul:
- return graph()->NewNode(jsgraph()->machine()->Int32x4Mul(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I32x4Mul(), inputs[0],
inputs[1]);
case wasm::kExprI32x4MinS:
- return graph()->NewNode(jsgraph()->machine()->Int32x4Min(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I32x4MinS(), inputs[0],
inputs[1]);
case wasm::kExprI32x4MaxS:
- return graph()->NewNode(jsgraph()->machine()->Int32x4Max(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I32x4MaxS(), inputs[0],
inputs[1]);
case wasm::kExprI32x4Eq:
- return graph()->NewNode(jsgraph()->machine()->Int32x4Equal(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I32x4Eq(), inputs[0],
inputs[1]);
case wasm::kExprI32x4Ne:
- return graph()->NewNode(jsgraph()->machine()->Int32x4NotEqual(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4Ne(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4LtS:
- return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThan(),
- inputs[1], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4LtS(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4LeS:
- return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThanOrEqual(),
- inputs[1], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4LeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4GtS:
- return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThan(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4LtS(), inputs[1],
+ inputs[0]);
case wasm::kExprI32x4GeS:
- return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThanOrEqual(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4LeS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI32x4UConvertI16x8Low:
+ return graph()->NewNode(jsgraph()->machine()->I32x4UConvertI16x8Low(),
+ inputs[0]);
+ case wasm::kExprI32x4UConvertI16x8High:
+ return graph()->NewNode(jsgraph()->machine()->I32x4UConvertI16x8High(),
+ inputs[0]);
case wasm::kExprI32x4MinU:
- return graph()->NewNode(jsgraph()->machine()->Uint32x4Min(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I32x4MinU(), inputs[0],
inputs[1]);
case wasm::kExprI32x4MaxU:
- return graph()->NewNode(jsgraph()->machine()->Uint32x4Max(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I32x4MaxU(), inputs[0],
inputs[1]);
case wasm::kExprI32x4LtU:
- return graph()->NewNode(jsgraph()->machine()->Uint32x4GreaterThan(),
- inputs[1], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4LtU(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4LeU:
- return graph()->NewNode(
- jsgraph()->machine()->Uint32x4GreaterThanOrEqual(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4LeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4GtU:
- return graph()->NewNode(jsgraph()->machine()->Uint32x4GreaterThan(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4LtU(), inputs[1],
+ inputs[0]);
case wasm::kExprI32x4GeU:
- return graph()->NewNode(
- jsgraph()->machine()->Uint32x4GreaterThanOrEqual(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4LeU(), inputs[1],
+ inputs[0]);
case wasm::kExprI16x8Splat:
- return graph()->NewNode(jsgraph()->machine()->CreateInt16x8(), inputs[0],
- inputs[0], inputs[0], inputs[0], inputs[0],
- inputs[0], inputs[0], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8Splat(), inputs[0]);
+ case wasm::kExprI16x8SConvertI8x16Low:
+ return graph()->NewNode(jsgraph()->machine()->I16x8SConvertI8x16Low(),
+ inputs[0]);
+ case wasm::kExprI16x8SConvertI8x16High:
+ return graph()->NewNode(jsgraph()->machine()->I16x8SConvertI8x16High(),
+ inputs[0]);
case wasm::kExprI16x8Neg:
- return graph()->NewNode(jsgraph()->machine()->Int16x8Neg(), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8Neg(), inputs[0]);
+ case wasm::kExprI16x8SConvertI32x4:
+ return graph()->NewNode(jsgraph()->machine()->I16x8SConvertI32x4(),
+ inputs[0], inputs[1]);
case wasm::kExprI16x8Add:
- return graph()->NewNode(jsgraph()->machine()->Int16x8Add(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I16x8Add(), inputs[0],
inputs[1]);
case wasm::kExprI16x8AddSaturateS:
- return graph()->NewNode(jsgraph()->machine()->Int16x8AddSaturate(),
+ return graph()->NewNode(jsgraph()->machine()->I16x8AddSaturateS(),
inputs[0], inputs[1]);
case wasm::kExprI16x8Sub:
- return graph()->NewNode(jsgraph()->machine()->Int16x8Sub(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I16x8Sub(), inputs[0],
inputs[1]);
case wasm::kExprI16x8SubSaturateS:
- return graph()->NewNode(jsgraph()->machine()->Int16x8SubSaturate(),
+ return graph()->NewNode(jsgraph()->machine()->I16x8SubSaturateS(),
inputs[0], inputs[1]);
case wasm::kExprI16x8Mul:
- return graph()->NewNode(jsgraph()->machine()->Int16x8Mul(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I16x8Mul(), inputs[0],
inputs[1]);
case wasm::kExprI16x8MinS:
- return graph()->NewNode(jsgraph()->machine()->Int16x8Min(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I16x8MinS(), inputs[0],
inputs[1]);
case wasm::kExprI16x8MaxS:
- return graph()->NewNode(jsgraph()->machine()->Int16x8Max(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I16x8MaxS(), inputs[0],
inputs[1]);
case wasm::kExprI16x8Eq:
- return graph()->NewNode(jsgraph()->machine()->Int16x8Equal(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I16x8Eq(), inputs[0],
inputs[1]);
case wasm::kExprI16x8Ne:
- return graph()->NewNode(jsgraph()->machine()->Int16x8NotEqual(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8Ne(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8LtS:
- return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThan(),
- inputs[1], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8LtS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8LeS:
- return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThanOrEqual(),
- inputs[1], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8LeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8GtS:
- return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThan(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8LtS(), inputs[1],
+ inputs[0]);
case wasm::kExprI16x8GeS:
- return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThanOrEqual(),
+ return graph()->NewNode(jsgraph()->machine()->I16x8LeS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI16x8UConvertI8x16Low:
+ return graph()->NewNode(jsgraph()->machine()->I16x8UConvertI8x16Low(),
+ inputs[0]);
+ case wasm::kExprI16x8UConvertI8x16High:
+ return graph()->NewNode(jsgraph()->machine()->I16x8UConvertI8x16High(),
+ inputs[0]);
+ case wasm::kExprI16x8UConvertI32x4:
+ return graph()->NewNode(jsgraph()->machine()->I16x8UConvertI32x4(),
inputs[0], inputs[1]);
case wasm::kExprI16x8AddSaturateU:
- return graph()->NewNode(jsgraph()->machine()->Uint16x8AddSaturate(),
+ return graph()->NewNode(jsgraph()->machine()->I16x8AddSaturateU(),
inputs[0], inputs[1]);
case wasm::kExprI16x8SubSaturateU:
- return graph()->NewNode(jsgraph()->machine()->Uint16x8SubSaturate(),
+ return graph()->NewNode(jsgraph()->machine()->I16x8SubSaturateU(),
inputs[0], inputs[1]);
case wasm::kExprI16x8MinU:
- return graph()->NewNode(jsgraph()->machine()->Uint16x8Min(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I16x8MinU(), inputs[0],
inputs[1]);
case wasm::kExprI16x8MaxU:
- return graph()->NewNode(jsgraph()->machine()->Uint16x8Max(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I16x8MaxU(), inputs[0],
inputs[1]);
case wasm::kExprI16x8LtU:
- return graph()->NewNode(jsgraph()->machine()->Uint16x8GreaterThan(),
- inputs[1], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8LtU(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8LeU:
- return graph()->NewNode(
- jsgraph()->machine()->Uint16x8GreaterThanOrEqual(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8LeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8GtU:
- return graph()->NewNode(jsgraph()->machine()->Uint16x8GreaterThan(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8LtU(), inputs[1],
+ inputs[0]);
case wasm::kExprI16x8GeU:
- return graph()->NewNode(
- jsgraph()->machine()->Uint16x8GreaterThanOrEqual(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8LeU(), inputs[1],
+ inputs[0]);
case wasm::kExprI8x16Splat:
- return graph()->NewNode(jsgraph()->machine()->CreateInt8x16(), inputs[0],
- inputs[0], inputs[0], inputs[0], inputs[0],
- inputs[0], inputs[0], inputs[0], inputs[0],
- inputs[0], inputs[0], inputs[0], inputs[0],
- inputs[0], inputs[0], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16Splat(), inputs[0]);
case wasm::kExprI8x16Neg:
- return graph()->NewNode(jsgraph()->machine()->Int8x16Neg(), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16Neg(), inputs[0]);
+ case wasm::kExprI8x16SConvertI16x8:
+ return graph()->NewNode(jsgraph()->machine()->I8x16SConvertI16x8(),
+ inputs[0], inputs[1]);
case wasm::kExprI8x16Add:
- return graph()->NewNode(jsgraph()->machine()->Int8x16Add(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I8x16Add(), inputs[0],
inputs[1]);
case wasm::kExprI8x16AddSaturateS:
- return graph()->NewNode(jsgraph()->machine()->Int8x16AddSaturate(),
+ return graph()->NewNode(jsgraph()->machine()->I8x16AddSaturateS(),
inputs[0], inputs[1]);
case wasm::kExprI8x16Sub:
- return graph()->NewNode(jsgraph()->machine()->Int8x16Sub(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I8x16Sub(), inputs[0],
inputs[1]);
case wasm::kExprI8x16SubSaturateS:
- return graph()->NewNode(jsgraph()->machine()->Int8x16SubSaturate(),
+ return graph()->NewNode(jsgraph()->machine()->I8x16SubSaturateS(),
inputs[0], inputs[1]);
case wasm::kExprI8x16Mul:
- return graph()->NewNode(jsgraph()->machine()->Int8x16Mul(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I8x16Mul(), inputs[0],
inputs[1]);
case wasm::kExprI8x16MinS:
- return graph()->NewNode(jsgraph()->machine()->Int8x16Min(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I8x16MinS(), inputs[0],
inputs[1]);
case wasm::kExprI8x16MaxS:
- return graph()->NewNode(jsgraph()->machine()->Int8x16Max(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I8x16MaxS(), inputs[0],
inputs[1]);
case wasm::kExprI8x16Eq:
- return graph()->NewNode(jsgraph()->machine()->Int8x16Equal(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I8x16Eq(), inputs[0],
inputs[1]);
case wasm::kExprI8x16Ne:
- return graph()->NewNode(jsgraph()->machine()->Int8x16NotEqual(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16Ne(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16LtS:
- return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThan(),
- inputs[1], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16LtS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16LeS:
- return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThanOrEqual(),
- inputs[1], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16LeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16GtS:
- return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThan(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16LtS(), inputs[1],
+ inputs[0]);
case wasm::kExprI8x16GeS:
- return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThanOrEqual(),
+ return graph()->NewNode(jsgraph()->machine()->I8x16LeS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI8x16UConvertI16x8:
+ return graph()->NewNode(jsgraph()->machine()->I8x16UConvertI16x8(),
inputs[0], inputs[1]);
case wasm::kExprI8x16AddSaturateU:
- return graph()->NewNode(jsgraph()->machine()->Uint8x16AddSaturate(),
+ return graph()->NewNode(jsgraph()->machine()->I8x16AddSaturateU(),
inputs[0], inputs[1]);
case wasm::kExprI8x16SubSaturateU:
- return graph()->NewNode(jsgraph()->machine()->Uint8x16SubSaturate(),
+ return graph()->NewNode(jsgraph()->machine()->I8x16SubSaturateU(),
inputs[0], inputs[1]);
case wasm::kExprI8x16MinU:
- return graph()->NewNode(jsgraph()->machine()->Uint8x16Min(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I8x16MinU(), inputs[0],
inputs[1]);
case wasm::kExprI8x16MaxU:
- return graph()->NewNode(jsgraph()->machine()->Uint8x16Max(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->I8x16MaxU(), inputs[0],
inputs[1]);
case wasm::kExprI8x16LtU:
- return graph()->NewNode(jsgraph()->machine()->Uint8x16GreaterThan(),
- inputs[1], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16LtU(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16LeU:
- return graph()->NewNode(
- jsgraph()->machine()->Uint8x16GreaterThanOrEqual(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16LeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16GtU:
- return graph()->NewNode(jsgraph()->machine()->Uint8x16GreaterThan(),
- inputs[0], inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16LtU(), inputs[1],
+ inputs[0]);
case wasm::kExprI8x16GeU:
- return graph()->NewNode(
- jsgraph()->machine()->Uint8x16GreaterThanOrEqual(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16LeU(), inputs[1],
+ inputs[0]);
+ case wasm::kExprS128And:
+ return graph()->NewNode(jsgraph()->machine()->S128And(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS128Or:
+ return graph()->NewNode(jsgraph()->machine()->S128Or(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS128Xor:
+ return graph()->NewNode(jsgraph()->machine()->S128Xor(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS128Not:
+ return graph()->NewNode(jsgraph()->machine()->S128Not(), inputs[0]);
case wasm::kExprS32x4Select:
- return graph()->NewNode(jsgraph()->machine()->Simd32x4Select(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->S32x4Select(), inputs[0],
inputs[1], inputs[2]);
case wasm::kExprS16x8Select:
- return graph()->NewNode(jsgraph()->machine()->Simd16x8Select(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->S16x8Select(), inputs[0],
inputs[1], inputs[2]);
case wasm::kExprS8x16Select:
- return graph()->NewNode(jsgraph()->machine()->Simd8x16Select(), inputs[0],
+ return graph()->NewNode(jsgraph()->machine()->S8x16Select(), inputs[0],
inputs[1], inputs[2]);
- case wasm::kExprS128And:
- return graph()->NewNode(jsgraph()->machine()->Simd128And(), inputs[0],
+ case wasm::kExprS1x4And:
+ return graph()->NewNode(jsgraph()->machine()->S1x4And(), inputs[0],
inputs[1]);
- case wasm::kExprS128Or:
- return graph()->NewNode(jsgraph()->machine()->Simd128Or(), inputs[0],
+ case wasm::kExprS1x4Or:
+ return graph()->NewNode(jsgraph()->machine()->S1x4Or(), inputs[0],
inputs[1]);
- case wasm::kExprS128Xor:
- return graph()->NewNode(jsgraph()->machine()->Simd128Xor(), inputs[0],
+ case wasm::kExprS1x4Xor:
+ return graph()->NewNode(jsgraph()->machine()->S1x4Xor(), inputs[0],
inputs[1]);
- case wasm::kExprS128Not:
- return graph()->NewNode(jsgraph()->machine()->Simd128Not(), inputs[0]);
+ case wasm::kExprS1x4Not:
+ return graph()->NewNode(jsgraph()->machine()->S1x4Not(), inputs[0]);
+ case wasm::kExprS1x4AnyTrue:
+ return graph()->NewNode(jsgraph()->machine()->S1x4AnyTrue(), inputs[0]);
+ case wasm::kExprS1x4AllTrue:
+ return graph()->NewNode(jsgraph()->machine()->S1x4AllTrue(), inputs[0]);
+ case wasm::kExprS1x8And:
+ return graph()->NewNode(jsgraph()->machine()->S1x8And(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS1x8Or:
+ return graph()->NewNode(jsgraph()->machine()->S1x8Or(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS1x8Xor:
+ return graph()->NewNode(jsgraph()->machine()->S1x8Xor(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS1x8Not:
+ return graph()->NewNode(jsgraph()->machine()->S1x8Not(), inputs[0]);
+ case wasm::kExprS1x8AnyTrue:
+ return graph()->NewNode(jsgraph()->machine()->S1x8AnyTrue(), inputs[0]);
+ case wasm::kExprS1x8AllTrue:
+ return graph()->NewNode(jsgraph()->machine()->S1x8AllTrue(), inputs[0]);
+ case wasm::kExprS1x16And:
+ return graph()->NewNode(jsgraph()->machine()->S1x16And(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS1x16Or:
+ return graph()->NewNode(jsgraph()->machine()->S1x16Or(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS1x16Xor:
+ return graph()->NewNode(jsgraph()->machine()->S1x16Xor(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS1x16Not:
+ return graph()->NewNode(jsgraph()->machine()->S1x16Not(), inputs[0]);
+ case wasm::kExprS1x16AnyTrue:
+ return graph()->NewNode(jsgraph()->machine()->S1x16AnyTrue(), inputs[0]);
+ case wasm::kExprS1x16AllTrue:
+ return graph()->NewNode(jsgraph()->machine()->S1x16AllTrue(), inputs[0]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
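
Two patterns recur throughout the SimdOp hunk above: splats collapse from an N-input Create node into a single-input Splat, and the comparison set is canonicalized the other way around, with Lt/Le as the primitive machine operators and Gt/Ge built by swapping the inputs. A minimal sketch of that identity, assuming a JSGraph* in scope (BuildI32x4GtU is an illustrative name, not part of the patch):

    // Unsigned a > b  <=>  b < a, so GtU/GeU reuse the LtU/LeU operators
    // with swapped operands and each backend only implements Lt/Le.
    Node* BuildI32x4GtU(JSGraph* jsgraph, Node* a, Node* b) {
      return jsgraph->graph()->NewNode(jsgraph->machine()->I32x4LtU(), b, a);
    }
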
@@ -3622,28 +3544,28 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
has_simd_ = true;
switch (opcode) {
case wasm::kExprF32x4ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->F32x4ExtractLane(lane),
inputs[0]);
case wasm::kExprF32x4ReplaceLane:
- return graph()->NewNode(jsgraph()->machine()->Float32x4ReplaceLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->F32x4ReplaceLane(lane),
inputs[0], inputs[1]);
case wasm::kExprI32x4ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->I32x4ExtractLane(lane),
inputs[0]);
case wasm::kExprI32x4ReplaceLane:
- return graph()->NewNode(jsgraph()->machine()->Int32x4ReplaceLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->I32x4ReplaceLane(lane),
inputs[0], inputs[1]);
case wasm::kExprI16x8ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Int16x8ExtractLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->I16x8ExtractLane(lane),
inputs[0]);
case wasm::kExprI16x8ReplaceLane:
- return graph()->NewNode(jsgraph()->machine()->Int16x8ReplaceLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->I16x8ReplaceLane(lane),
inputs[0], inputs[1]);
case wasm::kExprI8x16ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Int8x16ExtractLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->I8x16ExtractLane(lane),
inputs[0]);
case wasm::kExprI8x16ReplaceLane:
- return graph()->NewNode(jsgraph()->machine()->Int8x16ReplaceLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->I8x16ReplaceLane(lane),
inputs[0], inputs[1]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
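
In SimdLaneOp above and SimdShiftOp below, the lane index and shift amount are operator parameters rather than value inputs: I32x4ExtractLane(lane) and I32x4Shl(shift) each denote a distinct operator carrying its immediate, so these quantities must be compile-time constants. A sketch, again assuming a JSGraph* in scope (ExtractFirstLane is an illustrative name):

    // The lane index travels inside the operator, not as a graph input.
    Node* ExtractFirstLane(JSGraph* jsgraph, Node* vec) {
      return jsgraph->graph()->NewNode(
          jsgraph->machine()->I32x4ExtractLane(0), vec);
    }
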
@@ -3655,32 +3577,29 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
has_simd_ = true;
switch (opcode) {
case wasm::kExprI32x4Shl:
- return graph()->NewNode(
- jsgraph()->machine()->Int32x4ShiftLeftByScalar(shift), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4Shl(shift), inputs[0]);
case wasm::kExprI32x4ShrS:
- return graph()->NewNode(
- jsgraph()->machine()->Int32x4ShiftRightByScalar(shift), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4ShrS(shift),
+ inputs[0]);
case wasm::kExprI32x4ShrU:
- return graph()->NewNode(
- jsgraph()->machine()->Uint32x4ShiftRightByScalar(shift), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4ShrU(shift),
+ inputs[0]);
case wasm::kExprI16x8Shl:
- return graph()->NewNode(
- jsgraph()->machine()->Int16x8ShiftLeftByScalar(shift), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8Shl(shift), inputs[0]);
case wasm::kExprI16x8ShrS:
- return graph()->NewNode(
- jsgraph()->machine()->Int16x8ShiftRightByScalar(shift), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8ShrS(shift),
+ inputs[0]);
case wasm::kExprI16x8ShrU:
- return graph()->NewNode(
- jsgraph()->machine()->Uint16x8ShiftRightByScalar(shift), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8ShrU(shift),
+ inputs[0]);
case wasm::kExprI8x16Shl:
- return graph()->NewNode(
- jsgraph()->machine()->Int8x16ShiftLeftByScalar(shift), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16Shl(shift), inputs[0]);
case wasm::kExprI8x16ShrS:
- return graph()->NewNode(
- jsgraph()->machine()->Int8x16ShiftRightByScalar(shift), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16ShrS(shift),
+ inputs[0]);
case wasm::kExprI8x16ShrU:
- return graph()->NewNode(
- jsgraph()->machine()->Uint8x16ShiftRightByScalar(shift), inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16ShrU(shift),
+ inputs[0]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
@@ -3691,13 +3610,13 @@ Node* WasmGraphBuilder::SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
has_simd_ = true;
switch (opcode) {
case wasm::kExprS32x4Swizzle:
- return graph()->NewNode(jsgraph()->machine()->Simd32x4Swizzle(swizzle),
+ return graph()->NewNode(jsgraph()->machine()->S32x4Swizzle(swizzle),
inputs[0]);
case wasm::kExprS16x8Swizzle:
- return graph()->NewNode(jsgraph()->machine()->Simd16x8Swizzle(swizzle),
+ return graph()->NewNode(jsgraph()->machine()->S16x8Swizzle(swizzle),
inputs[0]);
case wasm::kExprS8x16Swizzle:
- return graph()->NewNode(jsgraph()->machine()->Simd8x16Swizzle(swizzle),
+ return graph()->NewNode(jsgraph()->machine()->S8x16Swizzle(swizzle),
inputs[0]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
@@ -3897,10 +3816,7 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements());
+ MachineOperatorBuilder machine(&zone);
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
@@ -3970,44 +3886,32 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
MachineOperatorBuilder* machine = jsgraph_->machine();
SourcePositionTable* source_position_table =
new (jsgraph_->zone()) SourcePositionTable(graph);
- WasmGraphBuilder builder(&module_env_->module_env, jsgraph_->zone(), jsgraph_,
- function_->sig, source_position_table);
- const byte* module_start = module_env_->wire_bytes.start();
- wasm::FunctionBody body = {function_->sig, module_start,
- module_start + function_->code_start_offset,
- module_start + function_->code_end_offset};
+ WasmGraphBuilder builder(module_env_, jsgraph_->zone(), jsgraph_,
+ func_body_.sig, source_position_table);
graph_construction_result_ =
- wasm::BuildTFGraph(isolate_->allocator(), &builder, body);
+ wasm::BuildTFGraph(isolate_->allocator(), &builder, func_body_);
if (graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
- os << "Compilation failed: " << graph_construction_result_ << std::endl;
+ os << "Compilation failed: " << graph_construction_result_.error_msg
+ << std::endl;
}
return nullptr;
}
if (machine->Is32()) {
- Int64Lowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+ Int64Lowering(graph, machine, common, jsgraph_->zone(), func_body_.sig)
.LowerGraph();
}
- if (builder.has_simd() && !CpuFeatures::SupportsSimd128()) {
- SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
- .LowerGraph();
+ if (builder.has_simd() && !CpuFeatures::SupportsWasmSimd128()) {
+ SimdScalarLowering(jsgraph_, func_body_.sig).LowerGraph();
}
- int index = static_cast<int>(function_->func_index);
-
- if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
- OFStream os(stdout);
- PrintRawWasmCode(isolate_->allocator(), body,
- module_env_->module_env.module);
- }
- if (index >= FLAG_trace_wasm_text_start && index < FLAG_trace_wasm_text_end) {
- OFStream os(stdout);
- PrintWasmText(module_env_->module_env.module, module_env_->wire_bytes,
- function_->func_index, os, nullptr);
+ if (func_index_ >= FLAG_trace_wasm_ast_start &&
+ func_index_ < FLAG_trace_wasm_ast_end) {
+ PrintRawWasmCode(isolate_->allocator(), func_body_, module_env_->module);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -4015,55 +3919,80 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
return source_position_table;
}
-char* WasmCompilationUnit::GetTaggedFunctionName(
- const wasm::WasmFunction* function) {
- snprintf(function_name_, sizeof(function_name_), "wasm#%d",
- function->func_index);
- return function_name_;
+namespace {
+Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
+ if (!name.is_empty()) {
+ return name;
+ }
+ constexpr int kBufferLength = 15;
+
+ EmbeddedVector<char, kBufferLength> name_vector;
+ int name_len = SNPrintF(name_vector, "wasm#%d", index);
+ DCHECK(name_len > 0 && name_len < name_vector.length());
+
+ char* index_name = zone->NewArray<char>(name_len);
+ memcpy(index_name, name_vector.start(), name_len);
+ return Vector<const char>(index_name, name_len);
}
+} // namespace
-WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
- Isolate* isolate,
+WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function,
- uint32_t index)
- : thrower_(thrower),
- isolate_(isolate),
+ const wasm::WasmFunction* function)
+ : WasmCompilationUnit(
+ isolate, &module_env->module_env,
+ wasm::FunctionBody{
+ function->sig, module_env->wire_bytes.start(),
+ module_env->wire_bytes.start() + function->code_start_offset,
+ module_env->wire_bytes.start() + function->code_end_offset},
+ module_env->wire_bytes.GetNameOrNull(function),
+ function->func_index) {}
+
+WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ wasm::FunctionBody body,
+ wasm::WasmName name, int index)
+ : isolate_(isolate),
module_env_(module_env),
- function_(&module_env->module_env.module->functions[index]),
+ func_body_(body),
+ func_name_(name),
graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
jsgraph_(new (graph_zone()) JSGraph(
isolate, new (graph_zone()) Graph(graph_zone()),
new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
- nullptr, new (graph_zone()) MachineOperatorBuilder(
- graph_zone(), MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements()))),
+ nullptr,
+ new (graph_zone()) MachineOperatorBuilder(
+ graph_zone(), MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()))),
compilation_zone_(isolate->allocator(), ZONE_NAME),
- info_(function->name_length != 0
- ? module_env->wire_bytes.GetNameOrNull(function)
- : CStrVector(GetTaggedFunctionName(function)),
- isolate, &compilation_zone_,
- Code::ComputeFlags(Code::WASM_FUNCTION)),
- job_(),
- index_(index),
- ok_(true),
- protected_instructions_(&compilation_zone_) {
- // Create and cache this node in the main thread.
+ info_(GetDebugName(&compilation_zone_, name, index), isolate,
+ &compilation_zone_, Code::ComputeFlags(Code::WASM_FUNCTION)),
+ func_index_(index),
+ protected_instructions_(&compilation_zone_) {}
+
+void WasmCompilationUnit::InitializeHandles() {
+ // Create and cache this node on the main thread; it holds a handle to the
+ // code object of the C-entry stub.
jsgraph_->CEntryStubConstant(1);
+ DCHECK(!handles_initialized_);
+#if DEBUG
+ handles_initialized_ = true;
+#endif // DEBUG
}
void WasmCompilationUnit::ExecuteCompilation() {
+ DCHECK(handles_initialized_);
// TODO(ahaas): The counters are not thread-safe at the moment.
// HistogramTimerScope wasm_compile_function_time_scope(
// isolate_->counters()->wasm_compile_function_time());
if (FLAG_trace_wasm_compiler) {
- OFStream os(stdout);
- os << "Compiling WASM function "
- << wasm::WasmFunctionName(
- function_, module_env_->wire_bytes.GetNameOrNull(function_))
- << std::endl;
- os << std::endl;
+ if (func_name_.start() != nullptr) {
+ PrintF("Compiling WASM function %d:'%.*s'\n\n", func_index(),
+ func_name_.length(), func_name_.start());
+ } else {
+ PrintF("Compiling WASM function %d:<unnamed>\n\n", func_index());
+ }
}
double decode_ms = 0;
@@ -4085,14 +4014,14 @@ void WasmCompilationUnit::ExecuteCompilation() {
// Run the compiler pipeline to generate machine code.
CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
- &compilation_zone_, function_->sig);
+ &compilation_zone_, func_body_.sig);
if (jsgraph_->machine()->Is32()) {
- descriptor = module_env_->module_env.GetI32WasmCallDescriptor(
- &compilation_zone_, descriptor);
+ descriptor =
+ module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
}
job_.reset(Pipeline::NewWasmCompilationJob(
&info_, jsgraph_, descriptor, source_positions, &protected_instructions_,
- module_env_->module_env.module->origin != wasm::kWasmOrigin));
+ !module_env_->module->is_wasm()));
ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(ahaas): The counters are not thread-safe at the moment.
@@ -4105,20 +4034,25 @@ void WasmCompilationUnit::ExecuteCompilation() {
PrintF(
"wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
"%0.3f ms pipeline\n",
- function_->code_end_offset - function_->code_start_offset, decode_ms,
+ static_cast<unsigned>(func_body_.end - func_body_.start), decode_ms,
node_count, pipeline_ms);
}
}
-Handle<Code> WasmCompilationUnit::FinishCompilation() {
+Handle<Code> WasmCompilationUnit::FinishCompilation(
+ wasm::ErrorThrower* thrower) {
if (!ok_) {
if (graph_construction_result_.failed()) {
// Add the function as another context for the exception
ScopedVector<char> buffer(128);
- wasm::WasmName name = module_env_->wire_bytes.GetName(function_);
- SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
- function_->func_index, name.length(), name.start());
- thrower_->CompileFailed(buffer.start(), graph_construction_result_);
+ if (func_name_.start() != nullptr) {
+ SNPrintF(buffer,
+ "Compiling WASM function #%d:%.*s failed:", func_index_,
+ func_name_.length(), func_name_.start());
+ } else {
+ SNPrintF(buffer, "Compiling WASM function #%d failed:", func_index_);
+ }
+ thrower->CompileFailed(buffer.start(), graph_construction_result_);
}
return Handle<Code>::null();
@@ -4136,21 +4070,30 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
if (isolate_->logger()->is_logging_code_events() ||
isolate_->is_profiling()) {
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
- "WASM_function", function_->func_index,
- wasm::WasmName("module"),
- module_env_->wire_bytes.GetName(function_));
+ "WASM_function", func_index_,
+ wasm::WasmName("module"), func_name_);
}
if (FLAG_trace_wasm_decode_time) {
double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
- function_->code_end_offset - function_->code_start_offset,
+ static_cast<unsigned>(func_body_.end - func_body_.start),
codegen_ms);
}
return code;
}
+// static
+Handle<Code> WasmCompilationUnit::CompileWasmFunction(
+ wasm::ErrorThrower* thrower, Isolate* isolate,
+ wasm::ModuleBytesEnv* module_env, const wasm::WasmFunction* function) {
+ WasmCompilationUnit unit(isolate, module_env, function);
+ unit.InitializeHandles();
+ unit.ExecuteCompilation();
+ return unit.FinishCompilation(thrower);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
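
The refactored WasmCompilationUnit splits compilation into three phases so that only handle creation and finalization must run on the main thread, while ExecuteCompilation may run on a background thread. A usage sketch mirroring the static CompileWasmFunction above (CompileOne is an illustrative name):

    Handle<Code> CompileOne(wasm::ErrorThrower* thrower, Isolate* isolate,
                            wasm::ModuleBytesEnv* env,
                            const wasm::WasmFunction* fn) {
      compiler::WasmCompilationUnit unit(isolate, env, fn);
      unit.InitializeHandles();   // main thread: caches the CEntry stub
      unit.ExecuteCompilation();  // safe off the main thread afterwards
      return unit.FinishCompilation(thrower);  // main thread: reports errors
    }
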
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 706c386f5e..128bfbde00 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -12,6 +12,7 @@
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -46,45 +47,42 @@ typedef compiler::JSGraph TFGraph;
namespace compiler {
class WasmCompilationUnit final {
public:
- WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
- wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function, uint32_t index);
+ WasmCompilationUnit(Isolate* isolate, wasm::ModuleBytesEnv* module_env,
+ const wasm::WasmFunction* function);
+ WasmCompilationUnit(Isolate* isolate, wasm::ModuleEnv* module_env,
+ wasm::FunctionBody body, wasm::WasmName name, int index);
Zone* graph_zone() { return graph_zone_.get(); }
- int index() const { return index_; }
+ int func_index() const { return func_index_; }
+ void InitializeHandles();
void ExecuteCompilation();
- Handle<Code> FinishCompilation();
+ Handle<Code> FinishCompilation(wasm::ErrorThrower* thrower);
static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
Isolate* isolate,
wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function) {
- WasmCompilationUnit unit(thrower, isolate, module_env, function,
- function->func_index);
- unit.ExecuteCompilation();
- return unit.FinishCompilation();
- }
+ const wasm::WasmFunction* function);
private:
SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
- char* GetTaggedFunctionName(const wasm::WasmFunction* function);
- wasm::ErrorThrower* thrower_;
Isolate* isolate_;
- wasm::ModuleBytesEnv* module_env_;
- const wasm::WasmFunction* function_;
- // Function name is tagged with uint32 func_index - wasm#<func_index>
- char function_name_[16];
+ wasm::ModuleEnv* module_env_;
+ wasm::FunctionBody func_body_;
+ wasm::WasmName func_name_;
// The graph zone is deallocated at the end of ExecuteCompilation.
std::unique_ptr<Zone> graph_zone_;
JSGraph* jsgraph_;
Zone compilation_zone_;
CompilationInfo info_;
std::unique_ptr<CompilationJob> job_;
- uint32_t index_;
+ int func_index_;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
- bool ok_;
+ bool ok_ = true;
+#if DEBUG
+ bool handles_initialized_ = false;
+#endif // DEBUG
ZoneVector<trap_handler::ProtectedInstructionData>
protected_instructions_; // Instructions that are protected by the signal
// handler.
@@ -171,6 +169,19 @@ class WasmGraphBuilder {
Node* BranchExpectTrue(Node* cond, Node** true_node, Node** false_node);
Node* BranchExpectFalse(Node* cond, Node** true_node, Node** false_node);
+ Node* TrapIfTrue(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position);
+ Node* TrapIfFalse(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position);
+ Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val,
+ wasm::WasmCodePosition position);
+ Node* ZeroCheck32(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position);
+ Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val,
+ wasm::WasmCodePosition position);
+ Node* ZeroCheck64(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position);
+
Node* Switch(unsigned count, Node* key);
Node* IfValue(int32_t value, Node* sw);
Node* IfDefault(Node* sw);
@@ -228,7 +239,10 @@ class WasmGraphBuilder {
void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
- Node* CreateS128Value(int32_t value);
+ Node* S128Zero();
+ Node* S1x4Zero();
+ Node* S1x8Zero();
+ Node* S1x16Zero();
Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
@@ -264,7 +278,6 @@ class WasmGraphBuilder {
Node* def_buffer_[kDefaultBufferSize];
bool has_simd_ = false;
- WasmTrapHelper* trap_;
wasm::FunctionSig* sig_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
@@ -275,7 +288,7 @@ class WasmGraphBuilder {
Graph* graph();
Node* String(const char* string);
- Node* MemSize(uint32_t offset);
+ Node* MemSize();
Node* MemBuffer(uint32_t offset);
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
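
The header hunk above also shows the trap helpers (TrapIfTrue, ZeroCheck32, and friends) moving from the removed WasmTrapHelper into WasmGraphBuilder itself. A sketch of how a caller might use one, assuming the helpers are reachable from the call site (CheckedDivisor is an illustrative name, not part of the patch):

    // Traps with kTrapDivByZero at 'position' when 'divisor' is zero.
    Node* CheckedDivisor(WasmGraphBuilder* builder, Node* divisor,
                         wasm::WasmCodePosition position) {
      builder->ZeroCheck32(wasm::kTrapDivByZero, divisor, position);
      return divisor;
    }
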
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index 01c1b860df..c739be5399 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler.h"
+#include "src/assembler-inl.h"
#include "src/base/lazy-instance.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index ae33e8c4b7..3215ec24f7 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -270,13 +270,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
class WasmOutOfLineTrap final : public OutOfLineCode {
public:
WasmOutOfLineTrap(CodeGenerator* gen, int pc, bool frame_elided,
- int32_t position, Instruction* instr)
+ int32_t position)
: OutOfLineCode(gen),
gen_(gen),
pc_(pc),
frame_elided_(frame_elided),
- position_(position),
- instr_(instr) {}
+ position_(position) {}
// TODO(eholk): Refactor this method to take the code generator as a
// parameter.
@@ -290,14 +289,17 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
wasm::TrapReason trap_id = wasm::kTrapMemOutOfBounds;
int trap_reason = wasm::WasmOpcodes::TrapReasonToMessageId(trap_id);
__ Push(Smi::FromInt(trap_reason));
+ // TODO(eholk): use AssembleSourcePosition instead of passing in position_
+ // as a parameter. See AssembleArchTrap as an example. Consider sharing code
+ // with AssembleArchTrap.
__ Push(Smi::FromInt(position_));
- __ Move(rsi, gen_->isolate()->native_context());
+ __ Move(rsi, Smi::kZero);
__ CallRuntime(Runtime::kThrowWasmError);
- if (instr_->reference_map() != nullptr) {
- gen_->RecordSafepoint(instr_->reference_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- }
+ ReferenceMap* reference_map =
+ new (gen_->code()->zone()) ReferenceMap(gen_->code()->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
}
private:
@@ -305,18 +307,17 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
int pc_;
bool frame_elided_;
int32_t position_;
- Instruction* instr_;
};
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, size_t input_count,
- X64OperandConverter& i, int pc, Instruction* instr) {
+ X64OperandConverter& i, int pc) {
const X64MemoryProtection protection =
static_cast<X64MemoryProtection>(MiscField::decode(opcode));
if (protection == X64MemoryProtection::kProtected) {
const bool frame_elided = !codegen->frame_access_state()->has_frame();
const int32_t position = i.InputInt32(input_count - 1);
- new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, position, instr);
+ new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, position);
}
}
} // namespace
@@ -331,22 +332,27 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (0)
-
-#define ASSEMBLE_BINOP(asm_instr) \
- do { \
- if (HasImmediateInput(instr, 1)) { \
- if (instr->InputAt(0)->IsRegister()) { \
- __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
- } else { \
- __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
- } \
- } else { \
- if (instr->InputAt(1)->IsRegister()) { \
- __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
- } else { \
- __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
- } \
- } \
+#define ASSEMBLE_BINOP(asm_instr) \
+ do { \
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+ size_t index = 1; \
+ Operand right = i.MemoryOperand(&index); \
+ __ asm_instr(i.InputRegister(0), right); \
+ } else { \
+ if (HasImmediateInput(instr, 1)) { \
+ if (instr->InputAt(0)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } else { \
+ __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
+ } \
+ } else { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } else { \
+ __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
+ } \
+ } \
+ } \
} while (0)
#define ASSEMBLE_COMPARE(asm_instr) \
@@ -704,6 +710,18 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
1); \
} while (false)
+#define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
+ do { \
+ Label binop; \
+ __ bind(&binop); \
+ __ mov_inst(rax, i.MemoryOperand(1)); \
+ __ movl(i.TempRegister(0), rax); \
+ __ bin_inst(i.TempRegister(0), i.InputRegister(0)); \
+ __ lock(); \
+ __ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0)); \
+ __ j(not_equal, &binop); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
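
ASSEMBLE_ATOMIC_BINOP above lowers the new atomic read-modify-write ops to a load / modify / lock-cmpxchg retry loop, leaving the pre-operation value in rax for the kAtomic##op cases further down to sign- or zero-extend. A C++ model of the loop's semantics (illustrative, not the emitted code):

    #include <atomic>

    int32_t AtomicFetchOp(std::atomic<int32_t>* addr, int32_t value,
                          int32_t (*op)(int32_t, int32_t)) {
      int32_t old = addr->load();                           // mov_inst to rax
      int32_t desired = op(old, value);                     // bin_inst on temp
      while (!addr->compare_exchange_weak(old, desired)) {  // lock cmpxchg
        desired = op(old, value);  // 'old' was reloaded; retry (j not_equal)
      }
      return old;  // pre-operation value, as the macro leaves it in rax
    }
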
@@ -1854,30 +1872,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kX64Movsxbl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxbl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
break;
case kX64Movzxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movb: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1889,30 +1907,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64Movsxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxwq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
ASSEMBLE_MOVX(movsxwq);
break;
case kX64Movzxwq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movw: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1924,7 +1942,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64Movl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
if (instr->HasOutput()) {
if (instr->addressing_mode() == kMode_None) {
if (instr->InputAt(0)->IsRegister()) {
@@ -1948,12 +1966,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kX64Movsxlq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
break;
case kX64Movq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
} else {
@@ -1968,7 +1986,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -1979,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kX64Movsd:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset(), instr);
+ __ pc_offset());
if (instr->HasOutput()) {
__ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -2136,37 +2154,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64Xchgb: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchgb(i.InputRegister(index), operand);
- break;
- }
- case kX64Xchgw: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchgw(i.InputRegister(index), operand);
- break;
- }
- case kX64Xchgl: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchgl(i.InputRegister(index), operand);
- break;
- }
- case kX64Int32x4Create: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
- __ Movd(dst, i.InputRegister(0));
- __ shufps(dst, dst, 0x0);
+ __ movd(dst, i.InputRegister(0));
+ __ pshufd(dst, dst, 0x0);
break;
}
- case kX64Int32x4ExtractLane: {
+ case kX64I32x4ExtractLane: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
- case kX64Int32x4ReplaceLane: {
+ case kX64I32x4ReplaceLane: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
if (instr->InputAt(2)->IsRegister()) {
__ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
@@ -2176,16 +2175,75 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64Int32x4Add: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ case kX64I32x4Shl: {
+ __ pslld(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I32x4ShrS: {
+ __ psrad(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I32x4Add: {
__ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
- case kX64Int32x4Sub: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ case kX64I32x4Sub: {
__ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I32x4Mul: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I32x4MinS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I32x4MaxS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I32x4Eq: {
+ __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I32x4Ne: {
+ __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kX64I32x4ShrU: {
+ __ psrld(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I32x4MinU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I32x4MaxU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128Zero: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ xorps(dst, dst);
+ break;
+ }
+ case kX64S32x4Select: {
+ // Mask used here is stored in dst.
+ XMMRegister dst = i.OutputSimd128Register();
+ __ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ xorps(kScratchDoubleReg, i.InputSimd128Register(2));
+ __ andps(dst, kScratchDoubleReg);
+ __ xorps(dst, i.InputSimd128Register(2));
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
@@ -2231,6 +2289,85 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
break;
+ case kAtomicExchangeInt8: {
+ __ xchgb(i.InputRegister(0), i.MemoryOperand(1));
+ __ movsxbl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kAtomicExchangeUint8: {
+ __ xchgb(i.InputRegister(0), i.MemoryOperand(1));
+ __ movzxbl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kAtomicExchangeInt16: {
+ __ xchgw(i.InputRegister(0), i.MemoryOperand(1));
+ __ movsxwl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kAtomicExchangeUint16: {
+ __ xchgw(i.InputRegister(0), i.MemoryOperand(1));
+ __ movzxwl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kAtomicExchangeWord32: {
+ __ xchgl(i.InputRegister(0), i.MemoryOperand(1));
+ break;
+ }
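      // Note: on x86, xchg with a memory operand is implicitly locked, which
      // is why the exchange cases above carry no explicit 'lock' prefix,
      // unlike the cmpxchg-based compare-exchange cases below.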
+ case kAtomicCompareExchangeInt8: {
+ __ lock();
+ __ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
+ __ movsxbl(rax, rax);
+ break;
+ }
+ case kAtomicCompareExchangeUint8: {
+ __ lock();
+ __ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzxbl(rax, rax);
+ break;
+ }
+ case kAtomicCompareExchangeInt16: {
+ __ lock();
+ __ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
+ __ movsxwl(rax, rax);
+ break;
+ }
+ case kAtomicCompareExchangeUint16: {
+ __ lock();
+ __ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzxwl(rax, rax);
+ break;
+ }
+ case kAtomicCompareExchangeWord32: {
+ __ lock();
+ __ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
+ break;
+ }
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kAtomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
+ __ movsxbl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
+ __ movzxbl(rax, rax); \
+ break; \
+ case kAtomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
+ __ movsxwl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
+ __ movzxwl(rax, rax); \
+ break; \
+ case kAtomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, movl, cmpxchgl); \
+ break;
+ ATOMIC_BINOP_CASE(Add, addl)
+ ATOMIC_BINOP_CASE(Sub, subl)
+ ATOMIC_BINOP_CASE(And, andl)
+ ATOMIC_BINOP_CASE(Or, orl)
+ ATOMIC_BINOP_CASE(Xor, xorl)
+#undef ATOMIC_BINOP_CASE
case kAtomicLoadInt8:
case kAtomicLoadUint8:
case kAtomicLoadInt16:
@@ -2437,7 +2574,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ }
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2605,6 +2744,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
}
+void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
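
The kX64S32x4Select sequence above implements a branch-free bitwise select: with the mask already in dst, it computes dst = (mask & (a ^ b)) ^ b, which picks each bit from a where the mask bit is 1 and from b where it is 0. A scalar model of the per-lane operation (BitwiseSelect is an illustrative name):

    uint32_t BitwiseSelect(uint32_t mask, uint32_t a, uint32_t b) {
      return (mask & (a ^ b)) ^ b;  // mask bit 1 -> a's bit, 0 -> b's bit
    }
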
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index aad172788e..0133f80d4b 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -143,14 +143,23 @@ namespace compiler {
V(X64Push) \
V(X64Poke) \
V(X64StackCheck) \
- V(X64Xchgb) \
- V(X64Xchgw) \
- V(X64Xchgl) \
- V(X64Int32x4Create) \
- V(X64Int32x4ExtractLane) \
- V(X64Int32x4ReplaceLane) \
- V(X64Int32x4Add) \
- V(X64Int32x4Sub)
+ V(X64I32x4Splat) \
+ V(X64I32x4ExtractLane) \
+ V(X64I32x4ReplaceLane) \
+ V(X64I32x4Shl) \
+ V(X64I32x4ShrS) \
+ V(X64I32x4Add) \
+ V(X64I32x4Sub) \
+ V(X64I32x4Mul) \
+ V(X64I32x4MinS) \
+ V(X64I32x4MaxS) \
+ V(X64I32x4Eq) \
+ V(X64I32x4Ne) \
+ V(X64I32x4ShrU) \
+ V(X64I32x4MinU) \
+ V(X64I32x4MaxU) \
+ V(X64S32x4Select) \
+ V(X64S128Zero)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index 427e58083f..b66d853aba 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -123,11 +123,23 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
- case kX64Int32x4Create:
- case kX64Int32x4ExtractLane:
- case kX64Int32x4ReplaceLane:
- case kX64Int32x4Add:
- case kX64Int32x4Sub:
+ case kX64I32x4Splat:
+ case kX64I32x4ExtractLane:
+ case kX64I32x4ReplaceLane:
+ case kX64I32x4Shl:
+ case kX64I32x4ShrS:
+ case kX64I32x4Add:
+ case kX64I32x4Sub:
+ case kX64I32x4Mul:
+ case kX64I32x4MinS:
+ case kX64I32x4MaxS:
+ case kX64I32x4Eq:
+ case kX64I32x4Ne:
+ case kX64I32x4ShrU:
+ case kX64I32x4MinU:
+ case kX64I32x4MaxU:
+ case kX64S128Zero:
+ case kX64S32x4Select:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -178,11 +190,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Poke:
return kHasSideEffect;
- case kX64Xchgb:
- case kX64Xchgw:
- case kX64Xchgl:
- return kIsLoadOperation | kHasSideEffect;
-
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 7abdd9096c..89dc956318 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -58,10 +58,20 @@ class X64OperandGenerator final : public OperandGenerator {
MachineRepresentation rep =
LoadRepresentationOf(input->op()).representation();
switch (opcode) {
+ case kX64And:
+ case kX64Or:
+ case kX64Xor:
+ case kX64Add:
+ case kX64Sub:
case kX64Push:
case kX64Cmp:
case kX64Test:
return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
+ case kX64And32:
+ case kX64Or32:
+ case kX64Xor32:
+ case kX64Add32:
+ case kX64Sub32:
case kX64Cmp32:
case kX64Test32:
return rep == MachineRepresentation::kWord32;
@@ -507,7 +517,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- InstructionOperand inputs[4];
+ InstructionOperand inputs[6];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
@@ -528,12 +538,26 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegister(left);
inputs[input_count++] = g.UseImmediate(right);
} else {
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
if (node->op()->HasProperty(Operator::kCommutative) &&
- g.CanBeBetterLeftOperand(right)) {
+ g.CanBeBetterLeftOperand(right) &&
+ (!g.CanBeBetterLeftOperand(left) ||
+ !g.CanBeMemoryOperand(opcode, node, right, effect_level))) {
std::swap(left, right);
}
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.Use(right);
+ if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
+ inputs[input_count++] = g.UseRegister(left);
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ } else {
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.Use(right);
+ }
}
if (cont->IsBranch()) {
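  // The CanBeMemoryOperand path added above lets a binop consume a load
  // directly as its right operand (e.g. addl rax, [rbx+0x8]) instead of
  // materializing it in a register first. The effect-level comparison guards
  // the fold: it is only safe when no store or other side effect can have
  // intervened between the load and the binop, and for branch fusion the
  // branch's control input supplies the relevant effect level.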
@@ -1308,6 +1332,7 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1)) \
V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
+ V(ChangeFloat64ToUint64, kSSEFloat64ToUint64) \
V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
@@ -2246,13 +2271,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kX64Xchgb;
+ opcode = kAtomicExchangeInt8;
break;
case MachineRepresentation::kWord16:
- opcode = kX64Xchgw;
+ opcode = kAtomicExchangeInt16;
break;
case MachineRepresentation::kWord32:
- opcode = kX64Xchgl;
+ opcode = kAtomicExchangeWord32;
break;
default:
UNREACHABLE();
@@ -2261,6 +2286,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(value);
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
@@ -2269,41 +2295,243 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
- inputs[input_count++] = g.UseUniqueRegister(value);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
-void InstructionSelector::VisitCreateInt32x4(Node* node) {
+void InstructionSelector::VisitAtomicExchange(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64Int32x4Create, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[1];
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ outputs[0] = g.DefineSameAsFirst(node);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
+void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
X64OperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[1];
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseFixed(old_value, rax);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ outputs[0] = g.DefineAsFixed(node, rax);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+void InstructionSelector::VisitAtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
X64OperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kX64Int32x4ReplaceLane, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
- g.Use(node->InputAt(1)));
-}
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
-void InstructionSelector::VisitInt32x4Add(Node* node) {
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[1];
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ outputs[0] = g.DefineAsFixed(node, rax);
+ InstructionOperand temp[1];
+ temp[0] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 1, temp);
+}
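
x64 has no single fetch-and-op instruction for and/or/xor, so the code generator presumably lowers these binops to a cmpxchg retry loop; that would explain the one temp register requested here (holding the updated value) next to the rax-fixed output (holding the observed value). A sketch of the loop's semantics, assuming that lowering:

    #include <atomic>
    #include <cstdint>

    uint32_t AtomicFetchOrWord32(std::atomic<uint32_t>* addr, uint32_t value) {
      uint32_t observed = addr->load();  // lives in rax
      uint32_t updated;                  // lives in the temp register
      do {
        updated = observed | value;
        // lock cmpxchg succeeds only if memory still holds `observed`;
        // otherwise `observed` is refreshed and the loop retries.
      } while (!addr->compare_exchange_strong(observed, updated));
      return observed;  // the node's result
    }
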
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitAtomic##op(Node* node) { \
+ VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
+ kAtomic##op##Int16, kAtomic##op##Uint16, \
+ kAtomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
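
The macro stamps out one thin visitor per operation; VISIT_ATOMIC_BINOP(Add), for example, expands (modulo whitespace) to:

    void InstructionSelector::VisitAtomicAdd(Node* node) {
      VisitAtomicBinaryOperation(node, kAtomicAddInt8, kAtomicAddUint8,
                                 kAtomicAddInt16, kAtomicAddUint16,
                                 kAtomicAddWord32);
    }
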
+
+#define SIMD_TYPES(V) V(I32x4)
+
+#define SIMD_ZERO_OP_LIST(V) \
+ V(S128Zero) \
+ V(S1x4Zero) \
+ V(S1x8Zero) \
+ V(S1x16Zero)
+
+#define SIMD_SHIFT_OPCODES(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4MinU) \
+ V(I32x4MaxU)
+
+#define VISIT_SIMD_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Type##Splat, g.DefineAsRegister(node), \
+ g.Use(node->InputAt(0))); \
+ }
+SIMD_TYPES(VISIT_SIMD_SPLAT)
+#undef VISIT_SIMD_SPLAT
+
+#define VISIT_SIMD_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ X64OperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node); \
+ Emit(kX64##Type##ExtractLane, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
+ }
+SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
+#undef VISIT_SIMD_EXTRACT_LANE
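
With SIMD_TYPES currently listing only I32x4, this macro regenerates the hand-written visitor deleted above under its renamed I32x4 form:

    void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
      X64OperandGenerator g(this);
      int32_t lane = OpParameter<int32_t>(node);  // lane index on the operator
      Emit(kX64I32x4ExtractLane, g.DefineAsRegister(node),
           g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
    }
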
+
+#define VISIT_SIMD_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ X64OperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node); \
+ Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
+ g.Use(node->InputAt(1))); \
+ }
+SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+#undef VISIT_SIMD_REPLACE_LANE
+
+#define SIMD_VISIT_ZERO_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
+ }
+SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
+#undef SIMD_VISIT_ZERO_OP
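
All four zero ops funnel into the single kX64S128Zero opcode, since a zeroed 128-bit register serves the boolean-vector widths (S1x4/S1x8/S1x16) just as well; the code generator presumably materializes it with the usual xor-register-with-itself idiom, which would also account for the dst, dst operand shape. For S128Zero the macro expands to:

    void InstructionSelector::VisitS128Zero(Node* node) {
      X64OperandGenerator g(this);
      Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
    }
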
+
+#define VISIT_SIMD_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ int32_t value = OpParameter<int32_t>(node); \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \
+ }
+SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
+#undef VISIT_SIMD_SHIFT
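
The shift visitors read the shift count from the operator itself via OpParameter<int32_t>, not from a second value input, so the count is always emitted as an immediate. For I32x4Shl the macro expands to:

    void InstructionSelector::VisitI32x4Shl(Node* node) {
      X64OperandGenerator g(this);
      int32_t value = OpParameter<int32_t>(node);  // static shift count
      Emit(kX64I32x4Shl, g.DefineSameAsFirst(node),
           g.UseRegister(node->InputAt(0)), g.UseImmediate(value));
    }
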
+
+#define VISIT_SIMD_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
+ }
+SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
+#undef VISIT_SIMD_BINOP
+
+void InstructionSelector::VisitS32x4Select(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64Int32x4Add, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ Emit(kX64S32x4Select, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
}
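
S32x4Select takes three register inputs (mask, if_true, if_false) and aliases its output to the first via DefineSameAsFirst, as SSE-style blends overwrite one of their operands. Lane-wise, the operation behaves roughly like this scalar sketch (illustrative, not V8 code):

    #include <cstdint>

    // result[i] = mask[i] ? if_true[i] : if_false[i], assuming each mask
    // lane is all-ones or all-zeros (a boolean vector).
    void S32x4Select(const uint32_t mask[4], const uint32_t if_true[4],
                     const uint32_t if_false[4], uint32_t result[4]) {
      for (int i = 0; i < 4; ++i) {
        result[i] = (if_true[i] & mask[i]) | (if_false[i] & ~mask[i]);
      }
    }
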
-void InstructionSelector::VisitInt32x4Sub(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64Int32x4Sub, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
}
// static
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index fc5992a9c1..32f1019cd2 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -2540,6 +2540,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
}
+void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index ede0d45ce0..c11ac287d0 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -1833,6 +1833,14 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {