path: root/deps/v8/src/codegen
author     Michaël Zasso <targos@protonmail.com>  2020-10-15 20:17:08 +0200
committer  Michaël Zasso <targos@protonmail.com>  2020-10-18 20:16:47 +0200
commit     a1d639ba5de4ff34e34fb575fbb6cc1d41ec3cce (patch)
tree       abc7d41c12f1495b1208fa4449cb2508c92c5e85 /deps/v8/src/codegen
parent     089d654dd85f8e548597329f60a41d6029260caa (diff)
download   node-new-a1d639ba5de4ff34e34fb575fbb6cc1d41ec3cce.tar.gz
deps: update V8 to 8.6.395
PR-URL: https://github.com/nodejs/node/pull/35415
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Daniel Bevenius <daniel.bevenius@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/codegen')
-rw-r--r--  deps/v8/src/codegen/DEPS | 4
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.cc | 90
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.h | 13
-rw-r--r--  deps/v8/src/codegen/arm/interface-descriptors-arm.cc | 34
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.cc | 2
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.cc | 72
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.h | 19
-rw-r--r--  deps/v8/src/codegen/arm64/constants-arm64.h | 17
-rw-r--r--  deps/v8/src/codegen/arm64/cpu-arm64.cc | 6
-rw-r--r--  deps/v8/src/codegen/arm64/decoder-arm64-inl.h | 1
-rw-r--r--  deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc | 34
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h | 28
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 25
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 26
-rw-r--r--  deps/v8/src/codegen/arm64/register-arm64.h | 10
-rw-r--r--  deps/v8/src/codegen/assembler.cc | 2
-rw-r--r--  deps/v8/src/codegen/assembler.h | 19
-rw-r--r--  deps/v8/src/codegen/code-comments.h | 2
-rw-r--r--  deps/v8/src/codegen/code-factory.cc | 17
-rw-r--r--  deps/v8/src/codegen/code-factory.h | 1
-rw-r--r--  deps/v8/src/codegen/code-reference.h | 2
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.cc | 1662
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.h | 938
-rw-r--r--  deps/v8/src/codegen/compilation-cache.cc | 104
-rw-r--r--  deps/v8/src/codegen/compilation-cache.h | 68
-rw-r--r--  deps/v8/src/codegen/compiler.cc | 890
-rw-r--r--  deps/v8/src/codegen/compiler.h | 160
-rw-r--r--  deps/v8/src/codegen/cpu-features.h | 5
-rw-r--r--  deps/v8/src/codegen/external-reference.cc | 63
-rw-r--r--  deps/v8/src/codegen/external-reference.h | 291
-rw-r--r--  deps/v8/src/codegen/handler-table.cc | 9
-rw-r--r--  deps/v8/src/codegen/handler-table.h | 17
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.cc | 56
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.h | 14
-rw-r--r--  deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc | 35
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 26
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.h | 35
-rw-r--r--  deps/v8/src/codegen/ia32/sse-instr.h | 1
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.cc | 43
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.h | 447
-rw-r--r--  deps/v8/src/codegen/machine-type.h | 40
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.cc | 16
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.h | 36
-rw-r--r--  deps/v8/src/codegen/mips/interface-descriptors-mips.cc | 51
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 4
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.cc | 21
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.h | 36
-rw-r--r--  deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc | 51
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 3
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.cc | 144
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.h | 269
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.cc | 24
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.h | 91
-rw-r--r--  deps/v8/src/codegen/ppc/constants-ppc.h | 382
-rw-r--r--  deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc | 34
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 86
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 9
-rw-r--r--  deps/v8/src/codegen/register.h | 10
-rw-r--r--  deps/v8/src/codegen/reloc-info.cc | 6
-rw-r--r--  deps/v8/src/codegen/reloc-info.h | 16
-rw-r--r--  deps/v8/src/codegen/s390/constants-s390.h | 11
-rw-r--r--  deps/v8/src/codegen/s390/interface-descriptors-s390.cc | 34
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc | 6
-rw-r--r--  deps/v8/src/codegen/safepoint-table.cc | 31
-rw-r--r--  deps/v8/src/codegen/safepoint-table.h | 50
-rw-r--r--  deps/v8/src/codegen/signature.h | 2
-rw-r--r--  deps/v8/src/codegen/source-position-table.cc | 22
-rw-r--r--  deps/v8/src/codegen/source-position-table.h | 6
-rw-r--r--  deps/v8/src/codegen/tick-counter.cc | 13
-rw-r--r--  deps/v8/src/codegen/tick-counter.h | 15
-rw-r--r--  deps/v8/src/codegen/tnode.h | 9
-rw-r--r--  deps/v8/src/codegen/unoptimized-compilation-info.h | 1
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.cc | 217
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.h | 114
-rw-r--r--  deps/v8/src/codegen/x64/interface-descriptors-x64.cc | 54
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.cc | 10
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.h | 10
-rw-r--r--  deps/v8/src/codegen/x64/sse-instr.h | 1
78 files changed, 3979 insertions, 3244 deletions
diff --git a/deps/v8/src/codegen/DEPS b/deps/v8/src/codegen/DEPS
index ca53b61541..67e29bc97a 100644
--- a/deps/v8/src/codegen/DEPS
+++ b/deps/v8/src/codegen/DEPS
@@ -4,8 +4,10 @@
specific_include_rules = {
"external-reference.cc": [
- # Required to call IrregexpInterpreter::NativeMatch from builtin.
+ # Required to call into IrregexpInterpreter and RegexpExperimental from
+ # builtin.
"+src/regexp/regexp-interpreter.h",
+ "+src/regexp/experimental/experimental.h",
"+src/regexp/regexp-macro-assembler-arch.h",
],
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 9032714f57..00d0644f73 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -2621,16 +2621,28 @@ static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) {
*hi = i >> 32;
}
+static void WriteVmovIntImmEncoding(uint8_t imm, uint32_t* encoding) {
+ // Integer promotion from uint8_t to int makes these all okay.
+ *encoding = ((imm & 0x80) << (24 - 7)); // a
+ *encoding |= ((imm & 0x70) << (16 - 4)); // bcd
+ *encoding |= (imm & 0x0f); // efgh
+}
+
// This checks if imm can be encoded into an immediate for vmov.
// See Table A7-15 in ARM DDI 0406C.d.
-// Currently only supports the first row of the table.
-static bool FitsVmovImm64(uint64_t imm, uint32_t* encoding) {
+// Currently only supports the first row and op=0 && cmode=1110.
+static bool FitsVmovIntImm(uint64_t imm, uint32_t* encoding, uint8_t* cmode) {
uint32_t lo = imm & 0xFFFFFFFF;
uint32_t hi = imm >> 32;
- if (lo == hi && ((lo & 0xffffff00) == 0)) {
- *encoding = ((lo & 0x80) << (24 - 7)); // a
- *encoding |= ((lo & 0x70) << (16 - 4)); // bcd
- *encoding |= (lo & 0x0f); // efgh
+ if ((lo == hi && ((lo & 0xffffff00) == 0))) {
+ WriteVmovIntImmEncoding(imm & 0xff, encoding);
+ *cmode = 0;
+ return true;
+ } else if ((lo == hi) && ((lo & 0xffff) == (lo >> 16)) &&
+ ((lo & 0xff) == (lo >> 24))) {
+ // Check that all bytes in imm are the same.
+ WriteVmovIntImmEncoding(imm & 0xff, encoding);
+ *cmode = 0xe;
return true;
}
@@ -2639,15 +2651,17 @@ static bool FitsVmovImm64(uint64_t imm, uint32_t* encoding) {
void Assembler::vmov(const QwNeonRegister dst, uint64_t imm) {
uint32_t enc;
- if (CpuFeatures::IsSupported(VFPv3) && FitsVmovImm64(imm, &enc)) {
+ uint8_t cmode;
+ uint8_t op = 0;
+ if (CpuFeatures::IsSupported(VFPv3) && FitsVmovIntImm(imm, &enc, &cmode)) {
CpuFeatureScope scope(this, VFPv3);
// Instruction details available in ARM DDI 0406C.b, A8-937.
// 001i1(27-23) | D(22) | 000(21-19) | imm3(18-16) | Vd(15-12) | cmode(11-8)
// | 0(7) | Q(6) | op(5) | 4(1) | imm4(3-0)
int vd, d;
dst.split_code(&vd, &d);
- emit(kSpecialCondition | 0x05 * B23 | d * B22 | vd * B12 | 0x1 * B6 |
- 0x1 * B4 | enc);
+ emit(kSpecialCondition | 0x05 * B23 | d * B22 | vd * B12 | cmode * B8 |
+ 0x1 * B6 | op * B5 | 0x1 * B4 | enc);
} else {
UNIMPLEMENTED();
}
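
Aside: a minimal standalone sketch of the bit scattering that WriteVmovIntImmEncoding/FitsVmovIntImm above perform; the helper name and worked value below are illustrative only, not part of the patch.

  // Scatter an 8-bit immediate abcdefgh into the vmov fields used above:
  // a -> bit 24, bcd -> bits 18..16, efgh -> bits 3..0 (Table A7-15).
  static uint32_t SplitVmovImm8(uint8_t imm) {
    uint32_t enc = (imm & 0x80u) << (24 - 7);  // a
    enc |= (imm & 0x70u) << (16 - 4);          // bcd
    enc |= (imm & 0x0fu);                      // efgh
    return enc;
  }
  // Worked value: SplitVmovImm8(0xAB) == (1u << 24) | (0x2u << 16) | 0xBu.
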
@@ -3892,7 +3906,18 @@ void Assembler::vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src) {
emit(EncodeNeonVCVT(U32, dst, F32, src));
}
-enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF };
+enum UnaryOp {
+ VMVN,
+ VSWP,
+ VABS,
+ VABSF,
+ VNEG,
+ VNEGF,
+ VRINTM,
+ VRINTN,
+ VRINTP,
+ VRINTZ
+};
static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
int dst_code, int src_code) {
@@ -3920,6 +3945,18 @@ static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
DCHECK_EQ(Neon32, size);
op_encoding = B16 | B10 | 0x7 * B7;
break;
+ case VRINTM:
+ op_encoding = B17 | 0xD * B7;
+ break;
+ case VRINTN:
+ op_encoding = B17 | 0x8 * B7;
+ break;
+ case VRINTP:
+ op_encoding = B17 | 0xF * B7;
+ break;
+ case VRINTZ:
+ op_encoding = B17 | 0xB * B7;
+ break;
default:
UNREACHABLE();
}
@@ -4315,7 +4352,6 @@ void Assembler::vmull(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src1,
src2.split_code(&vm, &m);
int size = NeonSz(dt);
int u = NeonU(dt);
- if (!u) UNIMPLEMENTED();
emit(0xFU * B28 | B25 | u * B24 | B23 | d * B22 | size * B20 | vn * B16 |
vd * B12 | 0xC * B8 | n * B7 | m * B5 | vm);
}
@@ -4575,6 +4611,38 @@ void Assembler::vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
emit(EncodeNeonPairwiseOp(VPMAX, dt, dst, src1, src2));
}
+void Assembler::vrintm(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer towards -Infinity.
+ // See ARM DDI 0487F.b, F6-5493.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTM, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
+void Assembler::vrintn(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer to Nearest.
+ // See ARM DDI 0487F.b, F6-5497.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTN, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
+void Assembler::vrintp(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer towards +Infinity.
+ // See ARM DDI 0487F.b, F6-5501.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTP, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
+void Assembler::vrintz(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer towards Zero.
+ // See ARM DDI 0487F.b, F6-5511.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTZ, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
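
Aside: a hypothetical call site for the new NEON rounding helpers (not part of this patch); "assm" is an assumed Assembler pointer, and NeonS32 is assumed here as the data type that encodes 32-bit float lanes.

  if (CpuFeatures::IsSupported(ARMv8)) {
    CpuFeatureScope scope(assm, ARMv8);
    assm->vrintm(NeonS32, q0, q1);  // q0 = floor of each f32 lane of q1
    assm->vrintz(NeonS32, q0, q1);  // q0 = truncation of each f32 lane of q1
  }
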
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index 61205760df..18631e2ece 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -820,7 +820,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
- // ARMv8 rounding instructions.
+ // ARMv8 rounding instructions (Scalar).
void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
@@ -908,6 +908,17 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DwVfpRegister src2);
void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
+
+ // ARMv8 rounding instructions (NEON).
+ void vrintm(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vrintn(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vrintp(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vrintz(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
QwNeonRegister shift);
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
index 5a4e08dc77..b457376610 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
@@ -46,11 +46,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
- return r1;
-}
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return r0; }
-
const Register LoadDescriptor::ReceiverRegister() { return r1; }
const Register LoadDescriptor::NameRegister() { return r2; }
const Register LoadDescriptor::SlotRegister() { return r0; }
@@ -191,11 +186,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
@@ -295,6 +285,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 7e5fa8cef1..7b9e73e1d9 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -2455,7 +2455,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Register scratch = temps.Acquire();
DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits);
- ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
b(cc, condition_met);
}
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 97a57d6f3c..2e21ab913d 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -41,19 +41,66 @@
namespace v8 {
namespace internal {
+namespace {
+
+#ifdef USE_SIMULATOR
+static unsigned SimulatorFeaturesFromCommandLine() {
+ if (strcmp(FLAG_sim_arm64_optional_features, "none") == 0) {
+ return 0;
+ }
+ if (strcmp(FLAG_sim_arm64_optional_features, "all") == 0) {
+ return (1u << NUMBER_OF_CPU_FEATURES) - 1;
+ }
+ fprintf(
+ stderr,
+ "Error: unrecognised value for --sim-arm64-optional-features ('%s').\n",
+ FLAG_sim_arm64_optional_features);
+ fprintf(stderr,
+ "Supported values are: none\n"
+ " all\n");
+ FATAL("sim-arm64-optional-features");
+}
+#endif // USE_SIMULATOR
+
+static constexpr unsigned CpuFeaturesFromCompiler() {
+ unsigned features = 0;
+#if defined(__ARM_FEATURE_JCVT)
+ features |= 1u << JSCVT;
+#endif
+ return features;
+}
+
+} // namespace
+
// -----------------------------------------------------------------------------
// CpuFeatures implementation.
void CpuFeatures::ProbeImpl(bool cross_compile) {
- // AArch64 has no configuration options, no further probing is required.
- supported_ = 0;
-
// Only use statically determined features for cross compile (snapshot).
- if (cross_compile) return;
+ if (cross_compile) {
+ supported_ |= CpuFeaturesFromCompiler();
+ return;
+ }
// We used to probe for coherent cache support, but on older CPUs it
// causes crashes (crbug.com/524337), and newer CPUs don't even have
// the feature any more.
+
+#ifdef USE_SIMULATOR
+ supported_ |= SimulatorFeaturesFromCommandLine();
+#else
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+ unsigned runtime = 0;
+ if (cpu.has_jscvt()) {
+ runtime |= 1u << JSCVT;
+ }
+
+ // Use the best of the features found by CPU detection and those inferred from
+ // the build system.
+ supported_ |= CpuFeaturesFromCompiler();
+ supported_ |= runtime;
+#endif // USE_SIMULATOR
}
void CpuFeatures::PrintTarget() {}
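
Aside: the net effect of the reworked probing above, summarized as comments; the d8 invocations are assumed examples of the flag that the simulator branch parses.

  // cross compile (snapshot):  supported_ |= CpuFeaturesFromCompiler()
  // simulator build:           supported_ |= SimulatorFeaturesFromCommandLine()
  // native build:              supported_ |= CpuFeaturesFromCompiler() | runtime
  //                            bits detected via base::CPU (e.g. JSCVT)
  //
  // Simulator usage (assumed invocation):
  //   d8 --sim-arm64-optional-features=all    # enable every optional feature
  //   d8 --sim-arm64-optional-features=none   # baseline only
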
@@ -1115,10 +1162,10 @@ void Assembler::cls(const Register& rd, const Register& rn) {
DataProcessing1Source(rd, rn, CLS);
}
-void Assembler::pacia1716() { Emit(PACIA1716); }
-void Assembler::autia1716() { Emit(AUTIA1716); }
-void Assembler::paciasp() { Emit(PACIASP); }
-void Assembler::autiasp() { Emit(AUTIASP); }
+void Assembler::pacib1716() { Emit(PACIB1716); }
+void Assembler::autib1716() { Emit(AUTIB1716); }
+void Assembler::pacibsp() { Emit(PACIBSP); }
+void Assembler::autibsp() { Emit(AUTIBSP); }
void Assembler::bti(BranchTargetIdentifier id) {
SystemHint op;
@@ -1136,9 +1183,9 @@ void Assembler::bti(BranchTargetIdentifier id) {
op = BTI_jc;
break;
case BranchTargetIdentifier::kNone:
- case BranchTargetIdentifier::kPaciasp:
+ case BranchTargetIdentifier::kPacibsp:
// We always want to generate a BTI instruction here, so disallow
- // skipping its generation or generating a PACIASP instead.
+ // skipping its generation or generating a PACIBSP instead.
UNREACHABLE();
}
hint(op);
@@ -2714,6 +2761,11 @@ void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
}
+void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) {
+ DCHECK(rd.IsW() && vn.Is1D());
+ Emit(FJCVTZS | Rn(vn) | Rd(rd));
+}
+
#define NEON_FP2REGMISC_FCVT_LIST(V) \
V(fcvtnu, NEON_FCVTNU, FCVTNU) \
V(fcvtns, NEON_FCVTNS, FCVTNS) \
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index a9e8a5e85a..f787bad464 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -780,21 +780,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void clz(const Register& rd, const Register& rn);
void cls(const Register& rd, const Register& rn);
- // Pointer Authentication Code for Instruction address, using key A, with
+ // Pointer Authentication Code for Instruction address, using key B, with
// address in x17 and modifier in x16 [Armv8.3].
- void pacia1716();
+ void pacib1716();
- // Pointer Authentication Code for Instruction address, using key A, with
+ // Pointer Authentication Code for Instruction address, using key B, with
// address in LR and modifier in SP [Armv8.3].
- void paciasp();
+ void pacibsp();
- // Authenticate Instruction address, using key A, with address in x17 and
+ // Authenticate Instruction address, using key B, with address in x17 and
// modifier in x16 [Armv8.3].
- void autia1716();
+ void autib1716();
- // Authenticate Instruction address, using key A, with address in LR and
+ // Authenticate Instruction address, using key B, with address in LR and
// modifier in SP [Armv8.3].
- void autiasp();
+ void autibsp();
// Memory instructions.
@@ -1750,6 +1750,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// FP convert to signed integer, nearest with ties to even.
void fcvtns(const Register& rd, const VRegister& vn);
+ // FP JavaScript convert to signed integer, rounding toward zero [Armv8.3].
+ void fjcvtzs(const Register& rd, const VRegister& vn);
+
// FP convert to unsigned integer, nearest with ties to even.
void fcvtnu(const Register& rd, const VRegister& vn);
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index e63962993a..52790b9faf 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -412,9 +412,9 @@ enum class BranchTargetIdentifier {
// Emit a "BTI jc" instruction, which is a combination of "BTI j" and "BTI c".
kBtiJumpCall,
- // Emit a PACIASP instruction, which acts like a "BTI c" or a "BTI jc", based
- // on the value of SCTLR_EL1.BT0.
- kPaciasp
+ // Emit a PACIBSP instruction, which acts like a "BTI c" or a "BTI jc",
+ // based on the value of SCTLR_EL1.BT0.
+ kPacibsp
};
enum BarrierDomain {
@@ -793,10 +793,10 @@ enum SystemPAuthOp : uint32_t {
SystemPAuthFixed = 0xD503211F,
SystemPAuthFMask = 0xFFFFFD1F,
SystemPAuthMask = 0xFFFFFFFF,
- PACIA1716 = SystemPAuthFixed | 0x00000100,
- AUTIA1716 = SystemPAuthFixed | 0x00000180,
- PACIASP = SystemPAuthFixed | 0x00000320,
- AUTIASP = SystemPAuthFixed | 0x000003A0
+ PACIB1716 = SystemPAuthFixed | 0x00000140,
+ AUTIB1716 = SystemPAuthFixed | 0x000001C0,
+ PACIBSP = SystemPAuthFixed | 0x00000360,
+ AUTIBSP = SystemPAuthFixed | 0x000003E0
};
// Any load or store (including pair).
@@ -1325,7 +1325,8 @@ enum FPIntegerConvertOp : uint32_t {
FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
FMOV_dx = FMOV_sw | SixtyFourBits | FP64,
FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000,
- FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000
+ FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000,
+ FJCVTZS = FPIntegerConvertFixed | FP64 | 0x001E0000
};
// Conversion between fixed point and floating point.
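
Aside, derived from the constants above (illustration only): each new B-key encoding is the removed A-key encoding with bit 6 set, the bit that selects key B in this group of hint encodings.

  // PACIB1716 == SystemPAuthFixed | 0x00000100 | (1u << 6)   // was PACIA1716
  // AUTIB1716 == SystemPAuthFixed | 0x00000180 | (1u << 6)   // was AUTIA1716
  // PACIBSP   == SystemPAuthFixed | 0x00000320 | (1u << 6)   // was PACIASP
  // AUTIBSP   == SystemPAuthFixed | 0x000003A0 | (1u << 6)   // was AUTIASP
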
diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc
index 32bcc6f268..d7bd4834b0 100644
--- a/deps/v8/src/codegen/arm64/cpu-arm64.cc
+++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc
@@ -9,6 +9,10 @@
#include "src/codegen/arm64/utils-arm64.h"
#include "src/codegen/cpu-features.h"
+#if V8_OS_MACOSX
+#include <libkern/OSCacheControl.h>
+#endif
+
namespace v8 {
namespace internal {
@@ -41,6 +45,8 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
#if defined(V8_HOST_ARCH_ARM64)
#if defined(V8_OS_WIN)
::FlushInstructionCache(GetCurrentProcess(), address, length);
+#elif defined(V8_OS_MACOSX)
+ sys_icache_invalidate(address, length);
#else
// The code below assumes user space cache operations are allowed. The goal
// of this routine is to make sure the code generated is visible to the I
diff --git a/deps/v8/src/codegen/arm64/decoder-arm64-inl.h b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h
index 25d69b3898..1a7d483dea 100644
--- a/deps/v8/src/codegen/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h
@@ -538,7 +538,6 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
(instr->Mask(0x20C60000) == 0x00840000) ||
(instr->Mask(0xA0C60000) == 0x80060000) ||
(instr->Mask(0xA0C60000) == 0x00860000) ||
- (instr->Mask(0xA0C60000) == 0x00460000) ||
(instr->Mask(0xA0CE0000) == 0x80860000) ||
(instr->Mask(0xA0CE0000) == 0x804E0000) ||
(instr->Mask(0xA0CE0000) == 0x000E0000) ||
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
index 9f05922444..61c8947bd4 100644
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
@@ -46,11 +46,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
- return x1;
-}
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return x0; }
-
const Register LoadDescriptor::ReceiverRegister() { return x1; }
const Register LoadDescriptor::NameRegister() { return x2; }
const Register LoadDescriptor::SlotRegister() { return x0; }
@@ -191,11 +186,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
@@ -299,6 +289,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 93b8136d9a..56be64693d 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -318,23 +318,15 @@ void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) {
// instructions between the bind and the target identifier instruction.
InstructionAccurateScope scope(this, 1);
bind(label);
- if (id == BranchTargetIdentifier::kPaciasp) {
- paciasp();
+ if (id == BranchTargetIdentifier::kPacibsp) {
+ pacibsp();
} else {
bti(id);
}
}
}
-void TurboAssembler::CodeEntry() {
- // Since `kJavaScriptCallCodeStartRegister` is the target register for tail
- // calls, we have to allow for jumps too, with "BTI jc". We also allow the
- // register allocator to pick the target register for calls made from
- // WebAssembly.
- // TODO(v8:10026): Consider changing this so that we can use CallTarget(),
- // which maps to "BTI c", here instead.
- JumpOrCallTarget();
-}
+void TurboAssembler::CodeEntry() { CallTarget(); }
void TurboAssembler::ExceptionHandler() { JumpTarget(); }
@@ -1136,7 +1128,7 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1153,7 +1145,7 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
DCHECK_IMPLIES((lr_mode == kDontStoreLR), ((src0 != lr) && (src1 != lr)));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1188,7 +1180,7 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kAuthLR) {
- Autiasp();
+ Autibsp();
}
#endif
}
@@ -1199,7 +1191,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
DCHECK_IMPLIES((lr_mode == kDontStoreLR), (src != lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1228,7 +1220,7 @@ void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
DCHECK_IMPLIES((lr_mode == kDontLoadLR), (dst != lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kAuthLR) {
- Autiasp();
+ Autibsp();
}
#endif
}
@@ -1238,7 +1230,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
DCHECK_IMPLIES((lr_mode == kDontStoreLR), !registers.IncludesAliasOf(lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR && registers.IncludesAliasOf(lr)) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1280,7 +1272,7 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kAuthLR && contains_lr) {
- Autiasp();
+ Autibsp();
}
#endif
}
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index c157df2996..2d3e27e530 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -1197,7 +1197,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2,
void MacroAssembler::PushCalleeSavedRegisters() {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- Paciasp();
+ Pacibsp();
#endif
{
@@ -1249,7 +1249,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
}
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- Autiasp();
+ Autibsp();
#endif
}
@@ -1953,7 +1953,13 @@ void TurboAssembler::CallCodeObject(Register code_object) {
void TurboAssembler::JumpCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
- Jump(code_object);
+
+ UseScratchRegisterScope temps(this);
+ if (code_object != x17) {
+ temps.Exclude(x17);
+ Mov(x17, code_object);
+ }
+ Jump(x17);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
@@ -1971,7 +1977,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Adr(x17, &return_location);
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
Add(x16, sp, kSystemPointerSize);
- Pacia1716();
+ Pacib1716();
#endif
Poke(x17, 0);
@@ -2263,6 +2269,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
DoubleRegister double_input,
StubCallMode stub_mode,
LinkRegisterStatus lr_status) {
+ if (CpuFeatures::IsSupported(JSCVT)) {
+ Fjcvtzs(result.W(), double_input);
+ return;
+ }
+
Label done;
// Try to convert the double to an int64. If successful, the bottom 32 bits
@@ -2650,7 +2661,7 @@ void TurboAssembler::CheckPageFlag(const Register& object, int mask,
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
And(scratch, object, ~kPageAlignmentMask);
- Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ Ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
if (cc == eq) {
TestAndBranchIfAnySet(scratch, mask, condition_met);
} else {
@@ -3243,7 +3254,7 @@ void TurboAssembler::RestoreFPAndLR() {
// We can load the return address directly into x17.
Add(x16, fp, StandardFrameConstants::kCallerSPOffset);
Ldp(fp, x17, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- Autia1716();
+ Autib1716();
Mov(lr, x17);
#else
Ldp(fp, lr, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -3256,7 +3267,7 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
Adr(x17, return_location);
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
Add(x16, fp, WasmExitFrameConstants::kCallingPCOffset + kSystemPointerSize);
- Pacia1716();
+ Pacib1716();
#endif
Str(x17, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
}
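
Aside: a minimal host-side reference (assumed helper, not part of V8) of the ECMAScript ToInt32 conversion that the new JSCVT fast path in TruncateDoubleToI performs with a single Fjcvtzs, and that the existing slow path computes via the int64 conversion or call-out.

  #include <cmath>
  #include <cstdint>

  // Truncate toward zero, reduce modulo 2^32, reinterpret as signed 32-bit.
  // NaN and infinities map to 0.
  static int32_t JsToInt32(double d) {
    if (!std::isfinite(d)) return 0;
    double t = std::trunc(d);
    double m = std::fmod(t, 4294967296.0);  // exact for doubles
    if (m < 0) m += 4294967296.0;           // into [0, 2^32)
    return static_cast<int32_t>(static_cast<uint32_t>(static_cast<uint64_t>(m)));
  }
  // e.g. JsToInt32(4294967297.5) == 1 and JsToInt32(-1.9) == -1.
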
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 109e73c3c2..0cb9e82319 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -503,13 +503,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Cbnz(const Register& rt, Label* label);
void Cbz(const Register& rt, Label* label);
- void Paciasp() {
+ void Pacibsp() {
DCHECK(allow_macro_instructions_);
- paciasp();
+ pacibsp();
}
- void Autiasp() {
+ void Autibsp() {
DCHECK(allow_macro_instructions_);
- autiasp();
+ autibsp();
}
// The 1716 pac and aut instructions encourage people to use x16 and x17
@@ -519,7 +519,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Register temp = temps.AcquireX(); // temp will be x16
// __ Mov(x17, ptr);
// __ Mov(x16, modifier); // Will override temp!
- // __ Pacia1716();
+ // __ Pacib1716();
//
// To work around this issue, you must exclude x16 and x17 from the scratch
// register list. You may need to replace them with other registers:
@@ -529,18 +529,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// temps.Include(x10, x11);
// __ Mov(x17, ptr);
// __ Mov(x16, modifier);
- // __ Pacia1716();
- void Pacia1716() {
+ // __ Pacib1716();
+ void Pacib1716() {
DCHECK(allow_macro_instructions_);
DCHECK(!TmpList()->IncludesAliasOf(x16));
DCHECK(!TmpList()->IncludesAliasOf(x17));
- pacia1716();
+ pacib1716();
}
- void Autia1716() {
+ void Autib1716() {
DCHECK(allow_macro_instructions_);
DCHECK(!TmpList()->IncludesAliasOf(x16));
DCHECK(!TmpList()->IncludesAliasOf(x17));
- autia1716();
+ autib1716();
}
inline void Dmb(BarrierDomain domain, BarrierType type);
@@ -1009,6 +1009,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
fcvtzs(vd, vn, fbits);
}
+ void Fjcvtzs(const Register& rd, const VRegister& vn) {
+ DCHECK(allow_macro_instructions());
+ DCHECK(!rd.IsZero());
+ fjcvtzs(rd, vn);
+ }
+
inline void Fcvtzu(const Register& rd, const VRegister& fn);
void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
DCHECK(allow_macro_instructions());
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 090d5424b3..76bf3049c8 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -92,9 +92,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
}
static constexpr CPURegister Create(int code, int size, RegisterType type) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(IsValid(code, size, type));
-#endif
+ CONSTEXPR_DCHECK(IsValid(code, size, type));
return CPURegister{code, size, type};
}
@@ -304,9 +302,7 @@ class VRegister : public CPURegister {
}
static constexpr VRegister Create(int code, int size, int lane_count = 1) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(IsValidLaneCount(lane_count));
-#endif
+ CONSTEXPR_DCHECK(IsValidLaneCount(lane_count));
return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
lane_count);
}
@@ -523,8 +519,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 3b27bf5db9..3d0b7d28e4 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -81,7 +81,7 @@ namespace {
class DefaultAssemblerBuffer : public AssemblerBuffer {
public:
explicit DefaultAssemblerBuffer(int size)
- : buffer_(OwnedVector<uint8_t>::New(size)) {
+ : buffer_(OwnedVector<uint8_t>::NewForOverwrite(size)) {
#ifdef DEBUG
ZapCode(reinterpret_cast<Address>(buffer_.start()), size);
#endif
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 1c287222e9..6419e55cec 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -78,10 +78,16 @@ class JumpOptimizationInfo {
public:
bool is_collecting() const { return stage_ == kCollection; }
bool is_optimizing() const { return stage_ == kOptimization; }
- void set_optimizing() { stage_ = kOptimization; }
+ void set_optimizing() {
+ DCHECK(is_optimizable());
+ stage_ = kOptimization;
+ }
bool is_optimizable() const { return optimizable_; }
- void set_optimizable() { optimizable_ = true; }
+ void set_optimizable() {
+ DCHECK(is_collecting());
+ optimizable_ = true;
+ }
// Used to verify the instruction sequence is always the same in two stages.
size_t hash_code() const { return hash_code_; }
@@ -251,6 +257,15 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
+ int pc_offset_for_safepoint() {
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
+ // Mips needs it's own implementation to avoid trampoline's influence.
+ UNREACHABLE();
+#else
+ return pc_offset();
+#endif
+ }
+
byte* buffer_start() const { return buffer_->start(); }
int buffer_size() const { return buffer_->size(); }
int instruction_size() const { return pc_offset(); }
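
Aside: the two-pass protocol that the new DCHECKs in JumpOptimizationInfo enforce, sketched as a hypothetical driver (the surrounding pipeline code is assumed, not shown in this patch).

  JumpOptimizationInfo info;   // starts out in the collection stage
  // First pass: assemble with is_collecting() == true and record jump sites.
  info.set_optimizable();      // now legal only while still collecting
  if (info.is_optimizable()) {
    info.set_optimizing();     // now legal only after set_optimizable()
    // Second pass: reassemble; is_optimizing() == true shortens the jumps.
  }
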
diff --git a/deps/v8/src/codegen/code-comments.h b/deps/v8/src/codegen/code-comments.h
index f366cd5547..5866296051 100644
--- a/deps/v8/src/codegen/code-comments.h
+++ b/deps/v8/src/codegen/code-comments.h
@@ -20,7 +20,7 @@ class Assembler;
// Code comments section layout:
// byte count content
// ------------------------------------------------------------------------
-// 4 size as uint32_t (only for sanity check)
+// 4 size as uint32_t (only for a check)
// [Inline array of CodeCommentEntry in increasing pc_offset order]
// ┌ 4 pc_offset of entry as uint32_t
// ├ 4 length of the comment including terminating '\0'
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index 060a66edc7..006b6bee16 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -268,6 +268,23 @@ Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
}
// static
+Callable CodeFactory::Call_WithFeedback(Isolate* isolate,
+ ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Builtins::CallableFor(
+ isolate, Builtins::kCall_ReceiverIsNullOrUndefined_WithFeedback);
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Builtins::CallableFor(
+ isolate, Builtins::kCall_ReceiverIsNotNullOrUndefined_WithFeedback);
+ case ConvertReceiverMode::kAny:
+ return Builtins::CallableFor(isolate,
+ Builtins::kCall_ReceiverIsAny_WithFeedback);
+ }
+ UNREACHABLE();
+}
+
+// static
Callable CodeFactory::CallWithArrayLike(Isolate* isolate) {
return Builtins::CallableFor(isolate, Builtins::kCallWithArrayLike);
}
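
Aside: a hypothetical call site for the new CodeFactory::Call_WithFeedback; the surrounding variables are assumed.

  Callable callable =
      CodeFactory::Call_WithFeedback(isolate, ConvertReceiverMode::kAny);
  // Resolves to Builtins::kCall_ReceiverIsAny_WithFeedback, the
  // feedback-collecting variant of the generic Call builtin.
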
diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h
index b8d294ce71..02fc7e4b23 100644
--- a/deps/v8/src/codegen/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -71,6 +71,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable Call_WithFeedback(Isolate* isolate, ConvertReceiverMode mode);
static Callable CallWithArrayLike(Isolate* isolate);
static Callable CallWithSpread(Isolate* isolate);
static Callable CallFunction(
diff --git a/deps/v8/src/codegen/code-reference.h b/deps/v8/src/codegen/code-reference.h
index 4326cf0b96..8ff3581689 100644
--- a/deps/v8/src/codegen/code-reference.h
+++ b/deps/v8/src/codegen/code-reference.h
@@ -16,7 +16,7 @@ class CodeDesc;
namespace wasm {
class WasmCode;
-}
+} // namespace wasm
class CodeReference {
public:
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 901ce0c7b4..5a8d0bad03 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -20,7 +20,6 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
-#include "src/objects/js-aggregate-error.h"
#include "src/objects/js-generator.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -109,7 +108,11 @@ void CodeStubAssembler::Check(const BranchGenerator& branch,
branch(&ok, &not_ok);
BIND(&not_ok);
- FailAssert(message, file, line, extra_nodes);
+ std::vector<FileAndLine> file_and_line;
+ if (file != nullptr) {
+ file_and_line.push_back({file, line});
+ }
+ FailAssert(message, file_and_line, extra_nodes);
BIND(&ok);
Comment("] Assert");
@@ -136,17 +139,6 @@ void CodeStubAssembler::Check(SloppyTNode<Word32T> condition_node,
Check(branch, message, file, line, extra_nodes);
}
-template <>
-TNode<Smi> CodeStubAssembler::IntPtrToParameter<Smi>(TNode<IntPtrT> value) {
- return SmiTag(value);
-}
-template <>
-TNode<IntPtrT> CodeStubAssembler::IntPtrToParameter<IntPtrT>(
- TNode<IntPtrT> value) {
- return value;
-}
-
-
void CodeStubAssembler::IncrementCallCount(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) {
Comment("increment call count");
@@ -171,12 +163,24 @@ void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
}
void CodeStubAssembler::FailAssert(
- const char* message, const char* file, int line,
+ const char* message, const std::vector<FileAndLine>& files_and_lines,
std::initializer_list<ExtraNode> extra_nodes) {
DCHECK_NOT_NULL(message);
EmbeddedVector<char, 1024> chars;
- if (file != nullptr) {
- SNPrintF(chars, "%s [%s:%d]", message, file, line);
+ std::stringstream stream;
+ for (auto it = files_and_lines.rbegin(); it != files_and_lines.rend(); ++it) {
+ if (it->first != nullptr) {
+ stream << " [" << it->first << ":" << it->second << "]";
+#ifndef DEBUG
+ // To limit the size of these strings in release builds, we include only
+ // the innermost macro's file name and line number.
+ break;
+#endif
+ }
+ }
+ std::string files_and_lines_text = stream.str();
+ if (files_and_lines_text.size() != 0) {
+ SNPrintF(chars, "%s%s", message, files_and_lines_text.c_str());
message = chars.begin();
}
TNode<String> message_node = StringConstant(message);
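
Aside: an example of the failure text produced by the reworked FailAssert, using hypothetical file names and line numbers.

  // With message = "unexpected value" and
  // files_and_lines = {{"a.tq", 10}, {"code-stub-assembler.cc", 42}}
  // (outermost first, innermost last), a DEBUG build prints every frame,
  // innermost first:
  //   unexpected value [code-stub-assembler.cc:42] [a.tq:10]
  // while a release build stops after the innermost entry:
  //   unexpected value [code-stub-assembler.cc:42]
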
@@ -283,42 +287,6 @@ TNode<RawPtrT> CodeStubAssembler::IntPtrOrSmiConstant<RawPtrT>(int value) {
return ReinterpretCast<RawPtrT>(IntPtrConstant(value));
}
-Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
- if (mode == SMI_PARAMETERS) {
- return SmiConstant(value);
- } else {
- DCHECK_EQ(INTPTR_PARAMETERS, mode);
- return IntPtrConstant(value);
- }
-}
-
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<Smi> test) {
- Smi smi_test;
- if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) {
- return true;
- }
- return false;
-}
-
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test) {
- int32_t constant_test;
- if (ToInt32Constant(test, &constant_test) && constant_test == 0) {
- return true;
- }
- return false;
-}
-
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
- ParameterMode mode) {
- if (mode == INTPTR_PARAMETERS) {
- return IsIntPtrOrSmiConstantZero(UncheckedCast<IntPtrT>(test));
- } else {
- DCHECK_EQ(mode, SMI_PARAMETERS);
- return IsIntPtrOrSmiConstantZero(UncheckedCast<Smi>(test));
- }
- return false;
-}
-
bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
int* value,
ParameterMode mode) {
@@ -586,22 +554,14 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
return TNode<Float64T>::UncheckedCast(var_x.value());
}
-TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
- if (SmiValuesAre32Bits() && kSystemPointerSize == kInt64Size) {
- // Check that the Smi value is zero in the lower bits.
- TNode<IntPtrT> value = BitcastTaggedToWordForTagAndSmiBits(smi);
- return Word32Equal(Int32Constant(0), TruncateIntPtrToInt32(value));
- }
- return Int32TrueConstant();
+template <>
+TNode<Smi> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) {
+ return value;
}
-TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
- if (COMPRESS_POINTERS_BOOL) {
- return WordEqual(
- BitcastTaggedToWordForTagAndSmiBits(smi),
- BitcastTaggedToWordForTagAndSmiBits(NormalizeSmiIndex(smi)));
- }
- return Int32TrueConstant();
+template <>
+TNode<IntPtrT> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) {
+ return SmiUntag(value);
}
TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr(
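
Aside: what the new TaggedToParameter specializations do at a call site (illustration; the calling code is assumed).

  // TaggedToParameter<Smi>(smi_value)     -> identity; value stays a tagged Smi
  // TaggedToParameter<IntPtrT>(smi_value) -> SmiUntag(smi_value)
  // This replaces the old TaggedToParameter(value, mode) overload, which chose
  // between the two based on its ParameterMode argument.
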
@@ -1099,15 +1059,8 @@ void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); }
TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
- TNode<FixedDoubleArray> array, TNode<Smi> index, Label* if_hole) {
- return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0,
- SMI_PARAMETERS, if_hole);
-}
-
-TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
TNode<FixedDoubleArray> array, TNode<IntPtrT> index, Label* if_hole) {
- return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0,
- INTPTR_PARAMETERS, if_hole);
+ return LoadFixedDoubleArrayElement(array, index, if_hole);
}
void CodeStubAssembler::BranchIfJSReceiver(SloppyTNode<Object> object,
@@ -1430,14 +1383,14 @@ TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
return LoadFullTagged(frame_pointer, IntPtrConstant(offset));
}
-Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
- int offset, MachineType type) {
+Node* CodeStubAssembler::LoadObjectField(TNode<HeapObject> object, int offset,
+ MachineType type) {
CSA_ASSERT(this, IsStrong(object));
return LoadFromObject(type, object, IntPtrConstant(offset - kHeapObjectTag));
}
-Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
- SloppyTNode<IntPtrT> offset,
+Node* CodeStubAssembler::LoadObjectField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
MachineType type) {
CSA_ASSERT(this, IsStrong(object));
return LoadFromObject(type, object,
@@ -2006,6 +1959,43 @@ CodeStubAssembler::LoadArrayElement<DescriptorArray>(TNode<DescriptorArray>,
ParameterMode,
LoadSensitivity);
+template <typename TIndex>
+TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
+ TNode<FixedArray> object, TNode<TIndex> index, int additional_offset,
+ LoadSensitivity needs_poisoning, CheckBounds check_bounds) {
+ // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
+ static_assert(std::is_same<TIndex, Smi>::value ||
+ std::is_same<TIndex, UintPtrT>::value ||
+ std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi, UintPtrT or IntPtrT indexes are allowed");
+ CSA_ASSERT(this, IsFixedArraySubclass(object));
+ CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object));
+
+ ParameterMode parameter_mode =
+ std::is_same<TIndex, Smi>::value ? SMI_PARAMETERS : INTPTR_PARAMETERS;
+ if (NeedsBoundsCheck(check_bounds)) {
+ FixedArrayBoundsCheck(object, index, additional_offset, parameter_mode);
+ }
+ TNode<MaybeObject> element =
+ LoadArrayElement(object, FixedArray::kHeaderSize, index,
+ additional_offset, parameter_mode, needs_poisoning);
+ return CAST(element);
+}
+
+template V8_EXPORT_PRIVATE TNode<Object>
+CodeStubAssembler::LoadFixedArrayElement<Smi>(TNode<FixedArray>, TNode<Smi>,
+ int, LoadSensitivity,
+ CheckBounds);
+template V8_EXPORT_PRIVATE TNode<Object>
+CodeStubAssembler::LoadFixedArrayElement<UintPtrT>(TNode<FixedArray>,
+ TNode<UintPtrT>, int,
+ LoadSensitivity,
+ CheckBounds);
+template V8_EXPORT_PRIVATE TNode<Object>
+CodeStubAssembler::LoadFixedArrayElement<IntPtrT>(TNode<FixedArray>,
+ TNode<IntPtrT>, int,
+ LoadSensitivity, CheckBounds);
+
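
Aside: hypothetical call sites for the templated LoadFixedArrayElement above; the trailing default arguments are assumed to be preserved in the header.

  TNode<Object> a = LoadFixedArrayElement(array, IntPtrConstant(0));  // IntPtrT index
  TNode<Object> b = LoadFixedArrayElement(array, SmiConstant(1));     // Smi index
  // The index type now selects the old ParameterMode statically, so call sites
  // no longer thread SMI_PARAMETERS / INTPTR_PARAMETERS through.
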
void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
Node* index,
int additional_offset,
@@ -2036,22 +2026,6 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
}
}
-TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
- TNode<FixedArray> object, Node* index_node, int additional_offset,
- ParameterMode parameter_mode, LoadSensitivity needs_poisoning,
- CheckBounds check_bounds) {
- CSA_ASSERT(this, IsFixedArraySubclass(object));
- CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object));
- if (NeedsBoundsCheck(check_bounds)) {
- FixedArrayBoundsCheck(object, index_node, additional_offset,
- parameter_mode);
- }
- TNode<MaybeObject> element =
- LoadArrayElement(object, FixedArray::kHeaderSize, index_node,
- additional_offset, parameter_mode, needs_poisoning);
- return CAST(element);
-}
-
TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
TNode<PropertyArray> object, SloppyTNode<IntPtrT> index) {
int additional_offset = 0;
@@ -2382,9 +2356,8 @@ template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
template <typename Array>
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
- TNode<Array> object, int array_header_size, Node* index_node,
- int additional_offset, ParameterMode parameter_mode) {
- CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
+ TNode<Array> object, int array_header_size, TNode<IntPtrT> index,
+ int additional_offset) {
DCHECK(IsAligned(additional_offset, kTaggedSize));
int endian_correction = 0;
#if V8_TARGET_LITTLE_ENDIAN
@@ -2392,8 +2365,8 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
#endif
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag +
endian_correction;
- TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
- parameter_mode, header_size);
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(index, HOLEY_ELEMENTS, header_size);
CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(object),
array_header_size + endian_correction));
if (SmiValuesAre32Bits()) {
@@ -2404,32 +2377,25 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
- TNode<FixedArray> object, Node* index_node, int additional_offset,
- ParameterMode parameter_mode) {
+ TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset) {
CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object));
return LoadAndUntagToWord32ArrayElement(object, FixedArray::kHeaderSize,
- index_node, additional_offset,
- parameter_mode);
+ index, additional_offset);
}
TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement(
- TNode<WeakFixedArray> object, Node* index, int additional_offset,
- ParameterMode parameter_mode, LoadSensitivity needs_poisoning) {
+ TNode<WeakFixedArray> object, TNode<IntPtrT> index, int additional_offset) {
return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index,
- additional_offset, parameter_mode, needs_poisoning);
+ additional_offset, INTPTR_PARAMETERS,
+ LoadSensitivity::kSafe);
}
TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
- SloppyTNode<FixedDoubleArray> object, Node* index_node,
- MachineType machine_type, int additional_offset,
- ParameterMode parameter_mode, Label* if_hole) {
- CSA_ASSERT(this, IsFixedDoubleArray(object));
- DCHECK(IsAligned(additional_offset, kTaggedSize));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
- int32_t header_size =
- FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- index_node, HOLEY_DOUBLE_ELEMENTS, parameter_mode, header_size);
+ TNode<FixedDoubleArray> object, TNode<IntPtrT> index, Label* if_hole,
+ MachineType machine_type) {
+ int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(index, HOLEY_DOUBLE_ELEMENTS, header_size);
CSA_ASSERT(this, IsOffsetInBounds(
offset, LoadAndUntagFixedArrayBaseLength(object),
FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS));
@@ -2478,16 +2444,15 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
BIND(&if_packed_double);
{
- var_result = AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement(
- CAST(elements), index, MachineType::Float64()));
+ var_result = AllocateHeapNumberWithValue(
+ LoadFixedDoubleArrayElement(CAST(elements), index));
Goto(&done);
}
BIND(&if_holey_double);
{
- var_result = AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement(
- CAST(elements), index, MachineType::Float64(), 0, INTPTR_PARAMETERS,
- if_hole));
+ var_result = AllocateHeapNumberWithValue(
+ LoadFixedDoubleArrayElement(CAST(elements), index, if_hole));
Goto(&done);
}
@@ -2519,7 +2484,7 @@ TNode<BoolT> CodeStubAssembler::IsDoubleHole(TNode<Object> base,
}
TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
- SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
+ TNode<Object> base, TNode<IntPtrT> offset, Label* if_hole,
MachineType machine_type) {
if (if_hole) {
GotoIf(IsDoubleHole(base, offset), if_hole);
@@ -2542,41 +2507,6 @@ TNode<BoolT> CodeStubAssembler::LoadScopeInfoHasExtensionField(
return IsSetWord<ScopeInfo::HasContextExtensionSlotBit>(value);
}
-TNode<Object> CodeStubAssembler::LoadContextElement(
- SloppyTNode<Context> context, int slot_index) {
- int offset = Context::SlotOffset(slot_index);
- return Load<Object>(context, IntPtrConstant(offset));
-}
-
-TNode<Object> CodeStubAssembler::LoadContextElement(
- SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) {
- TNode<IntPtrT> offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS,
- Context::SlotOffset(0));
- return Load<Object>(context, offset);
-}
-
-TNode<Object> CodeStubAssembler::LoadContextElement(TNode<Context> context,
- TNode<Smi> slot_index) {
- TNode<IntPtrT> offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS,
- Context::SlotOffset(0));
- return Load<Object>(context, offset);
-}
-
-void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
- int slot_index,
- SloppyTNode<Object> value) {
- int offset = Context::SlotOffset(slot_index);
- Store(context, IntPtrConstant(offset), value);
-}
-
-void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
- SloppyTNode<IntPtrT> slot_index,
- SloppyTNode<Object> value) {
- TNode<IntPtrT> offset = IntPtrAdd(TimesTaggedSize(slot_index),
- IntPtrConstant(Context::SlotOffset(0)));
- Store(context, offset, value);
-}
-
void CodeStubAssembler::StoreContextElementNoWriteBarrier(
SloppyTNode<Context> context, int slot_index, SloppyTNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
@@ -2893,19 +2823,18 @@ TNode<Int32T> CodeStubAssembler::EnsureArrayPushable(TNode<Context> context,
}
void CodeStubAssembler::PossiblyGrowElementsCapacity(
- ParameterMode mode, ElementsKind kind, TNode<HeapObject> array,
- Node* length, TVariable<FixedArrayBase>* var_elements, Node* growth,
+ ElementsKind kind, TNode<HeapObject> array, TNode<BInt> length,
+ TVariable<FixedArrayBase>* var_elements, TNode<BInt> growth,
Label* bailout) {
Label fits(this, var_elements);
- Node* capacity =
- TaggedToParameter(LoadFixedArrayBaseLength(var_elements->value()), mode);
- // length and growth nodes are already in a ParameterMode appropriate
- // representation.
- Node* new_length = IntPtrOrSmiAdd(growth, length, mode);
- GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
- Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
+ TNode<BInt> capacity =
+ TaggedToParameter<BInt>(LoadFixedArrayBaseLength(var_elements->value()));
+
+ TNode<BInt> new_length = IntPtrOrSmiAdd(growth, length);
+ GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity), &fits);
+ TNode<BInt> new_capacity = CalculateNewElementsCapacity(new_length);
*var_elements = GrowElementsCapacity(array, var_elements->value(), kind, kind,
- capacity, new_capacity, mode, bailout);
+ capacity, new_capacity, bailout);
Goto(&fits);
BIND(&fits);
}
@@ -2919,15 +2848,14 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
Label pre_bailout(this);
Label success(this);
TVARIABLE(Smi, var_tagged_length);
- ParameterMode mode = OptimalParameterMode();
TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
TVARIABLE(FixedArrayBase, var_elements, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = arg_index->value();
TNode<BInt> growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first));
- PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
- &var_elements, growth, &pre_bailout);
+ PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements,
+ growth, &pre_bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
@@ -2936,8 +2864,8 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
args->ForEach(
push_vars,
[&](TNode<Object> arg) {
- TryStoreArrayElement(kind, mode, &pre_bailout, elements,
- var_length.value(), arg);
+ TryStoreArrayElement(kind, &pre_bailout, elements, var_length.value(),
+ arg);
Increment(&var_length);
},
first);
@@ -2950,7 +2878,7 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
BIND(&pre_bailout);
{
- TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
+ TNode<Smi> length = ParameterToTagged(var_length.value());
var_tagged_length = length;
TNode<Smi> diff = SmiSub(length, LoadFastJSArrayLength(array));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
@@ -2962,15 +2890,17 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
return var_tagged_length.value();
}
-void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
- ParameterMode mode, Label* bailout,
+void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind, Label* bailout,
TNode<FixedArrayBase> elements,
- Node* index, TNode<Object> value) {
+ TNode<BInt> index,
+ TNode<Object> value) {
if (IsSmiElementsKind(kind)) {
GotoIf(TaggedIsNotSmi(value), bailout);
} else if (IsDoubleElementsKind(kind)) {
GotoIfNotNumber(value, bailout);
}
+
+ ParameterMode mode = OptimalParameterMode();
if (IsDoubleElementsKind(kind)) {
StoreElement(elements, kind, index, ChangeNumberToFloat64(CAST(value)),
mode);
@@ -2984,19 +2914,18 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
TNode<Object> value,
Label* bailout) {
Comment("BuildAppendJSArray: ", ElementsKindToString(kind));
- ParameterMode mode = OptimalParameterMode();
TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
TVARIABLE(FixedArrayBase, var_elements, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
- Node* growth = IntPtrOrSmiConstant(1, mode);
- PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
- &var_elements, growth, bailout);
+ TNode<BInt> growth = IntPtrOrSmiConstant<BInt>(1);
+ PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements,
+ growth, bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
- TryStoreArrayElement(kind, mode, bailout, var_elements.value(),
- var_length.value(), value);
+ TryStoreArrayElement(kind, bailout, var_elements.value(), var_length.value(),
+ value);
Increment(&var_length);
TNode<Smi> length = BIntToSmi(var_length.value());
@@ -3335,7 +3264,7 @@ TNode<NameDictionary> CodeStubAssembler::CopyNameDictionary(
AllocateNameDictionaryWithCapacity(capacity);
TNode<IntPtrT> length = SmiUntag(LoadFixedArrayBaseLength(dictionary));
CopyFixedArrayElements(PACKED_ELEMENTS, dictionary, properties, length,
- SKIP_WRITE_BARRIER, INTPTR_PARAMETERS);
+ SKIP_WRITE_BARRIER);
return properties;
}
@@ -3552,12 +3481,13 @@ TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, int array_header_size) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ int array_header_size) {
Comment("begin allocation of JSArray passing in elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
int base_size = array_header_size;
- if (!allocation_site.is_null()) {
+ if (allocation_site) {
base_size += AllocationMemento::kSize;
}
@@ -3571,8 +3501,9 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> capacity,
- AllocationFlags allocation_flags, int array_header_size) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> capacity, AllocationFlags allocation_flags,
+ int array_header_size) {
Comment("begin allocation of JSArray with elements");
CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
@@ -3608,7 +3539,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
BIND(&nonempty);
{
int base_size = array_header_size;
- if (!allocation_site.is_null()) {
+ if (allocation_site) {
base_size += AllocationMemento::kSize;
}
@@ -3680,7 +3611,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> size_in_bytes) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
// Allocate space for the JSArray and the elements FixedArray in one go.
@@ -3691,9 +3623,9 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
- if (!allocation_site.is_null()) {
+ if (allocation_site) {
InitializeAllocationMemento(array, IntPtrConstant(JSArray::kHeaderSize),
- allocation_site);
+ *allocation_site);
}
return CAST(array);
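
Throughout these AllocateJSArray hunks the allocation-site parameter changes from a TNode<AllocationSite> that could hold a null handle to a base::Optional<TNode<AllocationSite>>, so the old "!allocation_site.is_null()" checks become presence checks and uses become "*allocation_site". A hedged standalone sketch of the same calling pattern, with std::optional standing in for base::Optional and made-up sizes used purely for illustration:

#include <iostream>
#include <optional>

struct AllocationSite { int id; };

constexpr int kArrayHeaderSize = 32;        // illustrative value only
constexpr int kAllocationMementoSize = 16;  // illustrative value only

int ComputeBaseSize(std::optional<AllocationSite> allocation_site) {
  int base_size = kArrayHeaderSize;
  if (allocation_site) {  // presence check replaces is_null()
    base_size += kAllocationMementoSize;
    // A real callee would also consume *allocation_site here.
  }
  return base_size;
}

int main() {
  std::cout << ComputeBaseSize(std::nullopt) << "\n";       // 32
  std::cout << ComputeBaseSize(AllocationSite{1}) << "\n";  // 48
}
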
@@ -3701,11 +3633,10 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
- TNode<Smi> length, TNode<AllocationSite> allocation_site,
+ TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
AllocationFlags allocation_flags) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
- ParameterMode capacity_mode = INTPTR_PARAMETERS;
TNode<JSArray> array;
TNode<FixedArrayBase> elements;
@@ -3718,9 +3649,8 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
BIND(&nonempty);
{
- FillFixedArrayWithValue(kind, elements,
- IntPtrOrSmiConstant(0, capacity_mode), capacity,
- RootIndex::kTheHoleValue, capacity_mode);
+ FillFixedArrayWithValue(kind, elements, IntPtrConstant(0), capacity,
+ RootIndex::kTheHoleValue, INTPTR_PARAMETERS);
Goto(&out);
}
@@ -3728,9 +3658,10 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
return array;
}
-TNode<JSArray> CodeStubAssembler::ExtractFastJSArray(
- TNode<Context> context, TNode<JSArray> array, Node* begin, Node* count,
- ParameterMode mode, Node* capacity, TNode<AllocationSite> allocation_site) {
+TNode<JSArray> CodeStubAssembler::ExtractFastJSArray(TNode<Context> context,
+ TNode<JSArray> array,
+ TNode<BInt> begin,
+ TNode<BInt> count) {
TNode<Map> original_array_map = LoadMap(array);
TNode<Int32T> elements_kind = LoadMapElementsKind(original_array_map);
@@ -3739,23 +3670,24 @@ TNode<JSArray> CodeStubAssembler::ExtractFastJSArray(
TNode<Map> array_map = LoadJSArrayElementsMap(elements_kind, native_context);
TNode<FixedArrayBase> new_elements = ExtractFixedArray(
- LoadElements(array), begin, count, capacity,
- ExtractFixedArrayFlag::kAllFixedArrays, mode, nullptr, elements_kind);
+ LoadElements(array), base::Optional<TNode<BInt>>(begin),
+ base::Optional<TNode<BInt>>(count),
+ base::Optional<TNode<BInt>>(base::nullopt),
+ ExtractFixedArrayFlag::kAllFixedArrays, nullptr, elements_kind);
TNode<JSArray> result = AllocateJSArray(
- array_map, new_elements, ParameterToTagged(count, mode), allocation_site);
+ array_map, new_elements, ParameterToTagged(count), base::nullopt);
return result;
}
TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
TNode<Context> context, TNode<JSArray> array,
- TNode<AllocationSite> allocation_site, HoleConversionMode convert_holes) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ HoleConversionMode convert_holes) {
// TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this
// function is also used to copy boilerplates even when the no-elements
// protector is invalid. This function should be renamed to reflect its uses.
- // TODO(v8:9708): remove ParameterMode
- ParameterMode mode = OptimalParameterMode();
TNode<Number> length = LoadJSArrayLength(array);
TNode<FixedArrayBase> new_elements;
TVARIABLE(FixedArrayBase, var_new_elements);
@@ -3773,11 +3705,13 @@ TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
}
// Simple extraction that preserves holes.
- new_elements =
- ExtractFixedArray(LoadElements(array), IntPtrOrSmiConstant(0, mode),
- TaggedToParameter(CAST(length), mode), nullptr,
- ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode,
- nullptr, var_elements_kind.value());
+ new_elements = ExtractFixedArray(
+ LoadElements(array),
+ base::Optional<TNode<BInt>>(IntPtrOrSmiConstant<BInt>(0)),
+ base::Optional<TNode<BInt>>(TaggedToParameter<BInt>(CAST(length))),
+ base::Optional<TNode<BInt>>(base::nullopt),
+ ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, nullptr,
+ var_elements_kind.value());
var_new_elements = new_elements;
Goto(&allocate_jsarray);
@@ -3792,9 +3726,11 @@ TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
// PACKED_ELEMENTS. Also, if we want to replace holes, we must not use
// ExtractFixedArrayFlag::kDontCopyCOW.
new_elements = ExtractFixedArray(
- LoadElements(array), IntPtrOrSmiConstant(0, mode),
- TaggedToParameter(CAST(length), mode), nullptr,
- ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted);
+ LoadElements(array),
+ base::Optional<TNode<BInt>>(IntPtrOrSmiConstant<BInt>(0)),
+ base::Optional<TNode<BInt>>(TaggedToParameter<BInt>(CAST(length))),
+ base::Optional<TNode<BInt>>(base::nullopt),
+ ExtractFixedArrayFlag::kAllFixedArrays, &var_holes_converted);
var_new_elements = new_elements;
// If the array type didn't change, use the original elements kind.
GotoIfNot(var_holes_converted.value(), &allocate_jsarray);
@@ -3826,25 +3762,29 @@ TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
return result;
}
+template <typename TIndex>
TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
- ElementsKind kind, Node* capacity, ParameterMode mode,
- AllocationFlags flags, SloppyTNode<Map> fixed_array_map) {
+ ElementsKind kind, TNode<TIndex> capacity, AllocationFlags flags,
+ base::Optional<TNode<Map>> fixed_array_map) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT capacity is allowed");
Comment("AllocateFixedArray");
- CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
- CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity,
- IntPtrOrSmiConstant(0, mode), mode));
+ CSA_ASSERT(this,
+ IntPtrOrSmiGreaterThan(capacity, IntPtrOrSmiConstant<TIndex>(0)));
const intptr_t kMaxLength = IsDoubleElementsKind(kind)
? FixedDoubleArray::kMaxLength
: FixedArray::kMaxLength;
+ const ParameterMode parameter_mode =
+ std::is_same<TIndex, Smi>::value ? SMI_PARAMETERS : INTPTR_PARAMETERS;
intptr_t capacity_constant;
- if (ToParameterConstant(capacity, &capacity_constant, mode)) {
+ if (ToParameterConstant(capacity, &capacity_constant, parameter_mode)) {
CHECK_LE(capacity_constant, kMaxLength);
} else {
Label if_out_of_memory(this, Label::kDeferred), next(this);
- Branch(IntPtrOrSmiGreaterThan(
- capacity,
- IntPtrOrSmiConstant(static_cast<int>(kMaxLength), mode), mode),
+ Branch(IntPtrOrSmiGreaterThan(capacity, IntPtrOrSmiConstant<TIndex>(
+ static_cast<int>(kMaxLength))),
&if_out_of_memory, &next);
BIND(&if_out_of_memory);
@@ -3855,12 +3795,12 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
BIND(&next);
}
- TNode<IntPtrT> total_size = GetFixedArrayAllocationSize(capacity, kind, mode);
+ TNode<IntPtrT> total_size = GetFixedArrayAllocationSize(capacity, kind);
if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment;
// Allocate both array and elements object, and initialize the JSArray.
TNode<HeapObject> array = Allocate(total_size, flags);
- if (fixed_array_map != nullptr) {
+ if (fixed_array_map) {
// Conservatively only skip the write barrier if there are no allocation
// flags, this ensures that the object hasn't ended up in LOS. Note that the
// fixed array map is currently always immortal and technically wouldn't
@@ -3868,9 +3808,9 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
// in case this invariant changes later, since it's difficult to enforce
// locally here.
if (flags == CodeStubAssembler::kNone) {
- StoreMapNoWriteBarrier(array, fixed_array_map);
+ StoreMapNoWriteBarrier(array, *fixed_array_map);
} else {
- StoreMap(array, fixed_array_map);
+ StoreMap(array, *fixed_array_map);
}
} else {
RootIndex map_index = IsDoubleElementsKind(kind)
@@ -3880,23 +3820,32 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
StoreMapNoWriteBarrier(array, map_index);
}
StoreObjectFieldNoWriteBarrier(array, FixedArrayBase::kLengthOffset,
- ParameterToTagged(capacity, mode));
+ ParameterToTagged(capacity));
return UncheckedCast<FixedArrayBase>(array);
}
+// There is no need to export the Smi version since it is only used inside
+// code-stub-assembler.
+template V8_EXPORT_PRIVATE TNode<FixedArrayBase>
+ CodeStubAssembler::AllocateFixedArray<IntPtrT>(ElementsKind, TNode<IntPtrT>,
+ AllocationFlags,
+ base::Optional<TNode<Map>>);
+
+template <typename TIndex>
TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
- SloppyTNode<FixedArrayBase> source, Node* first, Node* count,
- Node* capacity, SloppyTNode<Map> source_map, ElementsKind from_kind,
- AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
- ParameterMode parameter_mode, HoleConversionMode convert_holes,
+ SloppyTNode<FixedArrayBase> source, TNode<TIndex> first,
+ TNode<TIndex> count, TNode<TIndex> capacity, TNode<Map> source_map,
+ ElementsKind from_kind, AllocationFlags allocation_flags,
+ ExtractFixedArrayFlags extract_flags, HoleConversionMode convert_holes,
TVariable<BoolT>* var_holes_converted,
base::Optional<TNode<Int32T>> source_elements_kind) {
- DCHECK_NE(first, nullptr);
- DCHECK_NE(count, nullptr);
- DCHECK_NE(capacity, nullptr);
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT first, count, and capacity are allowed");
+
DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays);
- CSA_ASSERT(this, IntPtrOrSmiNotEqual(IntPtrOrSmiConstant(0, parameter_mode),
- capacity, parameter_mode));
+ CSA_ASSERT(this,
+ IntPtrOrSmiNotEqual(IntPtrOrSmiConstant<TIndex>(0), capacity));
CSA_ASSERT(this, TaggedEqual(source_map, LoadMap(source)));
TVARIABLE(FixedArrayBase, var_result);
@@ -3924,8 +3873,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
// 1) |extract_flags| forces us to, or
// 2) we're asked to extract only part of the |source| (|first| != 0).
if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) {
- Branch(IntPtrOrSmiNotEqual(IntPtrOrSmiConstant(0, parameter_mode),
- first, parameter_mode),
+ Branch(IntPtrOrSmiNotEqual(IntPtrOrSmiConstant<TIndex>(0), first),
&new_space_check, [&] {
var_result = source;
Goto(&done);
@@ -3937,6 +3885,9 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
}
}
+ const ParameterMode parameter_mode =
+ std::is_same<TIndex, Smi>::value ? SMI_PARAMETERS : INTPTR_PARAMETERS;
+
BIND(&new_space_check);
{
bool handle_old_space = !FLAG_young_generation_large_objects;
@@ -3944,7 +3895,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
if (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) {
handle_old_space = false;
CSA_ASSERT(this, Word32BinaryNot(FixedArraySizeDoesntFitInNewSpace(
- count, FixedArray::kHeaderSize, parameter_mode)));
+ count, FixedArray::kHeaderSize)));
} else {
int constant_count;
handle_old_space =
@@ -3957,17 +3908,16 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
Label old_space(this, Label::kDeferred);
if (handle_old_space) {
- GotoIfFixedArraySizeDoesntFitInNewSpace(
- capacity, &old_space, FixedArray::kHeaderSize, parameter_mode);
+ GotoIfFixedArraySizeDoesntFitInNewSpace(capacity, &old_space,
+ FixedArray::kHeaderSize);
}
Comment("Copy FixedArray in young generation");
// We use PACKED_ELEMENTS to tell AllocateFixedArray and
// CopyFixedArrayElements that we want a FixedArray.
const ElementsKind to_kind = PACKED_ELEMENTS;
- TNode<FixedArrayBase> to_elements =
- AllocateFixedArray(to_kind, capacity, parameter_mode, allocation_flags,
- var_target_map.value());
+ TNode<FixedArrayBase> to_elements = AllocateFixedArray(
+ to_kind, capacity, allocation_flags, var_target_map.value());
var_result = to_elements;
#ifndef V8_ENABLE_SINGLE_GENERATION
@@ -3993,13 +3943,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
FillFixedArrayWithValue(to_kind, to_elements, count, capacity,
RootIndex::kTheHoleValue, parameter_mode);
CopyElements(to_kind, to_elements, IntPtrConstant(0), source,
- ParameterToIntPtr(first, parameter_mode),
- ParameterToIntPtr(count, parameter_mode),
+ ParameterToIntPtr(first), ParameterToIntPtr(count),
SKIP_WRITE_BARRIER);
} else {
CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
- count, capacity, SKIP_WRITE_BARRIER,
- parameter_mode, convert_holes,
+ count, capacity, SKIP_WRITE_BARRIER, convert_holes,
var_holes_converted);
}
Goto(&done);
@@ -4018,9 +3966,8 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
&copy_one_by_one);
const ElementsKind to_smi_kind = PACKED_SMI_ELEMENTS;
- to_elements =
- AllocateFixedArray(to_smi_kind, capacity, parameter_mode,
- allocation_flags, var_target_map.value());
+ to_elements = AllocateFixedArray(
+ to_smi_kind, capacity, allocation_flags, var_target_map.value());
var_result = to_elements;
FillFixedArrayWithValue(to_smi_kind, to_elements, count, capacity,
@@ -4029,8 +3976,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
// GC. Otherwise it will copy elements by elements, but skip write
// barriers (since we're copying smis to smis).
CopyElements(to_smi_kind, to_elements, IntPtrConstant(0), source,
- ParameterToIntPtr(first, parameter_mode),
- ParameterToIntPtr(count, parameter_mode),
+ ParameterToIntPtr(first), ParameterToIntPtr(count),
SKIP_WRITE_BARRIER);
Goto(&done);
} else {
@@ -4039,14 +3985,12 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
BIND(&copy_one_by_one);
{
- to_elements =
- AllocateFixedArray(to_kind, capacity, parameter_mode,
- allocation_flags, var_target_map.value());
+ to_elements = AllocateFixedArray(to_kind, capacity, allocation_flags,
+ var_target_map.value());
var_result = to_elements;
CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
count, capacity, UPDATE_WRITE_BARRIER,
- parameter_mode, convert_holes,
- var_holes_converted);
+ convert_holes, var_holes_converted);
Goto(&done);
}
}
@@ -4057,21 +4001,26 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
return UncheckedCast<FixedArray>(var_result.value());
}
+template <typename TIndex>
TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
- TNode<FixedArrayBase> from_array, Node* first, Node* count, Node* capacity,
- TNode<Map> fixed_array_map, TVariable<BoolT>* var_holes_converted,
- AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
- ParameterMode mode) {
- DCHECK_NE(first, nullptr);
- DCHECK_NE(count, nullptr);
- DCHECK_NE(capacity, nullptr);
+ TNode<FixedArrayBase> from_array, TNode<TIndex> first, TNode<TIndex> count,
+ TNode<TIndex> capacity, TNode<Map> fixed_array_map,
+ TVariable<BoolT>* var_holes_converted, AllocationFlags allocation_flags,
+ ExtractFixedArrayFlags extract_flags) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT first, count, and capacity are allowed");
+
DCHECK_NE(var_holes_converted, nullptr);
CSA_ASSERT(this, IsFixedDoubleArrayMap(fixed_array_map));
+ const ParameterMode parameter_mode =
+ std::is_same<TIndex, Smi>::value ? SMI_PARAMETERS : INTPTR_PARAMETERS;
+
TVARIABLE(FixedArrayBase, var_result);
const ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
- TNode<FixedArrayBase> to_elements = AllocateFixedArray(
- kind, capacity, mode, allocation_flags, fixed_array_map);
+ TNode<FixedArrayBase> to_elements =
+ AllocateFixedArray(kind, capacity, allocation_flags, fixed_array_map);
var_result = to_elements;
// We first try to copy the FixedDoubleArray to a new FixedDoubleArray.
// |var_holes_converted| is set to False preliminarily.
@@ -4079,25 +4028,23 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
// The construction of the loop and the offsets for double elements is
// extracted from CopyFixedArrayElements.
- CSA_SLOW_ASSERT(this, MatchesParameterMode(count, mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, kind));
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
Comment("[ ExtractFixedDoubleArrayFillingHoles");
// This copy can trigger GC, so we pre-initialize the array with holes.
- FillFixedArrayWithValue(kind, to_elements, IntPtrOrSmiConstant(0, mode),
- capacity, RootIndex::kTheHoleValue, mode);
+ FillFixedArrayWithValue(kind, to_elements, IntPtrOrSmiConstant<TIndex>(0),
+ capacity, RootIndex::kTheHoleValue, parameter_mode);
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> first_from_element_offset =
- ElementOffsetFromIndex(first, kind, mode, 0);
+ ElementOffsetFromIndex(first, kind, 0);
TNode<IntPtrT> limit_offset = IntPtrAdd(first_from_element_offset,
IntPtrConstant(first_element_offset));
TVARIABLE(IntPtrT, var_from_offset,
- ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count, mode), kind,
- mode, first_element_offset));
+ ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count), kind,
+ first_element_offset));
Label decrement(this, {&var_from_offset}), done(this);
TNode<IntPtrT> to_array_adjusted =
@@ -4132,7 +4079,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
*var_holes_converted = Int32TrueConstant();
to_elements =
ExtractToFixedArray(from_array, first, count, capacity, fixed_array_map,
- kind, allocation_flags, extract_flags, mode,
+ kind, allocation_flags, extract_flags,
HoleConversionMode::kConvertToUndefined);
var_result = to_elements;
Goto(&done);
@@ -4143,15 +4090,19 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
return var_result.value();
}
+template <typename TIndex>
TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
- TNode<FixedArrayBase> source, Node* first, Node* count, Node* capacity,
- ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode,
- TVariable<BoolT>* var_holes_converted,
- base::Optional<TNode<Int32T>> source_runtime_kind) {
+ TNode<FixedArrayBase> source, base::Optional<TNode<TIndex>> first,
+ base::Optional<TNode<TIndex>> count, base::Optional<TNode<TIndex>> capacity,
+ ExtractFixedArrayFlags extract_flags, TVariable<BoolT>* var_holes_converted,
+ base::Optional<TNode<Int32T>> source_elements_kind) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT first, count, and capacity are allowed");
DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays ||
extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays);
- // If we want to replace holes, ExtractFixedArrayFlag::kDontCopyCOW should not
- // be used, because that disables the iteration which detects holes.
+ // If we want to replace holes, ExtractFixedArrayFlag::kDontCopyCOW should
+ // not be used, because that disables the iteration which detects holes.
DCHECK_IMPLIES(var_holes_converted != nullptr,
!(extract_flags & ExtractFixedArrayFlag::kDontCopyCOW));
HoleConversionMode convert_holes =
@@ -4162,31 +4113,26 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
(extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly)
? CodeStubAssembler::kNone
: CodeStubAssembler::kAllowLargeObjectAllocation;
- if (first == nullptr) {
- first = IntPtrOrSmiConstant(0, parameter_mode);
+ if (!first) {
+ first = IntPtrOrSmiConstant<TIndex>(0);
}
- if (count == nullptr) {
+ if (!count) {
count = IntPtrOrSmiSub(
- TaggedToParameter(LoadFixedArrayBaseLength(source), parameter_mode),
- first, parameter_mode);
+ TaggedToParameter<TIndex>(LoadFixedArrayBaseLength(source)), *first);
- CSA_ASSERT(
- this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant(0, parameter_mode),
- count, parameter_mode));
+ CSA_ASSERT(this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant<TIndex>(0),
+ *count));
}
- if (capacity == nullptr) {
- capacity = count;
+ if (!capacity) {
+ capacity = *count;
} else {
CSA_ASSERT(this, Word32BinaryNot(IntPtrOrSmiGreaterThan(
- IntPtrOrSmiAdd(first, count, parameter_mode), capacity,
- parameter_mode)));
+ IntPtrOrSmiAdd(*first, *count), *capacity)));
}
Label if_fixed_double_array(this), empty(this), done(this, &var_result);
TNode<Map> source_map = LoadMap(source);
- GotoIf(IntPtrOrSmiEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity,
- parameter_mode),
- &empty);
+ GotoIf(IntPtrOrSmiEqual(IntPtrOrSmiConstant<TIndex>(0), *capacity), &empty);
if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
@@ -4196,13 +4142,15 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
}
}
+ const ParameterMode parameter_mode =
+ std::is_same<TIndex, Smi>::value ? SMI_PARAMETERS : INTPTR_PARAMETERS;
if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
// Here we can only get |source| as FixedArray, never FixedDoubleArray.
// PACKED_ELEMENTS is used to signify that the source is a FixedArray.
TNode<FixedArray> to_elements = ExtractToFixedArray(
- source, first, count, capacity, source_map, PACKED_ELEMENTS,
- allocation_flags, extract_flags, parameter_mode, convert_holes,
- var_holes_converted, source_runtime_kind);
+ source, *first, *count, *capacity, source_map, PACKED_ELEMENTS,
+ allocation_flags, extract_flags, convert_holes, var_holes_converted,
+ source_elements_kind);
var_result = to_elements;
Goto(&done);
}
@@ -4213,21 +4161,21 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
if (convert_holes == HoleConversionMode::kConvertToUndefined) {
TNode<FixedArrayBase> to_elements = ExtractFixedDoubleArrayFillingHoles(
- source, first, count, capacity, source_map, var_holes_converted,
- allocation_flags, extract_flags, parameter_mode);
+ source, *first, *count, *capacity, source_map, var_holes_converted,
+ allocation_flags, extract_flags);
var_result = to_elements;
} else {
// We use PACKED_DOUBLE_ELEMENTS to signify that both the source and
// the target are FixedDoubleArray. That it is PACKED or HOLEY does not
// matter.
ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
- TNode<FixedArrayBase> to_elements = AllocateFixedArray(
- kind, capacity, parameter_mode, allocation_flags, source_map);
- FillFixedArrayWithValue(kind, to_elements, count, capacity,
+ TNode<FixedArrayBase> to_elements =
+ AllocateFixedArray(kind, *capacity, allocation_flags, source_map);
+ FillFixedArrayWithValue(kind, to_elements, *count, *capacity,
RootIndex::kTheHoleValue, parameter_mode);
CopyElements(kind, to_elements, IntPtrConstant(0), source,
- ParameterToIntPtr(first, parameter_mode),
- ParameterToIntPtr(count, parameter_mode));
+ ParameterToIntPtr(*first, parameter_mode),
+ ParameterToIntPtr(*count, parameter_mode));
var_result = to_elements;
}
@@ -4246,51 +4194,54 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
return var_result.value();
}
+template V8_EXPORT_PRIVATE TNode<FixedArrayBase>
+CodeStubAssembler::ExtractFixedArray<Smi>(
+ TNode<FixedArrayBase>, base::Optional<TNode<Smi>>,
+ base::Optional<TNode<Smi>>, base::Optional<TNode<Smi>>,
+ ExtractFixedArrayFlags, TVariable<BoolT>*, base::Optional<TNode<Int32T>>);
+
+template V8_EXPORT_PRIVATE TNode<FixedArrayBase>
+CodeStubAssembler::ExtractFixedArray<IntPtrT>(
+ TNode<FixedArrayBase>, base::Optional<TNode<IntPtrT>>,
+ base::Optional<TNode<IntPtrT>>, base::Optional<TNode<IntPtrT>>,
+ ExtractFixedArrayFlags, TVariable<BoolT>*, base::Optional<TNode<Int32T>>);
+
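
The block above shows the pattern used for the newly templated helpers: the definition stays in the .cc file, a static_assert restricts TIndex to Smi or IntPtrT, and only the instantiations that other translation units need are explicitly emitted and exported. A small self-contained sketch of that mechanism with toy names, not V8's actual declarations:

#include <cstdint>
#include <iostream>
#include <type_traits>

struct Smi {};
struct IntPtrT {};

// Definition lives in one translation unit; TIndex is restricted up front.
template <typename TIndex>
intptr_t ExtractCount(intptr_t first, intptr_t length) {
  static_assert(std::is_same<TIndex, Smi>::value ||
                    std::is_same<TIndex, IntPtrT>::value,
                "Only Smi or IntPtrT indices are allowed");
  return length - first;
}

// Explicit instantiations: only these versions are emitted for linking.
// (In the real file the exported ones also carry V8_EXPORT_PRIVATE.)
template intptr_t ExtractCount<Smi>(intptr_t, intptr_t);
template intptr_t ExtractCount<IntPtrT>(intptr_t, intptr_t);

int main() {
  std::cout << ExtractCount<IntPtrT>(2, 10) << "\n";  // prints 8
}
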
void CodeStubAssembler::InitializePropertyArrayLength(
- TNode<PropertyArray> property_array, Node* length, ParameterMode mode) {
- CSA_ASSERT(
- this, IntPtrOrSmiGreaterThan(length, IntPtrOrSmiConstant(0, mode), mode));
- CSA_ASSERT(
- this,
- IntPtrOrSmiLessThanOrEqual(
- length, IntPtrOrSmiConstant(PropertyArray::LengthField::kMax, mode),
- mode));
- StoreObjectFieldNoWriteBarrier(property_array,
- PropertyArray::kLengthAndHashOffset,
- ParameterToTagged(length, mode));
+ TNode<PropertyArray> property_array, TNode<IntPtrT> length) {
+ CSA_ASSERT(this, IntPtrGreaterThan(length, IntPtrConstant(0)));
+ CSA_ASSERT(this,
+ IntPtrLessThanOrEqual(
+ length, IntPtrConstant(PropertyArray::LengthField::kMax)));
+ StoreObjectFieldNoWriteBarrier(
+ property_array, PropertyArray::kLengthAndHashOffset, SmiTag(length));
}
TNode<PropertyArray> CodeStubAssembler::AllocatePropertyArray(
- Node* capacity_node, ParameterMode mode, AllocationFlags flags) {
- CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode));
- CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
- IntPtrOrSmiConstant(0, mode), mode));
- TNode<IntPtrT> total_size =
- GetPropertyArrayAllocationSize(capacity_node, mode);
+ TNode<IntPtrT> capacity) {
+ CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
+ TNode<IntPtrT> total_size = GetPropertyArrayAllocationSize(capacity);
- TNode<HeapObject> array = Allocate(total_size, flags);
+ TNode<HeapObject> array = Allocate(total_size, kNone);
RootIndex map_index = RootIndex::kPropertyArrayMap;
DCHECK(RootsTable::IsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
TNode<PropertyArray> property_array = CAST(array);
- InitializePropertyArrayLength(property_array, capacity_node, mode);
+ InitializePropertyArrayLength(property_array, capacity);
return property_array;
}
void CodeStubAssembler::FillPropertyArrayWithUndefined(
- TNode<PropertyArray> array, Node* from_node, Node* to_node,
- ParameterMode mode) {
- CSA_SLOW_ASSERT(this, MatchesParameterMode(from_node, mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
+ TNode<PropertyArray> array, TNode<IntPtrT> from_index,
+ TNode<IntPtrT> to_index) {
ElementsKind kind = PACKED_ELEMENTS;
TNode<Oddball> value = UndefinedConstant();
- BuildFastFixedArrayForEach(
- array, kind, from_node, to_node,
- [this, value](Node* array, Node* offset) {
+ BuildFastArrayForEach(
+ array, kind, from_index, to_index,
+ [this, value](TNode<HeapObject> array, TNode<IntPtrT> offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
value);
},
- mode);
+ INTPTR_PARAMETERS);
}
void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind,
@@ -4312,9 +4263,10 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind,
float_value = LoadHeapNumberValue(CAST(value));
}
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
array, kind, from_node, to_node,
- [this, value, float_value, kind](Node* array, Node* offset) {
+ [this, value, float_value, kind](TNode<HeapObject> array,
+ TNode<IntPtrT> offset) {
if (IsDoubleElementsKind(kind)) {
StoreNoWriteBarrier(MachineRepresentation::kFloat64, array, offset,
float_value);
@@ -4346,12 +4298,10 @@ void CodeStubAssembler::StoreDoubleHole(TNode<HeapObject> object,
}
}
-void CodeStubAssembler::StoreFixedDoubleArrayHole(
- TNode<FixedDoubleArray> array, Node* index, ParameterMode parameter_mode) {
- CSA_SLOW_ASSERT(this, MatchesParameterMode(index, parameter_mode));
- TNode<IntPtrT> offset =
- ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, parameter_mode,
- FixedArray::kHeaderSize - kHeapObjectTag);
+void CodeStubAssembler::StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array,
+ TNode<IntPtrT> index) {
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(
+ index, PACKED_DOUBLE_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag);
CSA_ASSERT(this, IsOffsetInBounds(
offset, LoadAndUntagFixedArrayBaseLength(array),
FixedDoubleArray::kHeaderSize, PACKED_DOUBLE_ELEMENTS));
@@ -4472,7 +4422,7 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
const TNode<IntPtrT> delta =
IntPtrMul(IntPtrSub(dst_index, begin),
IntPtrConstant(ElementsKindToByteSize(kind)));
- auto loop_body = [&](Node* array, Node* offset) {
+ auto loop_body = [&](TNode<HeapObject> array, TNode<IntPtrT> offset) {
const TNode<AnyTaggedT> element = Load<AnyTaggedT>(array, offset);
const TNode<WordT> delta_offset = IntPtrAdd(offset, delta);
Store(array, delta_offset, element);
@@ -4485,17 +4435,15 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
BIND(&iterate_forward);
{
// Make a loop for the stores.
- BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
- INTPTR_PARAMETERS,
- ForEachDirection::kForward);
+ BuildFastArrayForEach(elements, kind, begin, end, loop_body,
+ INTPTR_PARAMETERS, ForEachDirection::kForward);
Goto(&finished);
}
BIND(&iterate_backward);
{
- BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
- INTPTR_PARAMETERS,
- ForEachDirection::kReverse);
+ BuildFastArrayForEach(elements, kind, begin, end, loop_body,
+ INTPTR_PARAMETERS, ForEachDirection::kReverse);
Goto(&finished);
}
}
@@ -4563,9 +4511,9 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
const TNode<IntPtrT> delta =
IntPtrMul(IntPtrSub(dst_index, src_index),
IntPtrConstant(ElementsKindToByteSize(kind)));
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
src_elements, kind, begin, end,
- [&](Node* array, Node* offset) {
+ [&](TNode<HeapObject> array, TNode<IntPtrT> offset) {
const TNode<AnyTaggedT> element = Load<AnyTaggedT>(array, offset);
const TNode<WordT> delta_offset = IntPtrAdd(offset, delta);
if (write_barrier == SKIP_WRITE_BARRIER) {
@@ -4582,19 +4530,22 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
}
}
+template <typename TIndex>
void CodeStubAssembler::CopyFixedArrayElements(
ElementsKind from_kind, TNode<FixedArrayBase> from_array,
- ElementsKind to_kind, TNode<FixedArrayBase> to_array, Node* first_element,
- Node* element_count, Node* capacity, WriteBarrierMode barrier_mode,
- ParameterMode mode, HoleConversionMode convert_holes,
- TVariable<BoolT>* var_holes_converted) {
+ ElementsKind to_kind, TNode<FixedArrayBase> to_array,
+ TNode<TIndex> first_element, TNode<TIndex> element_count,
+ TNode<TIndex> capacity, WriteBarrierMode barrier_mode,
+ HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted) {
DCHECK_IMPLIES(var_holes_converted != nullptr,
convert_holes == HoleConversionMode::kConvertToUndefined);
- CSA_SLOW_ASSERT(this, MatchesParameterMode(element_count, mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, from_kind));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(to_array, to_kind));
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT indices are allowed");
+
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
Comment("[ CopyFixedArrayElements");
@@ -4618,6 +4569,8 @@ void CodeStubAssembler::CopyFixedArrayElements(
Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
: ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
+ const ParameterMode mode =
+ std::is_same<TIndex, Smi>::value ? SMI_PARAMETERS : INTPTR_PARAMETERS;
// If copying might trigger a GC, we pre-initialize the FixedArray such that
// it's always in a consistent state.
if (convert_holes == HoleConversionMode::kConvertToUndefined) {
@@ -4625,14 +4578,14 @@ void CodeStubAssembler::CopyFixedArrayElements(
// Use undefined for the part that we copy and holes for the rest.
// Later if we run into a hole in the source we can just skip the writing
// to the target and are still guaranteed that we get an undefined.
- FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode),
+ FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant<TIndex>(0),
element_count, RootIndex::kUndefinedValue, mode);
FillFixedArrayWithValue(to_kind, to_array, element_count, capacity,
RootIndex::kTheHoleValue, mode);
} else if (doubles_to_objects_conversion) {
// Pre-initialized the target with holes so later if we run into a hole in
// the source we can just skip the writing to the target.
- FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode),
+ FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant<TIndex>(0),
capacity, RootIndex::kTheHoleValue, mode);
} else if (element_count != capacity) {
FillFixedArrayWithValue(to_kind, to_array, element_count, capacity,
@@ -4640,27 +4593,25 @@ void CodeStubAssembler::CopyFixedArrayElements(
}
TNode<IntPtrT> first_from_element_offset =
- ElementOffsetFromIndex(first_element, from_kind, mode, 0);
+ ElementOffsetFromIndex(first_element, from_kind, 0);
TNode<IntPtrT> limit_offset = Signed(IntPtrAdd(
first_from_element_offset, IntPtrConstant(first_element_offset)));
- TVARIABLE(
- IntPtrT, var_from_offset,
- ElementOffsetFromIndex(IntPtrOrSmiAdd(first_element, element_count, mode),
- from_kind, mode, first_element_offset));
+ TVARIABLE(IntPtrT, var_from_offset,
+ ElementOffsetFromIndex(IntPtrOrSmiAdd(first_element, element_count),
+ from_kind, first_element_offset));
// This second variable is used only when the element sizes of source and
// destination arrays do not match.
TVARIABLE(IntPtrT, var_to_offset);
if (element_offset_matches) {
var_to_offset = var_from_offset.value();
} else {
- var_to_offset = ElementOffsetFromIndex(element_count, to_kind, mode,
- first_element_offset);
+ var_to_offset =
+ ElementOffsetFromIndex(element_count, to_kind, first_element_offset);
}
- Variable* vars[] = {&var_from_offset, &var_to_offset, var_holes_converted};
- int num_vars =
- var_holes_converted != nullptr ? arraysize(vars) : arraysize(vars) - 1;
- Label decrement(this, num_vars, vars);
+ VariableList vars({&var_from_offset, &var_to_offset}, zone());
+ if (var_holes_converted != nullptr) vars.push_back(var_holes_converted);
+ Label decrement(this, vars);
TNode<IntPtrT> to_array_adjusted =
element_offset_matches
@@ -4757,12 +4708,6 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("] CopyFixedArrayElements");
}
-TNode<JSAggregateError> CodeStubAssembler::HeapObjectToJSAggregateError(
- TNode<HeapObject> heap_object, Label* fail) {
- GotoIfNot(IsJSAggregateError(heap_object), fail);
- return UncheckedCast<JSAggregateError>(heap_object);
-}
-
TNode<FixedArray> CodeStubAssembler::HeapObjectToFixedArray(
TNode<HeapObject> base, Label* cast_fail) {
Label fixed_array(this);
@@ -4776,11 +4721,9 @@ TNode<FixedArray> CodeStubAssembler::HeapObjectToFixedArray(
void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
TNode<PropertyArray> to_array,
- Node* property_count,
+ TNode<IntPtrT> property_count,
WriteBarrierMode barrier_mode,
- ParameterMode mode,
DestroySource destroy_source) {
- CSA_SLOW_ASSERT(this, MatchesParameterMode(property_count, mode));
CSA_SLOW_ASSERT(this, Word32Or(IsPropertyArray(from_array),
IsEmptyFixedArray(from_array)));
Comment("[ CopyPropertyArrayValues");
@@ -4793,12 +4736,12 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
needs_write_barrier = true;
}
- Node* start = IntPtrOrSmiConstant(0, mode);
+ TNode<IntPtrT> start = IntPtrConstant(0);
ElementsKind kind = PACKED_ELEMENTS;
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
from_array, kind, start, property_count,
- [this, to_array, needs_write_barrier, destroy_source](Node* array,
- Node* offset) {
+ [this, to_array, needs_write_barrier, destroy_source](
+ TNode<HeapObject> array, TNode<IntPtrT> offset) {
TNode<AnyTaggedT> value = Load<AnyTaggedT>(array, offset);
if (destroy_source == DestroySource::kNo) {
@@ -4812,15 +4755,14 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
value);
}
},
- mode);
+ INTPTR_PARAMETERS);
#ifdef DEBUG
// Zap {from_array} if the copying above has made it invalid.
if (destroy_source == DestroySource::kYes) {
Label did_zap(this);
GotoIf(IsEmptyFixedArray(from_array), &did_zap);
- FillPropertyArrayWithUndefined(CAST(from_array), start, property_count,
- mode);
+ FillPropertyArrayWithUndefined(CAST(from_array), start, property_count);
Goto(&did_zap);
BIND(&did_zap);
@@ -4829,11 +4771,17 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
Comment("] CopyPropertyArrayValues");
}
-Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
- Node* offset,
- ElementsKind from_kind,
- ElementsKind to_kind,
- Label* if_hole) {
+TNode<FixedArrayBase> CodeStubAssembler::CloneFixedArray(
+ TNode<FixedArrayBase> source, ExtractFixedArrayFlags flags) {
+ return ExtractFixedArray(
+ source, base::Optional<TNode<BInt>>(IntPtrOrSmiConstant<BInt>(0)),
+ base::Optional<TNode<BInt>>(base::nullopt),
+ base::Optional<TNode<BInt>>(base::nullopt), flags);
+}
+
+Node* CodeStubAssembler::LoadElementAndPrepareForStore(
+ TNode<FixedArrayBase> array, TNode<IntPtrT> offset, ElementsKind from_kind,
+ ElementsKind to_kind, Label* if_hole) {
CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
if (IsDoubleElementsKind(from_kind)) {
TNode<Float64T> value =
@@ -4858,75 +4806,86 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
}
}
-Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
- ParameterMode mode) {
- CSA_SLOW_ASSERT(this, MatchesParameterMode(old_capacity, mode));
- Node* half_old_capacity = WordOrSmiShr(old_capacity, 1, mode);
- Node* new_capacity = IntPtrOrSmiAdd(half_old_capacity, old_capacity, mode);
- Node* padding =
- IntPtrOrSmiConstant(JSObject::kMinAddedElementsCapacity, mode);
- return IntPtrOrSmiAdd(new_capacity, padding, mode);
+template <typename TIndex>
+TNode<TIndex> CodeStubAssembler::CalculateNewElementsCapacity(
+ TNode<TIndex> old_capacity) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT old_capacity is allowed");
+ Comment("TryGrowElementsCapacity");
+ TNode<TIndex> half_old_capacity = WordOrSmiShr(old_capacity, 1);
+ TNode<TIndex> new_capacity = IntPtrOrSmiAdd(half_old_capacity, old_capacity);
+ TNode<TIndex> padding =
+ IntPtrOrSmiConstant<TIndex>(JSObject::kMinAddedElementsCapacity);
+ return IntPtrOrSmiAdd(new_capacity, padding);
}
+template V8_EXPORT_PRIVATE TNode<IntPtrT>
+ CodeStubAssembler::CalculateNewElementsCapacity<IntPtrT>(TNode<IntPtrT>);
+template V8_EXPORT_PRIVATE TNode<Smi>
+ CodeStubAssembler::CalculateNewElementsCapacity<Smi>(TNode<Smi>);
+
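
The now-typed CalculateNewElementsCapacity keeps the growth formula that was previously expressed through ParameterMode helpers: new_capacity = old_capacity + old_capacity/2 + JSObject::kMinAddedElementsCapacity. A plain-integer sketch of that arithmetic (the constant 16 is assumed here for illustration; the real value comes from JSObject):

#include <cstdint>
#include <iostream>

constexpr intptr_t kMinAddedElementsCapacity = 16;  // assumed value

intptr_t CalculateNewElementsCapacity(intptr_t old_capacity) {
  intptr_t half_old_capacity = old_capacity >> 1;  // WordOrSmiShr(old_capacity, 1)
  intptr_t new_capacity = half_old_capacity + old_capacity;
  return new_capacity + kMinAddedElementsCapacity;
}

int main() {
  std::cout << CalculateNewElementsCapacity(8) << "\n";    // 4 + 8 + 16 = 28
  std::cout << CalculateNewElementsCapacity(100) << "\n";  // 50 + 100 + 16 = 166
}
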
TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
TNode<Smi> key, Label* bailout) {
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
TNode<Smi> capacity = LoadFixedArrayBaseLength(elements);
- ParameterMode mode = OptimalParameterMode();
- return TryGrowElementsCapacity(
- object, elements, kind, TaggedToParameter(key, mode),
- TaggedToParameter(capacity, mode), mode, bailout);
+ return TryGrowElementsCapacity(object, elements, kind,
+ TaggedToParameter<BInt>(key),
+ TaggedToParameter<BInt>(capacity), bailout);
}
+template <typename TIndex>
TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
- Node* key, Node* capacity, ParameterMode mode, Label* bailout) {
+ TNode<TIndex> key, TNode<TIndex> capacity, Label* bailout) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT key and capacity nodes are allowed");
Comment("TryGrowElementsCapacity");
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(key, mode));
// If the gap growth is too big, fall back to the runtime.
- Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
- Node* max_capacity = IntPtrOrSmiAdd(capacity, max_gap, mode);
- GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity, mode), bailout);
+ TNode<TIndex> max_gap = IntPtrOrSmiConstant<TIndex>(JSObject::kMaxGap);
+ TNode<TIndex> max_capacity = IntPtrOrSmiAdd(capacity, max_gap);
+ GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity), bailout);
// Calculate the capacity of the new backing store.
- Node* new_capacity = CalculateNewElementsCapacity(
- IntPtrOrSmiAdd(key, IntPtrOrSmiConstant(1, mode), mode), mode);
+ TNode<TIndex> new_capacity = CalculateNewElementsCapacity(
+ IntPtrOrSmiAdd(key, IntPtrOrSmiConstant<TIndex>(1)));
+
return GrowElementsCapacity(object, elements, kind, kind, capacity,
- new_capacity, mode, bailout);
+ new_capacity, bailout);
}
+template <typename TIndex>
TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity(
TNode<HeapObject> object, TNode<FixedArrayBase> elements,
- ElementsKind from_kind, ElementsKind to_kind, Node* capacity,
- Node* new_capacity, ParameterMode mode, Label* bailout) {
+ ElementsKind from_kind, ElementsKind to_kind, TNode<TIndex> capacity,
+ TNode<TIndex> new_capacity, Label* bailout) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT capacities are allowed");
Comment("[ GrowElementsCapacity");
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, from_kind));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(new_capacity, mode));
// If size of the allocation for the new capacity doesn't fit in a page
// that we can bump-pointer allocate from, fall back to the runtime.
int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(to_kind);
- GotoIf(UintPtrOrSmiGreaterThanOrEqual(
- new_capacity, IntPtrOrSmiConstant(max_size, mode), mode),
+ GotoIf(UintPtrOrSmiGreaterThanOrEqual(new_capacity,
+ IntPtrOrSmiConstant<TIndex>(max_size)),
bailout);
// Allocate the new backing store.
TNode<FixedArrayBase> new_elements =
- AllocateFixedArray(to_kind, new_capacity, mode);
+ AllocateFixedArray(to_kind, new_capacity);
// Copy the elements from the old elements store to the new.
// The size-check above guarantees that the |new_elements| is allocated
// in new space so we can skip the write barrier.
- CopyFixedArrayElements(from_kind, elements, to_kind, new_elements,
- UncheckedCast<IntPtrT>(capacity),
- UncheckedCast<IntPtrT>(new_capacity),
- SKIP_WRITE_BARRIER, mode);
+ CopyFixedArrayElements(from_kind, elements, to_kind, new_elements, capacity,
+ new_capacity, SKIP_WRITE_BARRIER);
StoreObjectField(object, JSObject::kElementsOffset, new_elements);
Comment("] GrowElementsCapacity");
@@ -5033,10 +4992,9 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// We might need to loop after conversion.
TVARIABLE(Object, var_value, value);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone);
- Variable* loop_vars[] = {&var_value, var_feedback};
- int num_vars =
- var_feedback != nullptr ? arraysize(loop_vars) : arraysize(loop_vars) - 1;
- Label loop(this, num_vars, loop_vars);
+ VariableList loop_vars({&var_value}, zone());
+ if (var_feedback != nullptr) loop_vars.push_back(var_feedback);
+ Label loop(this, loop_vars);
Goto(&loop);
BIND(&loop);
{
@@ -5664,14 +5622,6 @@ TNode<BoolT> CodeStubAssembler::IsCallableMap(SloppyTNode<Map> map) {
return IsSetWord32<Map::Bits1::IsCallableBit>(LoadMapBitField(map));
}
-TNode<BoolT> CodeStubAssembler::IsCoverageInfo(TNode<HeapObject> object) {
- return IsCoverageInfoMap(LoadMap(object));
-}
-
-TNode<BoolT> CodeStubAssembler::IsDebugInfo(TNode<HeapObject> object) {
- return HasInstanceType(object, DEBUG_INFO_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsDeprecatedMap(SloppyTNode<Map> map) {
CSA_ASSERT(this, IsMap(map));
return IsSetWord32<Map::Bits3::IsDeprecatedBit>(LoadMapBitField3(map));
@@ -5803,14 +5753,6 @@ TNode<BoolT> CodeStubAssembler::IsCallable(SloppyTNode<HeapObject> object) {
return IsCallableMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsCell(SloppyTNode<HeapObject> object) {
- return TaggedEqual(LoadMap(object), CellMapConstant());
-}
-
-TNode<BoolT> CodeStubAssembler::IsCode(SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, CODE_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsConstructorMap(SloppyTNode<Map> map) {
CSA_ASSERT(this, IsMap(map));
return IsSetWord32<Map::Bits1::IsConstructorBit>(LoadMapBitField(map));
@@ -5861,6 +5803,15 @@ TNode<BoolT> CodeStubAssembler::IsSequentialStringInstanceType(
Int32Constant(kSeqStringTag));
}
+TNode<BoolT> CodeStubAssembler::IsSeqOneByteStringInstanceType(
+ TNode<Int32T> instance_type) {
+ CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ return Word32Equal(
+ Word32And(instance_type,
+ Int32Constant(kStringRepresentationMask | kStringEncodingMask)),
+ Int32Constant(kSeqStringTag | kOneByteStringTag));
+}
+
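
The new IsSeqOneByteStringInstanceType predicate folds two instance-type checks into one branch by masking with the union of the representation and encoding masks and comparing against the union of the sequential and one-byte tags. A hedged sketch with illustrative constants (not necessarily V8's real bit layout):

#include <cstdint>
#include <iostream>

constexpr uint32_t kStringRepresentationMask = 0x07;  // illustrative layout
constexpr uint32_t kStringEncodingMask = 0x08;        // illustrative layout
constexpr uint32_t kSeqStringTag = 0x00;
constexpr uint32_t kOneByteStringTag = 0x08;

bool IsSeqOneByteStringInstanceType(uint32_t instance_type) {
  // One AND plus one compare tests both properties at once.
  return (instance_type & (kStringRepresentationMask | kStringEncodingMask)) ==
         (kSeqStringTag | kOneByteStringTag);
}

int main() {
  std::cout << IsSeqOneByteStringInstanceType(0x08) << "\n";  // 1: sequential, one-byte
  std::cout << IsSeqOneByteStringInstanceType(0x01) << "\n";  // 0: different representation
}
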
TNode<BoolT> CodeStubAssembler::IsConsStringInstanceType(
SloppyTNode<Int32T> instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
@@ -6002,10 +5953,6 @@ TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode<Map> map) {
return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSAggregateError(TNode<HeapObject> object) {
- return HasInstanceType(object, JS_AGGREGATE_ERROR_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
SloppyTNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
@@ -6029,13 +5976,6 @@ TNode<BoolT> CodeStubAssembler::IsJSAsyncGeneratorObject(
return HasInstanceType(object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsContext(SloppyTNode<HeapObject> object) {
- TNode<Uint16T> instance_type = LoadInstanceType(object);
- return UncheckedCast<BoolT>(Word32And(
- Int32GreaterThanOrEqual(instance_type, Int32Constant(FIRST_CONTEXT_TYPE)),
- Int32LessThanOrEqual(instance_type, Int32Constant(LAST_CONTEXT_TYPE))));
-}
-
TNode<BoolT> CodeStubAssembler::IsFixedArray(SloppyTNode<HeapObject> object) {
return HasInstanceType(object, FIXED_ARRAY_TYPE);
}
@@ -6059,21 +5999,11 @@ TNode<BoolT> CodeStubAssembler::IsNotWeakFixedArraySubclass(
Int32Constant(LAST_WEAK_FIXED_ARRAY_TYPE))));
}
-TNode<BoolT> CodeStubAssembler::IsPromiseCapability(
- SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, PROMISE_CAPABILITY_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsPropertyArray(
SloppyTNode<HeapObject> object) {
return HasInstanceType(object, PROPERTY_ARRAY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsPromiseReaction(
- SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, PROMISE_REACTION_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsPromiseReactionJobTask(
TNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
@@ -6081,16 +6011,6 @@ TNode<BoolT> CodeStubAssembler::IsPromiseReactionJobTask(
LAST_PROMISE_REACTION_JOB_TASK_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsPromiseRejectReactionJobTask(
- SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, PROMISE_REJECT_REACTION_JOB_TASK_TYPE);
-}
-
-TNode<BoolT> CodeStubAssembler::IsPromiseFulfillReactionJobTask(
- SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE);
-}
-
// This complicated check is due to elements oddities. If a smi array is empty
// after Array.p.shift, it is replaced by the empty array constant. If it is
// later filled with a double element, we try to grow it but pass in a double
@@ -6136,23 +6056,6 @@ TNode<BoolT> CodeStubAssembler::IsPropertyCell(SloppyTNode<HeapObject> object) {
return IsPropertyCellMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsAccessorInfo(SloppyTNode<HeapObject> object) {
- return IsAccessorInfoMap(LoadMap(object));
-}
-
-TNode<BoolT> CodeStubAssembler::IsAccessorPair(SloppyTNode<HeapObject> object) {
- return IsAccessorPairMap(LoadMap(object));
-}
-
-TNode<BoolT> CodeStubAssembler::IsAllocationSite(
- SloppyTNode<HeapObject> object) {
- return IsAllocationSiteInstanceType(LoadInstanceType(object));
-}
-
-TNode<BoolT> CodeStubAssembler::IsHeapNumber(SloppyTNode<HeapObject> object) {
- return IsHeapNumberMap(LoadMap(object));
-}
-
TNode<BoolT> CodeStubAssembler::IsHeapNumberInstanceType(
SloppyTNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, HEAP_NUMBER_TYPE);
@@ -6167,15 +6070,6 @@ TNode<BoolT> CodeStubAssembler::IsOddballInstanceType(
return InstanceTypeEqual(instance_type, ODDBALL_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsFeedbackCell(SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, FEEDBACK_CELL_TYPE);
-}
-
-TNode<BoolT> CodeStubAssembler::IsFeedbackVector(
- SloppyTNode<HeapObject> object) {
- return IsFeedbackVectorMap(LoadMap(object));
-}
-
TNode<BoolT> CodeStubAssembler::IsName(SloppyTNode<HeapObject> object) {
return IsNameInstanceType(LoadInstanceType(object));
}
@@ -6189,15 +6083,15 @@ TNode<BoolT> CodeStubAssembler::IsString(SloppyTNode<HeapObject> object) {
return IsStringInstanceType(LoadInstanceType(object));
}
+TNode<BoolT> CodeStubAssembler::IsSeqOneByteString(TNode<HeapObject> object) {
+ return IsSeqOneByteStringInstanceType(LoadInstanceType(object));
+}
+
TNode<BoolT> CodeStubAssembler::IsSymbolInstanceType(
SloppyTNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, SYMBOL_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsSymbol(SloppyTNode<HeapObject> object) {
- return IsSymbolMap(LoadMap(object));
-}
-
TNode<BoolT> CodeStubAssembler::IsInternalizedStringInstanceType(
TNode<Int32T> instance_type) {
STATIC_ASSERT(kNotInternalizedTag != 0);
@@ -6263,34 +6157,11 @@ TNode<BoolT> CodeStubAssembler::IsPrimitiveInstanceType(
Int32Constant(LAST_PRIMITIVE_HEAP_OBJECT_TYPE));
}
-TNode<BoolT> CodeStubAssembler::IsPrivateSymbol(
- SloppyTNode<HeapObject> object) {
- return Select<BoolT>(
- IsSymbol(object),
- [=] {
- TNode<Symbol> symbol = CAST(object);
- TNode<Uint32T> flags =
- LoadObjectField<Uint32T>(symbol, Symbol::kFlagsOffset);
- return IsSetWord32<Symbol::IsPrivateBit>(flags);
- },
- [=] { return Int32FalseConstant(); });
-}
-
TNode<BoolT> CodeStubAssembler::IsPrivateName(SloppyTNode<Symbol> symbol) {
TNode<Uint32T> flags = LoadObjectField<Uint32T>(symbol, Symbol::kFlagsOffset);
return IsSetWord32<Symbol::IsPrivateNameBit>(flags);
}
-TNode<BoolT> CodeStubAssembler::IsNativeContext(
- SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, NATIVE_CONTEXT_TYPE);
-}
-
-TNode<BoolT> CodeStubAssembler::IsFixedDoubleArray(
- SloppyTNode<HeapObject> object) {
- return TaggedEqual(LoadMap(object), FixedDoubleArrayMapConstant());
-}
-
TNode<BoolT> CodeStubAssembler::IsHashTable(SloppyTNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
return UncheckedCast<BoolT>(
@@ -6329,11 +6200,6 @@ TNode<BoolT> CodeStubAssembler::IsJSFunctionInstanceType(
return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsAllocationSiteInstanceType(
- SloppyTNode<Int32T> instance_type) {
- return InstanceTypeEqual(instance_type, ALLOCATION_SITE_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsJSFunction(SloppyTNode<HeapObject> object) {
return IsJSFunctionMap(LoadMap(object));
}
@@ -6373,12 +6239,6 @@ TNode<BoolT> CodeStubAssembler::IsJSRegExp(SloppyTNode<HeapObject> object) {
return HasInstanceType(object, JS_REG_EXP_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsNumber(SloppyTNode<Object> object) {
- return Select<BoolT>(
- TaggedIsSmi(object), [=] { return Int32TrueConstant(); },
- [=] { return IsHeapNumber(CAST(object)); });
-}
-
TNode<BoolT> CodeStubAssembler::IsNumeric(SloppyTNode<Object> object) {
return Select<BoolT>(
TaggedIsSmi(object), [=] { return Int32TrueConstant(); },
@@ -6504,12 +6364,16 @@ TNode<BoolT> CodeStubAssembler::IsNumberArrayIndex(TNode<Number> number) {
[=] { return IsHeapNumberUint32(CAST(number)); });
}
+template <typename TIndex>
TNode<BoolT> CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(
- Node* element_count, int base_size, ParameterMode mode) {
+ TNode<TIndex> element_count, int base_size) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT element_count is allowed");
int max_newspace_elements =
(kMaxRegularHeapObjectSize - base_size) / kTaggedSize;
return IntPtrOrSmiGreaterThan(
- element_count, IntPtrOrSmiConstant(max_newspace_elements, mode), mode);
+ element_count, IntPtrOrSmiConstant<TIndex>(max_newspace_elements));
}
TNode<Int32T> CodeStubAssembler::StringCharCodeAt(TNode<String> string,
@@ -6844,14 +6708,42 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input,
TNode<Word32T> hash = Word32And(SmiToInt32(smi_input.value()), mask);
TNode<IntPtrT> entry_index =
Signed(ChangeUint32ToWord(Int32Add(hash, hash)));
- TNode<Object> smi_key = UnsafeLoadFixedArrayElement(
- number_string_cache, entry_index, 0, INTPTR_PARAMETERS);
- GotoIf(TaggedNotEqual(smi_key, smi_input.value()), bailout);
+ TNode<Object> smi_key =
+ UnsafeLoadFixedArrayElement(number_string_cache, entry_index);
+ Label if_smi_cache_missed(this);
+ GotoIf(TaggedNotEqual(smi_key, smi_input.value()), &if_smi_cache_missed);
// Smi match, return value from cache entry.
result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index,
- kTaggedSize, INTPTR_PARAMETERS));
+ kTaggedSize));
Goto(&done);
+
+ BIND(&if_smi_cache_missed);
+ {
+ Label store_to_cache(this);
+
+ // Bailout when the cache is not full-size.
+ const int kFullCacheSize =
+ isolate()->heap()->MaxNumberToStringCacheSize();
+ Branch(IntPtrLessThan(number_string_cache_length,
+ IntPtrConstant(kFullCacheSize)),
+ bailout, &store_to_cache);
+
+ BIND(&store_to_cache);
+ {
+ // Generate string and update string hash field.
+ result = NumberToStringSmi(SmiToInt32(smi_input.value()),
+ Int32Constant(10), bailout);
+
+ // Store string into cache.
+ StoreFixedArrayElement(number_string_cache, entry_index,
+ smi_input.value());
+ StoreFixedArrayElement(number_string_cache,
+ IntPtrAdd(entry_index, IntPtrConstant(1)),
+ result.value());
+ Goto(&done);
+ }
+ }
}
BIND(&done);
return result.value();
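
In the rewritten Smi path above, a cache miss no longer bails out immediately: once the cache has reached full size, the string is generated and written back, with the key and the cached string stored in adjacent slots of one flat array (entry_index and entry_index + 1), and the entry chosen by masking a hash of the Smi. A hedged, stand-alone sketch of that keyed layout, using a hypothetical container rather than the real heap-allocated cache:

#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>

// Hypothetical model: each entry pairs a Smi key with its cached string; the
// entry index comes from masking the key's hash to the power-of-two size.
class SmiToStringCache {
 public:
  explicit SmiToStringCache(size_t entries) : entries_(entries) {}

  std::optional<std::string> Lookup(int32_t smi) const {
    const auto& entry = entries_[IndexFor(smi)];
    if (!entry || entry->first != smi) return std::nullopt;  // cache miss
    return entry->second;
  }

  void Store(int32_t smi, std::string str) {
    entries_[IndexFor(smi)] = std::make_pair(smi, std::move(str));
  }

 private:
  size_t IndexFor(int32_t smi) const {
    return static_cast<uint32_t>(smi) & (entries_.size() - 1);
  }
  std::vector<std::optional<std::pair<int32_t, std::string>>> entries_;
};
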
@@ -6861,6 +6753,8 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
TVARIABLE(String, result);
Label runtime(this, Label::kDeferred), done(this, &result);
+ GotoIfForceSlowPath(&runtime);
+
result = NumberToString(input, &runtime);
Goto(&done);
@@ -7149,7 +7043,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
Label out(this);
- VARIABLE(var_result, MachineRepresentation::kTagged, input);
+ TVARIABLE(Object, var_result, input);
// Early exit for positive smis.
{
@@ -7161,7 +7055,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
}
const TNode<Number> number = ToNumber(context, input);
- var_result.Bind(number);
+ var_result = number;
// Perhaps we have a positive smi now.
{
@@ -7177,7 +7071,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
{
const TNode<Int32T> uint32_value = SmiToInt32(CAST(number));
TNode<Float64T> float64_value = ChangeUint32ToFloat64(uint32_value);
- var_result.Bind(AllocateHeapNumberWithValue(float64_value));
+ var_result = AllocateHeapNumberWithValue(float64_value);
Goto(&out);
}
@@ -7229,13 +7123,13 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
x = Float64Mod(x, float_two_32);
const TNode<Number> result = ChangeFloat64ToTagged(x);
- var_result.Bind(result);
+ var_result = result;
Goto(&out);
}
BIND(&return_zero);
{
- var_result.Bind(SmiConstant(0));
+ var_result = SmiConstant(0);
Goto(&out);
}
}
@@ -7246,14 +7140,14 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
TNode<String> CodeStubAssembler::ToString_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input) {
- VARIABLE(var_result, MachineRepresentation::kTagged, input);
+ TVARIABLE(Object, var_result, input);
Label stub_call(this, Label::kDeferred), out(this);
GotoIf(TaggedIsSmi(input), &stub_call);
Branch(IsString(CAST(input)), &out, &stub_call);
BIND(&stub_call);
- var_result.Bind(CallBuiltin(Builtins::kToString, context, input));
+ var_result = CallBuiltin(Builtins::kToString, context, input);
Goto(&out);
BIND(&out);
@@ -7297,6 +7191,12 @@ TNode<Number> CodeStubAssembler::ToLength_Inline(SloppyTNode<Context> context,
[=] { return CAST(CallBuiltin(Builtins::kToLength, context, input)); });
}
+TNode<Object> CodeStubAssembler::OrdinaryToPrimitive(
+ TNode<Context> context, TNode<Object> input, OrdinaryToPrimitiveHint hint) {
+ Callable callable = CodeFactory::OrdinaryToPrimitive(isolate(), hint);
+ return CallStub(callable, context, input);
+}
+
TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
uint32_t shift, uint32_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
@@ -7493,8 +7393,8 @@ void CodeStubAssembler::TryInternalizeString(
SloppyTNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index,
Label* if_internalized, TVariable<Name>* var_internalized,
Label* if_not_internalized, Label* if_bailout) {
- TNode<ExternalReference> function =
- ExternalConstant(ExternalReference::try_internalize_string_function());
+ TNode<ExternalReference> function = ExternalConstant(
+ ExternalReference::try_string_to_index_or_lookup_existing());
const TNode<ExternalReference> isolate_ptr =
ExternalConstant(ExternalReference::isolate_address(isolate()));
TNode<Object> result =
@@ -7703,11 +7603,11 @@ void CodeStubAssembler::NameDictionaryLookup(
TVARIABLE(IntPtrT, var_count, count);
TVARIABLE(IntPtrT, var_entry, entry);
- Variable* loop_vars[] = {&var_count, &var_entry, var_name_index};
- Label loop(this, arraysize(loop_vars), loop_vars);
+ Label loop(this, {&var_count, &var_entry, var_name_index});
Goto(&loop);
BIND(&loop);
{
+ Label next_probe(this);
TNode<IntPtrT> entry = var_entry.value();
TNode<IntPtrT> index = EntryToIndex<Dictionary>(entry);
@@ -7717,13 +7617,18 @@ void CodeStubAssembler::NameDictionaryLookup(
CAST(UnsafeLoadFixedArrayElement(dictionary, index));
GotoIf(TaggedEqual(current, undefined), if_not_found);
if (mode == kFindExisting) {
+ if (Dictionary::ShapeT::kMatchNeedsHoleCheck) {
+ GotoIf(TaggedEqual(current, TheHoleConstant()), &next_probe);
+ }
current = LoadName<Dictionary>(current);
GotoIf(TaggedEqual(current, unique_name), if_found);
} else {
DCHECK_EQ(kFindInsertionIndex, mode);
GotoIf(TaggedEqual(current, TheHoleConstant()), if_not_found);
}
+ Goto(&next_probe);
+ BIND(&next_probe);
// See Dictionary::NextProbe().
Increment(&var_count);
entry = Signed(WordAnd(IntPtrAdd(entry, var_count.value()), mask));
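
The loop above advances through the table with the probe sequence from Dictionary::NextProbe(): each round adds an ever-growing count to the previous entry and masks to the power-of-two capacity, which visits every bucket. A small worked illustration:

#include <cstdio>

int main() {
  // Triangular probing as used above: entry = (entry + ++count) & mask.
  unsigned entry = 3, count = 0, mask = 7;  // capacity 8, starting at bucket 3
  for (int i = 0; i < 8; ++i) {
    std::printf("%u ", entry);  // prints: 3 4 6 1 5 2 0 7 (each bucket once)
    entry = (entry + ++count) & mask;
  }
  return 0;
}
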
@@ -7779,8 +7684,7 @@ void CodeStubAssembler::NumberDictionaryLookup(
TNode<Oddball> the_hole = TheHoleConstant();
TVARIABLE(IntPtrT, var_count, count);
- Variable* loop_vars[] = {&var_count, var_entry};
- Label loop(this, 2, loop_vars);
+ Label loop(this, {&var_count, var_entry});
*var_entry = entry;
Goto(&loop);
BIND(&loop);
@@ -8149,10 +8053,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
&var_is_symbol_processing_loop, &var_start_key_index,
&var_end_key_index},
zone());
- Label descriptor_array_loop(
- this, {&var_descriptors, &var_stable, &var_has_symbol,
- &var_is_symbol_processing_loop, &var_start_key_index,
- &var_end_key_index});
+ Label descriptor_array_loop(this, list);
Goto(&descriptor_array_loop);
BIND(&descriptor_array_loop);
@@ -8422,16 +8323,6 @@ void CodeStubAssembler::Lookup(TNode<Name> unique_name, TNode<Array> array,
}
}
-TNode<BoolT> CodeStubAssembler::IsSimpleObjectMap(TNode<Map> map) {
- uint32_t mask = Map::Bits1::HasNamedInterceptorBit::kMask |
- Map::Bits1::IsAccessCheckNeededBit::kMask;
- // !IsSpecialReceiverType && !IsNamedInterceptor && !IsAccessCheckNeeded
- return Select<BoolT>(
- IsSpecialReceiverInstanceType(LoadMapInstanceType(map)),
- [=] { return Int32FalseConstant(); },
- [=] { return IsClearWord32(LoadMapBitField(map), mask); });
-}
-
void CodeStubAssembler::TryLookupPropertyInSimpleObject(
TNode<JSObject> object, TNode<Map> map, TNode<Name> unique_name,
Label* if_found_fast, Label* if_found_dict,
@@ -8960,9 +8851,8 @@ void CodeStubAssembler::TryLookupElement(
GotoIfNot(UintPtrLessThan(intptr_index, length), &if_oob);
// Check if the element is a double hole, but don't load it.
- LoadFixedDoubleArrayElement(CAST(elements), intptr_index,
- MachineType::None(), 0, INTPTR_PARAMETERS,
- if_not_found);
+ LoadFixedDoubleArrayElement(CAST(elements), intptr_index, if_not_found,
+ MachineType::None());
Goto(if_found);
}
BIND(&if_isdictionary);
@@ -9672,11 +9562,11 @@ TNode<Uint8T> CodeStubAssembler::Float64ToUint8Clamped(
return UncheckedCast<Uint8T>(var_value.value());
}
-Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
+template <>
+TNode<Word32T> CodeStubAssembler::PrepareValueForWriteToTypedArray<Word32T>(
TNode<Object> input, ElementsKind elements_kind, TNode<Context> context) {
DCHECK(IsTypedArrayElementsKind(elements_kind));
- MachineRepresentation rep;
switch (elements_kind) {
case UINT8_ELEMENTS:
case INT8_ELEMENTS:
@@ -9685,23 +9575,13 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
- rep = MachineRepresentation::kWord32;
- break;
- case FLOAT32_ELEMENTS:
- rep = MachineRepresentation::kFloat32;
break;
- case FLOAT64_ELEMENTS:
- rep = MachineRepresentation::kFloat64;
- break;
- case BIGINT64_ELEMENTS:
- case BIGUINT64_ELEMENTS:
- return ToBigInt(context, input);
default:
UNREACHABLE();
}
- VARIABLE(var_result, rep);
- VARIABLE(var_input, MachineRepresentation::kTagged, input);
+ TVARIABLE(Word32T, var_result);
+ TVARIABLE(Object, var_input, input);
Label done(this, &var_result), if_smi(this), if_heapnumber_or_oddball(this),
convert(this), loop(this, &var_input);
Goto(&loop);
@@ -9710,52 +9590,134 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
// We can handle both HeapNumber and Oddball here, since Oddball has the
// same layout as the HeapNumber for the HeapNumber::value field. This
// way we can also properly optimize stores of oddballs to typed arrays.
- GotoIf(IsHeapNumber(var_input.value()), &if_heapnumber_or_oddball);
+ TNode<HeapObject> heap_object = CAST(var_input.value());
+ GotoIf(IsHeapNumber(heap_object), &if_heapnumber_or_oddball);
STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
Oddball::kToNumberRawOffset);
- Branch(HasInstanceType(var_input.value(), ODDBALL_TYPE),
- &if_heapnumber_or_oddball, &convert);
+ Branch(HasInstanceType(heap_object, ODDBALL_TYPE), &if_heapnumber_or_oddball,
+ &convert);
BIND(&if_heapnumber_or_oddball);
{
- TNode<Float64T> value = UncheckedCast<Float64T>(LoadObjectField(
- var_input.value(), HeapNumber::kValueOffset, MachineType::Float64()));
- if (rep == MachineRepresentation::kWord32) {
- if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
- var_result.Bind(Float64ToUint8Clamped(value));
- } else {
- var_result.Bind(TruncateFloat64ToWord32(value));
- }
- } else if (rep == MachineRepresentation::kFloat32) {
- var_result.Bind(TruncateFloat64ToFloat32(value));
+ TNode<Float64T> value =
+ LoadObjectField<Float64T>(heap_object, HeapNumber::kValueOffset);
+ if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
+ var_result = Float64ToUint8Clamped(value);
} else {
- DCHECK_EQ(MachineRepresentation::kFloat64, rep);
- var_result.Bind(value);
+ var_result = TruncateFloat64ToWord32(value);
}
Goto(&done);
}
BIND(&if_smi);
{
- TNode<Int32T> value = SmiToInt32(var_input.value());
- if (rep == MachineRepresentation::kFloat32) {
- var_result.Bind(RoundInt32ToFloat32(value));
- } else if (rep == MachineRepresentation::kFloat64) {
- var_result.Bind(ChangeInt32ToFloat64(value));
+ TNode<Int32T> value = SmiToInt32(CAST(var_input.value()));
+ if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
+ var_result = Int32ToUint8Clamped(value);
} else {
- DCHECK_EQ(MachineRepresentation::kWord32, rep);
- if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
- var_result.Bind(Int32ToUint8Clamped(value));
- } else {
- var_result.Bind(value);
- }
+ var_result = value;
}
Goto(&done);
}
BIND(&convert);
{
- var_input.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, input));
+ var_input = CallBuiltin(Builtins::kNonNumberToNumber, context, input);
+ Goto(&loop);
+ }
+
+ BIND(&done);
+ return var_result.value();
+}
+
+template <>
+TNode<Float32T> CodeStubAssembler::PrepareValueForWriteToTypedArray<Float32T>(
+ TNode<Object> input, ElementsKind elements_kind, TNode<Context> context) {
+ DCHECK(IsTypedArrayElementsKind(elements_kind));
+ CHECK_EQ(elements_kind, FLOAT32_ELEMENTS);
+
+ TVARIABLE(Float32T, var_result);
+ TVARIABLE(Object, var_input, input);
+ Label done(this, &var_result), if_smi(this), if_heapnumber_or_oddball(this),
+ convert(this), loop(this, &var_input);
+ Goto(&loop);
+ BIND(&loop);
+ GotoIf(TaggedIsSmi(var_input.value()), &if_smi);
+ // We can handle both HeapNumber and Oddball here, since Oddball has the
+ // same layout as the HeapNumber for the HeapNumber::value field. This
+ // way we can also properly optimize stores of oddballs to typed arrays.
+ TNode<HeapObject> heap_object = CAST(var_input.value());
+ GotoIf(IsHeapNumber(heap_object), &if_heapnumber_or_oddball);
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ Branch(HasInstanceType(heap_object, ODDBALL_TYPE), &if_heapnumber_or_oddball,
+ &convert);
+
+ BIND(&if_heapnumber_or_oddball);
+ {
+ TNode<Float64T> value =
+ LoadObjectField<Float64T>(heap_object, HeapNumber::kValueOffset);
+ var_result = TruncateFloat64ToFloat32(value);
+ Goto(&done);
+ }
+
+ BIND(&if_smi);
+ {
+ TNode<Int32T> value = SmiToInt32(CAST(var_input.value()));
+ var_result = RoundInt32ToFloat32(value);
+ Goto(&done);
+ }
+
+ BIND(&convert);
+ {
+ var_input = CallBuiltin(Builtins::kNonNumberToNumber, context, input);
+ Goto(&loop);
+ }
+
+ BIND(&done);
+ return var_result.value();
+}
+
+template <>
+TNode<Float64T> CodeStubAssembler::PrepareValueForWriteToTypedArray<Float64T>(
+ TNode<Object> input, ElementsKind elements_kind, TNode<Context> context) {
+ DCHECK(IsTypedArrayElementsKind(elements_kind));
+ CHECK_EQ(elements_kind, FLOAT64_ELEMENTS);
+
+ TVARIABLE(Float64T, var_result);
+ TVARIABLE(Object, var_input, input);
+ Label done(this, &var_result), if_smi(this), if_heapnumber_or_oddball(this),
+ convert(this), loop(this, &var_input);
+ Goto(&loop);
+ BIND(&loop);
+ GotoIf(TaggedIsSmi(var_input.value()), &if_smi);
+ // We can handle both HeapNumber and Oddball here, since Oddball has the
+ // same layout as the HeapNumber for the HeapNumber::value field. This
+ // way we can also properly optimize stores of oddballs to typed arrays.
+ TNode<HeapObject> heap_object = CAST(var_input.value());
+ GotoIf(IsHeapNumber(heap_object), &if_heapnumber_or_oddball);
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ Branch(HasInstanceType(heap_object, ODDBALL_TYPE), &if_heapnumber_or_oddball,
+ &convert);
+
+ BIND(&if_heapnumber_or_oddball);
+ {
+ var_result =
+ LoadObjectField<Float64T>(heap_object, HeapNumber::kValueOffset);
+ Goto(&done);
+ }
+
+ BIND(&if_smi);
+ {
+ TNode<Int32T> value = SmiToInt32(CAST(var_input.value()));
+ var_result = ChangeInt32ToFloat64(value);
+ Goto(&done);
+ }
+
+ BIND(&convert);
+ {
+ var_input = CallBuiltin(Builtins::kNonNumberToNumber, context, input);
Goto(&loop);
}
@@ -9763,6 +9725,34 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
return var_result.value();
}
+Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
+ TNode<Object> input, ElementsKind elements_kind, TNode<Context> context) {
+ DCHECK(IsTypedArrayElementsKind(elements_kind));
+
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ return PrepareValueForWriteToTypedArray<Word32T>(input, elements_kind,
+ context);
+ case FLOAT32_ELEMENTS:
+ return PrepareValueForWriteToTypedArray<Float32T>(input, elements_kind,
+ context);
+ case FLOAT64_ELEMENTS:
+ return PrepareValueForWriteToTypedArray<Float64T>(input, elements_kind,
+ context);
+ case BIGINT64_ELEMENTS:
+ case BIGUINT64_ELEMENTS:
+ return ToBigInt(context, input);
+ default:
+ UNREACHABLE();
+ }
+}
+
void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
TVariable<UintPtrT>* var_low,
TVariable<UintPtrT>* var_high) {
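
The hunks above split the old untyped PrepareValueForWriteToTypedArray into typed template specializations (Word32T, Float32T, Float64T) plus an untyped dispatcher that switches on the elements kind. A minimal C++ analogue of that shape, using hypothetical names and plain numeric conversions in place of the CSA operations:

#include <cstdint>
#include <variant>

enum class Kind { kInt32, kFloat32, kFloat64 };

// Typed specializations each handle exactly one representation...
template <typename T>
T PrepareValue(double input);

template <>
int32_t PrepareValue<int32_t>(double input) {
  return static_cast<int32_t>(input);  // stands in for the Word32 truncation
}
template <>
float PrepareValue<float>(double input) {
  return static_cast<float>(input);    // stands in for Float64 -> Float32
}
template <>
double PrepareValue<double>(double input) { return input; }

// ...and the untyped front door dispatches on the runtime elements kind.
std::variant<int32_t, float, double> PrepareValue(double input, Kind kind) {
  switch (kind) {
    case Kind::kInt32:   return PrepareValue<int32_t>(input);
    case Kind::kFloat32: return PrepareValue<float>(input);
    case Kind::kFloat64: return PrepareValue<double>(input);
  }
  return 0.0;  // unreachable for valid kinds
}
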
@@ -9952,8 +9942,8 @@ void CodeStubAssembler::EmitElementStore(
if (IsGrowStoreMode(store_mode) &&
!(IsSealedElementsKind(elements_kind) ||
IsNonextensibleElementsKind(elements_kind))) {
- elements = CAST(CheckForCapacityGrow(object, elements, elements_kind,
- length, intptr_key, bailout));
+ elements = CheckForCapacityGrow(object, elements, elements_kind, length,
+ intptr_key, bailout);
} else {
GotoIfNot(UintPtrLessThan(Unsigned(intptr_key), length), bailout);
}
@@ -9973,8 +9963,8 @@ void CodeStubAssembler::EmitElementStore(
IsNonextensibleElementsKind(elements_kind))) {
CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
} else if (IsCOWHandlingStoreMode(store_mode)) {
- elements = CopyElementsOnWrite(object, elements, elements_kind, length,
- parameter_mode, bailout);
+ elements = CopyElementsOnWrite(object, elements, elements_kind,
+ Signed(length), bailout);
}
CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
@@ -9982,11 +9972,11 @@ void CodeStubAssembler::EmitElementStore(
parameter_mode);
}
-Node* CodeStubAssembler::CheckForCapacityGrow(
+TNode<FixedArrayBase> CodeStubAssembler::CheckForCapacityGrow(
TNode<JSObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
TNode<UintPtrT> length, TNode<IntPtrT> key, Label* bailout) {
DCHECK(IsFastElementsKind(kind));
- VARIABLE(checked_elements, MachineRepresentation::kTagged);
+ TVARIABLE(FixedArrayBase, checked_elements);
Label grow_case(this), no_grow_case(this), done(this),
grow_bailout(this, Label::kDeferred);
@@ -10003,16 +9993,15 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
{
TNode<IntPtrT> current_capacity =
SmiUntag(LoadFixedArrayBaseLength(elements));
- checked_elements.Bind(elements);
+ checked_elements = elements;
Label fits_capacity(this);
// If key is negative, we will notice in Runtime::kGrowArrayElements.
GotoIf(UintPtrLessThan(key, current_capacity), &fits_capacity);
{
- Node* new_elements =
- TryGrowElementsCapacity(object, elements, kind, key, current_capacity,
- INTPTR_PARAMETERS, &grow_bailout);
- checked_elements.Bind(new_elements);
+ TNode<FixedArrayBase> new_elements = TryGrowElementsCapacity(
+ object, elements, kind, key, current_capacity, &grow_bailout);
+ checked_elements = new_elements;
Goto(&fits_capacity);
}
@@ -10023,8 +10012,9 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
TNode<Object> maybe_elements = CallRuntime(
Runtime::kGrowArrayElements, NoContextConstant(), object, tagged_key);
GotoIf(TaggedIsSmi(maybe_elements), bailout);
- CSA_ASSERT(this, IsFixedArrayWithKind(CAST(maybe_elements), kind));
- checked_elements.Bind(maybe_elements);
+ TNode<FixedArrayBase> new_elements = CAST(maybe_elements);
+ CSA_ASSERT(this, IsFixedArrayWithKind(new_elements, kind));
+ checked_elements = new_elements;
Goto(&fits_capacity);
}
@@ -10040,7 +10030,7 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
BIND(&no_grow_case);
{
GotoIfNot(UintPtrLessThan(key, length), bailout);
- checked_elements.Bind(elements);
+ checked_elements = elements;
Goto(&done);
}
@@ -10050,16 +10040,15 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
TNode<FixedArrayBase> CodeStubAssembler::CopyElementsOnWrite(
TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
- Node* length, ParameterMode mode, Label* bailout) {
+ TNode<IntPtrT> length, Label* bailout) {
TVARIABLE(FixedArrayBase, new_elements_var, elements);
Label done(this);
GotoIfNot(IsFixedCOWArrayMap(LoadMap(elements)), &done);
{
- Node* capacity =
- TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
+ TNode<IntPtrT> capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
TNode<FixedArrayBase> new_elements = GrowElementsCapacity(
- object, elements, kind, kind, length, capacity, mode, bailout);
+ object, elements, kind, kind, length, capacity, bailout);
new_elements_var = new_elements;
Goto(&done);
}
@@ -10085,8 +10074,7 @@ void CodeStubAssembler::TransitionElementsKind(TNode<JSObject> object,
Label done(this);
GotoIf(TaggedEqual(elements, EmptyFixedArrayConstant()), &done);
- // TODO(ishell): Use OptimalParameterMode().
- ParameterMode mode = INTPTR_PARAMETERS;
+ // TODO(ishell): Use BInt for elements_length and array_length.
TNode<IntPtrT> elements_length =
SmiUntag(LoadFixedArrayBaseLength(elements));
TNode<IntPtrT> array_length = Select<IntPtrT>(
@@ -10100,7 +10088,7 @@ void CodeStubAssembler::TransitionElementsKind(TNode<JSObject> object,
CSA_ASSERT(this, WordNotEqual(elements_length, IntPtrConstant(0)));
GrowElementsCapacity(object, elements, from_kind, to_kind, array_length,
- elements_length, mode, bailout);
+ elements_length, bailout);
Goto(&done);
BIND(&done);
}
@@ -10316,10 +10304,10 @@ template TNode<UintPtrT> CodeStubAssembler::BuildFastLoop<UintPtrT>(
TNode<UintPtrT> end_index, const FastLoopBody<UintPtrT>& body,
int increment, IndexAdvanceMode advance_mode);
-void CodeStubAssembler::BuildFastFixedArrayForEach(
+void CodeStubAssembler::BuildFastArrayForEach(
const CodeStubAssembler::VariableList& vars, Node* fixed_array,
ElementsKind kind, Node* first_element_inclusive,
- Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ Node* last_element_exclusive, const FastArrayForEachBody& body,
ParameterMode mode, ForEachDirection direction) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
CSA_SLOW_ASSERT(this, MatchesParameterMode(first_element_inclusive, mode));
@@ -10339,14 +10327,14 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
TNode<IntPtrT> index = IntPtrConstant(i);
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
- body(fixed_array, offset);
+ body(CAST(fixed_array), offset);
}
} else {
for (int i = last_val - 1; i >= first_val; --i) {
TNode<IntPtrT> index = IntPtrConstant(i);
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
- body(fixed_array, offset);
+ body(CAST(fixed_array), offset);
}
}
return;
@@ -10364,15 +10352,16 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize;
BuildFastLoop<IntPtrT>(
vars, start, limit,
- [&](TNode<IntPtrT> offset) { body(fixed_array, offset); },
+ [&](TNode<IntPtrT> offset) { body(CAST(fixed_array), offset); },
direction == ForEachDirection::kReverse ? -increment : increment,
direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost);
}
+template <typename TIndex>
void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
- Node* element_count, Label* doesnt_fit, int base_size, ParameterMode mode) {
- GotoIf(FixedArraySizeDoesntFitInNewSpace(element_count, base_size, mode),
+ TNode<TIndex> element_count, Label* doesnt_fit, int base_size) {
+ GotoIf(FixedArraySizeDoesntFitInNewSpace(element_count, base_size),
doesnt_fit);
}
@@ -11013,7 +11002,7 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
BIND(&if_boolean);
{
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBoolean);
Goto(if_equal);
}
@@ -11095,60 +11084,75 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_smi);
{
Label if_right_smi(this), if_right_not_smi(this);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kSignedSmall);
Branch(TaggedIsSmi(right), &if_right_smi, &if_right_not_smi);
BIND(&if_right_smi);
{
// We have already checked for {left} and {right} being the same value,
// so when we get here they must be different Smis.
- CombineFeedback(var_type_feedback,
- CompareOperationFeedback::kSignedSmall);
Goto(&if_notequal);
}
BIND(&if_right_not_smi);
- TNode<Map> right_map = LoadMap(CAST(right));
- Label if_right_heapnumber(this), if_right_boolean(this),
- if_right_bigint(this, Label::kDeferred),
- if_right_receiver(this, Label::kDeferred);
- GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
- // {left} is Smi and {right} is not HeapNumber or Smi.
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
- GotoIf(IsBooleanMap(right_map), &if_right_boolean);
- TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
- GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
- GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
- Branch(IsJSReceiverInstanceType(right_type), &if_right_receiver,
- &if_notequal);
-
- BIND(&if_right_heapnumber);
{
- var_left_float = SmiToFloat64(CAST(left));
- var_right_float = LoadHeapNumberValue(CAST(right));
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
- Goto(&do_float_comparison);
- }
+ TNode<Map> right_map = LoadMap(CAST(right));
+ Label if_right_heapnumber(this), if_right_boolean(this),
+ if_right_oddball(this), if_right_bigint(this, Label::kDeferred),
+ if_right_receiver(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
- BIND(&if_right_boolean);
- {
- var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
- Goto(&loop);
- }
+ // {left} is Smi and {right} is not HeapNumber or Smi.
+ TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
+ GotoIf(IsOddballInstanceType(right_type), &if_right_oddball);
+ GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
+ GotoIf(IsJSReceiverInstanceType(right_type), &if_right_receiver);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ Goto(&if_notequal);
- BIND(&if_right_bigint);
- {
- result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
- NoContextConstant(), right, left));
- Goto(&end);
- }
+ BIND(&if_right_heapnumber);
+ {
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
+ var_left_float = SmiToFloat64(CAST(left));
+ var_right_float = LoadHeapNumberValue(CAST(right));
+ Goto(&do_float_comparison);
+ }
- BIND(&if_right_receiver);
- {
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_right = CallStub(callable, context, right);
- Goto(&loop);
+ BIND(&if_right_oddball);
+ {
+ Label if_right_boolean(this);
+ GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kOddball);
+ Goto(&if_notequal);
+
+ BIND(&if_right_boolean);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
+ var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
+ Goto(&loop);
+ }
+ }
+
+ BIND(&if_right_bigint);
+ {
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
+ NoContextConstant(), right, left));
+ Goto(&end);
+ }
+
+ BIND(&if_right_receiver);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kReceiver);
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_right = CallStub(callable, context, right);
+ Goto(&loop);
+ }
}
}
@@ -11187,29 +11191,41 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_number);
{
Label if_right_not_number(this);
+
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
GotoIf(Word32NotEqual(left_type, right_type), &if_right_not_number);
var_left_float = LoadHeapNumberValue(CAST(left));
var_right_float = LoadHeapNumberValue(CAST(right));
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
Goto(&do_float_comparison);
BIND(&if_right_not_number);
{
- Label if_right_boolean(this);
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ Label if_right_oddball(this);
+
GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
- GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ GotoIf(IsOddballInstanceType(right_type), &if_right_oddball);
GotoIf(IsBigIntInstanceType(right_type), &use_symmetry);
- Branch(IsJSReceiverInstanceType(right_type), &use_symmetry,
- &if_notequal);
+ GotoIf(IsJSReceiverInstanceType(right_type), &use_symmetry);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ Goto(&if_notequal);
- BIND(&if_right_boolean);
+ BIND(&if_right_oddball);
{
- var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
- Goto(&loop);
+ Label if_right_boolean(this);
+ GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kOddball);
+ Goto(&if_notequal);
+
+ BIND(&if_right_boolean);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
+ var_right =
+ LoadObjectField(CAST(right), Oddball::kToNumberOffset);
+ Goto(&loop);
+ }
}
}
}
@@ -11218,6 +11234,8 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
Label if_right_heapnumber(this), if_right_bigint(this),
if_right_string(this), if_right_boolean(this);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+
GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
GotoIf(IsStringInstanceType(right_type), &if_right_string);
@@ -11227,9 +11245,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_heapnumber);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
NoContextConstant(), left, right));
Goto(&end);
@@ -11237,7 +11253,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_bigint);
{
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+ // We already have BigInt feedback.
result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt,
NoContextConstant(), left, right));
Goto(&end);
@@ -11245,9 +11261,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_string);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kString);
result = CAST(CallRuntime(Runtime::kBigIntEqualToString,
NoContextConstant(), left, right));
Goto(&end);
@@ -11255,9 +11269,8 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_boolean);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
Goto(&loop);
}
@@ -11266,35 +11279,60 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_oddball);
{
Label if_left_boolean(this), if_left_not_boolean(this);
- Branch(IsBooleanMap(left_map), &if_left_boolean, &if_left_not_boolean);
+ GotoIf(IsBooleanMap(left_map), &if_left_boolean);
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNullOrUndefined);
+ GotoIf(IsUndetectableMap(left_map), &if_left_not_boolean);
+ }
+ Goto(&if_left_not_boolean);
BIND(&if_left_not_boolean);
{
// {left} is either Null or Undefined. Check if {right} is
// undetectable (which includes Null and Undefined).
- Label if_right_undetectable(this), if_right_not_undetectable(this);
- Branch(IsUndetectableMap(right_map), &if_right_undetectable,
- &if_right_not_undetectable);
+ Label if_right_undetectable(this), if_right_number(this),
+ if_right_oddball(this),
+ if_right_not_number_or_oddball_or_undetectable(this);
+ GotoIf(IsUndetectableMap(right_map), &if_right_undetectable);
+ GotoIf(IsHeapNumberInstanceType(right_type), &if_right_number);
+ GotoIf(IsOddballInstanceType(right_type), &if_right_oddball);
+ Goto(&if_right_not_number_or_oddball_or_undetectable);
BIND(&if_right_undetectable);
{
- if (var_type_feedback != nullptr) {
- // If {right} is undetectable, it must be either also
- // Null or Undefined, or a Receiver (aka document.all).
- *var_type_feedback = SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined);
- }
+ // If {right} is undetectable, it must be either also
+ // Null or Undefined, or a Receiver (aka document.all).
+ CombineFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
Goto(&if_equal);
}
- BIND(&if_right_not_undetectable);
+ BIND(&if_right_number);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumber);
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_right_oddball);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kOddball);
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_right_not_number_or_oddball_or_undetectable);
{
if (var_type_feedback != nullptr) {
// Track whether {right} is Null, Undefined or Receiver.
- *var_type_feedback = SmiConstant(
+ CombineFeedback(
+ var_type_feedback,
CompareOperationFeedback::kReceiverOrNullOrUndefined);
GotoIf(IsJSReceiverInstanceType(right_type), &if_notequal);
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
}
Goto(&if_notequal);
}
@@ -11302,9 +11340,8 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_boolean);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
// If {right} is a Boolean too, it must be a different Boolean.
GotoIf(TaggedEqual(right_map, left_map), &if_notequal);
@@ -11387,9 +11424,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
// {right} is a Primitive, and neither Null or Undefined;
// convert {left} to Primitive too.
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
var_left = CallStub(callable, context, left);
Goto(&loop);
@@ -11400,6 +11435,12 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&do_right_stringtonumber);
{
+ if (var_type_feedback != nullptr) {
+ TNode<Map> right_map = LoadMap(CAST(right));
+ TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
+ CombineFeedback(var_type_feedback,
+ CollectFeedbackForString(right_type));
+ }
var_right = CallBuiltin(Builtins::kStringToNumber, context, right);
Goto(&loop);
}
@@ -11678,15 +11719,47 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
BIND(&if_lhsisoddball);
{
- STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
- GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types);
- GotoIf(Int32LessThan(rhs_instance_type,
- Int32Constant(ODDBALL_TYPE)),
- &if_not_equivalent_types);
- OverwriteFeedback(
- var_type_feedback,
- CompareOperationFeedback::kReceiverOrNullOrUndefined);
- Goto(&if_notequal);
+ Label if_lhsisboolean(this), if_lhsisnotboolean(this);
+ Branch(IsBooleanMap(lhs_map), &if_lhsisboolean,
+ &if_lhsisnotboolean);
+
+ BIND(&if_lhsisboolean);
+ {
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
+ GotoIf(IsBooleanMap(rhs_map), &if_notequal);
+ Goto(&if_not_equivalent_types);
+ }
+
+ BIND(&if_lhsisnotboolean);
+ {
+ Label if_rhsisheapnumber(this), if_rhsisnotheapnumber(this);
+
+ STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE ==
+ ODDBALL_TYPE);
+ GotoIf(Int32LessThan(rhs_instance_type,
+ Int32Constant(ODDBALL_TYPE)),
+ &if_not_equivalent_types);
+
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber,
+ &if_rhsisnotheapnumber);
+
+ BIND(&if_rhsisheapnumber);
+ {
+ OverwriteFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
+ Goto(&if_not_equivalent_types);
+ }
+
+ BIND(&if_rhsisnotheapnumber);
+ {
+ OverwriteFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
+ Goto(&if_notequal);
+ }
+ }
}
BIND(&if_lhsissymbol);
@@ -11742,7 +11815,14 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
}
BIND(&if_rhsisnotnumber);
- Goto(&if_not_equivalent_types);
+ {
+ TNode<Uint16T> rhs_instance_type = LoadMapInstanceType(rhs_map);
+ GotoIfNot(IsOddballInstanceType(rhs_instance_type),
+ &if_not_equivalent_types);
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
+ Goto(&if_notequal);
+ }
}
}
}
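
The oddball cases above record richer feedback states (kBoolean, kOddball, kNumberOrOddball) instead of collapsing straight to kAny. CombineFeedback ORs bits into the feedback word, so the states form a lattice in which a broader state contains the bits of the narrower states it subsumes. The constants below are illustrative only, not V8's actual values:

#include <cstdint>

// Illustrative feedback lattice: broader states are supersets of bits.
enum Feedback : uint32_t {
  kNone            = 0,
  kSignedSmall     = 1 << 0,
  kNumber          = kSignedSmall | (1 << 1),
  kOddball         = 1 << 2,
  kNumberOrOddball = kNumber | kOddball,
  kAny             = 0x7FFFFFFF,
};

inline void CombineFeedback(uint32_t* slot, uint32_t bits) { *slot |= bits; }
inline void OverwriteFeedback(uint32_t* slot, uint32_t bits) { *slot = bits; }
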
@@ -12380,28 +12460,6 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
UNREACHABLE();
}
-// ES #sec-createarrayiterator
-TNode<JSArrayIterator> CodeStubAssembler::CreateArrayIterator(
- TNode<Context> context, TNode<Object> object, IterationKind kind) {
- TNode<NativeContext> native_context = LoadNativeContext(context);
- TNode<Map> iterator_map = CAST(LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_MAP_INDEX));
- TNode<HeapObject> iterator = Allocate(JSArrayIterator::kHeaderSize);
- StoreMapNoWriteBarrier(iterator, iterator_map);
- StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOrHashOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kIteratedObjectOffset, object);
- StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
- SmiConstant(0));
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kKindOffset,
- SmiConstant(Smi::FromInt(static_cast<int>(kind))));
- return CAST(iterator);
-}
-
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
SloppyTNode<Context> context, SloppyTNode<Object> value,
SloppyTNode<Oddball> done) {
@@ -12507,11 +12565,31 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength(
return LoadObjectField<UintPtrT>(typed_array, JSTypedArray::kLengthOffset);
}
+TNode<JSArrayBuffer> CodeStubAssembler::GetTypedArrayBuffer(
+ TNode<Context> context, TNode<JSTypedArray> array) {
+ Label call_runtime(this), done(this);
+ TVARIABLE(Object, var_result);
+
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
+ GotoIf(IsDetachedBuffer(buffer), &call_runtime);
+ TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStorePtr(buffer);
+ GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
+ var_result = buffer;
+ Goto(&done);
+
+ BIND(&call_runtime);
+ {
+ var_result = CallRuntime(Runtime::kTypedArrayGetBuffer, context, array);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return CAST(var_result.value());
+}
+
CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
- TNode<IntPtrT> argc, TNode<RawPtrT> fp,
- ReceiverMode receiver_mode)
+ TNode<IntPtrT> argc, TNode<RawPtrT> fp)
: assembler_(assembler),
- receiver_mode_(receiver_mode),
argc_(argc),
base_(),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
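
The new GetTypedArrayBuffer added in the hunk above calls into the runtime only when its fast path cannot be taken: the buffer must be non-detached and already have a backing store. A hedged plain-C++ sketch of that fast/slow split, with hypothetical types and a dummy fallback rather than the V8 API:

struct ArrayBuffer {
  void* backing_store = nullptr;
  bool detached = false;
};

// Stand-in for the Runtime::kTypedArrayGetBuffer fallback: it merely gives
// the buffer a dummy backing store so the example stays self-contained.
ArrayBuffer* GetBufferSlow(ArrayBuffer* buffer) {
  static char dummy[1];
  buffer->backing_store = dummy;
  return buffer;
}

ArrayBuffer* GetBuffer(ArrayBuffer* buffer) {
  // Fast path: usable as-is only if not detached and already backed.
  if (!buffer->detached && buffer->backing_store != nullptr) return buffer;
  return GetBufferSlow(buffer);
}
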
@@ -12531,7 +12609,6 @@ CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
}
TNode<Object> CodeStubArguments::GetReceiver() const {
- DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
#else
@@ -12541,7 +12618,6 @@ TNode<Object> CodeStubArguments::GetReceiver() const {
}
void CodeStubArguments::SetReceiver(TNode<Object> object) const {
- DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
#else
@@ -12575,26 +12651,6 @@ TNode<Object> CodeStubArguments::AtIndex(int index) const {
}
TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
- int index, TNode<Object> default_value) {
- CodeStubAssembler::TVariable<Object> result(assembler_);
- CodeStubAssembler::Label argument_missing(assembler_),
- argument_done(assembler_, &result);
-
- assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(
- assembler_->IntPtrConstant(index), argc_),
- &argument_missing);
- result = AtIndex(index);
- assembler_->Goto(&argument_done);
-
- assembler_->BIND(&argument_missing);
- result = default_value;
- assembler_->Goto(&argument_done);
-
- assembler_->BIND(&argument_done);
- return result.value();
-}
-
-TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
TNode<IntPtrT> index, TNode<Object> default_value) {
CodeStubAssembler::TVariable<Object> result(assembler_);
CodeStubAssembler::Label argument_missing(assembler_),
@@ -12641,13 +12697,8 @@ void CodeStubArguments::ForEach(
}
void CodeStubArguments::PopAndReturn(TNode<Object> value) {
- TNode<IntPtrT> pop_count;
- if (receiver_mode_ == ReceiverMode::kHasReceiver) {
- pop_count = assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
- } else {
- pop_count = argc_;
- }
-
+ TNode<IntPtrT> pop_count =
+ assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
assembler_->PopAndReturn(pop_count, value);
}
@@ -13045,35 +13096,6 @@ void CodeStubAssembler::PerformStackCheck(TNode<Context> context) {
BIND(&ok);
}
-TNode<Context> CodeStubAssembler::AllocateSyntheticFunctionContext(
- TNode<NativeContext> native_context, int slots) {
- DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
- TNode<HeapObject> context_heap_object =
- AllocateInNewSpace(FixedArray::SizeFor(slots));
- InitializeSyntheticFunctionContext(native_context, context_heap_object,
- slots);
- return CAST(context_heap_object);
-}
-
-void CodeStubAssembler::InitializeSyntheticFunctionContext(
- TNode<NativeContext> native_context, TNode<HeapObject> context_heap_object,
- int slots) {
- DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
- TNode<Map> map = CAST(
- LoadContextElement(native_context, Context::FUNCTION_CONTEXT_MAP_INDEX));
- StoreMapNoWriteBarrier(context_heap_object, map);
- StoreObjectFieldNoWriteBarrier(context_heap_object, FixedArray::kLengthOffset,
- SmiConstant(slots));
-
- TNode<Context> context = CAST(context_heap_object);
- const TNode<Object> empty_scope_info =
- LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
- StoreContextElementNoWriteBarrier(context, Context::SCOPE_INFO_INDEX,
- empty_scope_info);
- StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX,
- UndefinedConstant());
-}
-
TNode<Object> CodeStubAssembler::CallApiCallback(
TNode<Object> context, TNode<RawPtrT> callback, TNode<IntPtrT> argc,
TNode<Object> data, TNode<Object> holder, TNode<Object> receiver) {
@@ -13085,17 +13107,9 @@ TNode<Object> CodeStubAssembler::CallApiCallback(
TNode<Object> context, TNode<RawPtrT> callback, TNode<IntPtrT> argc,
TNode<Object> data, TNode<Object> holder, TNode<Object> receiver,
TNode<Object> value) {
- // CallApiCallback receives the first four arguments in registers
- // (callback, argc, data and holder). The last arguments are in the stack in
- // JS ordering. See ApiCallbackDescriptor.
Callable callable = CodeFactory::CallApiCallback(isolate());
-#ifdef V8_REVERSE_JSARGS
- return CallStub(callable, context, callback, argc, data, holder, value,
- receiver);
-#else
return CallStub(callable, context, callback, argc, data, holder, receiver,
value);
-#endif
}
TNode<Object> CodeStubAssembler::CallRuntimeNewArray(
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index b01729c73d..de2c16d35f 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -15,6 +15,7 @@
#include "src/compiler/code-assembler.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
+#include "src/objects/js-function.h"
#include "src/objects/objects.h"
#include "src/objects/promise.h"
#include "src/objects/shared-function-info.h"
@@ -107,130 +108,104 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(TypedArraySpeciesProtector, typed_array_species_protector, \
TypedArraySpeciesProtector)
-#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
- V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
- V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
- V(AllocationMementoMap, allocation_memento_map, AllocationMementoMap) \
- V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \
- AllocationSiteWithoutWeakNextMap) \
- V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \
- V(arguments_to_string, arguments_to_string, ArgumentsToString) \
- V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map, \
- ArrayBoilerplateDescriptionMap) \
- V(Array_string, Array_string, ArrayString) \
- V(array_to_string, array_to_string, ArrayToString) \
- V(BooleanMap, boolean_map, BooleanMap) \
- V(boolean_to_string, boolean_to_string, BooleanToString) \
- V(CellMap, cell_map, CellMap) \
- V(CodeMap, code_map, CodeMap) \
- V(ConsOneByteStringMap, cons_one_byte_string_map, ConsOneByteStringMap) \
- V(ConsStringMap, cons_string_map, ConsStringMap) \
- V(constructor_string, constructor_string, ConstructorString) \
- V(CoverageInfoMap, coverage_info_map, CoverageInfoMap) \
- V(date_to_string, date_to_string, DateToString) \
- V(default_string, default_string, DefaultString) \
- V(EmptyByteArray, empty_byte_array, EmptyByteArray) \
- V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
- V(EmptyPropertyDictionary, empty_property_dictionary, \
- EmptyPropertyDictionary) \
- V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
- EmptySlowElementDictionary) \
- V(empty_string, empty_string, EmptyString) \
- V(error_to_string, error_to_string, ErrorToString) \
- V(FalseValue, false_value, False) \
- V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
- V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
- V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
- V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
- V(Function_string, function_string, FunctionString) \
- V(FunctionTemplateInfoMap, function_template_info_map, \
- FunctionTemplateInfoMap) \
- V(function_to_string, function_to_string, FunctionToString) \
- V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
- V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
- V(HeapNumberMap, heap_number_map, HeapNumberMap) \
- V(Infinity_string, Infinity_string, InfinityString) \
- V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \
- IsConcatSpreadableSymbol) \
- V(iterator_symbol, iterator_symbol, IteratorSymbol) \
- V(length_string, length_string, LengthString) \
- V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
- V(match_symbol, match_symbol, MatchSymbol) \
- V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \
- V(message_string, message_string, MessageString) \
- V(MetaMap, meta_map, MetaMap) \
- V(minus_Infinity_string, minus_Infinity_string, MinusInfinityString) \
- V(MinusZeroValue, minus_zero_value, MinusZero) \
- V(name_string, name_string, NameString) \
- V(NanValue, nan_value, Nan) \
- V(NaN_string, NaN_string, NaNString) \
- V(next_string, next_string, NextString) \
- V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
- V(null_to_string, null_to_string, NullToString) \
- V(NullValue, null_value, Null) \
- V(number_string, number_string, numberString) \
- V(number_to_string, number_to_string, NumberToString) \
- V(Object_string, Object_string, ObjectString) \
- V(object_to_string, object_to_string, ObjectToString) \
- V(OneByteStringMap, one_byte_string_map, OneByteStringMap) \
- V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
- V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \
- V(PreparseDataMap, preparse_data_map, PreparseDataMap) \
- V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap) \
- V(promise_forwarding_handler_symbol, promise_forwarding_handler_symbol, \
- PromiseForwardingHandlerSymbol) \
- V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map, \
- PromiseFulfillReactionJobTaskMap) \
- V(promise_handled_by_symbol, promise_handled_by_symbol, \
- PromiseHandledBySymbol) \
- V(PromiseReactionMap, promise_reaction_map, PromiseReactionMap) \
- V(PromiseRejectReactionJobTaskMap, promise_reject_reaction_job_task_map, \
- PromiseRejectReactionJobTaskMap) \
- V(PromiseResolveThenableJobTaskMap, promise_resolve_thenable_job_task_map, \
- PromiseResolveThenableJobTaskMap) \
- V(prototype_string, prototype_string, PrototypeString) \
- V(PrototypeInfoMap, prototype_info_map, PrototypeInfoMap) \
- V(replace_symbol, replace_symbol, ReplaceSymbol) \
- V(regexp_to_string, regexp_to_string, RegexpToString) \
- V(resolve_string, resolve_string, ResolveString) \
- V(return_string, return_string, ReturnString) \
- V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
- V(SloppyArgumentsElementsMap, sloppy_arguments_elements_map, \
- SloppyArgumentsElementsMap) \
- V(SmallOrderedHashSetMap, small_ordered_hash_set_map, \
- SmallOrderedHashSetMap) \
- V(SmallOrderedHashMapMap, small_ordered_hash_map_map, \
- SmallOrderedHashMapMap) \
- V(SmallOrderedNameDictionaryMap, small_ordered_name_dictionary_map, \
- SmallOrderedNameDictionaryMap) \
- V(species_symbol, species_symbol, SpeciesSymbol) \
- V(StaleRegister, stale_register, StaleRegister) \
- V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
- V(string_string, string_string, StringString) \
- V(string_to_string, string_to_string, StringToString) \
- V(StringMap, string_map, StringMap) \
- V(SymbolMap, symbol_map, SymbolMap) \
- V(TheHoleValue, the_hole_value, TheHole) \
- V(then_string, then_string, ThenString) \
- V(to_string_tag_symbol, to_string_tag_symbol, ToStringTagSymbol) \
- V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
- V(TrueValue, true_value, True) \
- V(Tuple2Map, tuple2_map, Tuple2Map) \
- V(BreakPointMap, break_point_map, BreakPointMap) \
- V(BreakPointInfoMap, break_point_info_map, BreakPointInfoMap) \
- V(CachedTemplateObjectMap, cached_template_object_map, \
- CachedTemplateObjectMap) \
- V(UncompiledDataWithoutPreparseDataMap, \
- uncompiled_data_without_preparse_data_map, \
- UncompiledDataWithoutPreparseDataMap) \
- V(UncompiledDataWithPreparseDataMap, uncompiled_data_with_preparse_data_map, \
- UncompiledDataWithPreparseDataMap) \
- V(undefined_to_string, undefined_to_string, UndefinedToString) \
- V(UndefinedValue, undefined_value, Undefined) \
- V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol) \
- V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap) \
- V(zero_string, zero_string, ZeroString) \
- TORQUE_INTERNAL_MAP_CSA_LIST(V)
+#define UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER( \
+ V, rootIndexName, rootAccessorName, class_name) \
+ V(rootIndexName, rootAccessorName, class_name##Map)
+
+#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
+ V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \
+ AllocationSiteWithoutWeakNextMap) \
+ V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \
+ V(arguments_to_string, arguments_to_string, ArgumentsToString) \
+ V(Array_string, Array_string, ArrayString) \
+ V(array_to_string, array_to_string, ArrayToString) \
+ V(BooleanMap, boolean_map, BooleanMap) \
+ V(boolean_to_string, boolean_to_string, BooleanToString) \
+ V(ConsOneByteStringMap, cons_one_byte_string_map, ConsOneByteStringMap) \
+ V(ConsStringMap, cons_string_map, ConsStringMap) \
+ V(constructor_string, constructor_string, ConstructorString) \
+ V(date_to_string, date_to_string, DateToString) \
+ V(default_string, default_string, DefaultString) \
+ V(EmptyByteArray, empty_byte_array, EmptyByteArray) \
+ V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(EmptyScopeInfo, empty_scope_info, EmptyScopeInfo) \
+ V(EmptyPropertyDictionary, empty_property_dictionary, \
+ EmptyPropertyDictionary) \
+ V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
+ V(empty_string, empty_string, EmptyString) \
+ V(error_to_string, error_to_string, ErrorToString) \
+ V(errors_string, errors_string, ErrorsString) \
+ V(FalseValue, false_value, False) \
+ V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
+ V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(Function_string, function_string, FunctionString) \
+ V(function_to_string, function_to_string, FunctionToString) \
+ V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
+ V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
+ V(Infinity_string, Infinity_string, InfinityString) \
+ V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \
+ IsConcatSpreadableSymbol) \
+ V(iterator_symbol, iterator_symbol, IteratorSymbol) \
+ V(length_string, length_string, LengthString) \
+ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
+ V(match_symbol, match_symbol, MatchSymbol) \
+ V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \
+ V(message_string, message_string, MessageString) \
+ V(minus_Infinity_string, minus_Infinity_string, MinusInfinityString) \
+ V(MinusZeroValue, minus_zero_value, MinusZero) \
+ V(name_string, name_string, NameString) \
+ V(NanValue, nan_value, Nan) \
+ V(NaN_string, NaN_string, NaNString) \
+ V(next_string, next_string, NextString) \
+ V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
+ V(null_to_string, null_to_string, NullToString) \
+ V(NullValue, null_value, Null) \
+ V(number_string, number_string, NumberString) \
+ V(number_to_string, number_to_string, NumberToString) \
+ V(Object_string, Object_string, ObjectString) \
+ V(object_to_string, object_to_string, ObjectToString) \
+ V(OneByteStringMap, one_byte_string_map, OneByteStringMap) \
+ V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
+ V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \
+ V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap) \
+ V(promise_forwarding_handler_symbol, promise_forwarding_handler_symbol, \
+ PromiseForwardingHandlerSymbol) \
+ V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map, \
+ PromiseFulfillReactionJobTaskMap) \
+ V(promise_handled_by_symbol, promise_handled_by_symbol, \
+ PromiseHandledBySymbol) \
+ V(PromiseReactionMap, promise_reaction_map, PromiseReactionMap) \
+ V(PromiseRejectReactionJobTaskMap, promise_reject_reaction_job_task_map, \
+ PromiseRejectReactionJobTaskMap) \
+ V(PromiseResolveThenableJobTaskMap, promise_resolve_thenable_job_task_map, \
+ PromiseResolveThenableJobTaskMap) \
+ V(prototype_string, prototype_string, PrototypeString) \
+ V(replace_symbol, replace_symbol, ReplaceSymbol) \
+ V(regexp_to_string, regexp_to_string, RegexpToString) \
+ V(resolve_string, resolve_string, ResolveString) \
+ V(return_string, return_string, ReturnString) \
+ V(species_symbol, species_symbol, SpeciesSymbol) \
+ V(StaleRegister, stale_register, StaleRegister) \
+ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
+ V(string_string, string_string, StringString) \
+ V(string_to_string, string_to_string, StringToString) \
+ V(StringMap, string_map, StringMap) \
+ V(TheHoleValue, the_hole_value, TheHole) \
+ V(then_string, then_string, ThenString) \
+ V(toString_string, toString_string, ToStringString) \
+ V(to_primitive_symbol, to_primitive_symbol, ToPrimitiveSymbol) \
+ V(to_string_tag_symbol, to_string_tag_symbol, ToStringTagSymbol) \
+ V(TrueValue, true_value, True) \
+ V(undefined_to_string, undefined_to_string, UndefinedToString) \
+ V(UndefinedValue, undefined_value, Undefined) \
+ V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol) \
+ V(valueOf_string, valueOf_string, ValueOfString) \
+ V(wasm_wrapped_object_symbol, wasm_wrapped_object_symbol, \
+ WasmWrappedObjectSymbol) \
+ V(zero_string, zero_string, ZeroString) \
+ UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR( \
+ UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER, V)
#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
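
The new UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER above reshapes each entry emitted by a generator list into the three-argument V(...) form this list expects, replacing the long hand-written block of map entries. A toy version of that X-macro adapter pattern, with all names hypothetical:

// A generator that hands four arguments per entry to an APPLY macro...
#define COLOR_LIST_GENERATOR(APPLY, V) \
  APPLY(V, kRedIndex, red, Red)        \
  APPLY(V, kBlueIndex, blue, Blue)

// ...and an adapter that rewrites each entry into the three-argument
// V(index, accessor, Map) shape expected by an existing consumer.
#define MAP_ADAPTER(V, index, accessor, class_name) \
  V(index, accessor, class_name##Map)

#define DECLARE_MAP_TYPE(index, accessor, map_name) struct map_name {};
COLOR_LIST_GENERATOR(MAP_ADAPTER, DECLARE_MAP_TYPE)
#undef DECLARE_MAP_TYPE
// Expands to: struct RedMap {}; struct BlueMap {};
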
@@ -289,10 +264,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define CSA_DEBUG_INFO(name) \
{ #name, __FILE__, __LINE__ }
#define BIND(label) Bind(label, CSA_DEBUG_INFO(label))
-#define VARIABLE(name, ...) \
- Variable name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
-#define VARIABLE_CONSTRUCTOR(name, ...) \
- name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
#define TYPED_VARIABLE_DEF(type, name, ...) \
TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) \
@@ -302,8 +273,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
#define BIND(label) Bind(label)
-#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
-#define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__)
#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) name(__VA_ARGS__)
#endif // DEBUG
@@ -367,15 +336,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
#endif
}
- MachineRepresentation ParameterRepresentation(ParameterMode mode) const {
- return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation()
- : MachineRepresentation::kTaggedSigned;
- }
-
- MachineRepresentation OptimalParameterRepresentation() const {
- return ParameterRepresentation(OptimalParameterMode());
- }
-
TNode<IntPtrT> ParameterToIntPtr(TNode<Smi> value) { return SmiUntag(value); }
TNode<IntPtrT> ParameterToIntPtr(TNode<IntPtrT> value) { return value; }
// TODO(v8:9708): remove once all uses are ported.
@@ -384,27 +344,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<IntPtrT>(value);
}
- template <typename TIndex>
- TNode<TIndex> IntPtrToParameter(TNode<IntPtrT> value);
-
- Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
- if (mode == SMI_PARAMETERS) return SmiTag(value);
- return value;
- }
-
- Node* Int32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
- return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode);
- }
+ TNode<Smi> ParameterToTagged(TNode<Smi> value) { return value; }
- TNode<Smi> ParameterToTagged(Node* value, ParameterMode mode) {
- if (mode != SMI_PARAMETERS) return SmiTag(value);
- return UncheckedCast<Smi>(value);
- }
+ TNode<Smi> ParameterToTagged(TNode<IntPtrT> value) { return SmiTag(value); }
- Node* TaggedToParameter(SloppyTNode<Smi> value, ParameterMode mode) {
- if (mode != SMI_PARAMETERS) return SmiUntag(value);
- return value;
- }
+ template <typename TIndex>
+ TNode<TIndex> TaggedToParameter(TNode<Smi> value);
bool ToParameterConstant(Node* node, intptr_t* out, ParameterMode mode) {
if (mode == ParameterMode::SMI_PARAMETERS) {
@@ -449,11 +394,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> TaggedIndexToSmi(TNode<TaggedIndex> value);
TNode<TaggedIndex> SmiToTaggedIndex(TNode<Smi> value);
- // Pointer compression specific. Returns true if the upper 32 bits of a Smi
- // contain the sign of a lower 32 bits (i.e. not corrupted) so that the Smi
- // can be directly used as an index in element offset computation.
- TNode<BoolT> IsValidSmiIndex(TNode<Smi> smi);
-
// Pointer compression specific. Ensures that the upper 32 bits of a Smi
// contain the sign of a lower 32 bits so that the Smi can be directly used
// as an index in element offset computation.
@@ -471,37 +411,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<String> TaggedToDirectString(TNode<Object> value, Label* fail);
- TNode<Number> TaggedToNumber(TNode<Object> value, Label* fail) {
- GotoIfNot(IsNumber(value), fail);
- return UncheckedCast<Number>(value);
- }
-
TNode<HeapObject> TaggedToHeapObject(TNode<Object> value, Label* fail) {
GotoIf(TaggedIsSmi(value), fail);
return UncheckedCast<HeapObject>(value);
}
- TNode<JSAggregateError> HeapObjectToJSAggregateError(
- TNode<HeapObject> heap_object, Label* fail);
-
- TNode<JSArray> HeapObjectToJSArray(TNode<HeapObject> heap_object,
- Label* fail) {
- GotoIfNot(IsJSArray(heap_object), fail);
- return UncheckedCast<JSArray>(heap_object);
- }
-
- TNode<JSArrayBuffer> HeapObjectToJSArrayBuffer(TNode<HeapObject> heap_object,
- Label* fail) {
- GotoIfNot(IsJSArrayBuffer(heap_object), fail);
- return UncheckedCast<JSArrayBuffer>(heap_object);
- }
-
- TNode<JSArray> TaggedToFastJSArray(TNode<Context> context,
- TNode<Object> value, Label* fail) {
- GotoIf(TaggedIsSmi(value), fail);
- TNode<HeapObject> heap_object = CAST(value);
- GotoIfNot(IsFastJSArray(heap_object, context), fail);
- return UncheckedCast<JSArray>(heap_object);
+ TNode<Uint16T> Uint16Constant(uint16_t t) {
+ return UncheckedCast<Uint16T>(Int32Constant(t));
}
TNode<JSDataView> HeapObjectToJSDataView(TNode<HeapObject> heap_object,
@@ -656,13 +572,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <typename TIndex>
TNode<TIndex> IntPtrOrSmiConstant(int value);
- // TODO(v8:9708): remove once all uses are ported.
- Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
-
- bool IsIntPtrOrSmiConstantZero(TNode<Smi> test);
- bool IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test);
- // TODO(v8:9708): remove once all uses are ported.
- bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
ParameterMode mode);
@@ -713,10 +622,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
BitcastTaggedToWordForTagAndSmiBits(b))); \
} else { \
DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Int32OpName( \
TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \
TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))))); \
@@ -777,22 +682,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
}
- Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
- if (mode == SMI_PARAMETERS) {
- return SmiShl(CAST(a), shift);
- } else {
- DCHECK_EQ(INTPTR_PARAMETERS, mode);
- return WordShl(a, shift);
- }
- }
+ TNode<Smi> WordOrSmiShr(TNode<Smi> a, int shift) { return SmiShr(a, shift); }
- Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) {
- if (mode == SMI_PARAMETERS) {
- return SmiShr(CAST(a), shift);
- } else {
- DCHECK_EQ(INTPTR_PARAMETERS, mode);
- return WordShr(a, shift);
- }
+ TNode<IntPtrT> WordOrSmiShr(TNode<IntPtrT> a, int shift) {
+ return WordShr(a, shift);
}
#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
@@ -803,10 +696,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
} else { \
DCHECK_EQ(kTaggedSize, kInt32Size); \
DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
return Int32OpName( \
TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \
TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))); \
@@ -885,9 +774,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<HeapObject> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
TNode<HeapObject> Allocate(TNode<IntPtrT> size,
AllocationFlags flags = kNone);
- TNode<HeapObject> AllocateAllowLOS(TNode<IntPtrT> size) {
- return Allocate(size, AllocationFlag::kAllowLargeObjectAllocation);
- }
TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous, int offset);
@@ -919,7 +805,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void Check(SloppyTNode<Word32T> condition_node, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
- void FailAssert(const char* message, const char* file, int line,
+ void FailAssert(const char* message,
+ const std::vector<FileAndLine>& files_and_lines,
std::initializer_list<ExtraNode> extra_nodes = {});
void FastCheck(TNode<BoolT> condition);
@@ -1176,12 +1063,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<T>(
LoadObjectField(object, offset, MachineTypeOf<T>::value));
}
- TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object, int offset) {
+ TNode<Object> LoadObjectField(TNode<HeapObject> object, int offset) {
return UncheckedCast<Object>(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
- TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object,
- SloppyTNode<IntPtrT> offset) {
+ TNode<Object> LoadObjectField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset) {
return UncheckedCast<Object>(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
@@ -1348,7 +1235,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// This is only used on a newly allocated PropertyArray which
// doesn't have an existing hash.
void InitializePropertyArrayLength(TNode<PropertyArray> property_array,
- Node* length, ParameterMode mode);
+ TNode<IntPtrT> length);
// Check if the map is set for slow properties.
TNode<BoolT> IsDictionaryMap(SloppyTNode<Map> map);
@@ -1428,52 +1315,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ParameterMode parameter_mode = INTPTR_PARAMETERS,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ template <typename TIndex>
TNode<Object> LoadFixedArrayElement(
- TNode<FixedArray> object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
+ TNode<FixedArray> object, TNode<TIndex> index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe,
CheckBounds check_bounds = CheckBounds::kAlways);
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
TNode<Object> UnsafeLoadFixedArrayElement(
- TNode<FixedArray> object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return LoadFixedArrayElement(object, index, additional_offset,
- parameter_mode, needs_poisoning,
- CheckBounds::kDebugOnly);
- }
-
- TNode<Object> LoadFixedArrayElement(
- TNode<FixedArray> object, TNode<IntPtrT> index,
- LoadSensitivity needs_poisoning,
- CheckBounds check_bounds = CheckBounds::kAlways) {
- return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS,
- needs_poisoning, check_bounds);
- }
- // This doesn't emit a bounds-check. As part of the security-performance
- // tradeoff, only use it if it is performance critical.
- TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object,
- TNode<IntPtrT> index,
- LoadSensitivity needs_poisoning) {
- return LoadFixedArrayElement(object, index, needs_poisoning,
- CheckBounds::kDebugOnly);
- }
-
- TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, index, additional_offset,
- INTPTR_PARAMETERS, needs_poisoning);
+ needs_poisoning, CheckBounds::kDebugOnly);
}
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, int index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, INTPTR_PARAMETERS,
- needs_poisoning);
+ additional_offset, needs_poisoning);
}
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
@@ -1481,12 +1342,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<FixedArray> object, int index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, INTPTR_PARAMETERS,
- needs_poisoning, CheckBounds::kDebugOnly);
- }
- TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object,
- TNode<Smi> index) {
- return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS);
+ additional_offset, needs_poisoning,
+ CheckBounds::kDebugOnly);
}
TNode<Object> LoadPropertyArrayElement(TNode<PropertyArray> object,
@@ -1497,56 +1354,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Array is any array-like type that has a fixed header followed by
// tagged elements.
template <typename Array>
- TNode<Int32T> LoadAndUntagToWord32ArrayElement(
- TNode<Array> array, int array_header_size, Node* index,
- int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<Int32T> LoadAndUntagToWord32ArrayElement(TNode<Array> array,
+ int array_header_size,
+ TNode<IntPtrT> index,
+ int additional_offset = 0);
// Load an array element from a FixedArray, untag it and return it as Word32.
TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
- TNode<FixedArray> object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
-
- TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
- TNode<FixedArray> object, int index, int additional_offset = 0) {
- return LoadAndUntagToWord32FixedArrayElement(
- object, IntPtrConstant(index), additional_offset, INTPTR_PARAMETERS);
- }
+ TNode<FixedArray> object, TNode<IntPtrT> index,
+ int additional_offset = 0);
// Load an array element from a WeakFixedArray.
- TNode<MaybeObject> LoadWeakFixedArrayElement(
- TNode<WeakFixedArray> object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
-
- TNode<MaybeObject> LoadWeakFixedArrayElement(
- TNode<WeakFixedArray> object, int index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return LoadWeakFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, INTPTR_PARAMETERS,
- needs_poisoning);
- }
+ TNode<MaybeObject> LoadWeakFixedArrayElement(TNode<WeakFixedArray> object,
+ TNode<IntPtrT> index,
+ int additional_offset = 0);
// Load an array element from a FixedDoubleArray.
TNode<Float64T> LoadFixedDoubleArrayElement(
- SloppyTNode<FixedDoubleArray> object, Node* index,
- MachineType machine_type, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
- Label* if_hole = nullptr);
-
- TNode<Float64T> LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
- TNode<Smi> index,
- Label* if_hole = nullptr) {
- return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
- SMI_PARAMETERS, if_hole);
- }
-
- TNode<Float64T> LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
- TNode<IntPtrT> index,
- Label* if_hole = nullptr) {
- return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
- INTPTR_PARAMETERS, if_hole);
- }
+ TNode<FixedDoubleArray> object, TNode<IntPtrT> index,
+ Label* if_hole = nullptr,
+ MachineType machine_type = MachineType::Float64());
// Load an array element from a FixedArray, FixedDoubleArray or a
// NumberDictionary (depending on the |elements_kind|) and return
@@ -1566,23 +1393,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> LoadFeedbackVectorLength(TNode<FeedbackVector>);
TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
- TNode<Smi> index,
- Label* if_hole = nullptr);
- TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
TNode<IntPtrT> index,
Label* if_hole = nullptr);
- TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
- TNode<UintPtrT> index,
- Label* if_hole = nullptr) {
- return LoadDoubleWithHoleCheck(array, Signed(index), if_hole);
- }
TNode<BoolT> IsDoubleHole(TNode<Object> base, TNode<IntPtrT> offset);
// Load Float64 value by |base| + |offset| address. If the value is a double
// hole then jump to |if_hole|. If |machine_type| is None then only the hole
// check is generated.
TNode<Float64T> LoadDoubleWithHoleCheck(
- SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
+ TNode<Object> base, TNode<IntPtrT> offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
TNode<Numeric> LoadFixedTypedArrayElementAsTagged(TNode<RawPtrT> data_pointer,
TNode<UintPtrT> index,
@@ -1607,17 +1426,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> LoadScopeInfoHasExtensionField(TNode<ScopeInfo> scope_info);
// Context manipulation:
- TNode<Object> LoadContextElement(SloppyTNode<Context> context,
- int slot_index);
- TNode<Object> LoadContextElement(SloppyTNode<Context> context,
- SloppyTNode<IntPtrT> slot_index);
- TNode<Object> LoadContextElement(TNode<Context> context,
- TNode<Smi> slot_index);
- void StoreContextElement(SloppyTNode<Context> context, int slot_index,
- SloppyTNode<Object> value);
- void StoreContextElement(SloppyTNode<Context> context,
- SloppyTNode<IntPtrT> slot_index,
- SloppyTNode<Object> value);
void StoreContextElementNoWriteBarrier(SloppyTNode<Context> context,
int slot_index,
SloppyTNode<Object> value);
@@ -1771,13 +1579,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
parameter_mode, CheckBounds::kDebugOnly);
}
- void StorePropertyArrayElement(
- TNode<PropertyArray> array, Node* index, SloppyTNode<Object> value,
- WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS) {
- StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode,
- additional_offset, parameter_mode);
+ void StorePropertyArrayElement(TNode<PropertyArray> array,
+ TNode<IntPtrT> index, TNode<Object> value) {
+ StoreFixedArrayOrPropertyArrayElement(
+ array, index, value, UPDATE_WRITE_BARRIER, 0, INTPTR_PARAMETERS);
}
void StoreFixedArrayElement(
@@ -1824,13 +1629,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
void StoreDoubleHole(TNode<HeapObject> object, TNode<IntPtrT> offset);
- void StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array, Node* index,
- ParameterMode mode = INTPTR_PARAMETERS);
- void StoreFixedDoubleArrayHoleSmi(TNode<FixedDoubleArray> array,
- TNode<Smi> index) {
- StoreFixedDoubleArrayHole(array, index, SMI_PARAMETERS);
- }
-
+ void StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array,
+ TNode<IntPtrT> index);
void StoreFeedbackVectorSlot(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
TNode<AnyTaggedT> value,
@@ -1845,9 +1645,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> EnsureArrayPushable(TNode<Context> context, TNode<Map> map,
Label* bailout);
- void TryStoreArrayElement(ElementsKind kind, ParameterMode mode,
- Label* bailout, TNode<FixedArrayBase> elements,
- Node* index, TNode<Object> value);
+ void TryStoreArrayElement(ElementsKind kind, Label* bailout,
+ TNode<FixedArrayBase> elements, TNode<BInt> index,
+ TNode<Object> value);
// Consumes args into the array, and returns tagged new length.
TNode<Smi> BuildAppendJSArray(ElementsKind kind, TNode<JSArray> array,
CodeStubArguments* args,
@@ -1961,45 +1761,43 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
//
// Allocate and return a JSArray with initialized header fields and its
// uninitialized elements.
- // The ParameterMode argument is only used for the capacity parameter.
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> capacity,
- AllocationFlags allocation_flags = kNone,
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> capacity, AllocationFlags allocation_flags = kNone,
int array_header_size = JSArray::kHeaderSize);
// Allocate a JSArray and fill elements with the hole.
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<IntPtrT> capacity, TNode<Smi> length,
- TNode<AllocationSite> allocation_site,
- AllocationFlags allocation_flags = kNone);
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<Smi> capacity, TNode<Smi> length,
- TNode<AllocationSite> allocation_site,
- AllocationFlags allocation_flags = kNone) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
+ TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
+ AllocationFlags allocation_flags = kNone);
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity,
+ TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
+ AllocationFlags allocation_flags = kNone) {
return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
allocation_site, allocation_flags);
}
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<Smi> capacity, TNode<Smi> length,
AllocationFlags allocation_flags = kNone) {
- return AllocateJSArray(kind, array_map, SmiUntag(capacity), length, {},
- allocation_flags);
+ return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
+ base::nullopt, allocation_flags);
}
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<IntPtrT> capacity, TNode<Smi> length,
AllocationFlags allocation_flags = kNone) {
- return AllocateJSArray(kind, array_map, capacity, length, {},
+ return AllocateJSArray(kind, array_map, capacity, length, base::nullopt,
allocation_flags);
}
// Allocate a JSArray and initialize the header fields.
- TNode<JSArray> AllocateJSArray(TNode<Map> array_map,
- TNode<FixedArrayBase> elements,
- TNode<Smi> length,
- TNode<AllocationSite> allocation_site = {},
- int array_header_size = JSArray::kHeaderSize);
+ TNode<JSArray> AllocateJSArray(
+ TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
+ base::Optional<TNode<AllocationSite>> allocation_site = base::nullopt,
+ int array_header_size = JSArray::kHeaderSize);
enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
// Clone a fast JSArray |array| into a new fast JSArray.
@@ -2014,34 +1812,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// function generates significantly less code in this case.
TNode<JSArray> CloneFastJSArray(
TNode<Context> context, TNode<JSArray> array,
- TNode<AllocationSite> allocation_site = {},
+ base::Optional<TNode<AllocationSite>> allocation_site = base::nullopt,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert);
TNode<JSArray> ExtractFastJSArray(TNode<Context> context,
- TNode<JSArray> array, Node* begin,
- Node* count,
- ParameterMode mode = INTPTR_PARAMETERS,
- Node* capacity = nullptr,
- TNode<AllocationSite> allocation_site = {});
-
- TNode<FixedArrayBase> AllocateFixedArray(
- ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
- AllocationFlags flags = kNone,
- SloppyTNode<Map> fixed_array_map = nullptr);
-
- TNode<FixedArrayBase> AllocateFixedArray(
- ElementsKind kind, TNode<IntPtrT> capacity, AllocationFlags flags,
- SloppyTNode<Map> fixed_array_map = nullptr) {
- return AllocateFixedArray(kind, capacity, INTPTR_PARAMETERS, flags,
- fixed_array_map);
- }
+ TNode<JSArray> array, TNode<BInt> begin,
+ TNode<BInt> count);
+ template <typename TIndex>
TNode<FixedArrayBase> AllocateFixedArray(
- ElementsKind kind, TNode<Smi> capacity, AllocationFlags flags,
- SloppyTNode<Map> fixed_array_map = nullptr) {
- return AllocateFixedArray(kind, capacity, SMI_PARAMETERS, flags,
- fixed_array_map);
- }
+ ElementsKind kind, TNode<TIndex> capacity, AllocationFlags flags = kNone,
+ base::Optional<TNode<Map>> fixed_array_map = base::nullopt);
TNode<NativeContext> GetCreationContext(TNode<JSReceiver> receiver,
Label* if_bailout);
@@ -2089,14 +1870,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return result;
}
- TNode<PropertyArray> AllocatePropertyArray(
- Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
- AllocationFlags flags = kNone);
-
- // Perform CreateArrayIterator (ES #sec-createarrayiterator).
- TNode<JSArrayIterator> CreateArrayIterator(TNode<Context> context,
- TNode<Object> object,
- IterationKind mode);
+ TNode<PropertyArray> AllocatePropertyArray(TNode<IntPtrT> capacity);
// TODO(v8:9722): Return type should be JSIteratorResult
TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context,
@@ -2124,8 +1898,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> length);
void FillPropertyArrayWithUndefined(TNode<PropertyArray> array,
- Node* from_index, Node* to_index,
- ParameterMode mode = INTPTR_PARAMETERS);
+ TNode<IntPtrT> from_index,
+ TNode<IntPtrT> to_index);
enum class DestroySource { kNo, kYes };
@@ -2142,35 +1916,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// being cloned, to ensure that mutable HeapNumbers are unique between the
// source and cloned object.
void CopyPropertyArrayValues(TNode<HeapObject> from_array,
- TNode<PropertyArray> to_array, Node* length,
+ TNode<PropertyArray> to_array,
+ TNode<IntPtrT> length,
WriteBarrierMode barrier_mode,
- ParameterMode mode,
DestroySource destroy_source);
// Copies all elements from |from_array| of |length| size to
// |to_array| of the same size respecting the elements kind.
+ template <typename TIndex>
void CopyFixedArrayElements(
ElementsKind kind, TNode<FixedArrayBase> from_array,
- TNode<FixedArrayBase> to_array, Node* length,
- WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- ParameterMode mode = INTPTR_PARAMETERS) {
+ TNode<FixedArrayBase> to_array, TNode<TIndex> length,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
CopyFixedArrayElements(kind, from_array, kind, to_array,
- IntPtrOrSmiConstant(0, mode), length, length,
- barrier_mode, mode);
+ IntPtrOrSmiConstant<TIndex>(0), length, length,
+ barrier_mode);
}
// Copies |element_count| elements from |from_array| starting from element
// zero to |to_array| of |capacity| size respecting both array's elements
// kinds.
+ template <typename TIndex>
void CopyFixedArrayElements(
ElementsKind from_kind, TNode<FixedArrayBase> from_array,
ElementsKind to_kind, TNode<FixedArrayBase> to_array,
- TNode<IntPtrT> element_count, TNode<IntPtrT> capacity,
- WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- ParameterMode mode = INTPTR_PARAMETERS) {
+ TNode<TIndex> element_count, TNode<TIndex> capacity,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
CopyFixedArrayElements(from_kind, from_array, to_kind, to_array,
- IntPtrOrSmiConstant(0, mode), element_count,
- capacity, barrier_mode, mode);
+ IntPtrOrSmiConstant<TIndex>(0), element_count,
+ capacity, barrier_mode);
}
// Copies |element_count| elements from |from_array| starting from element
@@ -2181,25 +1955,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// (i.e. that there were holes). If |convert_holes_to_undefined| is
// HoleConversionMode::kConvertToUndefined, then it must not be the case that
// IsDoubleElementsKind(to_kind).
+ template <typename TIndex>
void CopyFixedArrayElements(
ElementsKind from_kind, TNode<FixedArrayBase> from_array,
- ElementsKind to_kind, TNode<FixedArrayBase> to_array, Node* first_element,
- Node* element_count, Node* capacity,
+ ElementsKind to_kind, TNode<FixedArrayBase> to_array,
+ TNode<TIndex> first_element, TNode<TIndex> element_count,
+ TNode<TIndex> capacity,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- ParameterMode mode = INTPTR_PARAMETERS,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert,
TVariable<BoolT>* var_holes_converted = nullptr);
- void CopyFixedArrayElements(
- ElementsKind from_kind, TNode<FixedArrayBase> from_array,
- ElementsKind to_kind, TNode<FixedArrayBase> to_array,
- TNode<Smi> first_element, TNode<Smi> element_count, TNode<Smi> capacity,
- WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
- CopyFixedArrayElements(from_kind, from_array, to_kind, to_array,
- first_element, element_count, capacity, barrier_mode,
- SMI_PARAMETERS);
- }
-
void JumpIfPointersFromHereAreInteresting(TNode<Object> object,
Label* interesting);
@@ -2234,17 +1999,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<FixedDoubleArray>(base);
}
- TNode<SloppyArgumentsElements> HeapObjectToSloppyArgumentsElements(
- TNode<HeapObject> base, Label* cast_fail) {
- GotoIf(TaggedNotEqual(LoadMap(base), SloppyArgumentsElementsMapConstant()),
- cast_fail);
- return UncheckedCast<SloppyArgumentsElements>(base);
- }
-
TNode<Int32T> ConvertElementsKindToInt(TNode<Int32T> elements_kind) {
return UncheckedCast<Int32T>(elements_kind);
}
+ template <typename T>
+ bool ClassHasMapConstant() {
+ return false;
+ }
+
+ template <typename T>
+ TNode<Map> GetClassMapConstant() {
+ UNREACHABLE();
+ return TNode<Map>();
+ }
+
enum class ExtractFixedArrayFlag {
kFixedArrays = 1,
kFixedDoubleArrays = 2,
@@ -2284,33 +2053,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// * If |source_elements_kind| is given, the function will try to use the
// runtime elements kind of source to make copy faster. More specifically, it
// can skip write barriers.
+ template <typename TIndex>
TNode<FixedArrayBase> ExtractFixedArray(
- TNode<FixedArrayBase> source, Node* first, Node* count = nullptr,
- Node* capacity = nullptr,
+ TNode<FixedArrayBase> source, base::Optional<TNode<TIndex>> first,
+ base::Optional<TNode<TIndex>> count = base::nullopt,
+ base::Optional<TNode<TIndex>> capacity = base::nullopt,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
TVariable<BoolT>* var_holes_converted = nullptr,
base::Optional<TNode<Int32T>> source_elements_kind = base::nullopt);
- TNode<FixedArrayBase> ExtractFixedArray(
- TNode<FixedArrayBase> source, TNode<Smi> first, TNode<Smi> count,
- TNode<Smi> capacity,
- ExtractFixedArrayFlags extract_flags =
- ExtractFixedArrayFlag::kAllFixedArrays) {
- return ExtractFixedArray(source, first, count, capacity, extract_flags,
- SMI_PARAMETERS);
- }
-
- TNode<FixedArray> ExtractFixedArray(
- TNode<FixedArray> source, TNode<IntPtrT> first, TNode<IntPtrT> count,
- TNode<IntPtrT> capacity,
- ExtractFixedArrayFlags extract_flags =
- ExtractFixedArrayFlag::kAllFixedArrays) {
- return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags,
- INTPTR_PARAMETERS));
- }
-
// Copy a portion of an existing FixedArray or FixedDoubleArray into a new
// FixedArray, including special appropriate handling for COW arrays.
// * |source| is either a FixedArray or FixedDoubleArray from which to copy
@@ -2328,8 +2080,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// PACKED/HOLEY_ELEMENTS can be used, but not PACKED_DOUBLE_ELEMENTS.
// * |allocation_flags| and |extract_flags| influence how the target
// FixedArray is allocated.
- // * |parameter_mode| determines the parameter mode of |first|, |count| and
- // |capacity|.
// * |convert_holes| is used to signify that the target array should use
// undefined in places of holes.
// * If |convert_holes| is true and |var_holes_converted| not nullptr, then
@@ -2338,15 +2088,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// compatible with the result array. For example, if the input was of
// HOLEY_SMI_ELEMENTS kind, and a conversion took place, the result will be
// compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS.
+ template <typename TIndex>
TNode<FixedArray> ExtractToFixedArray(
- SloppyTNode<FixedArrayBase> source, Node* first, Node* count,
- Node* capacity, SloppyTNode<Map> source_map,
- ElementsKind from_kind = PACKED_ELEMENTS,
- AllocationFlags allocation_flags = AllocationFlag::kNone,
- ExtractFixedArrayFlags extract_flags =
- ExtractFixedArrayFlag::kAllFixedArrays,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
- HoleConversionMode convert_holes = HoleConversionMode::kDontConvert,
+ SloppyTNode<FixedArrayBase> source, TNode<TIndex> first,
+ TNode<TIndex> count, TNode<TIndex> capacity, TNode<Map> source_map,
+ ElementsKind from_kind, AllocationFlags allocation_flags,
+ ExtractFixedArrayFlags extract_flags, HoleConversionMode convert_holes,
TVariable<BoolT>* var_holes_converted = nullptr,
base::Optional<TNode<Int32T>> source_runtime_kind = base::nullopt);
@@ -2366,15 +2113,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// is produced or not.
// * |allocation_flags| and |extract_flags| influence how the target array is
// allocated.
- // * |parameter_mode| determines the parameter mode of |first|, |count| and
- // |capacity|.
+ template <typename TIndex>
TNode<FixedArrayBase> ExtractFixedDoubleArrayFillingHoles(
- TNode<FixedArrayBase> source, Node* first, Node* count, Node* capacity,
- TNode<Map> source_map, TVariable<BoolT>* var_holes_converted,
- AllocationFlags allocation_flags,
- ExtractFixedArrayFlags extract_flags =
- ExtractFixedArrayFlag::kAllFixedArrays,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<FixedArrayBase> source, TNode<TIndex> first, TNode<TIndex> count,
+ TNode<TIndex> capacity, TNode<Map> source_map,
+ TVariable<BoolT>* var_holes_converted, AllocationFlags allocation_flags,
+ ExtractFixedArrayFlags extract_flags);
// Copy the entire contents of a FixedArray or FixedDoubleArray to a new
// array, including special appropriate handling for empty arrays and COW
@@ -2390,30 +2134,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<FixedArrayBase> CloneFixedArray(
TNode<FixedArrayBase> source,
ExtractFixedArrayFlags flags =
- ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) {
- ParameterMode mode = OptimalParameterMode();
- return ExtractFixedArray(source, IntPtrOrSmiConstant(0, mode), nullptr,
- nullptr, flags, mode);
- }
+ ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW);
// Loads an element from |array| of |from_kind| elements by given |offset|
// (NOTE: not index!), does a hole check if |if_hole| is provided and
// converts the value so that it becomes ready for storing to array of
// |to_kind| elements.
- Node* LoadElementAndPrepareForStore(Node* array, Node* offset,
+ Node* LoadElementAndPrepareForStore(TNode<FixedArrayBase> array,
+ TNode<IntPtrT> offset,
ElementsKind from_kind,
ElementsKind to_kind, Label* if_hole);
- Node* CalculateNewElementsCapacity(Node* old_capacity,
- ParameterMode mode = INTPTR_PARAMETERS);
-
- TNode<Smi> CalculateNewElementsCapacity(TNode<Smi> old_capacity) {
- return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS));
- }
- TNode<IntPtrT> CalculateNewElementsCapacity(TNode<IntPtrT> old_capacity) {
- return UncheckedCast<IntPtrT>(
- CalculateNewElementsCapacity(old_capacity, INTPTR_PARAMETERS));
- }
+ template <typename TIndex>
+ TNode<TIndex> CalculateNewElementsCapacity(TNode<TIndex> old_capacity);
// Tries to grow the |elements| array of given |object| to store the |key|
// or bails out if the growing gap is too big. Returns new elements.
@@ -2425,26 +2158,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Tries to grow the |capacity|-length |elements| array of given |object|
// to store the |key| or bails out if the growing gap is too big. Returns
// new elements.
+ template <typename TIndex>
TNode<FixedArrayBase> TryGrowElementsCapacity(TNode<HeapObject> object,
TNode<FixedArrayBase> elements,
- ElementsKind kind, Node* key,
- Node* capacity,
- ParameterMode mode,
+ ElementsKind kind,
+ TNode<TIndex> key,
+ TNode<TIndex> capacity,
Label* bailout);
// Grows elements capacity of given object. Returns new elements.
+ template <typename TIndex>
TNode<FixedArrayBase> GrowElementsCapacity(
TNode<HeapObject> object, TNode<FixedArrayBase> elements,
- ElementsKind from_kind, ElementsKind to_kind, Node* capacity,
- Node* new_capacity, ParameterMode mode, Label* bailout);
+ ElementsKind from_kind, ElementsKind to_kind, TNode<TIndex> capacity,
+ TNode<TIndex> new_capacity, Label* bailout);
// Given a need to grow by |growth|, allocate an appropriate new capacity
// if necessary, and return a new elements FixedArray object. Label |bailout|
// is followed for allocation failure.
- void PossiblyGrowElementsCapacity(ParameterMode mode, ElementsKind kind,
- TNode<HeapObject> array, Node* length,
+ void PossiblyGrowElementsCapacity(ElementsKind kind, TNode<HeapObject> array,
+ TNode<BInt> length,
TVariable<FixedArrayBase>* var_elements,
- Node* growth, Label* bailout);
+ TNode<BInt> growth, Label* bailout);
// Allocation site manipulation
void InitializeAllocationMemento(TNode<HeapObject> base,
@@ -2566,9 +2301,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Check whether the map is for an object with special properties, such as a
// JSProxy or an object with interceptors.
TNode<BoolT> InstanceTypeEqual(SloppyTNode<Int32T> instance_type, int type);
- TNode<BoolT> IsAccessorInfo(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsAccessorPair(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsAllocationSite(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNoElementsProtectorCellInvalid();
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2577,38 +2309,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsCallableMap(SloppyTNode<Map> map);
TNode<BoolT> IsCallable(SloppyTNode<HeapObject> object);
TNode<BoolT> TaggedIsCallable(TNode<Object> object);
- TNode<BoolT> IsCell(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsCode(SloppyTNode<HeapObject> object);
TNode<BoolT> IsConsStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsConstructorMap(SloppyTNode<Map> map);
TNode<BoolT> IsConstructor(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsCoverageInfo(TNode<HeapObject> object);
- TNode<BoolT> IsDebugInfo(TNode<HeapObject> object);
TNode<BoolT> IsDeprecatedMap(SloppyTNode<Map> map);
TNode<BoolT> IsNameDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsGlobalDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsExtensibleMap(SloppyTNode<Map> map);
TNode<BoolT> IsExtensibleNonPrototypeMap(TNode<Map> map);
TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsFeedbackCell(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsFeedbackVector(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsContext(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFixedArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFixedArraySubclass(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFixedArrayWithKind(SloppyTNode<HeapObject> object,
ElementsKind kind);
TNode<BoolT> IsFixedArrayWithKindOrEmpty(SloppyTNode<FixedArrayBase> object,
ElementsKind kind);
- TNode<BoolT> IsFixedDoubleArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFunctionWithPrototypeSlotMap(SloppyTNode<Map> map);
TNode<BoolT> IsHashTable(SloppyTNode<HeapObject> object);
TNode<BoolT> IsEphemeronHashTable(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsHeapNumberInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsOddball(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSAggregateError(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2617,7 +2339,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSArrayIterator(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSAsyncGeneratorObject(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSFunctionInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsAllocationSiteInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSBoundFunction(SloppyTNode<HeapObject> object);
@@ -2650,21 +2371,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsMap(SloppyTNode<HeapObject> object);
TNode<BoolT> IsName(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNameInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsNativeContext(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNullOrJSReceiver(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNullOrUndefined(SloppyTNode<Object> object);
TNode<BoolT> IsNumberDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOneByteStringInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsSeqOneByteStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsPrimitiveInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsPrivateSymbol(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPrivateName(SloppyTNode<Symbol> symbol);
- TNode<BoolT> IsPromiseCapability(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPropertyArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPropertyCell(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsPromiseReaction(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPromiseReactionJobTask(TNode<HeapObject> object);
- TNode<BoolT> IsPromiseRejectReactionJobTask(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsPromiseFulfillReactionJobTask(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPrototypeInitialArrayPrototype(SloppyTNode<Context> context,
SloppyTNode<Map> map);
TNode<BoolT> IsPrototypeTypedArrayPrototype(SloppyTNode<Context> context,
@@ -2685,13 +2401,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsCustomElementsReceiverInstanceType(
TNode<Int32T> instance_type);
TNode<BoolT> IsSpecialReceiverMap(SloppyTNode<Map> map);
- // Returns true if the map corresponds to non-special fast or dictionary
- // object.
- TNode<BoolT> IsSimpleObjectMap(TNode<Map> map);
TNode<BoolT> IsStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsString(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsSeqOneByteString(TNode<HeapObject> object);
+
TNode<BoolT> IsSymbolInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsSymbol(SloppyTNode<HeapObject> object);
TNode<BoolT> IsInternalizedStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsUniqueName(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoIndex(TNode<HeapObject> object);
@@ -2700,10 +2414,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsNotWeakFixedArraySubclass(SloppyTNode<HeapObject> object);
TNode<BoolT> IsZeroOrContext(SloppyTNode<Object> object);
- inline TNode<BoolT> IsSharedFunctionInfo(TNode<HeapObject> object) {
- return IsSharedFunctionInfoMap(LoadMap(object));
- }
-
TNode<BoolT> IsPromiseResolveProtectorCellInvalid();
TNode<BoolT> IsPromiseThenProtectorCellInvalid();
TNode<BoolT> IsArraySpeciesProtectorCellInvalid();
@@ -2720,8 +2430,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Int32Constant(0));
}
- // True iff |object| is a Smi or a HeapNumber.
- TNode<BoolT> IsNumber(SloppyTNode<Object> object);
// True iff |object| is a Smi or a HeapNumber or a BigInt.
TNode<BoolT> IsNumeric(SloppyTNode<Object> object);
@@ -2749,9 +2457,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// [0, 2^32-1).
TNode<BoolT> IsNumberArrayIndex(TNode<Number> number);
- TNode<BoolT> FixedArraySizeDoesntFitInNewSpace(
- Node* element_count, int base_size = FixedArray::kHeaderSize,
- ParameterMode mode = INTPTR_PARAMETERS);
+ template <typename TIndex>
+ TNode<BoolT> FixedArraySizeDoesntFitInNewSpace(TNode<TIndex> element_count,
+ int base_size);
+
+ TNode<BoolT> IsMetaMap(TNode<HeapObject> o) { return IsMapMap(o); }
// ElementsKind helpers:
TNode<BoolT> ElementsKindEqual(TNode<Int32T> a, TNode<Int32T> b) {
@@ -2844,6 +2554,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> ToLength_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input);
+ TNode<Object> OrdinaryToPrimitive(TNode<Context> context, TNode<Object> input,
+ OrdinaryToPrimitiveHint hint);
+
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as an uint32 node.
template <typename BitField>
@@ -3433,6 +3146,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ElementsKind elements_kind,
TNode<Context> context);
+ template <typename T>
+ TNode<T> PrepareValueForWriteToTypedArray(TNode<Object> input,
+ ElementsKind elements_kind,
+ TNode<Context> context);
+
// Store value to an elements array with given elements kind.
// TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS
// we pass {value} as BigInt object instead of int64_t. We should
@@ -3457,15 +3175,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Context> context,
TVariable<Object>* maybe_converted_value = nullptr);
- Node* CheckForCapacityGrow(TNode<JSObject> object,
- TNode<FixedArrayBase> elements, ElementsKind kind,
- TNode<UintPtrT> length, TNode<IntPtrT> key,
- Label* bailout);
+ TNode<FixedArrayBase> CheckForCapacityGrow(
+ TNode<JSObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
+ TNode<UintPtrT> length, TNode<IntPtrT> key, Label* bailout);
TNode<FixedArrayBase> CopyElementsOnWrite(TNode<HeapObject> object,
TNode<FixedArrayBase> elements,
- ElementsKind kind, Node* length,
- ParameterMode mode, Label* bailout);
+ ElementsKind kind,
+ TNode<IntPtrT> length,
+ Label* bailout);
void TransitionElementsKind(TNode<JSObject> object, TNode<Map> map,
ElementsKind from_kind, ElementsKind to_kind,
@@ -3512,53 +3230,47 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class ForEachDirection { kForward, kReverse };
- using FastFixedArrayForEachBody =
- std::function<void(Node* fixed_array, Node* offset)>;
+ using FastArrayForEachBody =
+ std::function<void(TNode<HeapObject> array, TNode<IntPtrT> offset)>;
- void BuildFastFixedArrayForEach(
- const CodeStubAssembler::VariableList& vars, Node* fixed_array,
+ void BuildFastArrayForEach(
+ const CodeStubAssembler::VariableList& vars, Node* array,
ElementsKind kind, Node* first_element_inclusive,
- Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ Node* last_element_exclusive, const FastArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse);
- void BuildFastFixedArrayForEach(
- Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
- Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ void BuildFastArrayForEach(
+ Node* array, ElementsKind kind, Node* first_element_inclusive,
+ Node* last_element_exclusive, const FastArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse) {
CodeStubAssembler::VariableList list(0, zone());
- BuildFastFixedArrayForEach(list, fixed_array, kind, first_element_inclusive,
- last_element_exclusive, body, mode, direction);
+ BuildFastArrayForEach(list, array, kind, first_element_inclusive,
+ last_element_exclusive, body, mode, direction);
}
- TNode<IntPtrT> GetArrayAllocationSize(TNode<IntPtrT> element_count,
+ template <typename TIndex>
+ TNode<IntPtrT> GetArrayAllocationSize(TNode<TIndex> element_count,
ElementsKind kind, int header_size) {
return ElementOffsetFromIndex(element_count, kind, header_size);
}
- // TODO(v8:9708): remove once all uses are ported.
- TNode<IntPtrT> GetArrayAllocationSize(Node* element_count, ElementsKind kind,
- ParameterMode mode, int header_size) {
- return ElementOffsetFromIndex(element_count, kind, mode, header_size);
- }
-
- TNode<IntPtrT> GetFixedArrayAllocationSize(Node* element_count,
- ElementsKind kind,
- ParameterMode mode) {
- return GetArrayAllocationSize(element_count, kind, mode,
- FixedArray::kHeaderSize);
+ template <typename TIndex>
+ TNode<IntPtrT> GetFixedArrayAllocationSize(TNode<TIndex> element_count,
+ ElementsKind kind) {
+ return GetArrayAllocationSize(element_count, kind, FixedArray::kHeaderSize);
}
- TNode<IntPtrT> GetPropertyArrayAllocationSize(Node* element_count,
- ParameterMode mode) {
- return GetArrayAllocationSize(element_count, PACKED_ELEMENTS, mode,
+ TNode<IntPtrT> GetPropertyArrayAllocationSize(TNode<IntPtrT> element_count) {
+ return GetArrayAllocationSize(element_count, PACKED_ELEMENTS,
PropertyArray::kHeaderSize);
}
- void GotoIfFixedArraySizeDoesntFitInNewSpace(Node* element_count,
- Label* doesnt_fit, int base_size,
- ParameterMode mode);
+ template <typename TIndex>
+ void GotoIfFixedArraySizeDoesntFitInNewSpace(TNode<TIndex> element_count,
+ Label* doesnt_fit,
+ int base_size);
void InitializeFieldsWithRoot(TNode<HeapObject> object,
TNode<IntPtrT> start_offset,
@@ -3689,6 +3401,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// JSTypedArray helpers
TNode<UintPtrT> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
TNode<RawPtrT> LoadJSTypedArrayDataPtr(TNode<JSTypedArray> typed_array);
+ TNode<JSArrayBuffer> GetTypedArrayBuffer(TNode<Context> context,
+ TNode<JSTypedArray> array);
template <typename TIndex>
TNode<IntPtrT> ElementOffsetFromIndex(TNode<TIndex> index, ElementsKind kind,
@@ -3764,6 +3478,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool ConstexprInt32NotEqual(int32_t a, int32_t b) { return a != b; }
bool ConstexprInt32GreaterThanEqual(int32_t a, int32_t b) { return a >= b; }
uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; }
+ int32_t ConstexprUint32Sub(uint32_t a, uint32_t b) { return a - b; }
int31_t ConstexprInt31Add(int31_t a, int31_t b) {
int32_t val;
CHECK(!base::bits::SignedAddOverflow32(a, b, &val));
@@ -3775,6 +3490,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return val;
}
+ int32_t ConstexprWord32Or(int32_t a, int32_t b) { return a | b; }
+
bool ConstexprUintPtrLessThan(uintptr_t a, uintptr_t b) { return a < b; }
// CSA does not support 64-bit types on 32-bit platforms so as a workaround
@@ -3903,12 +3620,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> TryToIntptr(SloppyTNode<Object> key, Label* if_not_intptr,
TVariable<Int32T>* var_instance_type = nullptr);
- TNode<Context> AllocateSyntheticFunctionContext(
- TNode<NativeContext> native_context, int slots);
- void InitializeSyntheticFunctionContext(TNode<NativeContext> native_context,
- TNode<HeapObject> context_heap_object,
- int slots);
-
TNode<JSArray> ArrayCreate(TNode<Context> context, TNode<Number> length);
// Allocate a clone of a mutable primitive, if {object} is a mutable
@@ -3943,9 +3654,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// fields initialized.
TNode<JSArray> AllocateUninitializedJSArray(
TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes);
-
- TNode<BoolT> IsValidSmi(TNode<Smi> smi);
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> size_in_bytes);
TNode<IntPtrT> SmiShiftBitsConstant() {
return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
@@ -3991,10 +3701,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<BigInt>* var_maybe_bigint = nullptr,
TVariable<Smi>* var_feedback = nullptr);
- Node* LoadObjectField(SloppyTNode<HeapObject> object, int offset,
+ Node* LoadObjectField(TNode<HeapObject> object, int offset, MachineType type);
+ Node* LoadObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset,
MachineType type);
- Node* LoadObjectField(SloppyTNode<HeapObject> object,
- SloppyTNode<IntPtrT> offset, MachineType type);
// Low-level accessors for Descriptor arrays.
template <typename T>
@@ -4017,49 +3726,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ParameterMode parameter_mode = INTPTR_PARAMETERS);
};
-// template <typename TIndex>
class V8_EXPORT_PRIVATE CodeStubArguments {
public:
using Node = compiler::Node;
- enum ReceiverMode { kHasReceiver, kNoReceiver };
// |argc| specifies the number of arguments passed to the builtin excluding
- // the receiver. The arguments will include a receiver iff |receiver_mode|
- // is kHasReceiver.
+ // the receiver. The arguments include the receiver.
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc)
+ : CodeStubArguments(assembler, argc, TNode<RawPtrT>()) {}
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<Int32T> argc)
+ : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc)) {}
CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, argc, TNode<RawPtrT>(), receiver_mode) {}
-
- CodeStubArguments(CodeStubAssembler* assembler, TNode<Int32T> argc,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc),
- TNode<RawPtrT>(), receiver_mode) {}
-
- // TODO(v8:9708): Consider removing this variant
- CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc),
- TNode<RawPtrT>(), receiver_mode) {}
-
- // |argc| specifies the number of arguments passed to the builtin excluding
- // the receiver. The arguments will include a receiver iff |receiver_mode|
- // is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
- TNode<RawPtrT> fp,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
-
- CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
- TNode<RawPtrT> fp,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), fp,
- receiver_mode) {}
+ TNode<RawPtrT> fp);
// Used by Torque to construct arguments based on a Torque-defined
// struct of values.
CodeStubArguments(CodeStubAssembler* assembler,
TorqueStructArguments torque_arguments)
: assembler_(assembler),
- receiver_mode_(ReceiverMode::kHasReceiver),
argc_(torque_arguments.length),
base_(torque_arguments.base),
fp_(torque_arguments.frame) {}
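A hedged usage sketch of the slimmed-down CodeStubArguments interface (illustrative only; the descriptor constant and the GetReceiver() accessor are assumed from surrounding, unchanged code):

  TNode<Int32T> argc =
      UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
  CodeStubArguments args(this, argc);  // argc excludes the receiver
  TNode<Object> receiver = args.GetReceiver();
  TNode<Object> first = args.GetOptionalArgumentValue(0);  // Undefined if absent
  args.PopAndReturn(first);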
@@ -4072,68 +3756,41 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
// Computes address of the index'th argument.
TNode<RawPtrT> AtIndexPtr(TNode<IntPtrT> index) const;
- TNode<RawPtrT> AtIndexPtr(TNode<Smi> index) const {
- return AtIndexPtr(assembler_->ParameterToIntPtr(index));
- }
// |index| is zero-based and does not include the receiver
TNode<Object> AtIndex(TNode<IntPtrT> index) const;
- // TODO(v8:9708): Consider removing this variant
- TNode<Object> AtIndex(TNode<Smi> index) const {
- return AtIndex(assembler_->ParameterToIntPtr(index));
- }
-
TNode<Object> AtIndex(int index) const;
- TNode<Object> GetOptionalArgumentValue(int index) {
- return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
- }
- TNode<Object> GetOptionalArgumentValue(int index,
- TNode<Object> default_value);
-
TNode<IntPtrT> GetLength() const { return argc_; }
TorqueStructArguments GetTorqueArguments() const {
return TorqueStructArguments{fp_, base_, argc_};
}
+ TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
+ TNode<Object> default_value);
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index) {
return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
}
- TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
- TNode<Object> default_value);
-
- using ForEachBodyFunction = std::function<void(TNode<Object> arg)>;
+ TNode<Object> GetOptionalArgumentValue(int index) {
+ return GetOptionalArgumentValue(assembler_->IntPtrConstant(index));
+ }
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
- template <typename TIndex>
- void ForEach(const ForEachBodyFunction& body, TNode<TIndex> first = {},
- TNode<TIndex> last = {}) const {
+ using ForEachBodyFunction = std::function<void(TNode<Object> arg)>;
+ void ForEach(const ForEachBodyFunction& body, TNode<IntPtrT> first = {},
+ TNode<IntPtrT> last = {}) const {
CodeStubAssembler::VariableList list(0, assembler_->zone());
ForEach(list, body, first, last);
}
-
- // Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const CodeStubAssembler::VariableList& vars,
const ForEachBodyFunction& body, TNode<IntPtrT> first = {},
TNode<IntPtrT> last = {}) const;
- void ForEach(const CodeStubAssembler::VariableList& vars,
- const ForEachBodyFunction& body, TNode<Smi> first,
- TNode<Smi> last = {}) const {
- TNode<IntPtrT> first_intptr = assembler_->ParameterToIntPtr(first);
- TNode<IntPtrT> last_intptr;
- if (last != nullptr) {
- last_intptr = assembler_->ParameterToIntPtr(last);
- }
- return ForEach(vars, body, first_intptr, last_intptr);
- }
-
void PopAndReturn(TNode<Object> value);
private:
CodeStubAssembler* assembler_;
- ReceiverMode receiver_mode_;
TNode<IntPtrT> argc_;
TNode<RawPtrT> base_;
TNode<RawPtrT> fp_;
@@ -4226,6 +3883,19 @@ class PrototypeCheckAssembler : public CodeStubAssembler {
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags)
+#define CLASS_MAP_CONSTANT_ADAPTER(V, rootIndexName, rootAccessorName, \
+ class_name) \
+ template <> \
+ inline bool CodeStubAssembler::ClassHasMapConstant<class_name>() { \
+ return true; \
+ } \
+ template <> \
+ inline TNode<Map> CodeStubAssembler::GetClassMapConstant<class_name>() { \
+ return class_name##MapConstant(); \
+ }
+
+UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(CLASS_MAP_CONSTANT_ADAPTER, _)
+
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_CODE_STUB_ASSEMBLER_H_
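The dominant pattern in the header changes above is the replacement of ParameterMode plumbing with index-typed templates. A minimal sketch of what the matching definitions presumably look like on the .cc side (illustrative, not quoted from this commit):

  // Presumed specializations for the new declaration
  //   template <typename TIndex> TNode<TIndex> TaggedToParameter(TNode<Smi> value);
  template <>
  TNode<Smi> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) {
    return value;            // Smi-indexed callers keep the tagged value
  }
  template <>
  TNode<IntPtrT> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) {
    return SmiUntag(value);  // IntPtr-indexed callers get the untagged word
  }

Call sites that previously passed SMI_PARAMETERS or INTPTR_PARAMETERS now select the behaviour through the template argument, e.g. TaggedToParameter<IntPtrT>(length) instead of TaggedToParameter(length, INTPTR_PARAMETERS).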
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index ef3d83a06e..9c5cb42edd 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -12,6 +12,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/visitors.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -28,16 +29,17 @@ CompilationCache::CompilationCache(Isolate* isolate)
eval_global_(isolate),
eval_contextual_(isolate),
reg_exp_(isolate, kRegExpGenerations),
+ code_(isolate),
enabled_script_and_eval_(true) {
CompilationSubCache* subcaches[kSubCacheCount] = {
- &script_, &eval_global_, &eval_contextual_, &reg_exp_};
+ &script_, &eval_global_, &eval_contextual_, &reg_exp_, &code_};
for (int i = 0; i < kSubCacheCount; ++i) {
subcaches_[i] = subcaches[i];
}
}
Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
- DCHECK(generation < generations_);
+ DCHECK_LT(generation, generations());
Handle<CompilationCacheTable> result;
if (tables_[generation].IsUndefined(isolate())) {
result = CompilationCacheTable::New(isolate(), kInitialCacheSize);
@@ -50,33 +52,44 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
return result;
}
-void CompilationSubCache::Age() {
- // Don't directly age single-generation caches.
- if (generations_ == 1) {
- if (!tables_[0].IsUndefined(isolate())) {
- CompilationCacheTable::cast(tables_[0]).Age();
- }
- return;
- }
+// static
+void CompilationSubCache::AgeByGeneration(CompilationSubCache* c) {
+ DCHECK_GT(c->generations(), 1);
// Age the generations implicitly killing off the oldest.
- for (int i = generations_ - 1; i > 0; i--) {
- tables_[i] = tables_[i - 1];
+ for (int i = c->generations() - 1; i > 0; i--) {
+ c->tables_[i] = c->tables_[i - 1];
}
// Set the first generation as unborn.
- tables_[0] = ReadOnlyRoots(isolate()).undefined_value();
+ c->tables_[0] = ReadOnlyRoots(c->isolate()).undefined_value();
+}
+
+// static
+void CompilationSubCache::AgeCustom(CompilationSubCache* c) {
+ DCHECK_EQ(c->generations(), 1);
+ if (c->tables_[0].IsUndefined(c->isolate())) return;
+ CompilationCacheTable::cast(c->tables_[0]).Age();
+}
+
+void CompilationCacheScript::Age() { AgeCustom(this); }
+void CompilationCacheEval::Age() { AgeCustom(this); }
+void CompilationCacheRegExp::Age() { AgeByGeneration(this); }
+void CompilationCacheCode::Age() {
+ if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
+ AgeByGeneration(this);
}
void CompilationSubCache::Iterate(RootVisitor* v) {
v->VisitRootPointers(Root::kCompilationCache, nullptr,
FullObjectSlot(&tables_[0]),
- FullObjectSlot(&tables_[generations_]));
+ FullObjectSlot(&tables_[generations()]));
}
void CompilationSubCache::Clear() {
MemsetPointer(reinterpret_cast<Address*>(tables_),
- ReadOnlyRoots(isolate()).undefined_value().ptr(), generations_);
+ ReadOnlyRoots(isolate()).undefined_value().ptr(),
+ generations());
}
void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
@@ -253,6 +266,58 @@ void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags,
CompilationCacheTable::PutRegExp(isolate(), table, source, flags, data));
}
+MaybeHandle<Code> CompilationCacheCode::Lookup(Handle<SharedFunctionInfo> key) {
+ // Make sure not to leak the table into the surrounding handle
+ // scope. Otherwise, we risk keeping old tables around even after
+ // having cleared the cache.
+ HandleScope scope(isolate());
+ MaybeHandle<Code> maybe_value;
+ int generation = 0;
+ for (; generation < generations(); generation++) {
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ maybe_value = table->LookupCode(key);
+ if (!maybe_value.is_null()) break;
+ }
+
+ if (maybe_value.is_null()) {
+ isolate()->counters()->compilation_cache_misses()->Increment();
+ return MaybeHandle<Code>();
+ }
+
+ Handle<Code> value = maybe_value.ToHandleChecked();
+ if (generation != 0) Put(key, value); // Add to the first generation.
+ isolate()->counters()->compilation_cache_hits()->Increment();
+ return scope.CloseAndEscape(value);
+}
+
+void CompilationCacheCode::Put(Handle<SharedFunctionInfo> key,
+ Handle<Code> value) {
+ HandleScope scope(isolate());
+ Handle<CompilationCacheTable> table = GetFirstTable();
+ SetFirstTable(CompilationCacheTable::PutCode(isolate(), table, key, value));
+}
+
+void CompilationCacheCode::TraceAgeing() {
+ DCHECK(FLAG_trace_turbo_nci);
+ StdoutStream os;
+ os << "NCI cache ageing: Removing oldest generation" << std::endl;
+}
+
+void CompilationCacheCode::TraceInsertion(Handle<SharedFunctionInfo> key,
+ Handle<Code> value) {
+ DCHECK(FLAG_trace_turbo_nci);
+ StdoutStream os;
+ os << "NCI cache insertion: " << Brief(*key) << ", " << Brief(*value)
+ << std::endl;
+}
+
+void CompilationCacheCode::TraceHit(Handle<SharedFunctionInfo> key,
+ Handle<Code> value) {
+ DCHECK(FLAG_trace_turbo_nci);
+ StdoutStream os;
+ os << "NCI cache hit: " << Brief(*key) << ", " << Brief(*value) << std::endl;
+}
+
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabledScriptAndEval()) return;
@@ -306,6 +371,10 @@ MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return reg_exp_.Lookup(source, flags);
}
+MaybeHandle<Code> CompilationCache::LookupCode(Handle<SharedFunctionInfo> sfi) {
+ return code_.Lookup(sfi);
+}
+
void CompilationCache::PutScript(Handle<String> source,
Handle<Context> native_context,
LanguageMode language_mode,
@@ -345,6 +414,11 @@ void CompilationCache::PutRegExp(Handle<String> source, JSRegExp::Flags flags,
reg_exp_.Put(source, flags, data);
}
+void CompilationCache::PutCode(Handle<SharedFunctionInfo> shared,
+ Handle<Code> code) {
+ code_.Put(shared, code);
+}
+
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Clear();
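
For readers unfamiliar with the sub-cache mechanics touched above, the following is a minimal standalone sketch (plain STL containers instead of CompilationCacheTable, placeholder key/value types, not V8 code) of the two-generation lookup-with-promotion and ageing scheme that CompilationCacheCode::Lookup, Put and Age implement:

#include <array>
#include <map>
#include <optional>
#include <string>
#include <utility>

// Simplified stand-in for a generational sub-cache: lookups scan from the
// youngest generation to the oldest, and a hit in an older generation is
// re-inserted into generation 0 so that ageing does not evict it.
class GenerationalCache {
 public:
  static constexpr int kGenerations = 2;

  std::optional<std::string> Lookup(int key) {
    for (int generation = 0; generation < kGenerations; ++generation) {
      auto it = tables_[generation].find(key);
      if (it == tables_[generation].end()) continue;
      std::string value = it->second;
      if (generation != 0) Put(key, value);  // Promote to the first generation.
      return value;
    }
    return std::nullopt;  // Miss.
  }

  void Put(int key, std::string value) { tables_[0][key] = std::move(value); }

  // Ageing shifts every generation up by one and drops the oldest, matching
  // CompilationSubCache::AgeByGeneration above.
  void Age() {
    for (int i = kGenerations - 1; i > 0; --i) tables_[i] = tables_[i - 1];
    tables_[0].clear();
  }

 private:
  std::array<std::map<int, std::string>, kGenerations> tables_;
};

Entries that are still in use keep getting promoted to generation 0 and survive periodic Age() calls, while stale entries fall off the end after kGenerations ageing passes.
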
diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index 04bea44a82..8aac29fc29 100644
--- a/deps/v8/src/codegen/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_COMPILATION_CACHE_H_
#define V8_CODEGEN_COMPILATION_CACHE_H_
+#include "src/base/hashmap.h"
#include "src/objects/compilation-cache.h"
#include "src/utils/allocation.h"
@@ -25,13 +26,11 @@ class CompilationSubCache {
public:
CompilationSubCache(Isolate* isolate, int generations)
: isolate_(isolate), generations_(generations) {
- tables_ = NewArray<Object>(generations);
+ DCHECK_LE(generations, kMaxGenerations);
}
- ~CompilationSubCache() { DeleteArray(tables_); }
-
- // Index for the first generation in the cache.
- static const int kFirstGeneration = 0;
+ static constexpr int kFirstGeneration = 0;
+ static constexpr int kMaxGenerations = 2;
// Get the compilation cache tables for a specific generation.
Handle<CompilationCacheTable> GetTable(int generation);
@@ -47,7 +46,7 @@ class CompilationSubCache {
// Age the sub-cache by evicting the oldest generation and creating a new
// young generation.
- void Age();
+ virtual void Age() = 0;
// GC support.
void Iterate(RootVisitor* v);
@@ -59,15 +58,20 @@ class CompilationSubCache {
void Remove(Handle<SharedFunctionInfo> function_info);
// Number of generations in this sub-cache.
- inline int generations() { return generations_; }
+ int generations() const { return generations_; }
protected:
- Isolate* isolate() { return isolate_; }
+ Isolate* isolate() const { return isolate_; }
+
+ // Ageing occurs either by removing the oldest generation, or with
+ // custom logic implemented in CompilationCacheTable::Age.
+ static void AgeByGeneration(CompilationSubCache* c);
+ static void AgeCustom(CompilationSubCache* c);
private:
- Isolate* isolate_;
- int generations_; // Number of generations.
- Object* tables_; // Compilation cache tables - one for each generation.
+ Isolate* const isolate_;
+ const int generations_;
+ Object tables_[kMaxGenerations]; // One for each generation.
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
};
@@ -88,6 +92,8 @@ class CompilationCacheScript : public CompilationSubCache {
LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info);
+ void Age() override;
+
private:
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
MaybeHandle<Object> name, int line_offset, int column_offset,
@@ -123,6 +129,8 @@ class CompilationCacheEval : public CompilationSubCache {
Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
int position);
+ void Age() override;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
@@ -138,10 +146,38 @@ class CompilationCacheRegExp : public CompilationSubCache {
void Put(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data);
+ void Age() override;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
+// Sub-cache for Code objects. All code inserted into this cache must
+// be usable across different native contexts.
+class CompilationCacheCode : public CompilationSubCache {
+ public:
+ explicit CompilationCacheCode(Isolate* isolate)
+ : CompilationSubCache(isolate, kGenerations) {}
+
+ MaybeHandle<Code> Lookup(Handle<SharedFunctionInfo> key);
+ void Put(Handle<SharedFunctionInfo> key, Handle<Code> value);
+
+ void Age() override;
+
+ // TODO(jgruber,v8:8888): For simplicity we use the generational
+ // approach here, but could consider something else (or more
+ // generations) in the future.
+ static constexpr int kGenerations = 2;
+
+ static void TraceAgeing();
+ static void TraceInsertion(Handle<SharedFunctionInfo> key,
+ Handle<Code> value);
+ static void TraceHit(Handle<SharedFunctionInfo> key, Handle<Code> value);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheCode);
+};
+
// The compilation cache keeps shared function infos for compiled
// scripts and evals. The shared function infos are looked up using
// the source string as the key. For regular expressions the
@@ -169,6 +205,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
MaybeHandle<FixedArray> LookupRegExp(Handle<String> source,
JSRegExp::Flags flags);
+ MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> sfi);
+
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source, Handle<Context> native_context,
@@ -187,6 +225,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
void PutRegExp(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data);
+ void PutCode(Handle<SharedFunctionInfo> shared, Handle<Code> code);
+
// Clear the cache - also used to initialize the cache at startup.
void Clear();
@@ -217,9 +257,6 @@ class V8_EXPORT_PRIVATE CompilationCache {
base::HashMap* EagerOptimizingSet();
- // The number of sub caches covering the different types to cache.
- static const int kSubCacheCount = 4;
-
bool IsEnabledScriptAndEval() const {
return FLAG_compilation_cache && enabled_script_and_eval_;
}
@@ -232,6 +269,9 @@ class V8_EXPORT_PRIVATE CompilationCache {
CompilationCacheEval eval_global_;
CompilationCacheEval eval_contextual_;
CompilationCacheRegExp reg_exp_;
+ CompilationCacheCode code_;
+
+ static constexpr int kSubCacheCount = 5;
CompilationSubCache* subcaches_[kSubCacheCount];
// Current enable state of the compilation cache for scripts and eval.
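
To make the intended use of the new code_ sub-cache concrete, here is a rough consumer-side sketch written only against the CompilationCache interface declared above. CompileNativeContextIndependent is a hypothetical stand-in for the actual compile step; the real call path lives in compiler.cc (see GetOptimizedCode and InsertCodeIntoCompilationCache below).

// Sketch only: consult the NCI code cache before compiling, and publish the
// result so other native contexts can reuse it.
MaybeHandle<Code> GetOrCompileCachedCode(Isolate* isolate,
                                         Handle<SharedFunctionInfo> shared) {
  CompilationCache* cache = isolate->compilation_cache();

  Handle<Code> code;
  if (cache->LookupCode(shared).ToHandle(&code)) {
    return code;  // Reuse code compiled in another native context.
  }

  // Hypothetical helper standing in for the NCI compile pipeline.
  if (!CompileNativeContextIndependent(isolate, shared).ToHandle(&code)) {
    return {};  // Compilation failed.
  }

  cache->PutCode(shared, code);  // Visible to subsequent LookupCode calls.
  return code;
}
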
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index c436c57407..bcc97e32f7 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -30,12 +30,12 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/isolate.h"
-#include "src/execution/off-thread-isolate.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/maybe-handles.h"
#include "src/heap/heap-inl.h"
-#include "src/heap/off-thread-factory-inl.h"
+#include "src/heap/local-factory-inl.h"
+#include "src/heap/local-heap-inl.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log-inl.h"
@@ -56,21 +56,102 @@
namespace v8 {
namespace internal {
-// A wrapper around a OptimizedCompilationInfo that detaches the Handles from
-// the underlying DeferredHandleScope and stores them in info_ on
-// destruction.
-class CompilationHandleScope final {
+namespace {
+
+bool IsForNativeContextIndependentCachingOnly(CodeKind kind) {
+ return CodeKindIsNativeContextIndependentJSFunction(kind) &&
+ !FLAG_turbo_nci_as_highest_tier;
+}
+
+bool IsForNativeContextIndependentCachingOnly(OptimizedCompilationInfo* info) {
+ return IsForNativeContextIndependentCachingOnly(info->code_kind());
+}
+
+class CompilerTracer : public AllStatic {
public:
- explicit CompilationHandleScope(Isolate* isolate,
- OptimizedCompilationInfo* info)
- : deferred_(isolate), info_(info) {}
- ~CompilationHandleScope() { info_->set_deferred_handles(deferred_.Detach()); }
+ static void PrintTracePrefix(const CodeTracer::Scope& scope,
+ const char* header,
+ OptimizedCompilationInfo* info) {
+ PrintF(scope.file(), "[%s ", header);
+ info->closure()->ShortPrint(scope.file());
+ PrintF(scope.file(), " (target %s)", CodeKindToString(info->code_kind()));
+ }
+
+ static void PrintTracePrefix(const CodeTracer::Scope& scope,
+ const char* header,
+ Handle<JSFunction> function) {
+ PrintF(scope.file(), "[%s ", header);
+ function->ShortPrint(scope.file());
+ }
- private:
- DeferredHandleScope deferred_;
- OptimizedCompilationInfo* info_;
+ static void PrintTraceSuffix(const CodeTracer::Scope& scope) {
+ PrintF(scope.file(), "]\n");
+ }
+
+ static void TracePrepareJob(Isolate* isolate, OptimizedCompilationInfo* info,
+ const char* compiler_name) {
+ if (!FLAG_trace_opt || !info->IsOptimizing()) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintTracePrefix(scope, "compiling method", info);
+ PrintF(scope.file(), " using %s%s", compiler_name,
+ info->is_osr() ? " OSR" : "");
+ PrintTraceSuffix(scope);
+ }
+
+ static void TraceCompilationStats(Isolate* isolate,
+ OptimizedCompilationInfo* info,
+ double ms_creategraph, double ms_optimize,
+ double ms_codegen) {
+ if (!FLAG_trace_opt || !info->IsOptimizing()) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintTracePrefix(scope, "optimizing", info);
+ PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms", ms_creategraph,
+ ms_optimize, ms_codegen);
+ PrintTraceSuffix(scope);
+ }
+
+ static void TraceCompletedJob(Isolate* isolate,
+ OptimizedCompilationInfo* info) {
+ if (!FLAG_trace_opt) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintTracePrefix(scope, "completed optimizing", info);
+ PrintTraceSuffix(scope);
+ }
+
+ static void TraceAbortedJob(Isolate* isolate,
+ OptimizedCompilationInfo* info) {
+ if (!FLAG_trace_opt) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintTracePrefix(scope, "aborted optimizing", info);
+ PrintF(scope.file(), " because: %s",
+ GetBailoutReason(info->bailout_reason()));
+ PrintTraceSuffix(scope);
+ }
+
+ static void TraceOptimizedCodeCacheHit(Isolate* isolate,
+ Handle<JSFunction> function,
+ BailoutId osr_offset) {
+ if (!FLAG_trace_opt) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintTracePrefix(scope, "found optimized code for", function);
+ if (!osr_offset.IsNone()) {
+ PrintF(scope.file(), " at OSR AST id %d", osr_offset.ToInt());
+ }
+ PrintTraceSuffix(scope);
+ }
+
+ static void TraceOptimizeForAlwaysOpt(Isolate* isolate,
+ Handle<JSFunction> function) {
+ if (!FLAG_trace_opt) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintTracePrefix(scope, "optimizing", function);
+ PrintF(scope.file(), " because --always-opt");
+ PrintTraceSuffix(scope);
+ }
};
+} // namespace
+
// Helper that times a scoped region and records the elapsed time.
struct ScopedTimer {
explicit ScopedTimer(base::TimeDelta* location) : location_(location) {
@@ -172,9 +253,7 @@ CompilationJob::Status UnoptimizedCompilationJob::FinalizeJob(
}
CompilationJob::Status UnoptimizedCompilationJob::FinalizeJob(
- Handle<SharedFunctionInfo> shared_info, OffThreadIsolate* isolate) {
- DisallowHeapAccess no_heap_access;
-
+ Handle<SharedFunctionInfo> shared_info, LocalIsolate* isolate) {
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToFinalize);
ScopedTimer t(&time_taken_to_finalize_);
@@ -231,15 +310,7 @@ void RecordUnoptimizedFunctionCompilation(
CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
DisallowJavascriptExecution no_js(isolate);
-
- if (FLAG_trace_opt && compilation_info()->IsOptimizing()) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- OFStream os(scope.file());
- os << "[compiling method " << Brief(*compilation_info()->closure())
- << " using " << compiler_name_;
- if (compilation_info()->is_osr()) os << " OSR";
- os << "]" << std::endl;
- }
+ CompilerTracer::TracePrepareJob(isolate, compilation_info(), compiler_name_);
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToPrepare);
@@ -287,13 +358,8 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
double ms_optimize = time_taken_to_execute_.InMillisecondsF();
double ms_codegen = time_taken_to_finalize_.InMillisecondsF();
- if (FLAG_trace_opt) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[optimizing ");
- function->ShortPrint(scope.file());
- PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph,
- ms_optimize, ms_codegen);
- }
+ CompilerTracer::TraceCompilationStats(
+ isolate, compilation_info(), ms_creategraph, ms_optimize, ms_codegen);
if (FLAG_trace_opt_stats) {
static double compilation_time = 0.0;
static int compiled_functions = 0;
@@ -434,7 +500,7 @@ void InstallCoverageInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared,
isolate->debug()->InstallCoverageInfo(shared, coverage_info);
}
-void InstallCoverageInfo(OffThreadIsolate* isolate,
+void InstallCoverageInfo(LocalIsolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<CoverageInfo> coverage_info) {
// We should only have coverage info when finalizing on the main thread.
@@ -445,13 +511,6 @@ template <typename LocalIsolate>
void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
Handle<SharedFunctionInfo> shared_info,
LocalIsolate* isolate) {
- DCHECK_EQ(shared_info->language_mode(),
- compilation_info->literal()->language_mode());
-
- // Update the shared function info with the scope info.
- Handle<ScopeInfo> scope_info = compilation_info->scope()->scope_info();
- shared_info->set_scope_info(*scope_info);
-
if (compilation_info->has_bytecode_array()) {
DCHECK(!shared_info->HasBytecodeArray()); // Only compiled once.
DCHECK(!compilation_info->has_asm_wasm_data());
@@ -519,8 +578,10 @@ void EnsureSharedFunctionInfosArrayOnScript(Handle<Script> script,
script->set_shared_function_infos(*infos);
}
-void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
- SharedFunctionInfo shared_info) {
+void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
+ SharedFunctionInfo shared_info) {
+ DCHECK_EQ(shared_info.language_mode(), literal->language_mode());
+
shared_info.set_has_duplicate_parameters(literal->has_duplicate_parameters());
shared_info.set_is_oneshot_iife(literal->is_oneshot_iife());
shared_info.UpdateAndFinalizeExpectedNofPropertiesFromEstimate(literal);
@@ -534,8 +595,13 @@ void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
literal->SafeToSkipArgumentsAdaptor());
shared_info.set_has_static_private_methods_or_accessors(
literal->has_static_private_methods_or_accessors());
+
+ shared_info.set_scope_info(*literal->scope()->scope_info());
}
+// Finalize a single compilation job. This function can return
+// RETRY_ON_MAIN_THREAD if the job cannot be finalized off-thread, in which
+// case it is safe to call this function again on the main thread with the
+// same job.
template <typename LocalIsolate>
CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
UnoptimizedCompilationJob* job, Handle<SharedFunctionInfo> shared_info,
@@ -544,8 +610,6 @@ CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
finalize_unoptimized_compilation_data_list) {
UnoptimizedCompilationInfo* compilation_info = job->compilation_info();
- SetSharedFunctionFlagsFromLiteral(compilation_info->literal(), *shared_info);
-
CompilationJob::Status status = job->FinalizeJob(shared_info, isolate);
if (status == CompilationJob::SUCCEEDED) {
InstallUnoptimizedCode(compilation_info, shared_info, isolate);
@@ -553,6 +617,8 @@ CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
isolate, shared_info, job->time_taken_to_execute(),
job->time_taken_to_finalize());
}
+ DCHECK_IMPLIES(status == CompilationJob::RETRY_ON_MAIN_THREAD,
+ (!std::is_same<LocalIsolate, Isolate>::value));
return status;
}
@@ -585,37 +651,38 @@ ExecuteSingleUnoptimizedCompilationJob(
return job;
}
-std::unique_ptr<UnoptimizedCompilationJob>
-RecursivelyExecuteUnoptimizedCompilationJobs(
+bool RecursivelyExecuteUnoptimizedCompilationJobs(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- UnoptimizedCompilationJobList* inner_function_jobs) {
+ UnoptimizedCompilationJobList* function_jobs) {
std::vector<FunctionLiteral*> eager_inner_literals;
std::unique_ptr<UnoptimizedCompilationJob> job =
ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, allocator,
&eager_inner_literals);
- if (!job) return std::unique_ptr<UnoptimizedCompilationJob>();
+ if (!job) return false;
// Recursively compile eager inner literals.
for (FunctionLiteral* inner_literal : eager_inner_literals) {
- std::unique_ptr<UnoptimizedCompilationJob> inner_job(
- RecursivelyExecuteUnoptimizedCompilationJobs(
- parse_info, inner_literal, allocator, inner_function_jobs));
- // Compilation failed, return null.
- if (!inner_job) return std::unique_ptr<UnoptimizedCompilationJob>();
- inner_function_jobs->emplace_front(std::move(inner_job));
+ if (!RecursivelyExecuteUnoptimizedCompilationJobs(
+ parse_info, inner_literal, allocator, function_jobs)) {
+ return false;
+ }
}
- return job;
+ function_jobs->emplace_front(std::move(job));
+ return true;
}
+template <typename LocalIsolate>
bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
- Isolate* isolate, Handle<SharedFunctionInfo> outer_shared_info,
+ LocalIsolate* isolate, Handle<SharedFunctionInfo> outer_shared_info,
Handle<Script> script, ParseInfo* parse_info,
AccountingAllocator* allocator, IsCompiledScope* is_compiled_scope,
FinalizeUnoptimizedCompilationDataList*
- finalize_unoptimized_compilation_data_list) {
+ finalize_unoptimized_compilation_data_list,
+ DeferredFinalizationJobDataList*
+ jobs_to_retry_finalization_on_main_thread) {
DeclarationScope::AllocateScopeInfos(parse_info, isolate);
std::vector<FunctionLiteral*> functions_to_compile;
@@ -633,17 +700,36 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
&functions_to_compile);
if (!job) return false;
- if (FinalizeSingleUnoptimizedCompilationJob(
- job.get(), shared_info, isolate,
- finalize_unoptimized_compilation_data_list) !=
- CompilationJob::SUCCEEDED) {
- return false;
- }
+ UpdateSharedFunctionFlagsAfterCompilation(literal, *shared_info);
+
+ auto finalization_status = FinalizeSingleUnoptimizedCompilationJob(
+ job.get(), shared_info, isolate,
+ finalize_unoptimized_compilation_data_list);
+
+ switch (finalization_status) {
+ case CompilationJob::SUCCEEDED:
+ if (shared_info.is_identical_to(outer_shared_info)) {
+ // Ensure that the top level function is retained.
+ *is_compiled_scope = shared_info->is_compiled_scope(isolate);
+ DCHECK(is_compiled_scope->is_compiled());
+ }
+ break;
- if (shared_info.is_identical_to(outer_shared_info)) {
- // Ensure that the top level function is retained.
- *is_compiled_scope = shared_info->is_compiled_scope();
- DCHECK(is_compiled_scope->is_compiled());
+ case CompilationJob::FAILED:
+ return false;
+
+ case CompilationJob::RETRY_ON_MAIN_THREAD:
+ // This should not happen on the main thread.
+ DCHECK((!std::is_same<LocalIsolate, Isolate>::value));
+ DCHECK_NOT_NULL(jobs_to_retry_finalization_on_main_thread);
+
+ // Clear the literal and ParseInfo to prevent further attempts to access
+ // them.
+ job->compilation_info()->ClearLiteral();
+ job->ClearParseInfo();
+ jobs_to_retry_finalization_on_main_thread->emplace_back(
+ isolate, shared_info, std::move(job));
+ break;
}
}
@@ -655,16 +741,13 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
return true;
}
-template <typename LocalIsolate>
bool FinalizeAllUnoptimizedCompilationJobs(
- ParseInfo* parse_info, LocalIsolate* isolate,
- Handle<SharedFunctionInfo> shared_info,
- UnoptimizedCompilationJob* outer_function_job,
- UnoptimizedCompilationJobList* inner_function_jobs,
+ ParseInfo* parse_info, Isolate* isolate, Handle<Script> script,
+ UnoptimizedCompilationJobList* compilation_jobs,
FinalizeUnoptimizedCompilationDataList*
finalize_unoptimized_compilation_data_list) {
- // TODO(leszeks): Re-enable.
- // DCHECK(AllowCompilation::IsAllowed(isolate));
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+ DCHECK(!compilation_jobs->empty());
// TODO(rmcilroy): Clear native context in debug once AsmJS generation doesn't
// rely on accessing native context during finalization.
@@ -672,26 +755,16 @@ bool FinalizeAllUnoptimizedCompilationJobs(
// Allocate scope infos for the literal.
DeclarationScope::AllocateScopeInfos(parse_info, isolate);
- // Finalize the outer-most function's compilation job.
- if (FinalizeSingleUnoptimizedCompilationJob(
- outer_function_job, shared_info, isolate,
- finalize_unoptimized_compilation_data_list) !=
- CompilationJob::SUCCEEDED) {
- return false;
- }
-
- Handle<Script> script(Script::cast(shared_info->script()), isolate);
- parse_info->CheckFlagsForFunctionFromScript(*script);
-
- // Finalize the inner functions' compilation jobs.
- for (auto&& inner_job : *inner_function_jobs) {
- Handle<SharedFunctionInfo> inner_shared_info =
- Compiler::GetSharedFunctionInfo(
- inner_job->compilation_info()->literal(), script, isolate);
+ // Finalize the functions' compilation jobs.
+ for (auto&& job : *compilation_jobs) {
+ FunctionLiteral* literal = job->compilation_info()->literal();
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfo(literal, script, isolate);
// The inner function might be compiled already if compiling for debug.
- if (inner_shared_info->is_compiled()) continue;
+ if (shared_info->is_compiled()) continue;
+ UpdateSharedFunctionFlagsAfterCompilation(literal, *shared_info);
if (FinalizeSingleUnoptimizedCompilationJob(
- inner_job.get(), inner_shared_info, isolate,
+ job.get(), shared_info, isolate,
finalize_unoptimized_compilation_data_list) !=
CompilationJob::SUCCEEDED) {
return false;
@@ -706,6 +779,38 @@ bool FinalizeAllUnoptimizedCompilationJobs(
return true;
}
+bool FinalizeDeferredUnoptimizedCompilationJobs(
+ Isolate* isolate, Handle<Script> script,
+ DeferredFinalizationJobDataList* deferred_jobs,
+ PendingCompilationErrorHandler* pending_error_handler,
+ FinalizeUnoptimizedCompilationDataList*
+ finalize_unoptimized_compilation_data_list) {
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+
+ if (deferred_jobs->empty()) return true;
+
+ // TODO(rmcilroy): Clear native context in debug once AsmJS generation doesn't
+ // rely on accessing native context during finalization.
+
+ // Finalize the deferred compilation jobs.
+ for (auto&& job : *deferred_jobs) {
+ Handle<SharedFunctionInfo> shared_info = job.function_handle();
+ if (FinalizeSingleUnoptimizedCompilationJob(
+ job.job(), shared_info, isolate,
+ finalize_unoptimized_compilation_data_list) !=
+ CompilationJob::SUCCEEDED) {
+ return false;
+ }
+ }
+
+ // Report any warnings generated during deferred finalization.
+ if (pending_error_handler->has_pending_warnings()) {
+ pending_error_handler->PrepareWarnings(isolate);
+ }
+
+ return true;
+}
+
V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<JSFunction> function, BailoutId osr_offset) {
RuntimeCallTimerScope runtimeTimer(
@@ -746,12 +851,14 @@ void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
void InsertCodeIntoOptimizedCodeCache(
OptimizedCompilationInfo* compilation_info) {
- Handle<Code> code = compilation_info->code();
- if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
+ // Cached NCI code currently does not use the optimization marker field.
+ if (IsForNativeContextIndependentCachingOnly(compilation_info)) return;
+
+ if (!CodeKindIsOptimizedJSFunction(compilation_info->code_kind())) return;
// Function context specialization folds-in the function context,
// so no sharing can occur.
- if (compilation_info->is_function_context_specializing()) {
+ if (compilation_info->function_context_specializing()) {
// Native context specialized code is not shared, so make sure the optimized
// code cache is clear.
ClearOptimizedCodeCache(compilation_info);
@@ -759,6 +866,7 @@ void InsertCodeIntoOptimizedCodeCache(
}
// Cache optimized context-specific code.
+ Handle<Code> code = compilation_info->code();
Handle<JSFunction> function = compilation_info->closure();
Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
Handle<NativeContext> native_context(function->context().native_context(),
@@ -773,25 +881,57 @@ void InsertCodeIntoOptimizedCodeCache(
}
}
-bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate) {
+void InsertCodeIntoCompilationCache(Isolate* isolate,
+ OptimizedCompilationInfo* info) {
+ if (!CodeKindIsNativeContextIndependentJSFunction(info->code_kind())) return;
+
+ // TODO(jgruber,v8:8888): This should turn into a DCHECK once we
+ // spawn dedicated NCI compile tasks.
+ if (!info->osr_offset().IsNone()) return;
+
+ Handle<Code> code = info->code();
+ DCHECK(!info->function_context_specializing());
+
+ Handle<SharedFunctionInfo> sfi = info->shared_info();
+ CompilationCache* cache = isolate->compilation_cache();
+ cache->PutCode(sfi, code);
+ DCHECK(!cache->LookupCode(sfi).is_null());
+
+ sfi->set_may_have_cached_code(true);
+
+ if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceInsertion(sfi, code);
+}
+
+V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromCompilationCache(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared) {
+ if (!shared->may_have_cached_code()) return {};
+ return shared->TryGetCachedCode(isolate);
+}
+
+// Runs PrepareJob in the proper compilation & canonical scopes. Handles will be
+// allocated in a persistent handle scope that is detached and handed off to the
+// {compilation_info} after PrepareJob.
+bool PrepareJobWithHandleScope(OptimizedCompilationJob* job, Isolate* isolate,
+ OptimizedCompilationInfo* compilation_info) {
+ CompilationHandleScope compilation(isolate, compilation_info);
+ CanonicalHandleScope canonical(isolate, compilation_info);
+ compilation_info->ReopenHandlesInNewHandleScope(isolate);
+ return job->PrepareJob(isolate) == CompilationJob::SUCCEEDED;
+}
+
+bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate,
+ OptimizedCompilationInfo* compilation_info) {
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RuntimeCallTimerScope runtimeTimer(
isolate, RuntimeCallCounterId::kOptimizeNonConcurrent);
- OptimizedCompilationInfo* compilation_info = job->compilation_info();
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeNonConcurrent");
- if (job->PrepareJob(isolate) != CompilationJob::SUCCEEDED ||
+ if (!PrepareJobWithHandleScope(job, isolate, compilation_info) ||
job->ExecuteJob(isolate->counters()->runtime_call_stats()) !=
CompilationJob::SUCCEEDED ||
job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
- if (FLAG_trace_opt) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[aborted optimizing ");
- compilation_info->closure()->ShortPrint(scope.file());
- PrintF(scope.file(), " because: %s]\n",
- GetBailoutReason(compilation_info->bailout_reason()));
- }
+ CompilerTracer::TraceAbortedJob(isolate, compilation_info);
return false;
}
@@ -803,8 +943,10 @@ bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate) {
return true;
}
-bool GetOptimizedCodeLater(OptimizedCompilationJob* job, Isolate* isolate) {
- OptimizedCompilationInfo* compilation_info = job->compilation_info();
+bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
+ Isolate* isolate,
+ OptimizedCompilationInfo* compilation_info,
+ CodeKind code_kind, Handle<JSFunction> function) {
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
@@ -829,21 +971,35 @@ bool GetOptimizedCodeLater(OptimizedCompilationJob* job, Isolate* isolate) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeConcurrentPrepare");
- if (job->PrepareJob(isolate) != CompilationJob::SUCCEEDED) return false;
- isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
+ if (!PrepareJobWithHandleScope(job.get(), isolate, compilation_info))
+ return false;
+
+ // The background recompile will own this job.
+ isolate->optimizing_compile_dispatcher()->QueueForOptimization(job.get());
+ job.release();
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queued ");
compilation_info->closure()->ShortPrint();
PrintF(" for concurrent optimization.\n");
}
+
+ // Set the optimization marker and return a code object which checks it.
+ if (!IsForNativeContextIndependentCachingOnly(code_kind)) {
+ // Cached NCI code currently does not use the optimization marker field.
+ function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
+ }
+ DCHECK(function->ActiveTierIsIgnition());
+ DCHECK(function->shared().HasBytecodeArray());
return true;
}
MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
- ConcurrencyMode mode,
+ ConcurrencyMode mode, CodeKind code_kind,
BailoutId osr_offset = BailoutId::None(),
JavaScriptFrame* osr_frame = nullptr) {
+ DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
+
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
@@ -855,13 +1011,18 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
if (shared->optimization_disabled() &&
shared->disable_optimization_reason() == BailoutReason::kNeverOptimize) {
- return MaybeHandle<Code>();
+ return {};
}
- if (isolate->debug()->needs_check_on_function_call()) {
- // Do not optimize when debugger needs to hook into every call.
- return MaybeHandle<Code>();
- }
+ // Do not optimize when debugger needs to hook into every call.
+ if (isolate->debug()->needs_check_on_function_call()) return {};
+
+ // Do not use TurboFan if we need to be able to set break points.
+ if (shared->HasBreakInfo()) return {};
+
+ // Do not use TurboFan if optimization is disabled or function doesn't pass
+ // turbo_filter.
+ if (!FLAG_opt || !shared->PassesFilter(FLAG_turbo_filter)) return {};
// If code was pending optimization for testing, remove the entry
// from the table that was preventing the bytecode from being flushed
@@ -869,25 +1030,30 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}
- Handle<Code> cached_code;
- if (GetCodeFromOptimizedCodeCache(function, osr_offset)
- .ToHandle(&cached_code)) {
- if (FLAG_trace_opt) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[found optimized code for ");
- function->ShortPrint(scope.file());
- if (!osr_offset.IsNone()) {
- PrintF(scope.file(), " at OSR AST id %d", osr_offset.ToInt());
- }
- PrintF(scope.file(), "]\n");
+ if (!IsForNativeContextIndependentCachingOnly(code_kind)) {
+ Handle<Code> cached_code;
+ if (GetCodeFromOptimizedCodeCache(function, osr_offset)
+ .ToHandle(&cached_code)) {
+ CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset);
+ return cached_code;
}
- return cached_code;
}
// Reset profiler ticks; the function is no longer considered hot.
DCHECK(shared->is_compiled());
function->feedback_vector().set_profiler_ticks(0);
+ if (CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
+ osr_offset == BailoutId::None()) {
+ // Don't generate NCI code when we've already done so in the past.
+ Handle<Code> cached_code;
+ if (GetCodeFromCompilationCache(isolate, shared).ToHandle(&cached_code)) {
+ if (FLAG_trace_turbo_nci)
+ CompilationCacheCode::TraceHit(shared, cached_code);
+ return cached_code;
+ }
+ }
+
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
@@ -901,55 +1067,26 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// tolerate the lack of a script without bytecode.
DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
std::unique_ptr<OptimizedCompilationJob> job(
- compiler::Pipeline::NewCompilationJob(isolate, function, has_script,
- osr_offset, osr_frame));
+ compiler::Pipeline::NewCompilationJob(isolate, function, code_kind,
+ has_script, osr_offset, osr_frame));
OptimizedCompilationInfo* compilation_info = job->compilation_info();
- // Do not use TurboFan if we need to be able to set break points.
- if (compilation_info->shared_info()->HasBreakInfo()) {
- compilation_info->AbortOptimization(BailoutReason::kFunctionBeingDebugged);
- return MaybeHandle<Code>();
- }
-
- // Do not use TurboFan if optimization is disabled or function doesn't pass
- // turbo_filter.
- if (!FLAG_opt || !shared->PassesFilter(FLAG_turbo_filter)) {
- compilation_info->AbortOptimization(BailoutReason::kOptimizationDisabled);
- return MaybeHandle<Code>();
- }
-
- // In case of concurrent recompilation, all handles below this point will be
- // allocated in a deferred handle scope that is detached and handed off to
- // the background thread when we return.
- base::Optional<CompilationHandleScope> compilation;
- if (mode == ConcurrencyMode::kConcurrent) {
- compilation.emplace(isolate, compilation_info);
- }
-
- // All handles below will be canonicalized.
- CanonicalHandleScope canonical(isolate);
-
- // Reopen handles in the new CompilationHandleScope.
- compilation_info->ReopenHandlesInNewHandleScope(isolate);
-
+ // Prepare the job and launch concurrent compilation, or compile now.
if (mode == ConcurrencyMode::kConcurrent) {
- if (GetOptimizedCodeLater(job.get(), isolate)) {
- job.release(); // The background recompile job owns this now.
-
- // Set the optimization marker and return a code object which checks it.
- function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
- DCHECK(function->IsInterpreted() ||
- (!function->is_compiled() && function->shared().IsInterpreted()));
- DCHECK(function->shared().HasBytecodeArray());
+ if (GetOptimizedCodeLater(std::move(job), isolate, compilation_info,
+ code_kind, function)) {
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
} else {
- if (GetOptimizedCodeNow(job.get(), isolate))
+ DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
+ if (GetOptimizedCodeNow(job.get(), isolate, compilation_info)) {
+ InsertCodeIntoCompilationCache(isolate, compilation_info);
return compilation_info->code();
+ }
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
- return MaybeHandle<Code>();
+ return {};
}
bool FailAndClearPendingException(Isolate* isolate) {
@@ -1090,7 +1227,9 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
!parsing::ParseProgram(parse_info, script, maybe_outer_scope_info,
- isolate)) {
+ isolate, parsing::ReportStatisticsMode::kYes)) {
+ FailWithPendingException(isolate, script, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
}
// Measure how long it takes to do the compilation; only take the
@@ -1114,7 +1253,8 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
if (!IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
isolate, shared_info, script, parse_info, isolate->allocator(),
- is_compiled_scope, &finalize_unoptimized_compilation_data_list)) {
+ is_compiled_scope, &finalize_unoptimized_compilation_data_list,
+ nullptr)) {
FailWithPendingException(isolate, script, parse_info,
Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
@@ -1129,34 +1269,68 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
return shared_info;
}
-std::unique_ptr<UnoptimizedCompilationJob> CompileOnBackgroundThread(
+RuntimeCallCounterId RuntimeCallCounterIdForCompileBackground(
+ ParseInfo* parse_info) {
+ if (parse_info->flags().is_toplevel()) {
+ if (parse_info->flags().is_eval()) {
+ return RuntimeCallCounterId::kCompileBackgroundEval;
+ }
+ return RuntimeCallCounterId::kCompileBackgroundScript;
+ }
+ return RuntimeCallCounterId::kCompileBackgroundFunction;
+}
+
+MaybeHandle<SharedFunctionInfo> CompileAndFinalizeOnBackgroundThread(
ParseInfo* parse_info, AccountingAllocator* allocator,
- UnoptimizedCompilationJobList* inner_function_jobs) {
+ Handle<Script> script, LocalIsolate* isolate,
+ FinalizeUnoptimizedCompilationDataList*
+ finalize_unoptimized_compilation_data_list,
+ DeferredFinalizationJobDataList* jobs_to_retry_finalization_on_main_thread,
+ IsCompiledScope* is_compiled_scope) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileCodeBackground");
+ RuntimeCallTimerScope runtimeTimer(
+ parse_info->runtime_call_stats(),
+ RuntimeCallCounterIdForCompileBackground(parse_info));
+
+ Handle<SharedFunctionInfo> shared_info =
+ CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
+
+ if (!IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
+ isolate, shared_info, script, parse_info, allocator,
+ is_compiled_scope, finalize_unoptimized_compilation_data_list,
+ jobs_to_retry_finalization_on_main_thread)) {
+ return kNullMaybeHandle;
+ }
+
+ // Character stream shouldn't be used again.
+ parse_info->ResetCharacterStream();
+
+ return shared_info;
+}
+
+// TODO(leszeks): Remove this once off-thread finalization is always on.
+void CompileOnBackgroundThread(ParseInfo* parse_info,
+ AccountingAllocator* allocator,
+ UnoptimizedCompilationJobList* jobs) {
DisallowHeapAccess no_heap_access;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileCodeBackground");
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
- parse_info->flags().is_toplevel()
- ? parse_info->flags().is_eval()
- ? RuntimeCallCounterId::kCompileBackgroundEval
- : RuntimeCallCounterId::kCompileBackgroundScript
- : RuntimeCallCounterId::kCompileBackgroundFunction);
+ RuntimeCallCounterIdForCompileBackground(parse_info));
// Generate the unoptimized bytecode or asm-js data.
- DCHECK(inner_function_jobs->empty());
+ DCHECK(jobs->empty());
- // TODO(leszeks): Once we can handle asm-js without bailing out of
- // off-thread finalization entirely, and the finalization is off-thread by
- // default, this can be changed to the iterative version.
- std::unique_ptr<UnoptimizedCompilationJob> outer_function_job =
- RecursivelyExecuteUnoptimizedCompilationJobs(
- parse_info, parse_info->literal(), allocator, inner_function_jobs);
+ bool success = RecursivelyExecuteUnoptimizedCompilationJobs(
+ parse_info, parse_info->literal(), allocator, jobs);
+
+ USE(success);
+ DCHECK_EQ(success, !jobs->empty());
// Character stream shouldn't be used again.
parse_info->ResetCharacterStream();
-
- return outer_function_job;
}
MaybeHandle<SharedFunctionInfo> CompileToplevel(
@@ -1168,6 +1342,24 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
} // namespace
+CompilationHandleScope::~CompilationHandleScope() {
+ info_->set_persistent_handles(persistent_.Detach());
+}
+
+FinalizeUnoptimizedCompilationData::FinalizeUnoptimizedCompilationData(
+ LocalIsolate* isolate, Handle<SharedFunctionInfo> function_handle,
+ base::TimeDelta time_taken_to_execute,
+ base::TimeDelta time_taken_to_finalize)
+ : time_taken_to_execute_(time_taken_to_execute),
+ time_taken_to_finalize_(time_taken_to_finalize),
+ function_handle_(isolate->heap()->NewPersistentHandle(function_handle)) {}
+
+DeferredFinalizationJobData::DeferredFinalizationJobData(
+ LocalIsolate* isolate, Handle<SharedFunctionInfo> function_handle,
+ std::unique_ptr<UnoptimizedCompilationJob> job)
+ : function_handle_(isolate->heap()->NewPersistentHandle(function_handle)),
+ job_(std::move(job)) {}
+
BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
Isolate* isolate)
: flags_(UnoptimizedCompileFlags::ForToplevelCompile(
@@ -1175,6 +1367,7 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
REPLMode::kNo)),
compile_state_(isolate),
info_(std::make_unique<ParseInfo>(isolate, flags_, &compile_state_)),
+ isolate_for_local_isolate_(nullptr),
start_position_(0),
end_position_(0),
function_literal_id_(kFunctionLiteralIdTopLevel),
@@ -1199,8 +1392,7 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
finalize_on_background_thread_ =
FLAG_finalize_streaming_on_background && !flags_.block_coverage_enabled();
if (finalize_on_background_thread()) {
- off_thread_isolate_ =
- std::make_unique<OffThreadIsolate>(isolate, info_->zone());
+ isolate_for_local_isolate_ = isolate;
}
}
@@ -1214,6 +1406,7 @@ BackgroundCompileTask::BackgroundCompileTask(
compile_state_(*outer_parse_info->state()),
info_(ParseInfo::ForToplevelFunction(flags_, &compile_state_,
function_literal, function_name)),
+ isolate_for_local_isolate_(nullptr),
start_position_(function_literal->start_position()),
end_position_(function_literal->end_position()),
function_literal_id_(function_literal->function_literal_id()),
@@ -1277,27 +1470,9 @@ class OffThreadParseInfoScope {
DISALLOW_COPY_AND_ASSIGN(OffThreadParseInfoScope);
};
-bool CanOffThreadFinalizeAllJobs(
- UnoptimizedCompilationJob* outer_job,
- const UnoptimizedCompilationJobList& inner_function_jobs) {
- if (!outer_job->can_off_thread_finalize()) return false;
-
- for (auto& job : inner_function_jobs) {
- if (!job->can_off_thread_finalize()) {
- return false;
- }
- }
-
- return true;
-}
-
} // namespace
void BackgroundCompileTask::Run() {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHeapAccess no_heap_access;
-
TimedHistogramScope timer(timer_);
base::Optional<OffThreadParseInfoScope> off_thread_scope(
base::in_place, info_.get(), worker_thread_runtime_call_stats_,
@@ -1319,87 +1494,76 @@ void BackgroundCompileTask::Run() {
parser_->ParseOnBackground(info_.get(), start_position_, end_position_,
function_literal_id_);
- if (info_->literal() != nullptr) {
- // Parsing has succeeded, compile.
- outer_function_job_ = CompileOnBackgroundThread(
- info_.get(), compile_state_.allocator(), &inner_function_jobs_);
- }
- // Save the language mode and record whether we collected source positions.
- language_mode_ = info_->language_mode();
- // We don't currently support off-thread finalization for some jobs (namely,
- // asm.js), so release the off-thread isolate and fall back to main-thread
- // finalization.
- // TODO(leszeks): Still finalize Ignition tasks on the background thread,
- // and fallback to main-thread finalization for asm.js jobs only.
- finalize_on_background_thread_ =
- finalize_on_background_thread_ && outer_function_job_ &&
- CanOffThreadFinalizeAllJobs(outer_function_job(), *inner_function_jobs());
+ // Save the language mode.
+ language_mode_ = info_->language_mode();
if (!finalize_on_background_thread_) {
- off_thread_isolate_.reset();
- return;
- }
+ if (info_->literal() != nullptr) {
+ CompileOnBackgroundThread(info_.get(), compile_state_.allocator(),
+ &compilation_jobs_);
+ }
+ } else {
+ DCHECK(info_->flags().is_toplevel());
- // ---
- // At this point, off-thread compilation has completed and we are off-thread
- // finalizing.
- // ---
+ LocalIsolate isolate(isolate_for_local_isolate_);
+ LocalHandleScope handle_scope(&isolate);
- DCHECK(info_->flags().is_toplevel());
+ info_->ast_value_factory()->Internalize(&isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.FinalizeCodeBackground");
+ // We don't have the script source, origin, or details yet, so use default
+ // values for them. These will be fixed up during the main-thread merge.
+ Handle<Script> script =
+ info_->CreateScript(&isolate, isolate.factory()->empty_string(),
+ kNullMaybeHandle, ScriptOriginOptions());
- OffThreadIsolate* isolate = off_thread_isolate();
- isolate->PinToCurrentThread();
+ parser_->HandleSourceURLComments(&isolate, script);
- OffThreadHandleScope handle_scope(isolate);
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ if (info_->literal() != nullptr) {
+ maybe_result = CompileAndFinalizeOnBackgroundThread(
+ info_.get(), compile_state_.allocator(), script, &isolate,
+ &finalize_unoptimized_compilation_data_,
+ &jobs_to_retry_finalization_on_main_thread_, &is_compiled_scope_);
+ } else {
+ DCHECK(compile_state_.pending_error_handler()->has_pending_error());
+ PreparePendingException(&isolate, info_.get());
+ }
- // We don't have the script source, origin, or details yet, so use default
- // values for them. These will be fixed up during the main-thread merge.
- Handle<Script> script =
- info_->CreateScript(isolate, isolate->factory()->empty_string(),
- kNullMaybeHandle, ScriptOriginOptions());
+ outer_function_sfi_ =
+ isolate.heap()->NewPersistentMaybeHandle(maybe_result);
+ script_ = isolate.heap()->NewPersistentHandle(script);
- MaybeHandle<SharedFunctionInfo> maybe_result;
- if (info_->literal() != nullptr) {
- info_->ast_value_factory()->Internalize(isolate);
+ persistent_handles_ = isolate.heap()->DetachPersistentHandles();
- Handle<SharedFunctionInfo> shared_info =
- CreateTopLevelSharedFunctionInfo(info_.get(), script, isolate);
- if (FinalizeAllUnoptimizedCompilationJobs(
- info_.get(), isolate, shared_info, outer_function_job_.get(),
- &inner_function_jobs_, &finalize_unoptimized_compilation_data_)) {
- maybe_result = shared_info;
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.FinalizeCodeBackground.ReleaseParser");
+ DCHECK_EQ(language_mode_, info_->language_mode());
+ off_thread_scope.reset();
+ parser_.reset();
+ info_.reset();
}
-
- parser_->HandleSourceURLComments(isolate, script);
- } else {
- DCHECK(!outer_function_job_);
}
+}
+MaybeHandle<SharedFunctionInfo> BackgroundCompileTask::GetOuterFunctionSfi(
+ Isolate* isolate) {
+ // outer_function_sfi_ is a persistent Handle, tied to the lifetime of the
+ // persistent_handles_ member, so create a new Handle to let it outlive
+ // the BackgroundCompileTask.
Handle<SharedFunctionInfo> result;
- if (!maybe_result.ToHandle(&result)) {
- DCHECK(compile_state_.pending_error_handler()->has_pending_error());
- PreparePendingException(isolate, info_.get());
+ if (outer_function_sfi_.ToHandle(&result)) {
+ return handle(*result, isolate);
}
+ return kNullMaybeHandle;
+}
- outer_function_sfi_ = isolate->TransferHandle(maybe_result);
- script_ = isolate->TransferHandle(script);
-
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.FinalizeCodeBackground.Finish");
- isolate->FinishOffThread();
-
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.FinalizeCodeBackground.ReleaseParser");
- DCHECK_EQ(language_mode_, info_->language_mode());
- off_thread_scope.reset();
- parser_.reset();
- info_.reset();
- outer_function_job_.reset();
- inner_function_jobs_.clear();
+Handle<Script> BackgroundCompileTask::GetScript(Isolate* isolate) {
+ // script_ is a persistent Handle, tied to the lifetime of the
+ // persistent_handles_ member, so create a new Handle to let it outlive
+ // the BackgroundCompileTask.
+ return handle(*script_, isolate);
}
// ----------------------------------------------------------------------------
@@ -1456,7 +1620,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
// Parse and update ParseInfo with the results. Don't update parsing
// statistics since we've already parsed the code before.
if (!parsing::ParseAny(&parse_info, shared_info, isolate,
- parsing::ReportErrorsAndStatisticsMode::kNo)) {
+ parsing::ReportStatisticsMode::kNo)) {
// Parsing failed probably as a result of stack exhaustion.
bytecode->SetSourcePositionsFailedToCollect();
return FailAndClearPendingException(isolate);
@@ -1494,10 +1658,11 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
}
DCHECK(!isolate->has_pending_exception());
- DCHECK(shared_info->is_compiled_scope().is_compiled());
+ DCHECK(shared_info->is_compiled_scope(isolate).is_compiled());
return true;
}
+// static
bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope) {
@@ -1534,7 +1699,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
if (!dispatcher->FinishNow(shared_info)) {
return FailWithPendingException(isolate, script, &parse_info, flag);
}
- *is_compiled_scope = shared_info->is_compiled_scope();
+ *is_compiled_scope = shared_info->is_compiled_scope(isolate);
DCHECK(is_compiled_scope->is_compiled());
return true;
}
@@ -1548,7 +1713,8 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
// Parse and update ParseInfo with the results.
- if (!parsing::ParseAny(&parse_info, shared_info, isolate)) {
+ if (!parsing::ParseAny(&parse_info, shared_info, isolate,
+ parsing::ReportStatisticsMode::kYes)) {
return FailWithPendingException(isolate, script, &parse_info, flag);
}
@@ -1558,7 +1724,8 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
if (!IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
isolate, shared_info, script, &parse_info, isolate->allocator(),
- is_compiled_scope, &finalize_unoptimized_compilation_data_list)) {
+ is_compiled_scope, &finalize_unoptimized_compilation_data_list,
+ nullptr)) {
return FailWithPendingException(isolate, script, &parse_info, flag);
}
@@ -1570,13 +1737,14 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
return true;
}
+// static
bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope) {
- // We should never reach here if the function is already compiled or optimized
+ // We should never reach here if the function is already compiled or
+ // optimized.
DCHECK(!function->is_compiled());
- DCHECK(!function->IsOptimized());
DCHECK(!function->HasOptimizationMarker());
- DCHECK(!function->HasOptimizedCode());
+ DCHECK(!function->HasAvailableOptimizedCode());
// Reset the JSFunction if we are recompiling due to the bytecode having been
// flushed.
@@ -1586,7 +1754,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
Handle<SharedFunctionInfo> shared_info = handle(function->shared(), isolate);
// Ensure shared function info is compiled.
- *is_compiled_scope = shared_info->is_compiled_scope();
+ *is_compiled_scope = shared_info->is_compiled_scope(isolate);
if (!is_compiled_scope->is_compiled() &&
!Compile(shared_info, flag, is_compiled_scope)) {
return false;
@@ -1595,20 +1763,17 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
Handle<Code> code = handle(shared_info->GetCode(), isolate);
// Initialize the feedback cell for this JSFunction.
- JSFunction::InitializeFeedbackCell(function);
+ JSFunction::InitializeFeedbackCell(function, is_compiled_scope);
// Optimize now if --always-opt is enabled.
if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
- if (FLAG_trace_opt) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[optimizing ");
- function->ShortPrint(scope.file());
- PrintF(scope.file(), " because --always-opt]\n");
- }
- Handle<Code> opt_code;
- if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent)
- .ToHandle(&opt_code)) {
- code = opt_code;
+ CompilerTracer::TraceOptimizeForAlwaysOpt(isolate, function);
+
+ Handle<Code> maybe_code;
+ if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent,
+ CodeKindForTopTier())
+ .ToHandle(&maybe_code)) {
+ code = maybe_code;
}
}
@@ -1622,6 +1787,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
return true;
}
+// static
bool Compiler::FinalizeBackgroundCompileTask(
BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate, ClearExceptionFlag flag) {
@@ -1642,7 +1808,7 @@ bool Compiler::FinalizeBackgroundCompileTask(
task->parser()->UpdateStatistics(isolate, script);
task->parser()->HandleSourceURLComments(isolate, script);
- if (parse_info->literal() == nullptr || !task->outer_function_job()) {
+ if (task->compilation_jobs()->empty()) {
// Parsing or compile failed on background thread - report error messages.
return FailWithPendingException(isolate, script, parse_info, flag);
}
@@ -1650,8 +1816,7 @@ bool Compiler::FinalizeBackgroundCompileTask(
// Parsing has succeeded - finalize compilation.
parse_info->ast_value_factory()->Internalize(isolate);
if (!FinalizeAllUnoptimizedCompilationJobs(
- parse_info, isolate, shared_info, task->outer_function_job(),
- task->inner_function_jobs(),
+ parse_info, isolate, script, task->compilation_jobs(),
task->finalize_unoptimized_compilation_data())) {
// Finalization failed - throw an exception.
return FailWithPendingException(isolate, script, parse_info, flag);
@@ -1665,15 +1830,18 @@ bool Compiler::FinalizeBackgroundCompileTask(
return true;
}
+// static
bool Compiler::CompileOptimized(Handle<JSFunction> function,
- ConcurrencyMode mode) {
- if (function->IsOptimized()) return true;
+ ConcurrencyMode mode, CodeKind code_kind) {
+ DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
+
+ if (function->HasAttachedOptimizedCode()) return true;
+
Isolate* isolate = function->GetIsolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
- // Start a compilation.
Handle<Code> code;
- if (!GetOptimizedCode(function, mode).ToHandle(&code)) {
+ if (!GetOptimizedCode(function, mode, code_kind).ToHandle(&code)) {
// Optimization failed, get unoptimized code. Unoptimized code must exist
// already if we are optimizing.
DCHECK(!isolate->has_pending_exception());
@@ -1682,8 +1850,9 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
code = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
- // Install code on closure.
- function->set_code(*code);
+ if (!IsForNativeContextIndependentCachingOnly(code_kind)) {
+ function->set_code(*code);
+ }
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
@@ -1698,12 +1867,14 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
return true;
}
+// static
MaybeHandle<SharedFunctionInfo> Compiler::CompileForLiveEdit(
ParseInfo* parse_info, Handle<Script> script, Isolate* isolate) {
IsCompiledScope is_compiled_scope;
return CompileToplevel(parse_info, script, isolate, &is_compiled_scope);
}
+// static
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
@@ -1744,7 +1915,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (eval_result.has_shared()) {
shared_info = Handle<SharedFunctionInfo>(eval_result.shared(), isolate);
script = Handle<Script>(Script::cast(shared_info->script()), isolate);
- is_compiled_scope = shared_info->is_compiled_scope();
+ is_compiled_scope = shared_info->is_compiled_scope(isolate);
allow_eval_cache = true;
} else {
UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForToplevelCompile(
@@ -1801,7 +1972,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, AllocationType::kYoung);
- JSFunction::InitializeFeedbackCell(result);
+ JSFunction::InitializeFeedbackCell(result, &is_compiled_scope);
if (allow_eval_cache) {
// Make sure to cache this result.
Handle<FeedbackCell> new_feedback_cell(result->raw_feedback_cell(),
@@ -1813,7 +1984,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, AllocationType::kYoung);
- JSFunction::InitializeFeedbackCell(result);
+ JSFunction::InitializeFeedbackCell(result, &is_compiled_scope);
if (allow_eval_cache) {
// Add the SharedFunctionInfo and the LiteralsArray to the eval cache if
// we didn't retrieve from there.
@@ -1881,6 +2052,8 @@ bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
// - source.is_null() && !unknown_object: compilation should be blocked.
//
// - !source.is_null() and unknown_object can't be true at the same time.
+
+// static
std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
Isolate* isolate, Handle<Context> context,
Handle<i::Object> original_source) {
@@ -1926,6 +2099,7 @@ std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
return {MaybeHandle<String>(), !original_source->IsString()};
}
+// static
MaybeHandle<JSFunction> Compiler::GetFunctionFromValidatedString(
Handle<Context> context, MaybeHandle<String> source,
ParseRestriction restriction, int parameters_end_pos) {
@@ -1953,6 +2127,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromValidatedString(
eval_scope_position, eval_position);
}
+// static
MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
Handle<Context> context, Handle<Object> source,
ParseRestriction restriction, int parameters_end_pos) {
@@ -2281,6 +2456,16 @@ bool CanBackgroundCompile(const Compiler::ScriptDetails& script_details,
natives == NOT_NATIVES_CODE;
}
+bool CompilationExceptionIsRangeError(Isolate* isolate, Handle<Object> obj) {
+ if (!obj->IsJSError(isolate)) return false;
+ Handle<JSReceiver> js_obj = Handle<JSReceiver>::cast(obj);
+ Handle<JSReceiver> constructor;
+ if (!JSReceiver::GetConstructor(js_obj).ToHandle(&constructor)) {
+ return false;
+ }
+ return *constructor == *isolate->range_error_function();
+}
+
MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
Handle<String> source, const Compiler::ScriptDetails& script_details,
ScriptOriginOptions origin_options, Isolate* isolate,
@@ -2293,6 +2478,7 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
CHECK(background_compile_thread.Start());
MaybeHandle<SharedFunctionInfo> main_thread_maybe_result;
+ bool main_thread_had_stack_overflow = false;
// In parallel, compile on the main thread to flush out any data races.
{
IsCompiledScope inner_is_compiled_scope;
@@ -2305,7 +2491,14 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
main_thread_maybe_result = CompileScriptOnMainThread(
flags_copy, source, script_details, origin_options, NOT_NATIVES_CODE,
nullptr, isolate, &inner_is_compiled_scope);
+ if (main_thread_maybe_result.is_null()) {
+ // Assume all range errors are stack overflows.
+ main_thread_had_stack_overflow = CompilationExceptionIsRangeError(
+ isolate, handle(isolate->pending_exception(), isolate));
+ isolate->clear_pending_exception();
+ }
}
+
// Join with background thread and finalize compilation.
background_compile_thread.Join();
MaybeHandle<SharedFunctionInfo> maybe_result =
@@ -2313,13 +2506,22 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
isolate, source, script_details, origin_options,
background_compile_thread.data());
- // Either both compiles should succeed, or both should fail.
+ // Either both compiles should succeed, or both should fail. The one exception
+ // to this is that the main-thread compilation might stack overflow while the
+ // background compilation doesn't, so relax the check to include this case.
// TODO(leszeks): Compare the contents of the results of the two compiles.
- CHECK_EQ(maybe_result.is_null(), main_thread_maybe_result.is_null());
+ if (main_thread_had_stack_overflow) {
+ CHECK(main_thread_maybe_result.is_null());
+ } else {
+ CHECK_EQ(maybe_result.is_null(), main_thread_maybe_result.is_null());
+ }
Handle<SharedFunctionInfo> result;
if (maybe_result.ToHandle(&result)) {
- *is_compiled_scope = result->is_compiled_scope();
+ // The BackgroundCompileTask's IsCompiledScope will keep the result alive
+ // until it dies at the end of this function, after which this new
+ // IsCompiledScope can take over.
+ *is_compiled_scope = result->is_compiled_scope(isolate);
}
return maybe_result;
@@ -2327,6 +2529,7 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
} // namespace
+// static
MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Isolate* isolate, Handle<String> source,
const Compiler::ScriptDetails& script_details,
@@ -2381,7 +2584,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
.ToHandle(&inner_result) &&
inner_result->is_compiled()) {
// Promote to per-isolate compilation cache.
- is_compiled_scope = inner_result->is_compiled_scope();
+ is_compiled_scope = inner_result->is_compiled_scope(isolate);
DCHECK(is_compiled_scope.is_compiled());
compilation_cache->PutScript(source, isolate->native_context(),
language_mode, inner_result);
@@ -2431,6 +2634,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
return maybe_result;
}
+// static
MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
Handle<String> source, Handle<FixedArray> arguments,
Handle<Context> context, const Compiler::ScriptDetails& script_details,
@@ -2513,7 +2717,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
}
DCHECK(!wrapped.is_null());
} else {
- is_compiled_scope = wrapped->is_compiled_scope();
+ is_compiled_scope = wrapped->is_compiled_scope(isolate);
script = Handle<Script>(Script::cast(wrapped->script()), isolate);
}
DCHECK(is_compiled_scope.is_compiled());
@@ -2522,6 +2726,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
wrapped, context, AllocationType::kYoung);
}
+// static
MaybeHandle<SharedFunctionInfo>
Compiler::GetSharedFunctionInfoForStreamedScript(
Isolate* isolate, Handle<String> source,
@@ -2567,12 +2772,26 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish");
- task->off_thread_isolate()->Publish(isolate);
+ script = task->GetScript(isolate);
+
+ // We might not have been able to finalize all jobs on the background
+ // thread (e.g. asm.js jobs), so finalize those deferred jobs now.
+ if (FinalizeDeferredUnoptimizedCompilationJobs(
+ isolate, script,
+ task->jobs_to_retry_finalization_on_main_thread(),
+ task->compile_state()->pending_error_handler(),
+ task->finalize_unoptimized_compilation_data())) {
+ maybe_result = task->GetOuterFunctionSfi(isolate);
+ }
- maybe_result = task->outer_function_sfi();
- script = task->script();
script->set_source(*source);
script->set_origin_options(origin_options);
+
+ // The one post-hoc fix-up: Add the script to the script list.
+ Handle<WeakArrayList> scripts = isolate->factory()->script_list();
+ scripts = WeakArrayList::Append(isolate, scripts,
+ MaybeObjectHandle::Weak(script));
+ isolate->heap()->SetRootScriptList(*scripts);
} else {
ParseInfo* parse_info = task->info();
DCHECK(parse_info->flags().is_toplevel());
@@ -2583,15 +2802,16 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
task->parser()->UpdateStatistics(isolate, script);
task->parser()->HandleSourceURLComments(isolate, script);
- if (parse_info->literal() != nullptr && task->outer_function_job()) {
+ if (!task->compilation_jobs()->empty()) {
// Off-thread parse & compile has succeeded - finalize compilation.
+ DCHECK_NOT_NULL(parse_info->literal());
+
parse_info->ast_value_factory()->Internalize(isolate);
Handle<SharedFunctionInfo> shared_info =
CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
if (FinalizeAllUnoptimizedCompilationJobs(
- parse_info, isolate, shared_info, task->outer_function_job(),
- task->inner_function_jobs(),
+ parse_info, isolate, script, task->compilation_jobs(),
task->finalize_unoptimized_compilation_data())) {
maybe_result = shared_info;
}
@@ -2635,6 +2855,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
return maybe_result;
}
+// static
template <typename LocalIsolate>
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate) {
@@ -2684,21 +2905,23 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, Isolate* isolate);
template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
- FunctionLiteral* literal, Handle<Script> script, OffThreadIsolate* isolate);
+ FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate);
+// static
MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
BailoutId osr_offset,
JavaScriptFrame* osr_frame) {
DCHECK(!osr_offset.IsNone());
DCHECK_NOT_NULL(osr_frame);
- return GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent, osr_offset,
- osr_frame);
+ return GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent,
+ CodeKindForTopTier(), osr_offset, osr_frame);
}
+// static
bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
Isolate* isolate) {
VMState<COMPILER> state(isolate);
- // Take ownership of compilation job. Deleting job also tears down the zone.
+ // Take ownership of the job. Deleting the job also tears down the zone.
std::unique_ptr<OptimizedCompilationJob> job_scope(job);
OptimizedCompilationInfo* compilation_info = job->compilation_info();
@@ -2710,8 +2933,12 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
- // Reset profiler ticks, function is no longer considered hot.
- compilation_info->closure()->feedback_vector().set_profiler_ticks(0);
+ const bool should_install_code_on_function =
+ !IsForNativeContextIndependentCachingOnly(compilation_info);
+ if (should_install_code_on_function) {
+ // Reset profiler ticks, function is no longer considered hot.
+ compilation_info->closure()->feedback_vector().set_profiler_ticks(0);
+ }
DCHECK(!shared->HasBreakInfo());
@@ -2729,25 +2956,17 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
isolate);
InsertCodeIntoOptimizedCodeCache(compilation_info);
- if (FLAG_trace_opt) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[completed optimizing ");
- compilation_info->closure()->ShortPrint(scope.file());
- PrintF(scope.file(), "]\n");
+ InsertCodeIntoCompilationCache(isolate, compilation_info);
+ CompilerTracer::TraceCompletedJob(isolate, compilation_info);
+ if (should_install_code_on_function) {
+ compilation_info->closure()->set_code(*compilation_info->code());
}
- compilation_info->closure()->set_code(*compilation_info->code());
return CompilationJob::SUCCEEDED;
}
}
DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
- if (FLAG_trace_opt) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[aborted optimizing ");
- compilation_info->closure()->ShortPrint(scope.file());
- PrintF(scope.file(), " because: %s]\n",
- GetBailoutReason(compilation_info->bailout_reason()));
- }
+ CompilerTracer::TraceAbortedJob(isolate, compilation_info);
compilation_info->closure()->set_code(shared->GetCode());
// Clear the InOptimizationQueue marker, if it exists.
if (compilation_info->closure()->IsInOptimizationQueue()) {
@@ -2756,15 +2975,16 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
return CompilationJob::FAILED;
}
+// static
void Compiler::PostInstantiation(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
+ IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
// If code is compiled to bytecode (i.e., isn't asm.js), then allocate a
// feedback and check for optimized code.
if (is_compiled_scope.is_compiled() && shared->HasBytecodeArray()) {
- JSFunction::InitializeFeedbackCell(function);
+ JSFunction::InitializeFeedbackCell(function, &is_compiled_scope);
Code code = function->has_feedback_vector()
? function->feedback_vector().optimized_code()
@@ -2777,9 +2997,9 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
}
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
- !shared->optimization_disabled() && !function->IsOptimized() &&
- !function->HasOptimizedCode()) {
- JSFunction::EnsureFeedbackVector(function);
+ !shared->optimization_disabled() &&
+ !function->HasAvailableOptimizedCode()) {
+ JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
}
}
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index b851d6abd6..2af1baa277 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -12,7 +12,8 @@
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
-#include "src/execution/off-thread-isolate.h"
+#include "src/execution/local-isolate.h"
+#include "src/handles/persistent-handles.h"
#include "src/logging/code-events.h"
#include "src/objects/contexts.h"
#include "src/parsing/parse-info.h"
@@ -43,6 +44,10 @@ class WorkerThreadRuntimeCallStats;
using UnoptimizedCompilationJobList =
std::forward_list<std::unique_ptr<UnoptimizedCompilationJob>>;
+inline bool ShouldSpawnExtraNativeContextIndependentCompilationJob() {
+ return FLAG_turbo_nci && !FLAG_turbo_nci_as_highest_tier;
+}
+
// The V8 compiler API.
//
// This is the central hub for dispatching to the various compilers within V8.
@@ -68,7 +73,8 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
IsCompiledScope* is_compiled_scope);
static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
- static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
+ static bool CompileOptimized(Handle<JSFunction> function,
+ ConcurrencyMode mode, CodeKind code_kind);
// Collect source positions for a function that has already been compiled to
// bytecode, but for which source positions were not collected (e.g. because
@@ -196,7 +202,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// thread. The current state of the job can be checked using {state()}.
class V8_EXPORT_PRIVATE CompilationJob {
public:
- enum Status { SUCCEEDED, FAILED };
+ enum Status { SUCCEEDED, FAILED, RETRY_ON_MAIN_THREAD };
enum class State {
kReadyToPrepare,
kReadyToExecute,
@@ -218,10 +224,16 @@ class V8_EXPORT_PRIVATE CompilationJob {
}
V8_WARN_UNUSED_RESULT Status UpdateState(Status status, State next_state) {
- if (status == SUCCEEDED) {
- state_ = next_state;
- } else {
- state_ = State::kFailed;
+ switch (status) {
+ case SUCCEEDED:
+ state_ = next_state;
+ break;
+ case FAILED:
+ state_ = State::kFailed;
+ break;
+ case RETRY_ON_MAIN_THREAD:
+ // Don't change the state; we'll retry on the main thread.
+ break;
}
return status;
}
@@ -241,17 +253,12 @@ class V8_EXPORT_PRIVATE CompilationJob {
// Either of phases can either fail or succeed.
class UnoptimizedCompilationJob : public CompilationJob {
public:
- enum class CanOffThreadFinalize : bool { kYes = true, kNo = false };
-
UnoptimizedCompilationJob(uintptr_t stack_limit, ParseInfo* parse_info,
- UnoptimizedCompilationInfo* compilation_info,
- CanOffThreadFinalize can_off_thread_finalize)
+ UnoptimizedCompilationInfo* compilation_info)
: CompilationJob(State::kReadyToExecute),
stack_limit_(stack_limit),
parse_info_(parse_info),
- compilation_info_(compilation_info),
- can_off_thread_finalize_(can_off_thread_finalize ==
- CanOffThreadFinalize::kYes) {}
+ compilation_info_(compilation_info) {}
// Executes the compile job. Can be called on a background thread.
V8_WARN_UNUSED_RESULT Status ExecuteJob();
@@ -260,16 +267,21 @@ class UnoptimizedCompilationJob : public CompilationJob {
V8_WARN_UNUSED_RESULT Status
FinalizeJob(Handle<SharedFunctionInfo> shared_info, Isolate* isolate);
- // Finalizes the compile job. Can be called on a background thread.
- V8_WARN_UNUSED_RESULT Status FinalizeJob(
- Handle<SharedFunctionInfo> shared_info, OffThreadIsolate* isolate);
+ // Finalizes the compile job. Can be called on a background thread, and might
+ // return RETRY_ON_MAIN_THREAD if the finalization can't be run on the
+ // background thread, and should instead be retried on the foreground thread.
+ V8_WARN_UNUSED_RESULT Status
+ FinalizeJob(Handle<SharedFunctionInfo> shared_info, LocalIsolate* isolate);
void RecordCompilationStats(Isolate* isolate) const;
void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Handle<SharedFunctionInfo> shared,
Isolate* isolate) const;
- ParseInfo* parse_info() const { return parse_info_; }
+ ParseInfo* parse_info() const {
+ DCHECK_NOT_NULL(parse_info_);
+ return parse_info_;
+ }
UnoptimizedCompilationInfo* compilation_info() const {
return compilation_info_;
}
@@ -283,7 +295,7 @@ class UnoptimizedCompilationJob : public CompilationJob {
return time_taken_to_finalize_;
}
- bool can_off_thread_finalize() const { return can_off_thread_finalize_; }
+ void ClearParseInfo() { parse_info_ = nullptr; }
protected:
// Overridden by the actual implementation.
@@ -291,7 +303,7 @@ class UnoptimizedCompilationJob : public CompilationJob {
virtual Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
Isolate* isolate) = 0;
virtual Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
- OffThreadIsolate* isolate) = 0;
+ LocalIsolate* isolate) = 0;
private:
uintptr_t stack_limit_;
@@ -299,7 +311,6 @@ class UnoptimizedCompilationJob : public CompilationJob {
UnoptimizedCompilationInfo* compilation_info_;
base::TimeDelta time_taken_to_execute_;
base::TimeDelta time_taken_to_finalize_;
- bool can_off_thread_finalize_;
};
// A base class for optimized compilation jobs.
@@ -369,25 +380,15 @@ class FinalizeUnoptimizedCompilationData {
base::TimeDelta time_taken_to_finalize)
: time_taken_to_execute_(time_taken_to_execute),
time_taken_to_finalize_(time_taken_to_finalize),
- function_handle_(function_handle),
- handle_state_(kHandle) {}
+ function_handle_(function_handle) {}
- FinalizeUnoptimizedCompilationData(OffThreadIsolate* isolate,
+ FinalizeUnoptimizedCompilationData(LocalIsolate* isolate,
Handle<SharedFunctionInfo> function_handle,
base::TimeDelta time_taken_to_execute,
- base::TimeDelta time_taken_to_finalize)
- : time_taken_to_execute_(time_taken_to_execute),
- time_taken_to_finalize_(time_taken_to_finalize),
- function_transfer_handle_(isolate->TransferHandle(function_handle)),
- handle_state_(kTransferHandle) {}
+ base::TimeDelta time_taken_to_finalize);
Handle<SharedFunctionInfo> function_handle() const {
- switch (handle_state_) {
- case kHandle:
- return function_handle_;
- case kTransferHandle:
- return function_transfer_handle_.ToHandle();
- }
+ return function_handle_;
}
base::TimeDelta time_taken_to_execute() const {
@@ -400,16 +401,52 @@ class FinalizeUnoptimizedCompilationData {
private:
base::TimeDelta time_taken_to_execute_;
base::TimeDelta time_taken_to_finalize_;
- union {
- Handle<SharedFunctionInfo> function_handle_;
- OffThreadTransferHandle<SharedFunctionInfo> function_transfer_handle_;
- };
- enum { kHandle, kTransferHandle } handle_state_;
+ Handle<SharedFunctionInfo> function_handle_;
};
using FinalizeUnoptimizedCompilationDataList =
std::vector<FinalizeUnoptimizedCompilationData>;
+class DeferredFinalizationJobData {
+ public:
+ DeferredFinalizationJobData(Isolate* isolate,
+ Handle<SharedFunctionInfo> function_handle,
+ std::unique_ptr<UnoptimizedCompilationJob> job) {
+ UNREACHABLE();
+ }
+ DeferredFinalizationJobData(LocalIsolate* isolate,
+ Handle<SharedFunctionInfo> function_handle,
+ std::unique_ptr<UnoptimizedCompilationJob> job);
+
+ Handle<SharedFunctionInfo> function_handle() const {
+ return function_handle_;
+ }
+
+ UnoptimizedCompilationJob* job() const { return job_.get(); }
+
+ private:
+ Handle<SharedFunctionInfo> function_handle_;
+ std::unique_ptr<UnoptimizedCompilationJob> job_;
+};
+
+// A wrapper around an OptimizedCompilationInfo that detaches the Handles from
+// the underlying PersistentHandlesScope and stores them in info_ on
+// destruction.
+class CompilationHandleScope final {
+ public:
+ explicit CompilationHandleScope(Isolate* isolate,
+ OptimizedCompilationInfo* info)
+ : persistent_(isolate), info_(info) {}
+ ~CompilationHandleScope();
+
+ private:
+ PersistentHandlesScope persistent_;
+ OptimizedCompilationInfo* info_;
+};
+
+using DeferredFinalizationJobDataList =
+ std::vector<DeferredFinalizationJobData>;
+
class V8_EXPORT_PRIVATE BackgroundCompileTask {
public:
// Creates a new task that when run will parse and compile the streamed
@@ -435,34 +472,31 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
return info_.get();
}
Parser* parser() { return parser_.get(); }
- UnoptimizedCompilationJob* outer_function_job() {
- return outer_function_job_.get();
- }
- UnoptimizedCompilationJobList* inner_function_jobs() {
- return &inner_function_jobs_;
+ UnoptimizedCompilationJobList* compilation_jobs() {
+ return &compilation_jobs_;
}
UnoptimizedCompileFlags flags() const { return flags_; }
- const UnoptimizedCompileState* compile_state() const {
- return &compile_state_;
- }
+ UnoptimizedCompileState* compile_state() { return &compile_state_; }
LanguageMode language_mode() { return language_mode_; }
bool finalize_on_background_thread() {
return finalize_on_background_thread_;
}
- OffThreadIsolate* off_thread_isolate() { return off_thread_isolate_.get(); }
- MaybeHandle<SharedFunctionInfo> outer_function_sfi() {
- DCHECK_NOT_NULL(off_thread_isolate_);
- return outer_function_sfi_.ToHandle();
- }
- Handle<Script> script() {
- DCHECK_NOT_NULL(off_thread_isolate_);
- return script_.ToHandle();
- }
FinalizeUnoptimizedCompilationDataList*
finalize_unoptimized_compilation_data() {
return &finalize_unoptimized_compilation_data_;
}
+ // Jobs which could not be finalized in the background task, and need to be
+ // finalized on the main thread.
+ DeferredFinalizationJobDataList* jobs_to_retry_finalization_on_main_thread() {
+ return &jobs_to_retry_finalization_on_main_thread_;
+ }
+
+ // Getters for the off-thread finalization results, which create main-thread
+ // handles to the objects.
+ MaybeHandle<SharedFunctionInfo> GetOuterFunctionSfi(Isolate* isolate);
+ Handle<Script> GetScript(Isolate* isolate);
+
private:
// Data needed for parsing, and data needed to be passed between threads
// between parsing and compilation. These need to be initialized before the
@@ -473,17 +507,19 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
std::unique_ptr<Parser> parser_;
// Data needed for finalizing compilation after background compilation.
- std::unique_ptr<UnoptimizedCompilationJob> outer_function_job_;
- UnoptimizedCompilationJobList inner_function_jobs_;
+ UnoptimizedCompilationJobList compilation_jobs_;
// Data needed for merging onto the main thread after background finalization.
// TODO(leszeks): When these are available, the above fields are not. We
// should add some stricter type-safety or DCHECKs to ensure that the user of
// the task knows this.
- std::unique_ptr<OffThreadIsolate> off_thread_isolate_;
- OffThreadTransferMaybeHandle<SharedFunctionInfo> outer_function_sfi_;
- OffThreadTransferHandle<Script> script_;
+ Isolate* isolate_for_local_isolate_;
+ std::unique_ptr<PersistentHandles> persistent_handles_;
+ MaybeHandle<SharedFunctionInfo> outer_function_sfi_;
+ Handle<Script> script_;
+ IsCompiledScope is_compiled_scope_;
FinalizeUnoptimizedCompilationDataList finalize_unoptimized_compilation_data_;
+ DeferredFinalizationJobDataList jobs_to_retry_finalization_on_main_thread_;
// Single function data for top-level function compilation.
int start_position_;
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index 14c94ebae9..eef98f77e7 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -27,7 +27,7 @@ enum CpuFeature {
POPCNT,
ATOM,
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
+#elif V8_TARGET_ARCH_ARM
// - Standard configurations. The baseline is ARMv6+VFPv2.
ARMv7, // ARMv7-A + VFPv3-D32 + NEON
ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV
@@ -39,6 +39,9 @@ enum CpuFeature {
VFP32DREGS = ARMv7,
SUDIV = ARMv7_SUDIV,
+#elif V8_TARGET_ARCH_ARM64
+ JSCVT,
+
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
FPU,
FP64FPU,
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 5c2c63e816..ba71702e7c 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -11,21 +11,21 @@
#include "src/date/date.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/heap/heap.h"
-#include "src/logging/counters.h"
-#include "src/numbers/hash-seed-inl.h"
-#include "src/objects/elements.h"
-#include "src/objects/ordered-hash-table.h"
-// For IncrementalMarking::RecordWriteFromCode. TODO(jkummerow): Drop.
#include "src/execution/isolate.h"
#include "src/execution/microtask-queue.h"
#include "src/execution/simulator-base.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
+#include "src/logging/counters.h"
#include "src/logging/log.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/numbers/math-random.h"
+#include "src/objects/elements.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/ordered-hash-table.h"
+#include "src/regexp/experimental/experimental.h"
#include "src/regexp/regexp-interpreter.h"
#include "src/regexp/regexp-macro-assembler-arch.h"
#include "src/regexp/regexp-stack.h"
@@ -214,8 +214,8 @@ struct IsValidExternalReferenceType<Result (Class::*)(Args...)> {
return ExternalReference(Redirect(FUNCTION_ADDR(Target), Type)); \
}
-FUNCTION_REFERENCE(incremental_marking_record_write_function,
- IncrementalMarking::RecordWriteFromCode)
+FUNCTION_REFERENCE(write_barrier_marking_from_code_function,
+ WriteBarrier::MarkingFromCode)
FUNCTION_REFERENCE(insert_remembered_set_function,
Heap::InsertIntoRememberedSetFromCode)
@@ -277,6 +277,14 @@ FUNCTION_REFERENCE(wasm_float32_to_int64, wasm::float32_to_int64_wrapper)
FUNCTION_REFERENCE(wasm_float32_to_uint64, wasm::float32_to_uint64_wrapper)
FUNCTION_REFERENCE(wasm_float64_to_int64, wasm::float64_to_int64_wrapper)
FUNCTION_REFERENCE(wasm_float64_to_uint64, wasm::float64_to_uint64_wrapper)
+FUNCTION_REFERENCE(wasm_float32_to_int64_sat,
+ wasm::float32_to_int64_sat_wrapper)
+FUNCTION_REFERENCE(wasm_float32_to_uint64_sat,
+ wasm::float32_to_uint64_sat_wrapper)
+FUNCTION_REFERENCE(wasm_float64_to_int64_sat,
+ wasm::float64_to_int64_sat_wrapper)
+FUNCTION_REFERENCE(wasm_float64_to_uint64_sat,
+ wasm::float64_to_uint64_sat_wrapper)
FUNCTION_REFERENCE(wasm_int64_div, wasm::int64_div_wrapper)
FUNCTION_REFERENCE(wasm_int64_mod, wasm::int64_mod_wrapper)
FUNCTION_REFERENCE(wasm_uint64_div, wasm::uint64_div_wrapper)
@@ -289,6 +297,14 @@ FUNCTION_REFERENCE(wasm_word32_rol, wasm::word32_rol_wrapper)
FUNCTION_REFERENCE(wasm_word32_ror, wasm::word32_ror_wrapper)
FUNCTION_REFERENCE(wasm_word64_rol, wasm::word64_rol_wrapper)
FUNCTION_REFERENCE(wasm_word64_ror, wasm::word64_ror_wrapper)
+FUNCTION_REFERENCE(wasm_f64x2_ceil, wasm::f64x2_ceil_wrapper)
+FUNCTION_REFERENCE(wasm_f64x2_floor, wasm::f64x2_floor_wrapper)
+FUNCTION_REFERENCE(wasm_f64x2_trunc, wasm::f64x2_trunc_wrapper)
+FUNCTION_REFERENCE(wasm_f64x2_nearest_int, wasm::f64x2_nearest_int_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_ceil, wasm::f32x4_ceil_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_floor, wasm::f32x4_floor_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_trunc, wasm::f32x4_trunc_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_nearest_int, wasm::f32x4_nearest_int_wrapper)
FUNCTION_REFERENCE(wasm_memory_init, wasm::memory_init_wrapper)
FUNCTION_REFERENCE(wasm_memory_copy, wasm::memory_copy_wrapper)
FUNCTION_REFERENCE(wasm_memory_fill, wasm::memory_fill_wrapper)
@@ -408,6 +424,18 @@ ExternalReference ExternalReference::address_of_runtime_stats_flag() {
return ExternalReference(&TracingFlags::runtime_stats);
}
+ExternalReference ExternalReference::address_of_load_from_stack_count(
+ const char* function_name) {
+ return ExternalReference(
+ Isolate::load_from_stack_count_address(function_name));
+}
+
+ExternalReference ExternalReference::address_of_store_to_stack_count(
+ const char* function_name) {
+ return ExternalReference(
+ Isolate::store_to_stack_count_address(function_name));
+}
+
ExternalReference ExternalReference::address_of_one_half() {
return ExternalReference(
reinterpret_cast<Address>(&double_one_half_constant));
@@ -484,12 +512,19 @@ FUNCTION_REFERENCE_WITH_ISOLATE(re_check_stack_guard_state, re_stack_check_func)
FUNCTION_REFERENCE_WITH_ISOLATE(re_grow_stack,
NativeRegExpMacroAssembler::GrowStack)
-FUNCTION_REFERENCE_WITH_ISOLATE(re_match_for_call_from_js,
- IrregexpInterpreter::MatchForCallFromJs)
+FUNCTION_REFERENCE(re_match_for_call_from_js,
+ IrregexpInterpreter::MatchForCallFromJs)
+
+FUNCTION_REFERENCE(re_experimental_match_for_call_from_js,
+ ExperimentalRegExp::MatchForCallFromJs)
+
+FUNCTION_REFERENCE_WITH_ISOLATE(
+ re_case_insensitive_compare_unicode,
+ NativeRegExpMacroAssembler::CaseInsensitiveCompareUnicode)
FUNCTION_REFERENCE_WITH_ISOLATE(
- re_case_insensitive_compare_uc16,
- NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)
+ re_case_insensitive_compare_non_unicode,
+ NativeRegExpMacroAssembler::CaseInsensitiveCompareNonUnicode)
ExternalReference ExternalReference::re_word_character_map(Isolate* isolate) {
return ExternalReference(
@@ -640,8 +675,8 @@ FUNCTION_REFERENCE(copy_fast_number_jsarray_elements_to_typed_array,
FUNCTION_REFERENCE(copy_typed_array_elements_to_typed_array,
CopyTypedArrayElementsToTypedArray)
FUNCTION_REFERENCE(copy_typed_array_elements_slice, CopyTypedArrayElementsSlice)
-FUNCTION_REFERENCE(try_internalize_string_function,
- StringTable::LookupStringIfExists_NoAllocate)
+FUNCTION_REFERENCE(try_string_to_index_or_lookup_existing,
+ StringTable::TryStringToIndexOrLookupExisting)
FUNCTION_REFERENCE(string_to_array_index_function, String::ToArrayIndex)
static Address LexicographicCompareWrapper(Isolate* isolate, Address smi_x,
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index f42a7d7486..eaadc6fbad 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -77,146 +77,162 @@ class StatsCounter;
V(address_of_regexp_stack_memory_top_address, \
"RegExpStack::memory_top_address_address()") \
V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector") \
- V(re_case_insensitive_compare_uc16, \
- "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()") \
+ V(re_case_insensitive_compare_unicode, \
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareUnicode()") \
+ V(re_case_insensitive_compare_non_unicode, \
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareNonUnicode()") \
V(re_check_stack_guard_state, \
"RegExpMacroAssembler*::CheckStackGuardState()") \
V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()") \
- V(re_match_for_call_from_js, "IrregexpInterpreter::MatchForCallFromJs") \
V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map")
-#define EXTERNAL_REFERENCE_LIST(V) \
- V(abort_with_reason, "abort_with_reason") \
- V(address_of_double_abs_constant, "double_absolute_constant") \
- V(address_of_double_neg_constant, "double_negate_constant") \
- V(address_of_float_abs_constant, "float_absolute_constant") \
- V(address_of_float_neg_constant, "float_negate_constant") \
- V(address_of_min_int, "LDoubleConstant::min_int") \
- V(address_of_mock_arraybuffer_allocator_flag, \
- "FLAG_mock_arraybuffer_allocator") \
- V(address_of_one_half, "LDoubleConstant::one_half") \
- V(address_of_runtime_stats_flag, "TracingFlags::runtime_stats") \
- V(address_of_the_hole_nan, "the_hole_nan") \
- V(address_of_uint32_bias, "uint32_bias") \
- V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address") \
- V(check_object_type, "check_object_type") \
- V(compute_integer_hash, "ComputeSeededHash") \
- V(compute_output_frames_function, "Deoptimizer::ComputeOutputFrames()") \
- V(copy_fast_number_jsarray_elements_to_typed_array, \
- "copy_fast_number_jsarray_elements_to_typed_array") \
- V(copy_typed_array_elements_slice, "copy_typed_array_elements_slice") \
- V(copy_typed_array_elements_to_typed_array, \
- "copy_typed_array_elements_to_typed_array") \
- V(cpu_features, "cpu_features") \
- V(delete_handle_scope_extensions, "HandleScope::DeleteExtensions") \
- V(ephemeron_key_write_barrier_function, \
- "Heap::EphemeronKeyWriteBarrierFromCode") \
- V(f64_acos_wrapper_function, "f64_acos_wrapper") \
- V(f64_asin_wrapper_function, "f64_asin_wrapper") \
- V(f64_mod_wrapper_function, "f64_mod_wrapper") \
- V(get_date_field_function, "JSDate::GetField") \
- V(get_or_create_hash_raw, "get_or_create_hash_raw") \
- V(ieee754_acos_function, "base::ieee754::acos") \
- V(ieee754_acosh_function, "base::ieee754::acosh") \
- V(ieee754_asin_function, "base::ieee754::asin") \
- V(ieee754_asinh_function, "base::ieee754::asinh") \
- V(ieee754_atan_function, "base::ieee754::atan") \
- V(ieee754_atan2_function, "base::ieee754::atan2") \
- V(ieee754_atanh_function, "base::ieee754::atanh") \
- V(ieee754_cbrt_function, "base::ieee754::cbrt") \
- V(ieee754_cos_function, "base::ieee754::cos") \
- V(ieee754_cosh_function, "base::ieee754::cosh") \
- V(ieee754_exp_function, "base::ieee754::exp") \
- V(ieee754_expm1_function, "base::ieee754::expm1") \
- V(ieee754_log_function, "base::ieee754::log") \
- V(ieee754_log10_function, "base::ieee754::log10") \
- V(ieee754_log1p_function, "base::ieee754::log1p") \
- V(ieee754_log2_function, "base::ieee754::log2") \
- V(ieee754_pow_function, "base::ieee754::pow") \
- V(ieee754_sin_function, "base::ieee754::sin") \
- V(ieee754_sinh_function, "base::ieee754::sinh") \
- V(ieee754_tan_function, "base::ieee754::tan") \
- V(ieee754_tanh_function, "base::ieee754::tanh") \
- V(incremental_marking_record_write_function, \
- "IncrementalMarking::RecordWrite") \
- V(insert_remembered_set_function, "Heap::InsertIntoRememberedSetFromCode") \
- V(invalidate_prototype_chains_function, \
- "JSObject::InvalidatePrototypeChains()") \
- V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \
- V(invoke_function_callback, "InvokeFunctionCallback") \
- V(jsarray_array_join_concat_to_sequential_string, \
- "jsarray_array_join_concat_to_sequential_string") \
- V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
- V(libc_memchr_function, "libc_memchr") \
- V(libc_memcpy_function, "libc_memcpy") \
- V(libc_memmove_function, "libc_memmove") \
- V(libc_memset_function, "libc_memset") \
- V(mod_two_doubles_operation, "mod_two_doubles") \
- V(mutable_big_int_absolute_add_and_canonicalize_function, \
- "MutableBigInt_AbsoluteAddAndCanonicalize") \
- V(mutable_big_int_absolute_compare_function, \
- "MutableBigInt_AbsoluteCompare") \
- V(mutable_big_int_absolute_sub_and_canonicalize_function, \
- "MutableBigInt_AbsoluteSubAndCanonicalize") \
- V(new_deoptimizer_function, "Deoptimizer::New()") \
- V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \
- V(printf_function, "printf") \
- V(refill_math_random, "MathRandom::RefillCache") \
- V(search_string_raw_one_one, "search_string_raw_one_one") \
- V(search_string_raw_one_two, "search_string_raw_one_two") \
- V(search_string_raw_two_one, "search_string_raw_two_one") \
- V(search_string_raw_two_two, "search_string_raw_two_two") \
- V(smi_lexicographic_compare_function, "smi_lexicographic_compare_function") \
- V(string_to_array_index_function, "String::ToArrayIndex") \
- V(try_internalize_string_function, "try_internalize_string_function") \
- V(wasm_call_trap_callback_for_testing, \
- "wasm::call_trap_callback_for_testing") \
- V(wasm_f32_ceil, "wasm::f32_ceil_wrapper") \
- V(wasm_f32_floor, "wasm::f32_floor_wrapper") \
- V(wasm_f32_nearest_int, "wasm::f32_nearest_int_wrapper") \
- V(wasm_f32_trunc, "wasm::f32_trunc_wrapper") \
- V(wasm_f64_ceil, "wasm::f64_ceil_wrapper") \
- V(wasm_f64_floor, "wasm::f64_floor_wrapper") \
- V(wasm_f64_nearest_int, "wasm::f64_nearest_int_wrapper") \
- V(wasm_f64_trunc, "wasm::f64_trunc_wrapper") \
- V(wasm_float32_to_int64, "wasm::float32_to_int64_wrapper") \
- V(wasm_float32_to_uint64, "wasm::float32_to_uint64_wrapper") \
- V(wasm_float64_pow, "wasm::float64_pow") \
- V(wasm_float64_to_int64, "wasm::float64_to_int64_wrapper") \
- V(wasm_float64_to_uint64, "wasm::float64_to_uint64_wrapper") \
- V(wasm_int64_div, "wasm::int64_div") \
- V(wasm_int64_mod, "wasm::int64_mod") \
- V(wasm_int64_to_float32, "wasm::int64_to_float32_wrapper") \
- V(wasm_int64_to_float64, "wasm::int64_to_float64_wrapper") \
- V(wasm_uint64_div, "wasm::uint64_div") \
- V(wasm_uint64_mod, "wasm::uint64_mod") \
- V(wasm_uint64_to_float32, "wasm::uint64_to_float32_wrapper") \
- V(wasm_uint64_to_float64, "wasm::uint64_to_float64_wrapper") \
- V(wasm_word32_ctz, "wasm::word32_ctz") \
- V(wasm_word32_popcnt, "wasm::word32_popcnt") \
- V(wasm_word32_rol, "wasm::word32_rol") \
- V(wasm_word32_ror, "wasm::word32_ror") \
- V(wasm_word64_rol, "wasm::word64_rol") \
- V(wasm_word64_ror, "wasm::word64_ror") \
- V(wasm_word64_ctz, "wasm::word64_ctz") \
- V(wasm_word64_popcnt, "wasm::word64_popcnt") \
- V(wasm_memory_init, "wasm::memory_init") \
- V(wasm_memory_copy, "wasm::memory_copy") \
- V(wasm_memory_fill, "wasm::memory_fill") \
- V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
- V(call_enter_context_function, "call_enter_context_function") \
- V(atomic_pair_load_function, "atomic_pair_load_function") \
- V(atomic_pair_store_function, "atomic_pair_store_function") \
- V(atomic_pair_add_function, "atomic_pair_add_function") \
- V(atomic_pair_sub_function, "atomic_pair_sub_function") \
- V(atomic_pair_and_function, "atomic_pair_and_function") \
- V(atomic_pair_or_function, "atomic_pair_or_function") \
- V(atomic_pair_xor_function, "atomic_pair_xor_function") \
- V(atomic_pair_exchange_function, "atomic_pair_exchange_function") \
- V(atomic_pair_compare_exchange_function, \
- "atomic_pair_compare_exchange_function") \
- V(js_finalization_registry_remove_cell_from_unregister_token_map, \
- "JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap") \
+#define EXTERNAL_REFERENCE_LIST(V) \
+ V(abort_with_reason, "abort_with_reason") \
+ V(address_of_double_abs_constant, "double_absolute_constant") \
+ V(address_of_double_neg_constant, "double_negate_constant") \
+ V(address_of_float_abs_constant, "float_absolute_constant") \
+ V(address_of_float_neg_constant, "float_negate_constant") \
+ V(address_of_min_int, "LDoubleConstant::min_int") \
+ V(address_of_mock_arraybuffer_allocator_flag, \
+ "FLAG_mock_arraybuffer_allocator") \
+ V(address_of_one_half, "LDoubleConstant::one_half") \
+ V(address_of_runtime_stats_flag, "TracingFlags::runtime_stats") \
+ V(address_of_the_hole_nan, "the_hole_nan") \
+ V(address_of_uint32_bias, "uint32_bias") \
+ V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address") \
+ V(check_object_type, "check_object_type") \
+ V(compute_integer_hash, "ComputeSeededHash") \
+ V(compute_output_frames_function, "Deoptimizer::ComputeOutputFrames()") \
+ V(copy_fast_number_jsarray_elements_to_typed_array, \
+ "copy_fast_number_jsarray_elements_to_typed_array") \
+ V(copy_typed_array_elements_slice, "copy_typed_array_elements_slice") \
+ V(copy_typed_array_elements_to_typed_array, \
+ "copy_typed_array_elements_to_typed_array") \
+ V(cpu_features, "cpu_features") \
+ V(delete_handle_scope_extensions, "HandleScope::DeleteExtensions") \
+ V(ephemeron_key_write_barrier_function, \
+ "Heap::EphemeronKeyWriteBarrierFromCode") \
+ V(f64_acos_wrapper_function, "f64_acos_wrapper") \
+ V(f64_asin_wrapper_function, "f64_asin_wrapper") \
+ V(f64_mod_wrapper_function, "f64_mod_wrapper") \
+ V(get_date_field_function, "JSDate::GetField") \
+ V(get_or_create_hash_raw, "get_or_create_hash_raw") \
+ V(ieee754_acos_function, "base::ieee754::acos") \
+ V(ieee754_acosh_function, "base::ieee754::acosh") \
+ V(ieee754_asin_function, "base::ieee754::asin") \
+ V(ieee754_asinh_function, "base::ieee754::asinh") \
+ V(ieee754_atan_function, "base::ieee754::atan") \
+ V(ieee754_atan2_function, "base::ieee754::atan2") \
+ V(ieee754_atanh_function, "base::ieee754::atanh") \
+ V(ieee754_cbrt_function, "base::ieee754::cbrt") \
+ V(ieee754_cos_function, "base::ieee754::cos") \
+ V(ieee754_cosh_function, "base::ieee754::cosh") \
+ V(ieee754_exp_function, "base::ieee754::exp") \
+ V(ieee754_expm1_function, "base::ieee754::expm1") \
+ V(ieee754_log_function, "base::ieee754::log") \
+ V(ieee754_log10_function, "base::ieee754::log10") \
+ V(ieee754_log1p_function, "base::ieee754::log1p") \
+ V(ieee754_log2_function, "base::ieee754::log2") \
+ V(ieee754_pow_function, "base::ieee754::pow") \
+ V(ieee754_sin_function, "base::ieee754::sin") \
+ V(ieee754_sinh_function, "base::ieee754::sinh") \
+ V(ieee754_tan_function, "base::ieee754::tan") \
+ V(ieee754_tanh_function, "base::ieee754::tanh") \
+ V(insert_remembered_set_function, "Heap::InsertIntoRememberedSetFromCode") \
+ V(invalidate_prototype_chains_function, \
+ "JSObject::InvalidatePrototypeChains()") \
+ V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \
+ V(invoke_function_callback, "InvokeFunctionCallback") \
+ V(jsarray_array_join_concat_to_sequential_string, \
+ "jsarray_array_join_concat_to_sequential_string") \
+ V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
+ V(libc_memchr_function, "libc_memchr") \
+ V(libc_memcpy_function, "libc_memcpy") \
+ V(libc_memmove_function, "libc_memmove") \
+ V(libc_memset_function, "libc_memset") \
+ V(mod_two_doubles_operation, "mod_two_doubles") \
+ V(mutable_big_int_absolute_add_and_canonicalize_function, \
+ "MutableBigInt_AbsoluteAddAndCanonicalize") \
+ V(mutable_big_int_absolute_compare_function, \
+ "MutableBigInt_AbsoluteCompare") \
+ V(mutable_big_int_absolute_sub_and_canonicalize_function, \
+ "MutableBigInt_AbsoluteSubAndCanonicalize") \
+ V(new_deoptimizer_function, "Deoptimizer::New()") \
+ V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \
+ V(printf_function, "printf") \
+ V(refill_math_random, "MathRandom::RefillCache") \
+ V(search_string_raw_one_one, "search_string_raw_one_one") \
+ V(search_string_raw_one_two, "search_string_raw_one_two") \
+ V(search_string_raw_two_one, "search_string_raw_two_one") \
+ V(search_string_raw_two_two, "search_string_raw_two_two") \
+ V(smi_lexicographic_compare_function, "smi_lexicographic_compare_function") \
+ V(string_to_array_index_function, "String::ToArrayIndex") \
+ V(try_string_to_index_or_lookup_existing, \
+ "try_string_to_index_or_lookup_existing") \
+ V(wasm_call_trap_callback_for_testing, \
+ "wasm::call_trap_callback_for_testing") \
+ V(wasm_f32_ceil, "wasm::f32_ceil_wrapper") \
+ V(wasm_f32_floor, "wasm::f32_floor_wrapper") \
+ V(wasm_f32_nearest_int, "wasm::f32_nearest_int_wrapper") \
+ V(wasm_f32_trunc, "wasm::f32_trunc_wrapper") \
+ V(wasm_f64_ceil, "wasm::f64_ceil_wrapper") \
+ V(wasm_f64_floor, "wasm::f64_floor_wrapper") \
+ V(wasm_f64_nearest_int, "wasm::f64_nearest_int_wrapper") \
+ V(wasm_f64_trunc, "wasm::f64_trunc_wrapper") \
+ V(wasm_float32_to_int64, "wasm::float32_to_int64_wrapper") \
+ V(wasm_float32_to_uint64, "wasm::float32_to_uint64_wrapper") \
+ V(wasm_float32_to_int64_sat, "wasm::float32_to_int64_sat_wrapper") \
+ V(wasm_float32_to_uint64_sat, "wasm::float32_to_uint64_sat_wrapper") \
+ V(wasm_float64_pow, "wasm::float64_pow") \
+ V(wasm_float64_to_int64, "wasm::float64_to_int64_wrapper") \
+ V(wasm_float64_to_uint64, "wasm::float64_to_uint64_wrapper") \
+ V(wasm_float64_to_int64_sat, "wasm::float64_to_int64_sat_wrapper") \
+ V(wasm_float64_to_uint64_sat, "wasm::float64_to_uint64_sat_wrapper") \
+ V(wasm_int64_div, "wasm::int64_div") \
+ V(wasm_int64_mod, "wasm::int64_mod") \
+ V(wasm_int64_to_float32, "wasm::int64_to_float32_wrapper") \
+ V(wasm_int64_to_float64, "wasm::int64_to_float64_wrapper") \
+ V(wasm_uint64_div, "wasm::uint64_div") \
+ V(wasm_uint64_mod, "wasm::uint64_mod") \
+ V(wasm_uint64_to_float32, "wasm::uint64_to_float32_wrapper") \
+ V(wasm_uint64_to_float64, "wasm::uint64_to_float64_wrapper") \
+ V(wasm_word32_ctz, "wasm::word32_ctz") \
+ V(wasm_word32_popcnt, "wasm::word32_popcnt") \
+ V(wasm_word32_rol, "wasm::word32_rol") \
+ V(wasm_word32_ror, "wasm::word32_ror") \
+ V(wasm_word64_rol, "wasm::word64_rol") \
+ V(wasm_word64_ror, "wasm::word64_ror") \
+ V(wasm_word64_ctz, "wasm::word64_ctz") \
+ V(wasm_word64_popcnt, "wasm::word64_popcnt") \
+ V(wasm_f64x2_ceil, "wasm::f64x2_ceil_wrapper") \
+ V(wasm_f64x2_floor, "wasm::f64x2_floor_wrapper") \
+ V(wasm_f64x2_trunc, "wasm::f64x2_trunc_wrapper") \
+ V(wasm_f64x2_nearest_int, "wasm::f64x2_nearest_int_wrapper") \
+ V(wasm_f32x4_ceil, "wasm::f32x4_ceil_wrapper") \
+ V(wasm_f32x4_floor, "wasm::f32x4_floor_wrapper") \
+ V(wasm_f32x4_trunc, "wasm::f32x4_trunc_wrapper") \
+ V(wasm_f32x4_nearest_int, "wasm::f32x4_nearest_int_wrapper") \
+ V(wasm_memory_init, "wasm::memory_init") \
+ V(wasm_memory_copy, "wasm::memory_copy") \
+ V(wasm_memory_fill, "wasm::memory_fill") \
+ V(write_barrier_marking_from_code_function, "WriteBarrier::MarkingFromCode") \
+ V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
+ V(call_enter_context_function, "call_enter_context_function") \
+ V(atomic_pair_load_function, "atomic_pair_load_function") \
+ V(atomic_pair_store_function, "atomic_pair_store_function") \
+ V(atomic_pair_add_function, "atomic_pair_add_function") \
+ V(atomic_pair_sub_function, "atomic_pair_sub_function") \
+ V(atomic_pair_and_function, "atomic_pair_and_function") \
+ V(atomic_pair_or_function, "atomic_pair_or_function") \
+ V(atomic_pair_xor_function, "atomic_pair_xor_function") \
+ V(atomic_pair_exchange_function, "atomic_pair_exchange_function") \
+ V(atomic_pair_compare_exchange_function, \
+ "atomic_pair_compare_exchange_function") \
+ V(js_finalization_registry_remove_cell_from_unregister_token_map, \
+ "JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap") \
+ V(re_match_for_call_from_js, "IrregexpInterpreter::MatchForCallFromJs") \
+ V(re_experimental_match_for_call_from_js, \
+ "ExperimentalRegExp::MatchForCallFromJs") \
EXTERNAL_REFERENCE_LIST_INTL(V)
#ifdef V8_INTL_SUPPORT
@@ -311,6 +327,11 @@ class ExternalReference {
V8_EXPORT_PRIVATE V8_NOINLINE static ExternalReference
runtime_function_table_address_for_unittests(Isolate* isolate);
+ static V8_EXPORT_PRIVATE ExternalReference
+ address_of_load_from_stack_count(const char* function_name);
+ static V8_EXPORT_PRIVATE ExternalReference
+ address_of_store_to_stack_count(const char* function_name);
+
Address address() const { return address_; }
private:
diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc
index fcfb08cb7d..fb49f9fa70 100644
--- a/deps/v8/src/codegen/handler-table.cc
+++ b/deps/v8/src/codegen/handler-table.cc
@@ -11,13 +11,18 @@
#include "src/codegen/assembler-inl.h"
#include "src/objects/code-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
HandlerTable::HandlerTable(Code code)
- : HandlerTable(code.InstructionStart() + code.handler_table_offset(),
- code.handler_table_size(), kReturnAddressBasedEncoding) {}
+ : HandlerTable(code.HandlerTableAddress(), code.handler_table_size(),
+ kReturnAddressBasedEncoding) {}
+
+HandlerTable::HandlerTable(const wasm::WasmCode* code)
+ : HandlerTable(code->handler_table(), code->handler_table_size(),
+ kReturnAddressBasedEncoding) {}
HandlerTable::HandlerTable(BytecodeArray bytecode_array)
: HandlerTable(bytecode_array.handler_table()) {}
diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h
index cf88724682..0445b68da9 100644
--- a/deps/v8/src/codegen/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -16,6 +16,10 @@ class Assembler;
class ByteArray;
class BytecodeArray;
+namespace wasm {
+class WasmCode;
+} // namespace wasm
+
// HandlerTable is a byte array containing entries for exception handlers in
// the code object it is associated with. The tables come in two flavors:
// 1) Based on ranges: Used for unoptimized code. Stored in a {ByteArray} that
@@ -54,6 +58,7 @@ class V8_EXPORT_PRIVATE HandlerTable {
// Constructors for the various encodings.
explicit HandlerTable(Code code);
explicit HandlerTable(ByteArray byte_array);
+ explicit HandlerTable(const wasm::WasmCode* code);
explicit HandlerTable(BytecodeArray bytecode_array);
HandlerTable(Address handler_table, int handler_table_size,
EncodingMode encoding_mode);
@@ -106,18 +111,18 @@ class V8_EXPORT_PRIVATE HandlerTable {
int GetReturnHandler(int index) const;
// Number of entries in the loaded handler table.
- int number_of_entries_;
+ const int number_of_entries_;
#ifdef DEBUG
// The encoding mode of the table. Mostly useful for debugging to check that
// used accessors and constructors fit together.
- EncodingMode mode_;
+ const EncodingMode mode_;
#endif
- // Direct pointer into the encoded data. This pointer points into objects on
- // the GC heap (either {ByteArray} or {Code}) and hence would become stale
- // during a collection. Hence we disallow any allocation.
- Address raw_encoded_data_;
+ // Direct pointer into the encoded data. This pointer potentially points into
+ // objects on the GC heap (either {ByteArray} or {Code}) and could become
+ // stale during a collection. Hence we disallow any allocation.
+ const Address raw_encoded_data_;
DISALLOW_HEAP_ALLOCATION(no_gc_)
// Layout description for handler table based on ranges.
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 551750936d..321a59cede 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -691,6 +691,29 @@ void Assembler::stos() {
EMIT(0xAB);
}
+void Assembler::xadd(Operand dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC1);
+ emit_operand(src, dst);
+}
+
+void Assembler::xadd_b(Operand dst, Register src) {
+ DCHECK(src.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC0);
+ emit_operand(src, dst);
+}
+
+void Assembler::xadd_w(Operand dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xC1);
+ emit_operand(src, dst);
+}
+
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (src == eax || dst == eax) { // Single-byte encoding.
@@ -2246,6 +2269,30 @@ void Assembler::ucomisd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::roundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x08);
+ emit_sse_operand(dst, src);
+ // Mask precision exception.
+ EMIT(static_cast<byte>(mode) | 0x8);
+}
+
+void Assembler::roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x09);
+ emit_sse_operand(dst, src);
+ // Mask precision exception.
+ EMIT(static_cast<byte>(mode) | 0x8);
+}
+
void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2921,6 +2968,15 @@ void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
EMIT(offset);
}
+void Assembler::vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x08, dst, xmm0, Operand(src), k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
+void Assembler::vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x09, dst, xmm0, Operand(src), k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
+
void Assembler::vmovmskps(Register dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 60d978df5b..ab26d36376 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -528,6 +528,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void rep_stos();
void stos();
+ void xadd(Operand dst, Register src);
+ void xadd_b(Operand dst, Register src);
+ void xadd_w(Operand dst, Register src);
+
// Exchange
void xchg(Register dst, Register src);
void xchg(Register dst, Operand src);
@@ -959,6 +963,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movapd(XMMRegister dst, Operand src) {
sse2_instr(dst, src, 0x66, 0x0F, 0x28);
}
+ void movupd(XMMRegister dst, Operand src) {
+ sse2_instr(dst, src, 0x66, 0x0F, 0x10);
+ }
void movmskpd(Register dst, XMMRegister src);
void movmskps(Register dst, XMMRegister src);
@@ -1064,6 +1071,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void pinsrd(XMMRegister dst, Operand src, uint8_t offset);
+ void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
// AVX instructions
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd132sd(dst, src1, Operand(src2));
@@ -1331,6 +1341,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovapd(XMMRegister dst, Operand src) { vpd(0x28, dst, xmm0, src); }
void vmovups(XMMRegister dst, XMMRegister src) { vmovups(dst, Operand(src)); }
void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
+ void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
vshufps(dst, src1, Operand(src2), imm8);
}
@@ -1409,6 +1420,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
+ void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
vcvtdq2ps(dst, Operand(src));
}
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
index 8b1ea8d880..9b96dc1d8c 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
@@ -49,11 +49,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
- return edi;
-}
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return eax; }
-
const Register LoadDescriptor::ReceiverRegister() { return edx; }
const Register LoadDescriptor::NameRegister() { return ecx; }
const Register LoadDescriptor::SlotRegister() { return eax; }
@@ -195,12 +190,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
@@ -312,6 +301,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index b73050a680..8b1cc91298 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -597,6 +597,28 @@ void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
add(dst, Immediate(0x80000000));
}
+void TurboAssembler::Roundps(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundps(dst, src, mode);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ roundps(dst, src, mode);
+ }
+}
+
+void TurboAssembler::Roundpd(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundpd(dst, src, mode);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ roundpd(dst, src, mode);
+ }
+}
+
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
@@ -2045,9 +2067,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ test_b(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
} else {
- test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ test(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
}
j(cc, condition_met, condition_met_distance);
}
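A short usage sketch for the Roundps/Roundpd helpers defined above; kRoundDown and kRoundToZero are assumed to be the RoundingMode constants declared with the assembler. Callers do not need to check CPU features themselves because the helper dispatches between the AVX and SSE4.1 forms:

    // Hypothetical code-generation snippet; `tasm` is a TurboAssembler.
    tasm.Roundps(xmm1, xmm2, kRoundDown);    // per-lane floor of four floats
    tasm.Roundpd(xmm1, xmm2, kRoundToZero);  // per-lane truncation of two doubles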
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 94ddb2f784..ef26309a2b 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -286,12 +286,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
+ AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, XMMRegister)
+ AVX_OP2_WITH_TYPE(Cvttps2dq, cvttps2dq, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtps, sqrtps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, const Operand&)
AVX_OP2_WITH_TYPE(Movaps, movaps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Movupd, movupd, XMMRegister, const Operand&)
AVX_OP2_WITH_TYPE(Pmovmskb, pmovmskb, Register, XMMRegister)
AVX_OP2_WITH_TYPE(Movmskps, movmskps, Register, XMMRegister)
@@ -319,6 +322,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Pcmpeqb, pcmpeqb)
AVX_OP3_XO(Pcmpeqw, pcmpeqw)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
+ AVX_OP3_XO(Por, por)
AVX_OP3_XO(Psubb, psubb)
AVX_OP3_XO(Psubw, psubw)
AVX_OP3_XO(Psubd, psubd)
@@ -357,6 +361,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
+ AVX_PACKED_OP3(Addps, addps)
AVX_PACKED_OP3(Addpd, addpd)
AVX_PACKED_OP3(Subps, subps)
AVX_PACKED_OP3(Subpd, subpd)
@@ -365,6 +370,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Cmpeqpd, cmpeqpd)
AVX_PACKED_OP3(Cmpneqpd, cmpneqpd)
AVX_PACKED_OP3(Cmpltpd, cmpltpd)
+ AVX_PACKED_OP3(Cmpleps, cmpleps)
AVX_PACKED_OP3(Cmplepd, cmplepd)
AVX_PACKED_OP3(Minps, minps)
AVX_PACKED_OP3(Minpd, minpd)
@@ -380,6 +386,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Psrlq, psrlq)
AVX_PACKED_OP3(Psraw, psraw)
AVX_PACKED_OP3(Psrad, psrad)
+ AVX_PACKED_OP3(Pmaddwd, pmaddwd)
+ AVX_PACKED_OP3(Paddd, paddd)
AVX_PACKED_OP3(Paddq, paddq)
AVX_PACKED_OP3(Psubq, psubq)
AVX_PACKED_OP3(Pmuludq, pmuludq)
@@ -444,6 +452,30 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef AVX_OP2_WITH_TYPE_SCOPE
#undef AVX_OP2_XO_SSE4
+#define AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
+ sse_scope) \
+ void macro_name(dst_type dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, dst, src); \
+ return; \
+ } \
+ if (CpuFeatures::IsSupported(sse_scope)) { \
+ CpuFeatureScope scope(this, sse_scope); \
+ name(dst, src); \
+ return; \
+ } \
+ UNREACHABLE(); \
+ }
+#define AVX_OP3_XO_SSE4(macro_name, name) \
+ AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
+ AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
+
+ AVX_OP3_XO_SSE4(Pmaxsd, pmaxsd)
+
+#undef AVX_OP3_XO_SSE4
+#undef AVX_OP3_WITH_TYPE_SCOPE
+
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
void Pshufb(XMMRegister dst, Operand src);
void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
@@ -506,6 +538,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void Cvttsd2ui(Register dst, Operand src, XMMRegister tmp);
+ void Roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void Roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
void Push(Register src) { push(src); }
void Push(Operand src) { push(src); }
void Push(Immediate value);
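For reference, the new AVX_OP3_WITH_TYPE_SCOPE / AVX_OP3_XO_SSE4 macros above expand Pmaxsd into roughly the following wrapper (a hand expansion of the macro body shown in the hunk):

    void Pmaxsd(XMMRegister dst, XMMRegister src) {
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(this, AVX);
        vpmaxsd(dst, dst, src);  // three-operand AVX form; dst doubles as src1
        return;
      }
      if (CpuFeatures::IsSupported(SSE4_1)) {
        CpuFeatureScope scope(this, SSE4_1);
        pmaxsd(dst, src);
        return;
      }
      UNREACHABLE();  // pmaxsd needs SSE4.1 or AVX
    }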
diff --git a/deps/v8/src/codegen/ia32/sse-instr.h b/deps/v8/src/codegen/ia32/sse-instr.h
index b8a7a3c827..a56dc13361 100644
--- a/deps/v8/src/codegen/ia32/sse-instr.h
+++ b/deps/v8/src/codegen/ia32/sse-instr.h
@@ -9,6 +9,7 @@
V(packsswb, 66, 0F, 63) \
V(packssdw, 66, 0F, 6B) \
V(packuswb, 66, 0F, 67) \
+ V(pmaddwd, 66, 0F, F5) \
V(paddb, 66, 0F, FC) \
V(paddw, 66, 0F, FD) \
V(paddd, 66, 0F, FE) \
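Each V(mnemonic, prefix, escape, opcode) entry in this list is expanded into an emitter that forwards the three encoding bytes to sse2_instr, in the same way movapd does in assembler-ia32.h above. A sketch of the assumed expansion for the new entry:

    void pmaddwd(XMMRegister dst, Operand src) {
      sse2_instr(dst, src, 0x66, 0x0F, 0xF5);  // 66 0F F5 /r: PMADDWD xmm, xmm/m128
    }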
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 503da3cb43..00f774f93c 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -30,10 +30,12 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
void CallInterfaceDescriptorData::InitializePlatformIndependent(
Flags flags, int return_count, int parameter_count,
- const MachineType* machine_types, int machine_types_length) {
+ const MachineType* machine_types, int machine_types_length,
+ StackArgumentOrder stack_order) {
DCHECK(IsInitializedPlatformSpecific());
flags_ = flags;
+ stack_order_ = stack_order;
return_count_ = return_count;
param_count_ = parameter_count;
const int types_length = return_count_ + param_count_;
@@ -83,7 +85,6 @@ void CallDescriptors::InitializeOncePerProcess() {
DCHECK(ContextOnlyDescriptor{}.HasContextParameter());
DCHECK(!NoContextDescriptor{}.HasContextParameter());
DCHECK(!AllocateDescriptor{}.HasContextParameter());
- DCHECK(!AllocateHeapNumberDescriptor{}.HasContextParameter());
DCHECK(!AbortDescriptor{}.HasContextParameter());
DCHECK(!WasmFloat32ToNumberDescriptor{}.HasContextParameter());
DCHECK(!WasmFloat64ToNumberDescriptor{}.HasContextParameter());
@@ -176,12 +177,6 @@ void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
}
-void FastNewFunctionContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ScopeInfoRegister(), SlotsRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {TargetRegister(), NewTargetRegister()};
@@ -347,11 +342,6 @@ void GrowArrayElementsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NewArgumentsElementsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// This descriptor must use the same set of registers as the
@@ -391,44 +381,17 @@ void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
}
#endif // !V8_TARGET_ARCH_IA32
-void WasmTableInitDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
-
-void WasmTableCopyDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
-
-void WasmAtomicNotifyDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
-void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data,
kParameterCount - kStackArgumentsCount);
}
-
-void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
#endif
void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index fc27b46ca1..1f025d37a3 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -20,93 +20,99 @@ namespace internal {
BUILTIN_LIST_FROM_TORQUE(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
IGNORE_BUILTIN, IGNORE_BUILTIN)
-#define INTERFACE_DESCRIPTOR_LIST(V) \
- V(Abort) \
- V(Allocate) \
- V(AllocateHeapNumber) \
- V(ApiCallback) \
- V(ApiGetter) \
- V(ArgumentsAdaptor) \
- V(ArrayConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(AsyncFunctionStackParameter) \
- V(BigIntToI32Pair) \
- V(BigIntToI64) \
- V(BinaryOp) \
- V(BinaryOp_WithFeedback) \
- V(CallForwardVarargs) \
- V(CallFunctionTemplate) \
- V(CallTrampoline) \
- V(CallTrampoline_WithFeedback) \
- V(CallVarargs) \
- V(CallWithArrayLike) \
- V(CallWithSpread) \
- V(CEntry1ArgvOnStack) \
- V(CloneObjectWithVector) \
- V(Compare) \
- V(Compare_WithFeedback) \
- V(ConstructForwardVarargs) \
- V(ConstructStub) \
- V(ConstructVarargs) \
- V(ConstructWithArrayLike) \
- V(Construct_WithFeedback) \
- V(ConstructWithSpread) \
- V(ContextOnly) \
- V(CppBuiltinAdaptor) \
- V(EphemeronKeyBarrier) \
- V(FastNewFunctionContext) \
- V(FastNewObject) \
- V(FrameDropperTrampoline) \
- V(GetIteratorStackParameter) \
- V(GetProperty) \
- V(GrowArrayElements) \
- V(I32PairToBigInt) \
- V(I64ToBigInt) \
- V(InterpreterCEntry1) \
- V(InterpreterCEntry2) \
- V(InterpreterDispatch) \
- V(InterpreterPushArgsThenCall) \
- V(InterpreterPushArgsThenConstruct) \
- V(JSTrampoline) \
- V(Load) \
- V(LoadGlobal) \
- V(LoadGlobalNoFeedback) \
- V(LoadGlobalWithVector) \
- V(LoadNoFeedback) \
- V(LoadWithVector) \
- V(NewArgumentsElements) \
- V(NoContext) \
- V(RecordWrite) \
- V(ResumeGenerator) \
- V(RunMicrotasks) \
- V(RunMicrotasksEntry) \
- V(Store) \
- V(StoreGlobal) \
- V(StoreGlobalWithVector) \
- V(StoreTransition) \
- V(StoreWithVector) \
- V(StringAt) \
- V(StringAtAsString) \
- V(StringSubstring) \
- V(TypeConversion) \
- V(TypeConversionStackParameter) \
- V(Typeof) \
- V(UnaryOp_WithFeedback) \
- V(Void) \
- V(WasmAtomicNotify) \
- V(WasmFloat32ToNumber) \
- V(WasmFloat64ToNumber) \
- V(WasmI32AtomicWait32) \
- V(WasmI32AtomicWait64) \
- V(WasmI64AtomicWait32) \
- V(WasmI64AtomicWait64) \
- V(WasmTableInit) \
- V(WasmTableCopy) \
- BUILTIN_LIST_TFS(V) \
+#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(Abort) \
+ V(Allocate) \
+ V(ApiCallback) \
+ V(ApiGetter) \
+ V(ArgumentsAdaptor) \
+ V(ArrayConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(AsyncFunctionStackParameter) \
+ V(BigIntToI32Pair) \
+ V(BigIntToI64) \
+ V(BinaryOp) \
+ V(BinaryOp_WithFeedback) \
+ V(CallForwardVarargs) \
+ V(CallFunctionTemplate) \
+ V(CallTrampoline) \
+ V(CallTrampoline_WithFeedback) \
+ V(CallVarargs) \
+ V(CallWithArrayLike) \
+ V(CallWithArrayLike_WithFeedback) \
+ V(CallWithSpread) \
+ V(CallWithSpread_WithFeedback) \
+ V(CEntry1ArgvOnStack) \
+ V(CloneObjectWithVector) \
+ V(Compare) \
+ V(Compare_WithFeedback) \
+ V(ConstructForwardVarargs) \
+ V(ConstructStub) \
+ V(ConstructVarargs) \
+ V(ConstructWithArrayLike) \
+ V(ConstructWithArrayLike_WithFeedback) \
+ V(Construct_WithFeedback) \
+ V(ConstructWithSpread) \
+ V(ConstructWithSpread_WithFeedback) \
+ V(ContextOnly) \
+ V(CppBuiltinAdaptor) \
+ V(EphemeronKeyBarrier) \
+ V(FastNewObject) \
+ V(FrameDropperTrampoline) \
+ V(GetIteratorStackParameter) \
+ V(GetProperty) \
+ V(GrowArrayElements) \
+ V(I32PairToBigInt) \
+ V(I64ToBigInt) \
+ V(InterpreterCEntry1) \
+ V(InterpreterCEntry2) \
+ V(InterpreterDispatch) \
+ V(InterpreterPushArgsThenCall) \
+ V(InterpreterPushArgsThenConstruct) \
+ V(JSTrampoline) \
+ V(Load) \
+ V(LoadGlobal) \
+ V(LoadGlobalNoFeedback) \
+ V(LoadGlobalWithVector) \
+ V(LoadNoFeedback) \
+ V(LoadWithVector) \
+ V(NoContext) \
+ V(RecordWrite) \
+ V(ResumeGenerator) \
+ V(RunMicrotasks) \
+ V(RunMicrotasksEntry) \
+ V(Store) \
+ V(StoreGlobal) \
+ V(StoreGlobalWithVector) \
+ V(StoreTransition) \
+ V(StoreWithVector) \
+ V(StringAt) \
+ V(StringAtAsString) \
+ V(StringSubstring) \
+ V(TypeConversion) \
+ V(TypeConversionStackParameter) \
+ V(Typeof) \
+ V(UnaryOp_WithFeedback) \
+ V(Void) \
+ V(WasmFloat32ToNumber) \
+ V(WasmFloat64ToNumber) \
+ V(WasmI32AtomicWait32) \
+ V(WasmI64AtomicWait32) \
+ BUILTIN_LIST_TFS(V) \
TORQUE_BUILTIN_LIST_TFC(V)
+enum class StackArgumentOrder {
+  kDefault,  // Arguments on the stack are pushed in the default/stub order
+             // (the first argument is pushed first).
+  kJS,  // Arguments on the stack are pushed in the same order as for a
+        // JS-to-JS function call. Use this when calling a JSFunction or when
+        // the builtin is expected to be called directly from a JSFunction.
+        // When V8_REVERSE_JSARGS is set, this order is reversed compared to
+        // kDefault.
+};
+
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
public:
enum Flag {
@@ -142,7 +148,8 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
void InitializePlatformIndependent(Flags flags, int return_count,
int parameter_count,
const MachineType* machine_types,
- int machine_types_length);
+ int machine_types_length,
+ StackArgumentOrder stack_order);
void Reset();
@@ -165,6 +172,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
DCHECK_LT(index, param_count_);
return machine_types_[return_count_ + index];
}
+ StackArgumentOrder stack_order() const { return stack_order_; }
void RestrictAllocatableRegisters(const Register* registers, int num) {
DCHECK_EQ(allocatable_registers_, 0);
@@ -199,6 +207,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
int return_count_ = -1;
int param_count_ = -1;
Flags flags_ = kNoFlags;
+ StackArgumentOrder stack_order_ = StackArgumentOrder::kDefault;
// Specifying the set of registers that could be used by the register
// allocator. Currently, it's only used by RecordWrite code stub.
@@ -295,6 +304,10 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
return data()->allocatable_registers();
}
+ StackArgumentOrder GetStackArgumentOrder() const {
+ return data()->stack_order();
+ }
+
static const Register ContextRegister();
const char* DebugName() const;
@@ -314,9 +327,9 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
CallInterfaceDescriptorData* data) {
// Default descriptor configuration: one result, all parameters are passed
// in registers and all parameters have MachineType::AnyTagged() type.
- data->InitializePlatformIndependent(CallInterfaceDescriptorData::kNoFlags,
- 1, data->register_param_count(),
- nullptr, 0);
+ data->InitializePlatformIndependent(
+ CallInterfaceDescriptorData::kNoFlags, 1, data->register_param_count(),
+ nullptr, 0, StackArgumentOrder::kDefault);
}
// Initializes |data| using the platform dependent default set of registers.
@@ -402,7 +415,8 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
override { \
data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
- kParameterCount, nullptr, 0); \
+ kParameterCount, nullptr, 0, \
+ kStackArgumentOrder); \
} \
name(CallDescriptors::Key key) : base(key) {} \
\
@@ -420,9 +434,11 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
\
public:
-#define DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS(flags, return_count, ...) \
+#define DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS(flags, stack_order, \
+ return_count, ...) \
static constexpr int kDescriptorFlags = flags; \
static constexpr int kReturnCount = return_count; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = stack_order; \
enum ParameterIndices { \
__dummy = -1, /* to be able to pass zero arguments */ \
##__VA_ARGS__, \
@@ -431,35 +447,41 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
kContext = kParameterCount /* implicit parameter */ \
};
-#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, return_count, ##__VA_ARGS__)
+#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, \
+ return_count, ##__VA_ARGS__)
// This is valid only for builtins that use EntryFrame, which does not scan
// stack arguments on GC.
-#define DEFINE_PARAMETERS_ENTRY(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kNoContext | \
- CallInterfaceDescriptorData::kNoStackScan; \
- static constexpr int kReturnCount = 1; \
- enum ParameterIndices { \
- __dummy = -1, /* to be able to pass zero arguments */ \
- ##__VA_ARGS__, \
- \
- kParameterCount \
+#define DEFINE_PARAMETERS_ENTRY(...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kNoContext | \
+ CallInterfaceDescriptorData::kNoStackScan; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kDefault; \
+ static constexpr int kReturnCount = 1; \
+ enum ParameterIndices { \
+ __dummy = -1, /* to be able to pass zero arguments */ \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount \
};
-#define DEFINE_PARAMETERS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS(...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, 1, \
+ ##__VA_ARGS__)
-#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoContext, 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kNoContext, StackArgumentOrder::kDefault, \
+ 1, ##__VA_ARGS__)
-#define DEFINE_PARAMETERS_VARARGS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kAllowVarArgs, 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS_VARARGS(...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kAllowVarArgs, StackArgumentOrder::kJS, 1, \
+ ##__VA_ARGS__)
#define DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(flag, ...) \
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
@@ -470,7 +492,7 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
"Parameter names definition is not consistent with parameter types"); \
data->InitializePlatformIndependent( \
Flags(flag | kDescriptorFlags), kReturnCount, kParameterCount, \
- machine_types, arraysize(machine_types)); \
+ machine_types, arraysize(machine_types), kStackArgumentOrder); \
}
#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
@@ -481,18 +503,20 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged() /* result */, \
##__VA_ARGS__)
-#define DEFINE_JS_PARAMETERS(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kAllowVarArgs; \
- static constexpr int kReturnCount = 1; \
- enum ParameterIndices { \
- kTarget, \
- kNewTarget, \
- kActualArgumentsCount, \
- ##__VA_ARGS__, \
- \
- kParameterCount, \
- kContext = kParameterCount /* implicit parameter */ \
+#define DEFINE_JS_PARAMETERS(...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kAllowVarArgs; \
+ static constexpr int kReturnCount = 1; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kJS; \
+ enum ParameterIndices { \
+ kTarget, \
+ kNewTarget, \
+ kActualArgumentsCount, \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount, \
+ kContext = kParameterCount /* implicit parameter */ \
};
#define DEFINE_JS_PARAMETER_TYPES(...) \
@@ -554,7 +578,8 @@ class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
DCHECK_EQ(kReturnCount + kParameterCount, machine_types.size());
data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount,
kParameterCount, machine_types.data(),
- static_cast<int>(machine_types.size()));
+ static_cast<int>(machine_types.size()),
+ StackArgumentOrder::kDefault);
}
};
@@ -811,17 +836,6 @@ class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
#endif
};
-class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kScopeInfo, kSlots)
- DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kScopeInfo
- MachineType::Uint32()) // kSlots
- DECLARE_DESCRIPTOR(FastNewFunctionContextDescriptor, CallInterfaceDescriptor)
-
- static const Register ScopeInfoRegister();
- static const Register SlotsRegister();
-};
-
class FastNewObjectDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget)
@@ -941,13 +955,27 @@ class CallFunctionTemplateDescriptor : public CallInterfaceDescriptor {
class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTarget, kArgumentsCount, kSpread)
+ DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kArgumentsCount
MachineType::AnyTagged()) // kSpread
DECLARE_DESCRIPTOR(CallWithSpreadDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class CallWithSpread_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot,
+ kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::Int32(), // kArgumentsCount
+ MachineType::AnyTagged(), // kSpread
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(CallWithSpread_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kArgumentsList)
@@ -956,6 +984,19 @@ class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class CallWithArrayLike_WithFeedbackDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::AnyTagged(), // kArgumentsList
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(CallWithArrayLike_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ConstructVarargsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_JS_PARAMETERS(kArgumentsLength, kArgumentsList)
@@ -979,6 +1020,20 @@ class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class ConstructWithSpread_WithFeedbackDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+  // Note: kSlot comes before kSpread since, as an untagged value, it must be
+  // passed in a register.
+ DEFINE_JS_PARAMETERS(kSlot, kSpread, kMaybeFeedbackVector)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kSlot
+ MachineType::AnyTagged(), // kSpread
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(ConstructWithSpread_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList)
@@ -988,6 +1043,21 @@ class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ConstructWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class ConstructWithArrayLike_WithFeedbackDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot,
+ kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::AnyTagged(), // kNewTarget
+ MachineType::AnyTagged(), // kArgumentsList
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(ConstructWithArrayLike_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
// TODO(ishell): consider merging this with ArrayConstructorDescriptor
class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
@@ -1006,13 +1076,6 @@ class AbortDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(AbortDescriptor, CallInterfaceDescriptor)
};
-class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT()
- DEFINE_PARAMETER_TYPES()
- DECLARE_DESCRIPTOR(AllocateHeapNumberDescriptor, CallInterfaceDescriptor)
-};
-
class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_JS_PARAMETERS(kAllocationSite)
@@ -1206,15 +1269,6 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
static const Register KeyRegister();
};
-class NewArgumentsElementsDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kFrame, kLength, kMappedCount)
- DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kFrame
- MachineType::TaggedSigned(), // kLength
- MachineType::TaggedSigned()) // kMappedCount
- DECLARE_DESCRIPTOR(NewArgumentsElementsDescriptor, CallInterfaceDescriptor)
-};
-
class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
: public CallInterfaceDescriptor {
public:
@@ -1331,52 +1385,6 @@ class WasmFloat64ToNumberDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmFloat64ToNumberDescriptor, CallInterfaceDescriptor)
};
-class WasmTableInitDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kDestination, kSource, kSize, kTableIndex,
- kSegmentIndex)
- DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kDestination
- MachineType::Int32(), // kSource
- MachineType::Int32(), // kSize
- MachineType::AnyTagged(), // kTableIndex
- MachineType::AnyTagged(), // kSegmentindex
- )
-
-#if V8_TARGET_ARCH_IA32
- static constexpr bool kPassLastArgOnStack = true;
-#else
- static constexpr bool kPassLastArgOnStack = false;
-#endif
-
- // Pass the last parameter through the stack.
- static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
-
- DECLARE_DESCRIPTOR(WasmTableInitDescriptor, CallInterfaceDescriptor)
-};
-
-class WasmTableCopyDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kDestination, kSource, kSize, kDestinationTable,
- kSourceTable)
- DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kDestination
- MachineType::Int32(), // kSource
- MachineType::Int32(), // kSize
- MachineType::AnyTagged(), // kDestinationTable
- MachineType::AnyTagged(), // kSourceTable
- )
-
-#if V8_TARGET_ARCH_IA32
- static constexpr bool kPassLastArgOnStack = true;
-#else
- static constexpr bool kPassLastArgOnStack = false;
-#endif
-
- // Pass the last parameter through the stack.
- static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
-
- DECLARE_DESCRIPTOR(WasmTableCopyDescriptor, CallInterfaceDescriptor)
-};
-
class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final
: public CallInterfaceDescriptor {
public:
@@ -1414,15 +1422,6 @@ class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final
DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor, CallInterfaceDescriptor)
};
-class WasmAtomicNotifyDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kCount)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Uint32()) // kCount
- DECLARE_DESCRIPTOR(WasmAtomicNotifyDescriptor, CallInterfaceDescriptor)
-};
-
class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
@@ -1461,26 +1460,6 @@ class WasmI64AtomicWait32Descriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor, CallInterfaceDescriptor)
};
-class WasmI32AtomicWait64Descriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Int32(), // kExpectedValue
- MachineType::Uint64()) // kTimeout
- DECLARE_DESCRIPTOR(WasmI32AtomicWait64Descriptor, CallInterfaceDescriptor)
-};
-
-class WasmI64AtomicWait64Descriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Uint64(), // kExpectedValue
- MachineType::Uint64()) // kTimeout
- DECLARE_DESCRIPTOR(WasmI64AtomicWait64Descriptor, CallInterfaceDescriptor)
-};
-
class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kSource, kFlags, kSlot, kVector)
@@ -1497,11 +1476,12 @@ class BinaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
- MachineType::Int32(), // kSlot
+ MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kMaybeFeedbackVector
DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
class CallTrampoline_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot,
@@ -1519,11 +1499,12 @@ class Compare_WithFeedbackDescriptor : public CallInterfaceDescriptor {
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
- MachineType::Int32(), // kSlot
+ MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kMaybeFeedbackVector
DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
class Construct_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
// kSlot is passed in a register, kMaybeFeedbackVector on the stack.
@@ -1538,7 +1519,7 @@ class UnaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kValue, kSlot, kMaybeFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
- MachineType::Int32(), // kSlot
+ MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kMaybeFeedbackVector
DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
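Taken together, the macros above make a new calling convention mostly declarative: DEFINE_PARAMETERS defaults to StackArgumentOrder::kDefault, while DEFINE_PARAMETERS_VARARGS and DEFINE_JS_PARAMETERS select StackArgumentOrder::kJS. A hypothetical descriptor (class name and parameters invented for illustration) has this shape:

    class ExampleWithFeedbackDescriptor : public CallInterfaceDescriptor {
     public:
      DEFINE_PARAMETERS(kValue, kSlot, kMaybeFeedbackVector)
      DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kValue
                             MachineType::UintPtr(),    // kSlot
                             MachineType::AnyTagged())  // kMaybeFeedbackVector
      DECLARE_DESCRIPTOR(ExampleWithFeedbackDescriptor, CallInterfaceDescriptor)
    };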
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index ea05441594..e7e10208d7 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -188,50 +188,10 @@ class MachineType {
constexpr static MachineType Bool() {
return MachineType(MachineRepresentation::kBit, MachineSemantic::kBool);
}
- constexpr static MachineType TaggedBool() {
- return MachineType(MachineRepresentation::kTagged, MachineSemantic::kBool);
- }
- constexpr static MachineType CompressedBool() {
- return MachineType(MachineRepresentation::kCompressed,
- MachineSemantic::kBool);
- }
constexpr static MachineType None() {
return MachineType(MachineRepresentation::kNone, MachineSemantic::kNone);
}
- // These naked representations should eventually go away.
- constexpr static MachineType RepWord8() {
- return MachineType(MachineRepresentation::kWord8, MachineSemantic::kNone);
- }
- constexpr static MachineType RepWord16() {
- return MachineType(MachineRepresentation::kWord16, MachineSemantic::kNone);
- }
- constexpr static MachineType RepWord32() {
- return MachineType(MachineRepresentation::kWord32, MachineSemantic::kNone);
- }
- constexpr static MachineType RepWord64() {
- return MachineType(MachineRepresentation::kWord64, MachineSemantic::kNone);
- }
- constexpr static MachineType RepFloat32() {
- return MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone);
- }
- constexpr static MachineType RepFloat64() {
- return MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone);
- }
- constexpr static MachineType RepSimd128() {
- return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
- }
- constexpr static MachineType RepTagged() {
- return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
- }
- constexpr static MachineType RepCompressed() {
- return MachineType(MachineRepresentation::kCompressed,
- MachineSemantic::kNone);
- }
- constexpr static MachineType RepBit() {
- return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
- }
-
static MachineType TypeForRepresentation(const MachineRepresentation& rep,
bool isSigned = true) {
switch (rep) {
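Callers of the removed Rep* helpers can build the same values directly, since the deleted bodies only combined a representation with MachineSemantic::kNone. The mapping, assuming the two-argument constructor remains accessible at the call site:

    // Old call site:               Equivalent replacement:
    //   MachineType::RepWord32()   MachineType(MachineRepresentation::kWord32,
    //                                          MachineSemantic::kNone)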
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 768b16b86c..19a514b2d9 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -3568,17 +3568,20 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
}
void Assembler::dd(uint32_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
}
void Assembler::dq(uint64_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
}
void Assembler::dd(Label* label) {
@@ -3652,8 +3655,12 @@ void Assembler::CheckTrampolinePool() {
}
}
}
- bind(&after_pool);
+  // If unbound_labels_count_ is big enough, the label after_pool will need a
+  // trampoline as well, so the trampoline must be created before the bind
+  // operation so that 'bind' can take this information into account.
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
trampoline_emitted_ = true;
// As we are only going to emit trampoline once, we need to prevent any
@@ -3794,6 +3801,7 @@ void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
addu(t, ra, t);
jalr(t);
if (bdslot == PROTECT) nop();
+ set_last_call_pc_(pc_);
}
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index a414168a9f..248bd1ac75 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -170,6 +170,35 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
+  // MIPS uses BlockTrampolinePool to prevent generating a trampoline inside a
+  // continuous instruction block. For a Call instruction, it prevents emitting
+  // a trampoline between the jalr and its delay slot instruction. The
+  // destructor of BlockTrampolinePool must check whether a trampoline needs to
+  // be generated immediately; otherwise the branch range would exceed the
+  // maximum branch offset. As a result, the pc_offset taken after calling
+  // CheckTrampolinePool may not be the Call instruction's location, so we use
+  // last_call_pc here for the safepoint record.
+ int pc_offset_for_safepoint() {
+#ifdef DEBUG
+ Instr instr1 =
+ instr_at(static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize));
+ Instr instr2 = instr_at(
+ static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize * 2));
+ if (GetOpcodeField(instr1) != SPECIAL) { // instr1 == jialc.
+ DCHECK(IsMipsArchVariant(kMips32r6) && GetOpcodeField(instr1) == POP76 &&
+ GetRs(instr1) == 0);
+ } else {
+ if (GetFunctionField(instr1) == SLL) { // instr1 == nop, instr2 == jalr.
+ DCHECK(GetOpcodeField(instr2) == SPECIAL &&
+ GetFunctionField(instr2) == JALR);
+ } else { // instr1 == jalr.
+ DCHECK(GetFunctionField(instr1) == JALR);
+ }
+ }
+#endif
+ return static_cast<int>(last_call_pc_ - buffer_start_);
+ }
+
// Label operations & relative jumps (PPUM Appendix D).
//
// Takes a branch opcode (cc) and a label (L) and generates
@@ -1593,6 +1622,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenPCRelativeJumpAndLink(Register t, int32_t imm32,
RelocInfo::Mode rmode, BranchDelaySlot bdslot);
+ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1856,6 +1887,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
+  // Keep track of the last Call's position so that the safepoint record gets
+  // the correct pc even if a trampoline is emitted immediately after the
+  // Call.
+ byte* last_call_pc_;
+
private:
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
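A sketch of the intended call pattern for pc_offset_for_safepoint(); the recorder object and RecordAt helper below are invented for illustration. The point is to record the safepoint at the Call instruction itself, because pc_offset() taken right after the call may already point past a trampoline emitted by the BlockTrampolinePool destructor:

    masm->Call(target);                                 // sets last_call_pc_
    int call_offset = masm->pc_offset_for_safepoint();  // not masm->pc_offset()
    safepoint_builder->RecordAt(call_offset);           // hypothetical recorder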
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
index 6770ab5cce..4945ce4395 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
@@ -39,14 +39,6 @@ void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
@@ -56,14 +48,6 @@ void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
default_stub_registers);
}
-void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
@@ -88,11 +72,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
- return a1;
-}
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return a0; }
-
const Register LoadDescriptor::ReceiverRegister() { return a1; }
const Register LoadDescriptor::NameRegister() { return a2; }
const Register LoadDescriptor::SlotRegister() { return a0; }
@@ -233,12 +212,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
@@ -338,6 +311,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 48b2acf456..efb2dc11e1 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -3906,6 +3906,7 @@ void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
+ set_last_call_pc_(pc_);
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
@@ -3938,6 +3939,7 @@ void TurboAssembler::Call(Register target, Register base, int16_t offset,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
+ set_last_call_pc_(pc_);
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
@@ -5427,7 +5429,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
- lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ lw(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 751d0f8703..b64005155d 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -3596,7 +3596,7 @@ void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
}
void Assembler::move_v(MSARegister wd, MSARegister ws) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(ws.is_valid() && wd.is_valid());
Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
(wd.code() << kWdShift) | MSA_ELM_MINOR;
@@ -3604,7 +3604,7 @@ void Assembler::move_v(MSARegister wd, MSARegister ws) {
}
void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(cd.is_valid() && rs.is_valid());
Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
(cd.code() << kWdShift) | MSA_ELM_MINOR;
@@ -3612,7 +3612,7 @@ void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
}
void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(rd.is_valid() && cs.is_valid());
Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
(rd.code() << kWdShift) | MSA_ELM_MINOR;
@@ -3763,17 +3763,20 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
}
void Assembler::dd(uint32_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
}
void Assembler::dq(uint64_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
}
void Assembler::dd(Label* label) {
@@ -3856,8 +3859,12 @@ void Assembler::CheckTrampolinePool() {
}
}
nop();
- bind(&after_pool);
+  // If unbound_labels_count_ is big enough, the label after_pool will need a
+  // trampoline as well, so the trampoline must be created before the bind
+  // operation so that 'bind' can take this information into account.
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
trampoline_emitted_ = true;
// As we are only going to emit trampoline once, we need to prevent any
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index f70e46f81b..b5edc75676 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -168,6 +168,35 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
+  // MIPS uses BlockTrampolinePool to prevent generating a trampoline inside a
+  // continuous instruction block. For a Call instruction, it prevents emitting
+  // a trampoline between the jalr and its delay slot instruction. The
+  // destructor of BlockTrampolinePool must check whether a trampoline needs to
+  // be generated immediately; otherwise the branch range would exceed the
+  // maximum branch offset. As a result, the pc_offset taken after calling
+  // CheckTrampolinePool may not be the Call instruction's location, so we use
+  // last_call_pc here for the safepoint record.
+ int pc_offset_for_safepoint() {
+#ifdef DEBUG
+ Instr instr1 =
+ instr_at(static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize));
+ Instr instr2 = instr_at(
+ static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize * 2));
+ if (GetOpcodeField(instr1) != SPECIAL) { // instr1 == jialc.
+ DCHECK((kArchVariant == kMips64r6) && GetOpcodeField(instr1) == POP76 &&
+ GetRs(instr1) == 0);
+ } else {
+ if (GetFunctionField(instr1) == SLL) { // instr1 == nop, instr2 == jalr.
+ DCHECK(GetOpcodeField(instr2) == SPECIAL &&
+ GetFunctionField(instr2) == JALR);
+ } else { // instr1 == jalr.
+ DCHECK(GetFunctionField(instr1) == JALR);
+ }
+ }
+#endif
+ return static_cast<int>(last_call_pc_ - buffer_start_);
+ }
+
// Label operations & relative jumps (PPUM Appendix D).
//
// Takes a branch opcode (cc) and a label (L) and generates
@@ -1629,6 +1658,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
+ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1882,6 +1913,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
+  // Keep track of the last Call's position so that the safepoint record gets
+  // the correct pc even if a trampoline is emitted immediately after the
+  // Call.
+ byte* last_call_pc_;
+
RegList scratch_register_list_;
private:
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
index 077b49fa99..9e33d39eba 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
@@ -39,14 +39,6 @@ void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
@@ -56,14 +48,6 @@ void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
default_stub_registers);
}
-void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
@@ -88,11 +72,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
- return a1;
-}
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return a0; }
-
const Register LoadDescriptor::ReceiverRegister() { return a1; }
const Register LoadDescriptor::NameRegister() { return a2; }
const Register LoadDescriptor::SlotRegister() { return a0; }
@@ -233,12 +212,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
@@ -338,6 +311,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index a665b76e80..785cf4aa5c 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -4235,6 +4235,7 @@ void TurboAssembler::Call(Register target, Condition cond, Register rs,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
+ set_last_call_pc_(pc_);
}
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
@@ -5753,7 +5754,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
- Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ Ld(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 19f93e674e..160691cf89 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -19,103 +19,112 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure)
- : OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) {
+ Handle<JSFunction> closure, CodeKind code_kind)
+ : code_kind_(code_kind),
+ zone_(zone),
+ optimization_id_(isolate->NextOptimizationId()) {
DCHECK_EQ(*shared, closure->shared());
DCHECK(shared->is_compiled());
bytecode_array_ = handle(shared->GetBytecodeArray(), isolate);
shared_info_ = shared;
closure_ = closure;
- optimization_id_ = isolate->NextOptimizationId();
// Collect source positions for optimized code when profiling or if debugger
// is active, to be able to get more precise source positions at the price of
// more memory consumption.
if (isolate->NeedsDetailedOptimizedCodeLineInfo()) {
- MarkAsSourcePositionsEnabled();
+ set_source_positions();
}
SetTracingFlags(shared->PassesFilter(FLAG_trace_turbo_filter));
+ ConfigureFlags();
}
OptimizedCompilationInfo::OptimizedCompilationInfo(
- Vector<const char> debug_name, Zone* zone, Code::Kind code_kind)
- : OptimizedCompilationInfo(code_kind, zone) {
- debug_name_ = debug_name;
-
+ Vector<const char> debug_name, Zone* zone, CodeKind code_kind)
+ : code_kind_(code_kind),
+ zone_(zone),
+ optimization_id_(kNoOptimizationId),
+ debug_name_(debug_name) {
SetTracingFlags(
PassesFilter(debug_name, CStrVector(FLAG_trace_turbo_filter)));
+ ConfigureFlags();
}
-OptimizedCompilationInfo::OptimizedCompilationInfo(Code::Kind code_kind,
- Zone* zone)
- : code_kind_(code_kind), zone_(zone) {
- ConfigureFlags();
+#ifdef DEBUG
+bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
+ switch (flag) {
+ case kPoisonRegisterArguments:
+ return untrusted_code_mitigations();
+ case kFunctionContextSpecializing:
+ return !IsNativeContextIndependent();
+ default:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const {
+ switch (flag) {
+ case kPoisonRegisterArguments:
+ if (!GetFlag(kPoisonRegisterArguments)) return true;
+ return untrusted_code_mitigations() && called_with_code_start_register();
+ default:
+ return true;
+ }
+ UNREACHABLE();
}
+#endif // DEBUG
void OptimizedCompilationInfo::ConfigureFlags() {
- if (FLAG_untrusted_code_mitigations) SetFlag(kUntrustedCodeMitigations);
+ if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
switch (code_kind_) {
- case Code::OPTIMIZED_FUNCTION:
- SetFlag(kCalledWithCodeStartRegister);
- SetFlag(kSwitchJumpTableEnabled);
+ case CodeKind::OPTIMIZED_FUNCTION:
if (FLAG_function_context_specialization) {
- MarkAsFunctionContextSpecializing();
- }
- if (FLAG_turbo_splitting) {
- MarkAsSplittingEnabled();
- }
- if (FLAG_untrusted_code_mitigations) {
- MarkAsPoisoningRegisterArguments();
- }
- if (FLAG_analyze_environment_liveness) {
- // TODO(yangguo): Disable this in case of debugging for crbug.com/826613
- MarkAsAnalyzeEnvironmentLiveness();
+ set_function_context_specializing();
}
+ V8_FALLTHROUGH;
+ case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
+ set_called_with_code_start_register();
+ set_switch_jump_table();
+ if (FLAG_turbo_splitting) set_splitting();
+ if (FLAG_untrusted_code_mitigations) set_poison_register_arguments();
+ // TODO(yangguo): Disable this in case of debugging for crbug.com/826613
+ if (FLAG_analyze_environment_liveness) set_analyze_environment_liveness();
break;
- case Code::BYTECODE_HANDLER:
- SetFlag(kCalledWithCodeStartRegister);
- if (FLAG_turbo_splitting) {
- MarkAsSplittingEnabled();
- }
+ case CodeKind::BYTECODE_HANDLER:
+ set_called_with_code_start_register();
+ if (FLAG_turbo_splitting) set_splitting();
break;
- case Code::BUILTIN:
- case Code::STUB:
- if (FLAG_turbo_splitting) {
- MarkAsSplittingEnabled();
- }
+ case CodeKind::BUILTIN:
+ case CodeKind::STUB:
+ if (FLAG_turbo_splitting) set_splitting();
#if ENABLE_GDB_JIT_INTERFACE && DEBUG
- MarkAsSourcePositionsEnabled();
+ set_source_positions();
#endif // ENABLE_GDB_JIT_INTERFACE && DEBUG
break;
- case Code::WASM_FUNCTION:
- case Code::WASM_TO_CAPI_FUNCTION:
- SetFlag(kSwitchJumpTableEnabled);
+ case CodeKind::WASM_FUNCTION:
+ case CodeKind::WASM_TO_CAPI_FUNCTION:
+ set_switch_jump_table();
break;
default:
break;
}
if (FLAG_turbo_control_flow_aware_allocation) {
- MarkAsTurboControlFlowAwareAllocation();
+ set_turbo_control_flow_aware_allocation();
} else {
- MarkAsTurboPreprocessRanges();
+ set_turbo_preprocess_ranges();
}
}
OptimizedCompilationInfo::~OptimizedCompilationInfo() {
- if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
+ if (disable_future_optimization() && has_shared_info()) {
shared_info()->DisableOptimization(bailout_reason());
}
}
-void OptimizedCompilationInfo::set_deferred_handles(
- std::unique_ptr<DeferredHandles> deferred_handles) {
- DCHECK_NULL(deferred_handles_);
- deferred_handles_ = std::move(deferred_handles);
-}
-
void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
if (!shared_info_.is_null()) {
shared_info_ = Handle<SharedFunctionInfo>(*shared_info_, isolate);
@@ -134,12 +143,12 @@ void OptimizedCompilationInfo::AbortOptimization(BailoutReason reason) {
if (bailout_reason_ == BailoutReason::kNoReason) {
bailout_reason_ = reason;
}
- SetFlag(kDisableFutureOptimization);
+ set_disable_future_optimization();
}
void OptimizedCompilationInfo::RetryOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
- if (GetFlag(kDisableFutureOptimization)) return;
+ if (disable_future_optimization()) return;
bailout_reason_ = reason;
}
@@ -157,19 +166,19 @@ std::unique_ptr<char[]> OptimizedCompilationInfo::GetDebugName() const {
StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
switch (code_kind()) {
- case Code::STUB:
- case Code::BYTECODE_HANDLER:
- case Code::BUILTIN:
+ case CodeKind::STUB:
+ case CodeKind::BYTECODE_HANDLER:
+ case CodeKind::BUILTIN:
return StackFrame::STUB;
- case Code::WASM_FUNCTION:
+ case CodeKind::WASM_FUNCTION:
return StackFrame::WASM;
- case Code::WASM_TO_CAPI_FUNCTION:
+ case CodeKind::WASM_TO_CAPI_FUNCTION:
return StackFrame::WASM_EXIT;
- case Code::JS_TO_WASM_FUNCTION:
+ case CodeKind::JS_TO_WASM_FUNCTION:
return StackFrame::JS_TO_WASM;
- case Code::WASM_TO_JS_FUNCTION:
+ case CodeKind::WASM_TO_JS_FUNCTION:
return StackFrame::WASM_TO_JS;
- case Code::C_WASM_ENTRY:
+ case CodeKind::C_WASM_ENTRY:
return StackFrame::C_WASM_ENTRY;
default:
UNIMPLEMENTED();
@@ -177,6 +186,11 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
}
}
+void OptimizedCompilationInfo::SetCode(Handle<Code> code) {
+ DCHECK_EQ(code->kind(), code_kind());
+ code_ = code;
+}
+
void OptimizedCompilationInfo::SetWasmCompilationResult(
std::unique_ptr<wasm::WasmCompilationResult> wasm_compilation_result) {
wasm_compilation_result_ = std::move(wasm_compilation_result);
@@ -225,11 +239,11 @@ int OptimizedCompilationInfo::AddInlinedFunction(
void OptimizedCompilationInfo::SetTracingFlags(bool passes_filter) {
if (!passes_filter) return;
- if (FLAG_trace_turbo) SetFlag(kTraceTurboJson);
- if (FLAG_trace_turbo_graph) SetFlag(kTraceTurboGraph);
- if (FLAG_trace_turbo_scheduled) SetFlag(kTraceTurboScheduled);
- if (FLAG_trace_turbo_alloc) SetFlag(kTraceTurboAllocation);
- if (FLAG_trace_heap_broker) SetFlag(kTraceHeapBroker);
+ if (FLAG_trace_turbo) set_trace_turbo_json();
+ if (FLAG_trace_turbo_graph) set_trace_turbo_graph();
+ if (FLAG_trace_turbo_scheduled) set_trace_turbo_scheduled();
+ if (FLAG_trace_turbo_alloc) set_trace_turbo_allocation();
+ if (FLAG_trace_heap_broker) set_trace_heap_broker();
}
OptimizedCompilationInfo::InlinedFunctionHolder::InlinedFunctionHolder(
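
In the rewritten ConfigureFlags above, the CodeKind::OPTIMIZED_FUNCTION case now falls through into CodeKind::NATIVE_CONTEXT_INDEPENDENT via V8_FALLTHROUGH, so the setup shared by both kinds is written only once. A stripped-down sketch of that control flow, using the standard [[fallthrough]] attribute (which is what V8_FALLTHROUGH is assumed to amount to on current compilers):

#include <cstdio>

enum class Kind { kOptimized, kNativeContextIndependent, kBuiltin };

void Configure(Kind kind) {
  switch (kind) {
    case Kind::kOptimized:
      std::puts("optimized-only setup");
      [[fallthrough]];  // deliberately share the rest with the next case
    case Kind::kNativeContextIndependent:
      std::puts("setup common to both kinds");
      break;
    case Kind::kBuiltin:
      std::puts("builtin setup");
      break;
  }
}

int main() {
  Configure(Kind::kOptimized);
  Configure(Kind::kNativeContextIndependent);
}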
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index d6d4c88c99..ac45bc2939 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -11,9 +11,12 @@
#include "src/codegen/source-position-table.h"
#include "src/codegen/tick-counter.h"
#include "src/common/globals.h"
+#include "src/diagnostics/basic-block-profiler.h"
#include "src/execution/frames.h"
#include "src/handles/handles.h"
+#include "src/handles/persistent-handles.h"
#include "src/objects/objects.h"
+#include "src/utils/identity-map.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
@@ -21,11 +24,10 @@ namespace v8 {
namespace tracing {
class TracedValue;
-}
+} // namespace tracing
namespace internal {
-class DeferredHandles;
class FunctionLiteral;
class Isolate;
class JavaScriptFrame;
@@ -34,7 +36,7 @@ class Zone;
namespace wasm {
struct WasmCompilationResult;
-}
+} // namespace wasm
// OptimizedCompilationInfo encapsulates the information needed to compile
// optimized code for a given function, and the results of the optimized
@@ -43,38 +45,65 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
public:
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
+
+#define FLAGS(V) \
+ V(FunctionContextSpecializing, function_context_specializing, 0) \
+ V(Inlining, inlining, 1) \
+ V(DisableFutureOptimization, disable_future_optimization, 2) \
+ V(Splitting, splitting, 3) \
+ V(SourcePositions, source_positions, 4) \
+ V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \
+ V(LoopPeeling, loop_peeling, 6) \
+ V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \
+ V(SwitchJumpTable, switch_jump_table, 8) \
+ V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \
+ V(PoisonRegisterArguments, poison_register_arguments, 10) \
+ V(AllocationFolding, allocation_folding, 11) \
+ V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \
+ V(TraceTurboJson, trace_turbo_json, 13) \
+ V(TraceTurboGraph, trace_turbo_graph, 14) \
+ V(TraceTurboScheduled, trace_turbo_scheduled, 15) \
+ V(TraceTurboAllocation, trace_turbo_allocation, 16) \
+ V(TraceHeapBroker, trace_heap_broker, 17) \
+ V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
+ V(TurboControlFlowAwareAllocation, turbo_control_flow_aware_allocation, 19) \
+ V(TurboPreprocessRanges, turbo_preprocess_ranges, 20) \
+ V(ConcurrentInlining, concurrent_inlining, 21)
+
enum Flag {
- kFunctionContextSpecializing = 1 << 0,
- kInliningEnabled = 1 << 1,
- kDisableFutureOptimization = 1 << 2,
- kSplittingEnabled = 1 << 3,
- kSourcePositionsEnabled = 1 << 4,
- kBailoutOnUninitialized = 1 << 5,
- kLoopPeelingEnabled = 1 << 6,
- kUntrustedCodeMitigations = 1 << 7,
- kSwitchJumpTableEnabled = 1 << 8,
- kCalledWithCodeStartRegister = 1 << 9,
- kPoisonRegisterArguments = 1 << 10,
- kAllocationFoldingEnabled = 1 << 11,
- kAnalyzeEnvironmentLiveness = 1 << 12,
- kTraceTurboJson = 1 << 13,
- kTraceTurboGraph = 1 << 14,
- kTraceTurboScheduled = 1 << 15,
- kTraceTurboAllocation = 1 << 16,
- kTraceHeapBroker = 1 << 17,
- kWasmRuntimeExceptionSupport = 1 << 18,
- kTurboControlFlowAwareAllocation = 1 << 19,
- kTurboPreprocessRanges = 1 << 20,
- kConcurrentInlining = 1 << 21,
+#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
+ FLAGS(DEF_ENUM)
+#undef DEF_ENUM
};
+#define DEF_GETTER(Camel, Lower, Bit) \
+ bool Lower() const { \
+ DCHECK(FlagGetIsValid(k##Camel)); \
+ return GetFlag(k##Camel); \
+ }
+ FLAGS(DEF_GETTER)
+#undef DEF_GETTER
+
+#define DEF_SETTER(Camel, Lower, Bit) \
+ void set_##Lower() { \
+ DCHECK(FlagSetIsValid(k##Camel)); \
+ SetFlag(k##Camel); \
+ }
+ FLAGS(DEF_SETTER)
+#undef DEF_SETTER
+
+#ifdef DEBUG
+ bool FlagGetIsValid(Flag flag) const;
+ bool FlagSetIsValid(Flag flag) const;
+#endif // DEBUG
+
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure);
+ Handle<JSFunction> closure, CodeKind code_kind);
// Construct a compilation info for stub compilation, Wasm, and testing.
OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
- Code::Kind code_kind);
+ CodeKind code_kind);
~OptimizedCompilationInfo();
@@ -86,44 +115,12 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; }
- Code::Kind code_kind() const { return code_kind_; }
+ CodeKind code_kind() const { return code_kind_; }
int32_t builtin_index() const { return builtin_index_; }
void set_builtin_index(int32_t index) { builtin_index_ = index; }
BailoutId osr_offset() const { return osr_offset_; }
JavaScriptFrame* osr_frame() const { return osr_frame_; }
- // Flags used by optimized compilation.
-
- void MarkAsConcurrentInlining() { SetFlag(kConcurrentInlining); }
- bool is_concurrent_inlining() const { return GetFlag(kConcurrentInlining); }
-
- void MarkAsTurboControlFlowAwareAllocation() {
- SetFlag(kTurboControlFlowAwareAllocation);
- }
- bool is_turbo_control_flow_aware_allocation() const {
- return GetFlag(kTurboControlFlowAwareAllocation);
- }
-
- void MarkAsTurboPreprocessRanges() { SetFlag(kTurboPreprocessRanges); }
- bool is_turbo_preprocess_ranges() const {
- return GetFlag(kTurboPreprocessRanges);
- }
-
- void MarkAsFunctionContextSpecializing() {
- SetFlag(kFunctionContextSpecializing);
- }
- bool is_function_context_specializing() const {
- return GetFlag(kFunctionContextSpecializing);
- }
-
- void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
- bool is_source_positions_enabled() const {
- return GetFlag(kSourcePositionsEnabled);
- }
-
- void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
- bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
-
void SetPoisoningMitigationLevel(PoisoningMitigationLevel poisoning_level) {
poisoning_level_ = poisoning_level;
}
@@ -131,78 +128,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
return poisoning_level_;
}
- void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
- bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
-
- void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
- bool is_bailout_on_uninitialized() const {
- return GetFlag(kBailoutOnUninitialized);
- }
-
- void MarkAsLoopPeelingEnabled() { SetFlag(kLoopPeelingEnabled); }
- bool is_loop_peeling_enabled() const { return GetFlag(kLoopPeelingEnabled); }
-
- bool has_untrusted_code_mitigations() const {
- return GetFlag(kUntrustedCodeMitigations);
- }
-
- bool switch_jump_table_enabled() const {
- return GetFlag(kSwitchJumpTableEnabled);
- }
-
- bool called_with_code_start_register() const {
- bool enabled = GetFlag(kCalledWithCodeStartRegister);
- return enabled;
- }
-
- void MarkAsPoisoningRegisterArguments() {
- DCHECK(has_untrusted_code_mitigations());
- SetFlag(kPoisonRegisterArguments);
- }
- bool is_poisoning_register_arguments() const {
- bool enabled = GetFlag(kPoisonRegisterArguments);
- DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
- DCHECK_IMPLIES(enabled, called_with_code_start_register());
- return enabled;
- }
-
- void MarkAsAllocationFoldingEnabled() { SetFlag(kAllocationFoldingEnabled); }
- bool is_allocation_folding_enabled() const {
- return GetFlag(kAllocationFoldingEnabled);
- }
-
- void MarkAsAnalyzeEnvironmentLiveness() {
- SetFlag(kAnalyzeEnvironmentLiveness);
- }
- bool is_analyze_environment_liveness() const {
- return GetFlag(kAnalyzeEnvironmentLiveness);
- }
-
- void SetWasmRuntimeExceptionSupport() {
- SetFlag(kWasmRuntimeExceptionSupport);
- }
-
- bool wasm_runtime_exception_support() {
- return GetFlag(kWasmRuntimeExceptionSupport);
- }
-
- bool trace_turbo_json_enabled() const { return GetFlag(kTraceTurboJson); }
-
- bool trace_turbo_graph_enabled() const { return GetFlag(kTraceTurboGraph); }
-
- bool trace_turbo_allocation_enabled() const {
- return GetFlag(kTraceTurboAllocation);
- }
-
- bool trace_turbo_scheduled_enabled() const {
- return GetFlag(kTraceTurboScheduled);
- }
-
- bool trace_heap_broker_enabled() const { return GetFlag(kTraceHeapBroker); }
-
// Code getters and setters.
- void SetCode(Handle<Code> code) { code_ = code; }
+ void SetCode(Handle<Code> code);
void SetWasmCompilationResult(std::unique_ptr<wasm::WasmCompilationResult>);
std::unique_ptr<wasm::WasmCompilationResult> ReleaseWasmCompilationResult();
@@ -217,19 +145,34 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
JSGlobalObject global_object() const;
// Accessors for the different compilation modes.
- bool IsOptimizing() const { return code_kind() == Code::OPTIMIZED_FUNCTION; }
- bool IsWasm() const { return code_kind() == Code::WASM_FUNCTION; }
- bool IsNotOptimizedFunctionOrWasmFunction() const {
- return code_kind() != Code::OPTIMIZED_FUNCTION &&
- code_kind() != Code::WASM_FUNCTION;
+ bool IsOptimizing() const {
+ return CodeKindIsOptimizedJSFunction(code_kind());
}
+ bool IsNativeContextIndependent() const {
+ return code_kind() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+ }
+ bool IsStub() const { return code_kind() == CodeKind::STUB; }
+ bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
+
void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {
DCHECK(IsOptimizing());
osr_offset_ = osr_offset;
osr_frame_ = osr_frame;
}
- void set_deferred_handles(std::unique_ptr<DeferredHandles> deferred_handles);
+ void set_persistent_handles(
+ std::unique_ptr<PersistentHandles> persistent_handles) {
+ DCHECK_NULL(ph_);
+ ph_ = std::move(persistent_handles);
+ DCHECK_NOT_NULL(ph_);
+ }
+
+ void set_canonical_handles(
+ std::unique_ptr<CanonicalHandlesMap> canonical_handles) {
+ DCHECK_NULL(canonical_handles_);
+ canonical_handles_ = std::move(canonical_handles);
+ DCHECK_NOT_NULL(canonical_handles_);
+ }
void ReopenHandlesInNewHandleScope(Isolate* isolate);
@@ -239,10 +182,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
BailoutReason bailout_reason() const { return bailout_reason_; }
- bool is_disable_future_optimization() const {
- return GetFlag(kDisableFutureOptimization);
- }
-
int optimization_id() const {
DCHECK(IsOptimizing());
return optimization_id_;
@@ -290,8 +229,22 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
TickCounter& tick_counter() { return tick_counter_; }
+ BasicBlockProfilerData* profiler_data() const { return profiler_data_; }
+ void set_profiler_data(BasicBlockProfilerData* profiler_data) {
+ profiler_data_ = profiler_data;
+ }
+
+ std::unique_ptr<PersistentHandles> DetachPersistentHandles() {
+ DCHECK_NOT_NULL(ph_);
+ return std::move(ph_);
+ }
+
+ std::unique_ptr<CanonicalHandlesMap> DetachCanonicalHandles() {
+ DCHECK_NOT_NULL(canonical_handles_);
+ return std::move(canonical_handles_);
+ }
+
private:
- OptimizedCompilationInfo(Code::Kind code_kind, Zone* zone);
void ConfigureFlags();
void SetFlag(Flag flag) { flags_ |= flag; }
@@ -304,20 +257,21 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
PoisoningMitigationLevel poisoning_level_ =
PoisoningMitigationLevel::kDontPoison;
- Code::Kind code_kind_;
+ const CodeKind code_kind_;
int32_t builtin_index_ = -1;
  // We retain a reference to the bytecode array specifically to ensure it
  // doesn't get flushed while we are optimizing the code.
Handle<BytecodeArray> bytecode_array_;
-
Handle<SharedFunctionInfo> shared_info_;
-
Handle<JSFunction> closure_;
// The compiled code.
Handle<Code> code_;
+ // Basic block profiling support.
+ BasicBlockProfilerData* profiler_data_ = nullptr;
+
// The WebAssembly compilation result, not published in the NativeModule yet.
std::unique_ptr<wasm::WasmCompilationResult> wasm_compilation_result_;
@@ -326,15 +280,14 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// The zone from which the compilation pipeline working on this
// OptimizedCompilationInfo allocates.
- Zone* zone_;
-
- std::unique_ptr<DeferredHandles> deferred_handles_;
+ Zone* const zone_;
BailoutReason bailout_reason_ = BailoutReason::kNoReason;
InlinedFunctionList inlined_functions_;
- int optimization_id_ = -1;
+ static constexpr int kNoOptimizationId = -1;
+ const int optimization_id_;
unsigned inlined_bytecode_size_ = 0;
// The current OSR frame for specialization or {nullptr}.
@@ -345,6 +298,26 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
TickCounter tick_counter_;
+ // 1) PersistentHandles created via PersistentHandlesScope inside of
+ // CompilationHandleScope
+ // 2) Owned by OptimizedCompilationInfo
+ // 3) Owned by JSHeapBroker
+ // 4) Owned by the broker's LocalHeap
+ // 5) Back to the broker for a brief moment (after tearing down the
+ // LocalHeap as part of exiting LocalHeapScope)
+ // 6) Back to OptimizedCompilationInfo when exiting the LocalHeapScope.
+ //
+ // In normal execution it gets destroyed when PipelineData gets destroyed.
+ // There is a special case in GenerateCodeForTesting where the JSHeapBroker
+ // will not be retired in that same method. In this case, we need to re-attach
+ // the PersistentHandles container to the JSHeapBroker.
+ std::unique_ptr<PersistentHandles> ph_;
+
+ // Canonical handles follow the same path as described by the persistent
+  // handles above. The only difference is that they are created in the
+  // CanonicalHandleScope (i.e. step 1 is different).
+ std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
+
DISALLOW_COPY_AND_ASSIGN(OptimizedCompilationInfo);
};
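
The FLAGS(V) list in the header above replaces the hand-written flag enum and the MarkAs*/is_* accessors with one X-macro that stamps out an enum constant, a getter, and a setter per entry (with DCHECK'd validity in debug builds). A small self-contained sketch of the same pattern; the class, flag names, and bit positions here are invented and the debug checks are omitted:

#include <cstdio>

#define MY_FLAGS(V)          \
  V(Inlining, inlining, 0)   \
  V(Splitting, splitting, 1)

class Flags {
 public:
  enum Flag {
#define DEF_ENUM(Camel, lower, Bit) k##Camel = 1 << Bit,
    MY_FLAGS(DEF_ENUM)
#undef DEF_ENUM
  };

  // One getter and one setter per list entry.
#define DEF_ACCESSORS(Camel, lower, Bit)                  \
  bool lower() const { return (bits_ & k##Camel) != 0; }  \
  void set_##lower() { bits_ |= k##Camel; }
  MY_FLAGS(DEF_ACCESSORS)
#undef DEF_ACCESSORS

 private:
  unsigned bits_ = 0;
};

int main() {
  Flags f;
  f.set_inlining();
  std::printf("inlining=%d splitting=%d\n", f.inlining(), f.splitting());
}

Keeping the name, accessor, and bit position in a single list is what lets the real change delete the long block of per-flag accessors further down in this diff.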
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index b9f09e23f2..37a53b49f2 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -1631,6 +1631,10 @@ void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}
+void Assembler::fctiwuz(const DoubleRegister frt, const DoubleRegister frb) {
+ emit(EXT4 | FCTIWUZ | frt.code() * B21 | frb.code() * B11);
+}
+
void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
@@ -1758,29 +1762,31 @@ void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
}
// Vector instructions
-void Assembler::mfvsrd(const Register ra, const DoubleRegister rs) {
+void Assembler::mfvsrd(const Register ra, const Simd128Register rs) {
int SX = 1;
emit(MFVSRD | rs.code() * B21 | ra.code() * B16 | SX);
}
-void Assembler::mfvsrwz(const Register ra, const DoubleRegister rs) {
+void Assembler::mfvsrwz(const Register ra, const Simd128Register rs) {
int SX = 1;
emit(MFVSRWZ | rs.code() * B21 | ra.code() * B16 | SX);
}
-void Assembler::mtvsrd(const DoubleRegister rt, const Register ra) {
+void Assembler::mtvsrd(const Simd128Register rt, const Register ra) {
int TX = 1;
emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
}
-void Assembler::vor(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb) {
- emit(VOR | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
+ int TX = 1;
+ emit(LXVD | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
+ TX);
}
-void Assembler::vsro(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb) {
- emit(VSRO | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
+ int SX = 1;
+ emit(STXVD | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
+ SX);
}
// Pseudo instructions.
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 778e94c185..f26a3c89c9 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -427,6 +427,23 @@ class Assembler : public AssemblerBase {
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM
+#define DECLARE_PPC_XX2_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register rb) { \
+ xx2_form(instr_name, rt, rb); \
+ }
+
+ inline void xx2_form(Instr instr, Simd128Register t, Simd128Register b) {
+ // Using VR (high VSR) registers.
+ int BX = 1;
+ int TX = 1;
+
+ emit(instr | (t.code() & 0x1F) * B21 | (b.code() & 0x1F) * B11 | BX * B1 |
+ TX);
+ }
+
+ PPC_XX2_OPCODE_A_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
+#undef DECLARE_PPC_XX2_INSTRUCTIONS
+
#define DECLARE_PPC_XX3_INSTRUCTIONS(name, instr_name, instr_value) \
inline void name(const DoubleRegister rt, const DoubleRegister ra, \
const DoubleRegister rb) { \
@@ -435,9 +452,10 @@ class Assembler : public AssemblerBase {
inline void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
DoubleRegister b) {
- int AX = ((a.code() & 0x20) >> 5) & 0x1;
- int BX = ((b.code() & 0x20) >> 5) & 0x1;
- int TX = ((t.code() & 0x20) >> 5) & 0x1;
+ // Using VR (high VSR) registers.
+ int AX = 1;
+ int BX = 1;
+ int TX = 1;
emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
(b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
@@ -447,18 +465,68 @@ class Assembler : public AssemblerBase {
#undef DECLARE_PPC_XX3_INSTRUCTIONS
#define DECLARE_PPC_VX_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
- inline void name(const DoubleRegister rt, const DoubleRegister rb, \
+ inline void name(const Simd128Register rt, const Simd128Register rb, \
const Operand& imm) { \
vx_form(instr_name, rt, rb, imm); \
}
+#define DECLARE_PPC_VX_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb) { \
+ vx_form(instr_name, rt, ra, rb); \
+ }
+#define DECLARE_PPC_VX_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register rb) { \
+ vx_form(instr_name, rt, rb); \
+ }
- inline void vx_form(Instr instr, DoubleRegister rt, DoubleRegister rb,
+ inline void vx_form(Instr instr, Simd128Register rt, Simd128Register rb,
const Operand& imm) {
emit(instr | rt.code() * B21 | imm.immediate() * B16 | rb.code() * B11);
}
+ inline void vx_form(Instr instr, Simd128Register rt, Simd128Register ra,
+ Simd128Register rb) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+ }
+ inline void vx_form(Instr instr, Simd128Register rt, Simd128Register rb) {
+ emit(instr | rt.code() * B21 | rb.code() * B11);
+ }
PPC_VX_OPCODE_A_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_A_FORM)
+ PPC_VX_OPCODE_B_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_B_FORM)
+ PPC_VX_OPCODE_C_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_C_FORM)
#undef DECLARE_PPC_VX_INSTRUCTIONS_A_FORM
+#undef DECLARE_PPC_VX_INSTRUCTIONS_B_FORM
+#undef DECLARE_PPC_VX_INSTRUCTIONS_C_FORM
+
+#define DECLARE_PPC_VA_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb, const Simd128Register rc) { \
+ va_form(instr_name, rt, ra, rb, rc); \
+ }
+
+ inline void va_form(Instr instr, Simd128Register rt, Simd128Register ra,
+ Simd128Register rb, Simd128Register rc) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ rc.code() * B6);
+ }
+
+ PPC_VA_OPCODE_A_FORM_LIST(DECLARE_PPC_VA_INSTRUCTIONS_A_FORM)
+#undef DECLARE_PPC_VA_INSTRUCTIONS_A_FORM
+
+#define DECLARE_PPC_VC_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb, const RCBit rc = LeaveRC) { \
+ vc_form(instr_name, rt, ra, rb, rc); \
+ }
+
+ inline void vc_form(Instr instr, Simd128Register rt, Simd128Register ra,
+ Simd128Register rb, int rc) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ rc * B10);
+ }
+
+ PPC_VC_OPCODE_LIST(DECLARE_PPC_VC_INSTRUCTIONS)
+#undef DECLARE_PPC_VC_INSTRUCTIONS
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
@@ -898,6 +966,7 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
void fctiw(const DoubleRegister frt, const DoubleRegister frb);
+ void fctiwuz(const DoubleRegister frt, const DoubleRegister frb);
void frin(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void friz(const DoubleRegister frt, const DoubleRegister frb,
@@ -947,13 +1016,11 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
// Vector instructions
- void mfvsrd(const Register ra, const DoubleRegister r);
- void mfvsrwz(const Register ra, const DoubleRegister r);
- void mtvsrd(const DoubleRegister rt, const Register ra);
- void vor(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb);
- void vsro(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb);
+ void mfvsrd(const Register ra, const Simd128Register r);
+ void mfvsrwz(const Register ra, const Simd128Register r);
+ void mtvsrd(const Simd128Register rt, const Register ra);
+ void lxvd(const Simd128Register rt, const MemOperand& src);
+ void stxvd(const Simd128Register rt, const MemOperand& src);
// Pseudo instructions
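
The new xx2_form/vx_form/va_form/vc_form helpers above all build the instruction word the same way: start from the opcode constant and multiply each register code into its bit field, where B21, B16, B11 and friends are simply powers of two (1 << 21 and so on). A rough standalone illustration of that field packing; the opcode value happens to match the vor entry in the opcode lists elsewhere in this diff, but the snippet is only meant to show the composition, not a usable encoder:

#include <cstdint>
#include <cstdio>

using Instr = uint32_t;

// Field positions, analogous to V8's B21/B16/B11 constants.
constexpr Instr B21 = 1u << 21;
constexpr Instr B16 = 1u << 16;
constexpr Instr B11 = 1u << 11;

// Compose an instruction word from an opcode and three 5-bit register codes.
Instr EncodeVXForm(Instr opcode, unsigned rt, unsigned ra, unsigned rb) {
  return opcode | rt * B21 | ra * B16 | rb * B11;
}

int main() {
  Instr word = EncodeVXForm(0x10000484u, 3, 4, 5);  // vor v3, v4, v5
  std::printf("0x%08X\n", static_cast<unsigned>(word));
}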
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index b75c3e3257..b91b40ca15 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -194,8 +194,6 @@ using Instr = uint32_t;
V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \
/* VSX Scalar Reciprocal Estimate Double-Precision */ \
V(xsredp, XSREDP, 0xF0000168) \
- /* VSX Scalar Reciprocal Estimate Single-Precision */ \
- V(xsresp, XSRESP, 0xF0000068) \
/* VSX Scalar Subtract Double-Precision */ \
V(xssubdp, XSSUBDP, 0xF0000140) \
/* VSX Scalar Subtract Single-Precision */ \
@@ -286,8 +284,6 @@ using Instr = uint32_t;
V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8) \
/* VSX Vector Reciprocal Estimate Double-Precision */ \
V(xvredp, XVREDP, 0xF0000368) \
- /* VSX Vector Reciprocal Estimate Single-Precision */ \
- V(xvresp, XVRESP, 0xF0000268) \
/* VSX Vector Subtract Double-Precision */ \
V(xvsubdp, XVSUBDP, 0xF0000340) \
/* VSX Vector Subtract Single-Precision */ \
@@ -363,7 +359,43 @@ using Instr = uint32_t;
/* Decimal Floating Test Data Group Quad */ \
V(dtstdgq, DTSTDGQ, 0xFC0001C4)
-#define PPC_XX2_OPCODE_LIST(V) \
+#define PPC_XX2_OPCODE_A_FORM_LIST(V) \
+ /* VSX Vector Absolute Value Double-Precision */ \
+ V(xvabsdp, XVABSDP, 0xF0000764) \
+ /* VSX Vector Negate Double-Precision */ \
+ V(xvnegdp, XVNEGDP, 0xF00007E4) \
+ /* VSX Vector Square Root Double-Precision */ \
+ V(xvsqrtdp, XVSQRTDP, 0xF000032C) \
+ /* VSX Vector Absolute Value Single-Precision */ \
+ V(xvabssp, XVABSSP, 0xF0000664) \
+ /* VSX Vector Negate Single-Precision */ \
+ V(xvnegsp, XVNEGSP, 0xF00006E4) \
+ /* VSX Vector Reciprocal Estimate Single-Precision */ \
+ V(xvresp, XVRESP, 0xF0000268) \
+ /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \
+ V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \
+ /* VSX Vector Square Root Single-Precision */ \
+ V(xvsqrtsp, XVSQRTSP, 0xF000022C) \
+ /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \
+ /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \
+ /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \
+ V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \
+ V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0)
+
+#define PPC_XX2_OPCODE_UNUSED_LIST(V) \
+ /* VSX Scalar Square Root Double-Precision */ \
+ V(xssqrtdp, XSSQRTDP, 0xF000012C) \
+ /* VSX Scalar Reciprocal Estimate Single-Precision */ \
+ V(xsresp, XSRESP, 0xF0000068) \
+ /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */ \
+ V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \
+ /* VSX Scalar Square Root Single-Precision */ \
+ V(xssqrtsp, XSSQRTSP, 0xF000002C) \
/* Move To VSR Doubleword */ \
V(mtvsrd, MTVSRD, 0x7C000166) \
/* Move To VSR Word Algebraic */ \
@@ -423,18 +455,8 @@ using Instr = uint32_t;
V(xsrsp, XSRSP, 0xF0000464) \
/* VSX Scalar Reciprocal Square Root Estimate Double-Precision */ \
V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128) \
- /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */ \
- V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \
- /* VSX Scalar Square Root Double-Precision */ \
- V(xssqrtdp, XSSQRTDP, 0xF000012C) \
- /* VSX Scalar Square Root Single-Precision */ \
- V(xssqrtsp, XSSQRTSP, 0xF000002C) \
/* VSX Scalar Test for software Square Root Double-Precision */ \
V(xstsqrtdp, XSTSQRTDP, 0xF00001A8) \
- /* VSX Vector Absolute Value Double-Precision */ \
- V(xvabsdp, XVABSDP, 0xF0000764) \
- /* VSX Vector Absolute Value Single-Precision */ \
- V(xvabssp, XVABSSP, 0xF0000664) \
/* VSX Vector Convert Double-Precision to Single-Precision */ \
V(xvcvdpsp, XVCVDPSP, 0xF0000624) \
/* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \
@@ -454,23 +476,15 @@ using Instr = uint32_t;
/* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \
/* Saturate */ \
V(xvcvspsxds, XVCVSPSXDS, 0xF0000660) \
- /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \
/* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */ \
/* Doubleword Saturate */ \
V(xvcvspuxds, XVCVSPUXDS, 0xF0000620) \
- /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \
/* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \
/* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \
V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0) \
/* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */ \
V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0) \
- /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \
- V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \
/* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \
/* Precision */ \
V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \
@@ -479,16 +493,10 @@ using Instr = uint32_t;
V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0) \
/* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */ \
V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0) \
- /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \
- V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \
/* VSX Vector Negative Absolute Value Double-Precision */ \
V(xvnabsdp, XVNABSDP, 0xF00007A4) \
/* VSX Vector Negative Absolute Value Single-Precision */ \
V(xvnabssp, XVNABSSP, 0xF00006A4) \
- /* VSX Vector Negate Double-Precision */ \
- V(xvnegdp, XVNEGDP, 0xF00007E4) \
- /* VSX Vector Negate Single-Precision */ \
- V(xvnegsp, XVNEGSP, 0xF00006E4) \
/* VSX Vector Round to Double-Precision Integer */ \
V(xvrdpi, XVRDPI, 0xF0000324) \
/* VSX Vector Round to Double-Precision Integer using Current rounding */ \
@@ -513,17 +521,15 @@ using Instr = uint32_t;
V(xvrspiz, XVRSPIZ, 0xF0000264) \
/* VSX Vector Reciprocal Square Root Estimate Double-Precision */ \
V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328) \
- /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \
- V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \
- /* VSX Vector Square Root Double-Precision */ \
- V(xvsqrtdp, XVSQRTDP, 0xF000032C) \
- /* VSX Vector Square Root Single-Precision */ \
- V(xvsqrtsp, XVSQRTSP, 0xF000022C) \
/* VSX Vector Test for software Square Root Double-Precision */ \
V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8) \
/* VSX Vector Test for software Square Root Single-Precision */ \
V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8)
+#define PPC_XX2_OPCODE_LIST(V) \
+ PPC_XX2_OPCODE_A_FORM_LIST(V) \
+ PPC_XX2_OPCODE_UNUSED_LIST(V)
+
#define PPC_EVX_OPCODE_LIST(V) \
/* Vector Load Double Word into Double Word by External PID Indexed */ \
V(evlddepx, EVLDDEPX, 0x7C00063E) \
@@ -1707,8 +1713,6 @@ using Instr = uint32_t;
V(stvewx, STVEWX, 0x7C00018E) \
/* Store Vector Indexed Last */ \
V(stvxl, STVXL, 0x7C0003CE) \
- /* Vector Minimum Signed Doubleword */ \
- V(vminsd, VMINSD, 0x100003C2) \
/* Floating Merge Even Word */ \
V(fmrgew, FMRGEW, 0xFC00078C) \
/* Floating Merge Odd Word */ \
@@ -1920,7 +1924,15 @@ using Instr = uint32_t;
/* Floating Reciprocal Square Root Estimate Single */ \
V(frsqrtes, FRSQRTES, 0xEC000034)
-#define PPC_VA_OPCODE_LIST(V) \
+#define PPC_VA_OPCODE_A_FORM_LIST(V) \
+ /* Vector Permute */ \
+ V(vperm, VPERM, 0x1000002B) \
+ /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
+ V(vmladduhm, VMLADDUHM, 0x10000022) \
+ /* Vector Select */ \
+ V(vsel, VSEL, 0x1000002A)
+
+#define PPC_VA_OPCODE_UNUSED_LIST(V) \
/* Vector Add Extended & write Carry Unsigned Quadword */ \
V(vaddecuq, VADDECUQ, 0x1000003D) \
/* Vector Add Extended Unsigned Quadword Modulo */ \
@@ -1931,8 +1943,6 @@ using Instr = uint32_t;
V(vmhaddshs, VMHADDSHS, 0x10000020) \
/* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
V(vmhraddshs, VMHRADDSHS, 0x10000021) \
- /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
- V(vmladduhm, VMLADDUHM, 0x10000022) \
/* Vector Multiply-Sum Mixed Byte Modulo */ \
V(vmsummbm, VMSUMMBM, 0x10000025) \
/* Vector Multiply-Sum Signed Halfword Modulo */ \
@@ -1947,10 +1957,6 @@ using Instr = uint32_t;
V(vmsumuhs, VMSUMUHS, 0x10000027) \
/* Vector Negative Multiply-Subtract Single-Precision */ \
V(vnmsubfp, VNMSUBFP, 0x1000002F) \
- /* Vector Permute */ \
- V(vperm, VPERM, 0x1000002B) \
- /* Vector Select */ \
- V(vsel, VSEL, 0x1000002A) \
/* Vector Shift Left Double by Octet Immediate */ \
V(vsldoi, VSLDOI, 0x1000002C) \
/* Vector Subtract Extended & write Carry Unsigned Quadword */ \
@@ -1960,6 +1966,10 @@ using Instr = uint32_t;
/* Vector Permute and Exclusive-OR */ \
V(vpermxor, VPERMXOR, 0x1000002D)
+#define PPC_VA_OPCODE_LIST(V) \
+ PPC_VA_OPCODE_A_FORM_LIST(V) \
+ PPC_VA_OPCODE_UNUSED_LIST(V)
+
#define PPC_XX1_OPCODE_LIST(V) \
/* Load VSR Scalar Doubleword Indexed */ \
V(lxsdx, LXSDX, 0x7C000498) \
@@ -2200,6 +2210,150 @@ using Instr = uint32_t;
/* Vector Splat Halfword */ \
V(vsplth, VSPLTH, 0x1000024C)
+#define PPC_VX_OPCODE_B_FORM_LIST(V) \
+ /* Vector Logical OR */ \
+ V(vor, VOR, 0x10000484) \
+ /* Vector Logical XOR */ \
+ V(vxor, VXOR, 0x100004C4) \
+ /* Vector Logical NOR */ \
+ V(vnor, VNOR, 0x10000504) \
+ /* Vector Shift Right by Octet */ \
+ V(vsro, VSRO, 0x1000044C) \
+ /* Vector Shift Left by Octet */ \
+ V(vslo, VSLO, 0x1000040C) \
+ /* Vector Add Unsigned Doubleword Modulo */ \
+ V(vaddudm, VADDUDM, 0x100000C0) \
+ /* Vector Add Unsigned Word Modulo */ \
+ V(vadduwm, VADDUWM, 0x10000080) \
+ /* Vector Add Unsigned Halfword Modulo */ \
+ V(vadduhm, VADDUHM, 0x10000040) \
+ /* Vector Add Unsigned Byte Modulo */ \
+ V(vaddubm, VADDUBM, 0x10000000) \
+ /* Vector Add Single-Precision */ \
+ V(vaddfp, VADDFP, 0x1000000A) \
+ /* Vector Subtract Single-Precision */ \
+ V(vsubfp, VSUBFP, 0x1000004A) \
+ /* Vector Subtract Unsigned Doubleword Modulo */ \
+ V(vsubudm, VSUBUDM, 0x100004C0) \
+ /* Vector Subtract Unsigned Word Modulo */ \
+ V(vsubuwm, VSUBUWM, 0x10000480) \
+ /* Vector Subtract Unsigned Halfword Modulo */ \
+ V(vsubuhm, VSUBUHM, 0x10000440) \
+ /* Vector Subtract Unsigned Byte Modulo */ \
+ V(vsububm, VSUBUBM, 0x10000400) \
+ /* Vector Multiply Unsigned Word Modulo */ \
+ V(vmuluwm, VMULUWM, 0x10000089) \
+ /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
+ V(vpkuhum, VPKUHUM, 0x1000000E) \
+ /* Vector Multiply Even Unsigned Byte */ \
+ V(vmuleub, VMULEUB, 0x10000208) \
+ /* Vector Multiply Odd Unsigned Byte */ \
+ V(vmuloub, VMULOUB, 0x10000008) \
+ /* Vector Sum across Quarter Signed Halfword Saturate */ \
+ V(vsum4shs, VSUM4SHS, 0x10000648) \
+ /* Vector Pack Unsigned Word Unsigned Saturate */ \
+ V(vpkuwus, VPKUWUS, 0x100000CE) \
+ /* Vector Sum across Half Signed Word Saturate */ \
+ V(vsum2sws, VSUM2SWS, 0x10000688) \
+ /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
+ V(vpkudum, VPKUDUM, 0x1000044E) \
+ /* Vector Maximum Signed Byte */ \
+ V(vmaxsb, VMAXSB, 0x10000102) \
+ /* Vector Maximum Unsigned Byte */ \
+ V(vmaxub, VMAXUB, 0x10000002) \
+ /* Vector Maximum Signed Doubleword */ \
+ V(vmaxsd, VMAXSD, 0x100001C2) \
+ /* Vector Maximum Unsigned Doubleword */ \
+ V(vmaxud, VMAXUD, 0x100000C2) \
+ /* Vector Maximum Signed Halfword */ \
+ V(vmaxsh, VMAXSH, 0x10000142) \
+ /* Vector Maximum Unsigned Halfword */ \
+ V(vmaxuh, VMAXUH, 0x10000042) \
+ /* Vector Maximum Signed Word */ \
+ V(vmaxsw, VMAXSW, 0x10000182) \
+ /* Vector Maximum Unsigned Word */ \
+ V(vmaxuw, VMAXUW, 0x10000082) \
+ /* Vector Minimum Signed Byte */ \
+ V(vminsb, VMINSB, 0x10000302) \
+ /* Vector Minimum Unsigned Byte */ \
+ V(vminub, VMINUB, 0x10000202) \
+ /* Vector Minimum Signed Doubleword */ \
+ V(vminsd, VMINSD, 0x100003C2) \
+ /* Vector Minimum Unsigned Doubleword */ \
+ V(vminud, VMINUD, 0x100002C2) \
+ /* Vector Minimum Signed Halfword */ \
+ V(vminsh, VMINSH, 0x10000342) \
+ /* Vector Minimum Unsigned Halfword */ \
+ V(vminuh, VMINUH, 0x10000242) \
+ /* Vector Minimum Signed Word */ \
+ V(vminsw, VMINSW, 0x10000382) \
+ /* Vector Minimum Unsigned Word */ \
+ V(vminuw, VMINUW, 0x10000282) \
+ /* Vector Shift Left Byte */ \
+ V(vslb, VSLB, 0x10000104) \
+ /* Vector Shift Left Word */ \
+ V(vslw, VSLW, 0x10000184) \
+ /* Vector Shift Left Halfword */ \
+ V(vslh, VSLH, 0x10000144) \
+ /* Vector Shift Left Doubleword */ \
+ V(vsld, VSLD, 0x100005C4) \
+ /* Vector Shift Right Byte */ \
+ V(vsrb, VSRB, 0x10000204) \
+ /* Vector Shift Right Word */ \
+ V(vsrw, VSRW, 0x10000284) \
+ /* Vector Shift Right Halfword */ \
+ V(vsrh, VSRH, 0x10000244) \
+ /* Vector Shift Right Doubleword */ \
+ V(vsrd, VSRD, 0x100006C4) \
+ /* Vector Shift Right Algebraic Byte */ \
+ V(vsrab, VSRAB, 0x10000304) \
+ /* Vector Shift Right Algebraic Word */ \
+ V(vsraw, VSRAW, 0x10000384) \
+ /* Vector Shift Right Algebraic Halfword */ \
+ V(vsrah, VSRAH, 0x10000344) \
+ /* Vector Shift Right Algebraic Doubleword */ \
+ V(vsrad, VSRAD, 0x100003C4) \
+ /* Vector Logical AND */ \
+ V(vand, VAND, 0x10000404) \
+ /* Vector Pack Signed Word Signed Saturate */ \
+ V(vpkswss, VPKSWSS, 0x100001CE) \
+ /* Vector Pack Signed Word Unsigned Saturate */ \
+ V(vpkswus, VPKSWUS, 0x1000014E) \
+ /* Vector Pack Signed Halfword Signed Saturate */ \
+ V(vpkshss, VPKSHSS, 0x1000018E) \
+ /* Vector Pack Signed Halfword Unsigned Saturate */ \
+ V(vpkshus, VPKSHUS, 0x1000010E) \
+ /* Vector Add Signed Halfword Saturate */ \
+ V(vaddshs, VADDSHS, 0x10000340) \
+ /* Vector Subtract Signed Halfword Saturate */ \
+ V(vsubshs, VSUBSHS, 0x10000740) \
+ /* Vector Add Unsigned Halfword Saturate */ \
+ V(vadduhs, VADDUHS, 0x10000240) \
+ /* Vector Subtract Unsigned Halfword Saturate */ \
+ V(vsubuhs, VSUBUHS, 0x10000640) \
+ /* Vector Add Signed Byte Saturate */ \
+ V(vaddsbs, VADDSBS, 0x10000300) \
+ /* Vector Subtract Signed Byte Saturate */ \
+ V(vsubsbs, VSUBSBS, 0x10000700) \
+ /* Vector Add Unsigned Byte Saturate */ \
+ V(vaddubs, VADDUBS, 0x10000200) \
+ /* Vector Subtract Unsigned Byte Saturate */ \
+ V(vsububs, VSUBUBS, 0x10000600) \
+ /* Vector Average Unsigned Byte */ \
+ V(vavgub, VAVGUB, 0x10000402) \
+ /* Vector Average Unsigned Halfword */ \
+ V(vavguh, VAVGUH, 0x10000442)
+
+#define PPC_VX_OPCODE_C_FORM_LIST(V) \
+ /* Vector Unpack Low Signed Halfword */ \
+ V(vupklsh, VUPKLSH, 0x100002CE) \
+ /* Vector Unpack High Signed Halfword */ \
+ V(vupkhsh, VUPKHSH, 0x1000024E) \
+ /* Vector Unpack Low Signed Byte */ \
+ V(vupklsb, VUPKLSB, 0x1000028E) \
+ /* Vector Unpack High Signed Byte */ \
+ V(vupkhsb, VUPKHSB, 0x1000020E)
+
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
V(bcdadd, BCDADD, 0xF0000400) \
@@ -2213,32 +2367,12 @@ using Instr = uint32_t;
V(vaddcuq, VADDCUQ, 0x10000140) \
/* Vector Add and Write Carry-Out Unsigned Word */ \
V(vaddcuw, VADDCUW, 0x10000180) \
- /* Vector Add Single-Precision */ \
- V(vaddfp, VADDFP, 0x1000000A) \
- /* Vector Add Signed Byte Saturate */ \
- V(vaddsbs, VADDSBS, 0x10000300) \
- /* Vector Add Signed Halfword Saturate */ \
- V(vaddshs, VADDSHS, 0x10000340) \
/* Vector Add Signed Word Saturate */ \
V(vaddsws, VADDSWS, 0x10000380) \
- /* Vector Add Unsigned Byte Modulo */ \
- V(vaddubm, VADDUBM, 0x10000000) \
- /* Vector Add Unsigned Byte Saturate */ \
- V(vaddubs, VADDUBS, 0x10000200) \
- /* Vector Add Unsigned Doubleword Modulo */ \
- V(vaddudm, VADDUDM, 0x100000C0) \
- /* Vector Add Unsigned Halfword Modulo */ \
- V(vadduhm, VADDUHM, 0x10000040) \
- /* Vector Add Unsigned Halfword Saturate */ \
- V(vadduhs, VADDUHS, 0x10000240) \
/* Vector Add Unsigned Quadword Modulo */ \
V(vadduqm, VADDUQM, 0x10000100) \
- /* Vector Add Unsigned Word Modulo */ \
- V(vadduwm, VADDUWM, 0x10000080) \
/* Vector Add Unsigned Word Saturate */ \
V(vadduws, VADDUWS, 0x10000280) \
- /* Vector Logical AND */ \
- V(vand, VAND, 0x10000404) \
/* Vector Logical AND with Complement */ \
V(vandc, VANDC, 0x10000444) \
/* Vector Average Signed Byte */ \
@@ -2247,10 +2381,6 @@ using Instr = uint32_t;
V(vavgsh, VAVGSH, 0x10000542) \
/* Vector Average Signed Word */ \
V(vavgsw, VAVGSW, 0x10000582) \
- /* Vector Average Unsigned Byte */ \
- V(vavgub, VAVGUB, 0x10000402) \
- /* Vector Average Unsigned Halfword */ \
- V(vavguh, VAVGUH, 0x10000442) \
/* Vector Average Unsigned Word */ \
V(vavguw, VAVGUW, 0x10000482) \
/* Vector Bit Permute Quadword */ \
@@ -2283,38 +2413,8 @@ using Instr = uint32_t;
V(vlogefp, VLOGEFP, 0x100001CA) \
/* Vector Maximum Single-Precision */ \
V(vmaxfp, VMAXFP, 0x1000040A) \
- /* Vector Maximum Signed Byte */ \
- V(vmaxsb, VMAXSB, 0x10000102) \
- /* Vector Maximum Signed Doubleword */ \
- V(vmaxsd, VMAXSD, 0x100001C2) \
- /* Vector Maximum Signed Halfword */ \
- V(vmaxsh, VMAXSH, 0x10000142) \
- /* Vector Maximum Signed Word */ \
- V(vmaxsw, VMAXSW, 0x10000182) \
- /* Vector Maximum Unsigned Byte */ \
- V(vmaxub, VMAXUB, 0x10000002) \
- /* Vector Maximum Unsigned Doubleword */ \
- V(vmaxud, VMAXUD, 0x100000C2) \
- /* Vector Maximum Unsigned Halfword */ \
- V(vmaxuh, VMAXUH, 0x10000042) \
- /* Vector Maximum Unsigned Word */ \
- V(vmaxuw, VMAXUW, 0x10000082) \
/* Vector Minimum Single-Precision */ \
V(vminfp, VMINFP, 0x1000044A) \
- /* Vector Minimum Signed Byte */ \
- V(vminsb, VMINSB, 0x10000302) \
- /* Vector Minimum Signed Halfword */ \
- V(vminsh, VMINSH, 0x10000342) \
- /* Vector Minimum Signed Word */ \
- V(vminsw, VMINSW, 0x10000382) \
- /* Vector Minimum Unsigned Byte */ \
- V(vminub, VMINUB, 0x10000202) \
- /* Vector Minimum Unsigned Doubleword */ \
- V(vminud, VMINUD, 0x100002C2) \
- /* Vector Minimum Unsigned Halfword */ \
- V(vminuh, VMINUH, 0x10000242) \
- /* Vector Minimum Unsigned Word */ \
- V(vminuw, VMINUW, 0x10000282) \
/* Vector Merge High Byte */ \
V(vmrghb, VMRGHB, 0x1000000C) \
/* Vector Merge High Halfword */ \
@@ -2333,8 +2433,6 @@ using Instr = uint32_t;
V(vmulesh, VMULESH, 0x10000348) \
/* Vector Multiply Even Signed Word */ \
V(vmulesw, VMULESW, 0x10000388) \
- /* Vector Multiply Even Unsigned Byte */ \
- V(vmuleub, VMULEUB, 0x10000208) \
/* Vector Multiply Even Unsigned Halfword */ \
V(vmuleuh, VMULEUH, 0x10000248) \
/* Vector Multiply Even Unsigned Word */ \
@@ -2345,20 +2443,12 @@ using Instr = uint32_t;
V(vmulosh, VMULOSH, 0x10000148) \
/* Vector Multiply Odd Signed Word */ \
V(vmulosw, VMULOSW, 0x10000188) \
- /* Vector Multiply Odd Unsigned Byte */ \
- V(vmuloub, VMULOUB, 0x10000008) \
/* Vector Multiply Odd Unsigned Halfword */ \
V(vmulouh, VMULOUH, 0x10000048) \
/* Vector Multiply Odd Unsigned Word */ \
V(vmulouw, VMULOUW, 0x10000088) \
- /* Vector Multiply Unsigned Word Modulo */ \
- V(vmuluwm, VMULUWM, 0x10000089) \
/* Vector NAND */ \
V(vnand, VNAND, 0x10000584) \
- /* Vector Logical NOR */ \
- V(vnor, VNOR, 0x10000504) \
- /* Vector Logical OR */ \
- V(vor, VOR, 0x10000484) \
/* Vector OR with Complement */ \
V(vorc, VORC, 0x10000544) \
/* Vector Pack Pixel */ \
@@ -2367,26 +2457,12 @@ using Instr = uint32_t;
V(vpksdss, VPKSDSS, 0x100005CE) \
/* Vector Pack Signed Doubleword Unsigned Saturate */ \
V(vpksdus, VPKSDUS, 0x1000054E) \
- /* Vector Pack Signed Halfword Signed Saturate */ \
- V(vpkshss, VPKSHSS, 0x1000018E) \
- /* Vector Pack Signed Halfword Unsigned Saturate */ \
- V(vpkshus, VPKSHUS, 0x1000010E) \
- /* Vector Pack Signed Word Signed Saturate */ \
- V(vpkswss, VPKSWSS, 0x100001CE) \
- /* Vector Pack Signed Word Unsigned Saturate */ \
- V(vpkswus, VPKSWUS, 0x1000014E) \
- /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
- V(vpkudum, VPKUDUM, 0x1000044E) \
/* Vector Pack Unsigned Doubleword Unsigned Saturate */ \
V(vpkudus, VPKUDUS, 0x100004CE) \
- /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
- V(vpkuhum, VPKUHUM, 0x1000000E) \
/* Vector Pack Unsigned Halfword Unsigned Saturate */ \
V(vpkuhus, VPKUHUS, 0x1000008E) \
/* Vector Pack Unsigned Word Unsigned Modulo */ \
V(vpkuwum, VPKUWUM, 0x1000004E) \
- /* Vector Pack Unsigned Word Unsigned Saturate */ \
- V(vpkuwus, VPKUWUS, 0x100000CE) \
/* Vector Polynomial Multiply-Sum Byte */ \
V(vpmsumb, VPMSUMB, 0x10000408) \
/* Vector Polynomial Multiply-Sum Doubleword */ \
@@ -2425,16 +2501,6 @@ using Instr = uint32_t;
V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
/* Vector Shift Left */ \
V(vsl, VSL, 0x100001C4) \
- /* Vector Shift Left Byte */ \
- V(vslb, VSLB, 0x10000104) \
- /* Vector Shift Left Doubleword */ \
- V(vsld, VSLD, 0x100005C4) \
- /* Vector Shift Left Halfword */ \
- V(vslh, VSLH, 0x10000144) \
- /* Vector Shift Left by Octet */ \
- V(vslo, VSLO, 0x1000040C) \
- /* Vector Shift Left Word */ \
- V(vslw, VSLW, 0x10000184) \
/* Vector Splat Immediate Signed Byte */ \
V(vspltisb, VSPLTISB, 0x1000030C) \
/* Vector Splat Immediate Signed Halfword */ \
@@ -2443,80 +2509,30 @@ using Instr = uint32_t;
V(vspltisw, VSPLTISW, 0x1000038C) \
/* Vector Shift Right */ \
V(vsr, VSR, 0x100002C4) \
- /* Vector Shift Right Algebraic Byte */ \
- V(vsrab, VSRAB, 0x10000304) \
- /* Vector Shift Right Algebraic Doubleword */ \
- V(vsrad, VSRAD, 0x100003C4) \
- /* Vector Shift Right Algebraic Halfword */ \
- V(vsrah, VSRAH, 0x10000344) \
- /* Vector Shift Right Algebraic Word */ \
- V(vsraw, VSRAW, 0x10000384) \
- /* Vector Shift Right Byte */ \
- V(vsrb, VSRB, 0x10000204) \
- /* Vector Shift Right Doubleword */ \
- V(vsrd, VSRD, 0x100006C4) \
- /* Vector Shift Right Halfword */ \
- V(vsrh, VSRH, 0x10000244) \
- /* Vector Shift Right by Octet */ \
- V(vsro, VSRO, 0x1000044C) \
- /* Vector Shift Right Word */ \
- V(vsrw, VSRW, 0x10000284) \
/* Vector Subtract & write Carry Unsigned Quadword */ \
V(vsubcuq, VSUBCUQ, 0x10000540) \
/* Vector Subtract and Write Carry-Out Unsigned Word */ \
V(vsubcuw, VSUBCUW, 0x10000580) \
- /* Vector Subtract Single-Precision */ \
- V(vsubfp, VSUBFP, 0x1000004A) \
- /* Vector Subtract Signed Byte Saturate */ \
- V(vsubsbs, VSUBSBS, 0x10000700) \
- /* Vector Subtract Signed Halfword Saturate */ \
- V(vsubshs, VSUBSHS, 0x10000740) \
/* Vector Subtract Signed Word Saturate */ \
V(vsubsws, VSUBSWS, 0x10000780) \
- /* Vector Subtract Unsigned Byte Modulo */ \
- V(vsububm, VSUBUBM, 0x10000400) \
- /* Vector Subtract Unsigned Byte Saturate */ \
- V(vsububs, VSUBUBS, 0x10000600) \
- /* Vector Subtract Unsigned Doubleword Modulo */ \
- V(vsubudm, VSUBUDM, 0x100004C0) \
- /* Vector Subtract Unsigned Halfword Modulo */ \
- V(vsubuhm, VSUBUHM, 0x10000440) \
- /* Vector Subtract Unsigned Halfword Saturate */ \
- V(vsubuhs, VSUBUHS, 0x10000640) \
/* Vector Subtract Unsigned Quadword Modulo */ \
V(vsubuqm, VSUBUQM, 0x10000500) \
- /* Vector Subtract Unsigned Word Modulo */ \
- V(vsubuwm, VSUBUWM, 0x10000480) \
/* Vector Subtract Unsigned Word Saturate */ \
V(vsubuws, VSUBUWS, 0x10000680) \
- /* Vector Sum across Half Signed Word Saturate */ \
- V(vsum2sws, VSUM2SWS, 0x10000688) \
/* Vector Sum across Quarter Signed Byte Saturate */ \
V(vsum4sbs, VSUM4SBS, 0x10000708) \
- /* Vector Sum across Quarter Signed Halfword Saturate */ \
- V(vsum4shs, VSUM4SHS, 0x10000648) \
/* Vector Sum across Quarter Unsigned Byte Saturate */ \
V(vsum4bus, VSUM4BUS, 0x10000608) \
/* Vector Sum across Signed Word Saturate */ \
V(vsumsws, VSUMSWS, 0x10000788) \
/* Vector Unpack High Pixel */ \
V(vupkhpx, VUPKHPX, 0x1000034E) \
- /* Vector Unpack High Signed Byte */ \
- V(vupkhsb, VUPKHSB, 0x1000020E) \
- /* Vector Unpack High Signed Halfword */ \
- V(vupkhsh, VUPKHSH, 0x1000024E) \
/* Vector Unpack High Signed Word */ \
V(vupkhsw, VUPKHSW, 0x1000064E) \
/* Vector Unpack Low Pixel */ \
V(vupklpx, VUPKLPX, 0x100003CE) \
- /* Vector Unpack Low Signed Byte */ \
- V(vupklsb, VUPKLSB, 0x1000028E) \
- /* Vector Unpack Low Signed Halfword */ \
- V(vupklsh, VUPKLSH, 0x100002CE) \
/* Vector Unpack Low Signed Word */ \
V(vupklsw, VUPKLSW, 0x100006CE) \
- /* Vector Logical XOR */ \
- V(vxor, VXOR, 0x100004C4) \
/* Vector AES Cipher */ \
V(vcipher, VCIPHER, 0x10000508) \
/* Vector AES Cipher Last */ \
@@ -2538,6 +2554,8 @@ using Instr = uint32_t;
#define PPC_VX_OPCODE_LIST(V) \
PPC_VX_OPCODE_A_FORM_LIST(V) \
+ PPC_VX_OPCODE_B_FORM_LIST(V) \
+ PPC_VX_OPCODE_C_FORM_LIST(V) \
PPC_VX_OPCODE_UNUSED_LIST(V)
#define PPC_XS_OPCODE_LIST(V) \
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
index cd0ab1a328..cab95e2f41 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
@@ -46,11 +46,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
- return r4;
-}
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return r3; }
-
const Register LoadDescriptor::ReceiverRegister() { return r4; }
const Register LoadDescriptor::NameRegister() { return r5; }
const Register LoadDescriptor::SlotRegister() { return r3; }
@@ -191,11 +186,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3};
@@ -295,6 +285,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 3cf819f102..c99977a5be 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -1318,7 +1318,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract.
- // ARM has some sanity checks as per below, considering add them for PPC
+ // ARM has some checks as per below, considering add them for PPC
// DCHECK_EQ(actual_parameter_count, r3);
// DCHECK_EQ(expected_parameter_count, r5);
@@ -2099,7 +2099,7 @@ void TurboAssembler::CheckPageFlag(
int mask, Condition cc, Label* condition_met) {
DCHECK(cc == ne || cc == eq);
ClearRightImm(scratch, object, Operand(kPageSizeBits));
- LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
mov(r0, Operand(mask));
and_(r0, scratch, r0, SetRC);
@@ -2828,6 +2828,24 @@ void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
}
}
+void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
+ Register ScratchReg,
+ Simd128Register ScratchDoubleReg) {
+ // lvx needs the stack to be 16 byte aligned.
+  // We first use lxvd/stxvd to copy the content to an aligned address. lxvd
+  // itself reverses the lanes, so it cannot be used as is.
+ lxvd(ScratchDoubleReg, mem);
+ mr(ScratchReg, sp);
+ ClearRightImm(
+ sp, sp,
+ Operand(base::bits::WhichPowerOfTwo(16))); // equivalent to &= -16
+ addi(sp, sp, Operand(-16));
+ stxvd(kScratchDoubleReg, MemOperand(r0, sp));
+ // Load it with correct lane ordering.
+ lvx(dst, MemOperand(r0, sp));
+ mr(sp, ScratchReg);
+}
+
void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
@@ -2880,6 +2898,23 @@ void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
}
}
+void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem,
+ Register ScratchReg,
+ Simd128Register ScratchDoubleReg) {
+ // stvx needs the stack to be 16 byte aligned.
+  // We use lxvd/stxvd to store the content to an aligned address. stxvd
+  // itself reverses the lanes, so it cannot be used as is.
+ mr(ScratchReg, sp);
+ ClearRightImm(
+ sp, sp,
+ Operand(base::bits::WhichPowerOfTwo(16))); // equivalent to &= -16
+ addi(sp, sp, Operand(-16));
+ stvx(src, MemOperand(r0, sp));
+ lxvd(ScratchDoubleReg, MemOperand(r0, sp));
+ mr(sp, ScratchReg);
+ stxvd(ScratchDoubleReg, mem);
+}
+
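
The LoadSimd128/StoreSimd128 helpers above align the stack for lvx/stvx by clearing the low log2(16) = 4 bits of sp (ClearRightImm with WhichPowerOfTwo(16)), which is the same as sp &= -16; they then subtract another 16 bytes, presumably so the scratch slot sits entirely below any live data at or above the original sp. A minimal standalone sketch of that alignment arithmetic, in plain C++ rather than PPC code:

    #include <cassert>
    #include <cstdint>

    // Align an address down to a 16-byte boundary by clearing its low 4 bits;
    // clearing the low bits is the same as ANDing with -16.
    uint64_t AlignDown16(uint64_t sp) {
      return sp & ~uint64_t{15};  // equivalent to sp &= -16
    }

    int main() {
      assert(AlignDown16(0x1000) == 0x1000);  // already aligned: unchanged
      assert(AlignDown16(0x100F) == 0x1000);  // always rounds down, never up
      assert(AlignDown16(0x1010) == 0x1010);
      return 0;
    }
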
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
@@ -3006,6 +3041,53 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
StoreDouble(scratch_1, src, r0);
}
+void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
+ Simd128Register scratch) {
+ if (src == dst) return;
+ vor(scratch, src, src);
+ vor(src, dst, dst);
+ vor(dst, scratch, scratch);
+}
+
+void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
+ Simd128Register scratch) {
+ DCHECK(!AreAliased(src, scratch));
+ // push d0, to be used as scratch
+ addi(sp, sp, Operand(-kSimd128Size));
+ StoreSimd128(d0, MemOperand(r0, sp), r0, scratch);
+ mov(ip, Operand(dst.offset()));
+ LoadSimd128(d0, MemOperand(dst.ra(), ip), r0, scratch);
+ StoreSimd128(src, MemOperand(dst.ra(), ip), r0, scratch);
+ vor(src, d0, d0);
+ // restore d0
+ LoadSimd128(d0, MemOperand(r0, sp), ip, scratch);
+ addi(sp, sp, Operand(kSimd128Size));
+}
+
+void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
+ Simd128Register scratch) {
+ // push d0 and d1, to be used as scratch
+ addi(sp, sp, Operand(2 * -kSimd128Size));
+ StoreSimd128(d0, MemOperand(r0, sp), ip, scratch);
+ li(ip, Operand(kSimd128Size));
+ StoreSimd128(d1, MemOperand(ip, sp), r0, scratch);
+
+ mov(ip, Operand(src.offset()));
+ LoadSimd128(d0, MemOperand(src.ra(), ip), r0, scratch);
+ mov(ip, Operand(dst.offset()));
+ LoadSimd128(d1, MemOperand(dst.ra(), ip), r0, scratch);
+
+ StoreSimd128(d0, MemOperand(dst.ra(), ip), r0, scratch);
+ mov(ip, Operand(src.offset()));
+ StoreSimd128(d1, MemOperand(src.ra(), ip), r0, scratch);
+
+ // restore d0 and d1
+ LoadSimd128(d0, MemOperand(r0, sp), ip, scratch);
+ li(ip, Operand(kSimd128Size));
+ LoadSimd128(d1, MemOperand(ip, sp), r0, scratch);
+ addi(sp, sp, Operand(2 * kSimd128Size));
+}
+
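
The register-to-register SwapSimd128 above is the usual three-move swap through the scratch register (vor with both sources equal acts as a register copy). The MemOperand-to-MemOperand variant appears to have only the one scratch vector register free (it is consumed by LoadSimd128/StoreSimd128 themselves), so it spills d0 and d1 to the stack, uses them to carry both operands, stores them crosswise, and restores the spilled registers. A standalone sketch of that spill/swap/restore shape, with plain C++ values standing in for vector registers and stack slots:

    #include <cassert>
    #include <cstdint>

    // Swap two memory slots when every "register" is live: spill two temporaries,
    // use them to carry the operands, store crosswise, then restore the
    // temporaries, mirroring the d0/d1 handling above.
    void SwapMemViaSpilledTemps(uint64_t* slot_a, uint64_t* slot_b,
                                uint64_t& d0, uint64_t& d1) {
      uint64_t saved_d0 = d0, saved_d1 = d1;  // "push d0 and d1"
      d0 = *slot_a;                           // load both operands
      d1 = *slot_b;
      *slot_b = d0;                           // store crosswise
      *slot_a = d1;
      d0 = saved_d0;                          // "restore d0 and d1"
      d1 = saved_d1;
    }

    int main() {
      uint64_t a = 1, b = 2, d0 = 7, d1 = 8;
      SwapMemViaSpilledTemps(&a, &b, d0, d1);
      assert(a == 2 && b == 1 && d0 == 7 && d1 == 8);
      return 0;
    }
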
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index cea89a472c..0e9c764add 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -153,6 +153,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadFloat32(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);
+ void LoadSimd128(Simd128Register dst, const MemOperand& mem,
+ Register ScratchReg, Simd128Register ScratchDoubleReg);
// load a literal signed int value <value> to GPR <dst>
void LoadIntLiteral(Register dst, int value);
@@ -175,6 +177,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch = no_reg);
void StoreSingleU(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
+ void StoreSimd128(Simd128Register src, const MemOperand& mem,
+ Register ScratchReg, Simd128Register ScratchDoubleReg);
void Cmpi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
@@ -326,6 +330,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
DoubleRegister scratch_1);
+ void SwapSimd128(Simd128Register src, Simd128Register dst,
+ Simd128Register scratch);
+ void SwapSimd128(Simd128Register src, MemOperand dst,
+ Simd128Register scratch);
+ void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
diff --git a/deps/v8/src/codegen/register.h b/deps/v8/src/codegen/register.h
index bf499668bb..2dcf0fbe8f 100644
--- a/deps/v8/src/codegen/register.h
+++ b/deps/v8/src/codegen/register.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_REGISTER_H_
#define V8_CODEGEN_REGISTER_H_
+#include "src/base/bounds.h"
#include "src/codegen/reglist.h"
namespace v8 {
@@ -32,10 +33,7 @@ class RegisterBase {
static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
static constexpr SubType from_code(int code) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_LE(0, code);
- DCHECK_GT(kNumRegisters, code);
-#endif
+ CONSTEXPR_DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
return SubType{code};
}
@@ -47,9 +45,7 @@ class RegisterBase {
constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
constexpr int code() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(is_valid());
-#endif
+ CONSTEXPR_DCHECK(is_valid());
return reg_code_;
}
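
The from_code()/code() accessors above now express their bounds checks with CONSTEXPR_DCHECK, and the range test moves into base::IsInRange(code, 0, kNumRegisters - 1), i.e. an inclusive lower <= value <= upper check. A standalone sketch of such an inclusive range predicate (the single-comparison form shown is a common way to write it, not necessarily how V8's base::IsInRange is implemented):

    // Inclusive range check: lower <= value <= higher, done with one unsigned
    // comparison. Values below `lower` wrap around to large unsigned numbers.
    constexpr bool IsInRangeSketch(int value, int lower, int higher) {
      return static_cast<unsigned>(value - lower) <=
             static_cast<unsigned>(higher - lower);
    }

    constexpr int kNumRegisters = 16;  // illustrative register count
    static_assert(IsInRangeSketch(0, 0, kNumRegisters - 1), "in range");
    static_assert(IsInRangeSketch(15, 0, kNumRegisters - 1), "upper bound inclusive");
    static_assert(!IsInRangeSketch(16, 0, kNumRegisters - 1), "one past the end");
    static_assert(!IsInRangeSketch(-1, 0, kNumRegisters - 1), "negative codes rejected");

    int main() { return 0; }
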
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 2e62c6f1f1..d984b1e917 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -369,7 +369,7 @@ void RelocInfo::set_target_address(Address target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
IsCodeTargetMode(rmode_) && !FLAG_disable_write_barriers) {
Code target_code = Code::GetCodeFromTargetAddress(target);
- MarkingBarrierForCode(host(), this, target_code);
+ WriteBarrier::Marking(host(), this, target_code);
}
}
@@ -471,7 +471,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
const Address code_target = target_address();
Code code = Code::GetCodeFromTargetAddress(code_target);
DCHECK(code.IsCode());
- os << " (" << Code::Kind2String(code.kind());
+ os << " (" << CodeKindToString(code.kind());
if (Builtins::IsBuiltin(code)) {
os << " " << Builtins::name(code.builtin_index());
}
@@ -480,7 +480,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
// Deoptimization bailouts are stored as runtime entries.
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
- os << " (" << Deoptimizer::MessageFor(type)
+ os << " (" << Deoptimizer::MessageFor(type, false)
<< " deoptimization bailout)";
}
} else if (IsConstPool(rmode_)) {
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index 50ce001103..e6d7dfd01c 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -54,6 +54,8 @@ class RelocInfo {
// Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
// and IsShareableRelocMode predicates below).
+ NONE, // Never recorded value. Most common one, hence value 0.
+
CODE_TARGET,
RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
COMPRESSED_EMBEDDED_OBJECT,
@@ -89,7 +91,6 @@ class RelocInfo {
// Pseudo-types
NUMBER_OF_MODES,
- NONE, // never recorded value
LAST_CODE_TARGET_MODE = RELATIVE_CODE_TARGET,
FIRST_REAL_RELOC_MODE = CODE_TARGET,
@@ -123,10 +124,8 @@ class RelocInfo {
return mode <= LAST_GCED_ENUM;
}
static constexpr bool IsShareableRelocMode(Mode mode) {
- static_assert(RelocInfo::NONE >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE,
- "Users of this function rely on NONE being a sharable "
- "relocation mode.");
- return mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
+ return mode == RelocInfo::NONE ||
+ mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
}
static constexpr bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
static constexpr bool IsCodeTargetMode(Mode mode) {
@@ -329,6 +328,13 @@ class RelocInfo {
static const int kApplyMask; // Modes affected by apply. Depends on arch.
+ static constexpr int AllRealModesMask() {
+ constexpr Mode kFirstUnrealRelocMode =
+ static_cast<Mode>(RelocInfo::LAST_REAL_RELOC_MODE + 1);
+ return (ModeMask(kFirstUnrealRelocMode) - 1) &
+ ~(ModeMask(RelocInfo::FIRST_REAL_RELOC_MODE) - 1);
+ }
+
static int EmbeddedObjectModeMask() {
return ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
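
With NONE moved to enum value 0, the real relocation modes occupy a contiguous range starting at FIRST_REAL_RELOC_MODE, and AllRealModesMask() builds their bit mask from ModeMask(m) == 1 << m: (1 << (last + 1)) - 1 keeps every bit up to the last real mode, and & ~((1 << first) - 1) strips the bits below the first one, including the new NONE bit at position 0. A small standalone sketch of that mask arithmetic:

    #include <cstdio>

    constexpr int ModeMask(int mode) { return 1 << mode; }

    // Bits [first, last] set, mirroring the shape of RelocInfo::AllRealModesMask().
    constexpr int RangeMask(int first, int last) {
      return (ModeMask(last + 1) - 1) & ~(ModeMask(first) - 1);
    }

    static_assert(RangeMask(1, 4) == 0b11110, "bits 1..4 set, bit 0 excluded");
    static_assert(RangeMask(0, 2) == 0b111, "with first == 0 nothing is stripped");

    int main() {
      std::printf("mask for modes 1..4: 0x%x\n", RangeMask(1, 4));
      return 0;
    }
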
diff --git a/deps/v8/src/codegen/s390/constants-s390.h b/deps/v8/src/codegen/s390/constants-s390.h
index 6cd5e4d9fa..5c52435051 100644
--- a/deps/v8/src/codegen/s390/constants-s390.h
+++ b/deps/v8/src/codegen/s390/constants-s390.h
@@ -567,11 +567,12 @@ using SixByteInstr = uint64_t;
V(va, VA, 0xE7F3) /* type = VRR_C VECTOR ADD */ \
V(vscbi, VSCBI, \
0xE7F5) /* type = VRR_C VECTOR SUBTRACT COMPUTE BORROW INDICATION */ \
- V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
- V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
- V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
- V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
- V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */
+ V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
+ V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
+ V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
+ V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
+ V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */ \
+ V(vbperm, VBPERM, 0xE785) /* type = VRR_C VECTOR BIT PERMUTE */
#define S390_VRI_A_OPCODE_LIST(V) \
V(vleib, VLEIB, 0xE740) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (8) */ \
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
index 8e0e9a4cf5..9e341c357b 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
@@ -46,11 +46,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
- return r3;
-}
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return r2; }
-
const Register LoadDescriptor::ReceiverRegister() { return r3; }
const Register LoadDescriptor::NameRegister() { return r4; }
const Register LoadDescriptor::SlotRegister() { return r2; }
@@ -191,11 +186,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
@@ -295,6 +285,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 7e7d1434c4..08adf57ff7 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -1349,7 +1349,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract.
- // ARM has some sanity checks as per below, considering add them for S390
+ // ARM has some checks as per below; consider adding them for S390
DCHECK_EQ(actual_parameter_count, r2);
DCHECK_EQ(expected_parameter_count, r4);
@@ -2024,10 +2024,10 @@ void TurboAssembler::CheckPageFlag(
// Reverse the byte_offset if emulating on little endian platform
byte_offset = kSystemPointerSize - byte_offset - 1;
#endif
- tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
+ tm(MemOperand(scratch, BasicMemoryChunk::kFlagsOffset + byte_offset),
Operand(shifted_mask));
} else {
- LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
AndP(r0, scratch, Operand(mask));
}
// Should be okay to remove rc
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 962b1ea17f..396cc9007f 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -10,26 +10,29 @@
#include "src/diagnostics/disasm.h"
#include "src/execution/frames-inl.h"
#include "src/utils/ostreams.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
+SafepointTable::SafepointTable(Code code)
+ : SafepointTable(code.InstructionStart(), code.SafepointTableAddress(),
+ code.stack_slots(), true) {}
+
+SafepointTable::SafepointTable(const wasm::WasmCode* code)
+ : SafepointTable(code->instruction_start(),
+ code->instruction_start() + code->safepoint_table_offset(),
+ code->stack_slots(), false) {}
+
SafepointTable::SafepointTable(Address instruction_start,
- size_t safepoint_table_offset,
+ Address safepoint_table_address,
uint32_t stack_slots, bool has_deopt)
: instruction_start_(instruction_start),
stack_slots_(stack_slots),
- has_deopt_(has_deopt) {
- Address header = instruction_start_ + safepoint_table_offset;
- length_ = Memory<uint32_t>(header + kLengthOffset);
- entry_size_ = Memory<uint32_t>(header + kEntrySizeOffset);
- pc_and_deoptimization_indexes_ = header + kHeaderSize;
- entries_ = pc_and_deoptimization_indexes_ + (length_ * kFixedEntrySize);
-}
-
-SafepointTable::SafepointTable(Code code)
- : SafepointTable(code.InstructionStart(), code.safepoint_table_offset(),
- code.stack_slots(), true) {}
+ has_deopt_(has_deopt),
+ safepoint_table_address_(safepoint_table_address),
+ length_(ReadLength(safepoint_table_address)),
+ entry_size_(ReadEntrySize(safepoint_table_address)) {}
unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
for (unsigned i = 0; i < length(); i++) {
@@ -40,7 +43,6 @@ unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
}
}
UNREACHABLE();
- return 0;
}
SafepointEntry SafepointTable::FindEntry(Address pc) const {
@@ -60,7 +62,6 @@ SafepointEntry SafepointTable::FindEntry(Address pc) const {
}
}
UNREACHABLE();
- return SafepointEntry();
}
void SafepointTable::PrintEntry(unsigned index,
@@ -90,7 +91,7 @@ void SafepointTable::PrintBits(std::ostream& os, // NOLINT
Safepoint SafepointTableBuilder::DefineSafepoint(
Assembler* assembler, Safepoint::DeoptMode deopt_mode) {
deoptimization_info_.push_back(
- DeoptimizationInfo(zone_, assembler->pc_offset()));
+ DeoptimizationInfo(zone_, assembler->pc_offset_for_safepoint()));
DeoptimizationInfo& new_info = deoptimization_info_.back();
return Safepoint(new_info.indexes);
}
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 1df4311036..ef6ed2f6db 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -15,6 +15,10 @@
namespace v8 {
namespace internal {
+namespace wasm {
+class WasmCode;
+} // namespace wasm
+
class SafepointEntry {
public:
SafepointEntry() : deopt_index_(0), bits_(nullptr), trampoline_pc_(-1) {}
@@ -64,9 +68,7 @@ class SafepointEntry {
class SafepointTable {
public:
explicit SafepointTable(Code code);
- explicit SafepointTable(Address instruction_start,
- size_t safepoint_table_offset, uint32_t stack_slots,
- bool has_deopt = false);
+ explicit SafepointTable(const wasm::WasmCode* code);
int size() const {
return kHeaderSize + (length_ * (kFixedEntrySize + entry_size_));
@@ -90,7 +92,7 @@ class SafepointTable {
DCHECK(index < length_);
unsigned deopt_index =
base::Memory<uint32_t>(GetEncodedInfoLocation(index));
- uint8_t* bits = &base::Memory<uint8_t>(entries_ + (index * entry_size_));
+ uint8_t* bits = &base::Memory<uint8_t>(entries() + (index * entry_size_));
int trampoline_pc =
has_deopt_ ? base::Memory<int>(GetTrampolineLocation(index)) : -1;
return SafepointEntry(deopt_index, bits, trampoline_pc);
@@ -102,9 +104,12 @@ class SafepointTable {
void PrintEntry(unsigned index, std::ostream& os) const; // NOLINT
private:
+ SafepointTable(Address instruction_start, Address safepoint_table_address,
+ uint32_t stack_slots, bool has_deopt);
+
static const uint8_t kNoRegisters = 0xFF;
- // Layout information
+ // Layout information.
static const int kLengthOffset = 0;
static const int kEntrySizeOffset = kLengthOffset + kIntSize;
static const int kHeaderSize = kEntrySizeOffset + kIntSize;
@@ -113,8 +118,21 @@ class SafepointTable {
static const int kTrampolinePcOffset = kEncodedInfoOffset + kIntSize;
static const int kFixedEntrySize = kTrampolinePcOffset + kIntSize;
+ static uint32_t ReadLength(Address table) {
+ return base::Memory<uint32_t>(table + kLengthOffset);
+ }
+ static uint32_t ReadEntrySize(Address table) {
+ return base::Memory<uint32_t>(table + kEntrySizeOffset);
+ }
+ Address pc_and_deoptimization_indexes() const {
+ return safepoint_table_address_ + kHeaderSize;
+ }
+ Address entries() const {
+ return safepoint_table_address_ + kHeaderSize + (length_ * kFixedEntrySize);
+ }
+
Address GetPcOffsetLocation(unsigned index) const {
- return pc_and_deoptimization_indexes_ + (index * kFixedEntrySize);
+ return pc_and_deoptimization_indexes() + (index * kFixedEntrySize);
}
Address GetEncodedInfoLocation(unsigned index) const {
@@ -125,18 +143,18 @@ class SafepointTable {
return GetPcOffsetLocation(index) + kTrampolinePcOffset;
}
- static void PrintBits(std::ostream& os, // NOLINT
- uint8_t byte, int digits);
+ static void PrintBits(std::ostream& os, uint8_t byte, int digits);
DISALLOW_HEAP_ALLOCATION(no_allocation_)
- Address instruction_start_;
- uint32_t stack_slots_;
- unsigned length_;
- unsigned entry_size_;
- Address pc_and_deoptimization_indexes_;
- Address entries_;
- bool has_deopt_;
+ const Address instruction_start_;
+ const uint32_t stack_slots_;
+ const bool has_deopt_;
+
+ // Safepoint table layout.
+ const Address safepoint_table_address_;
+ const uint32_t length_;
+ const uint32_t entry_size_;
friend class SafepointTableBuilder;
friend class SafepointEntry;
@@ -193,7 +211,7 @@ class SafepointTableBuilder {
: pc(pc),
deopt_index(Safepoint::kNoDeoptimizationIndex),
trampoline(-1),
- indexes(new (zone) ZoneChunkList<int>(
+ indexes(zone->New<ZoneChunkList<int>>(
zone, ZoneChunkList<int>::StartMode::kSmall)) {}
};
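
The private layout constants above describe how the table is read: a 32-bit length, a 32-bit entry size, then `length` fixed-size per-safepoint records starting at kHeaderSize, followed by the variable-size bitmaps that entries() points at. A hedged standalone sketch of reading that header shape from a raw byte buffer (offsets mirror kLengthOffset/kEntrySizeOffset/kHeaderSize; the fixed entry size here is an assumed value for illustration, not V8's kFixedEntrySize):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct SafepointTableSketch {
      static constexpr int kIntSize = 4;
      static constexpr int kLengthOffset = 0;
      static constexpr int kEntrySizeOffset = kLengthOffset + kIntSize;
      static constexpr int kHeaderSize = kEntrySizeOffset + kIntSize;
      static constexpr int kFixedEntrySize = 3 * kIntSize;  // assumed for the sketch

      explicit SafepointTableSketch(const uint8_t* table)
          : table_(table),
            length_(ReadUint32(table + kLengthOffset)),
            entry_size_(ReadUint32(table + kEntrySizeOffset)) {}

      uint32_t length() const { return length_; }
      uint32_t entry_size() const { return entry_size_; }
      // Bitmaps start after the header and the fixed-size records.
      const uint8_t* entries() const {
        return table_ + kHeaderSize + length_ * kFixedEntrySize;
      }

     private:
      static uint32_t ReadUint32(const uint8_t* p) {
        uint32_t v;
        std::memcpy(&v, p, sizeof(v));
        return v;
      }
      const uint8_t* table_;
      uint32_t length_;
      uint32_t entry_size_;
    };

    int main() {
      // Header says: 2 entries with 4 bitmap bytes each.
      std::vector<uint8_t> buf(8 + 2 * 12 + 2 * 4, 0);
      uint32_t length = 2, entry_size = 4;
      std::memcpy(&buf[0], &length, sizeof(length));
      std::memcpy(&buf[4], &entry_size, sizeof(entry_size));
      SafepointTableSketch table(buf.data());
      assert(table.length() == 2);
      assert(table.entries() == buf.data() + 8 + 2 * 12);
      return 0;
    }
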
diff --git a/deps/v8/src/codegen/signature.h b/deps/v8/src/codegen/signature.h
index c05c440530..1213a1fd49 100644
--- a/deps/v8/src/codegen/signature.h
+++ b/deps/v8/src/codegen/signature.h
@@ -89,7 +89,7 @@ class Signature : public ZoneObject {
Signature<T>* Build() {
DCHECK_EQ(rcursor_, return_count_);
DCHECK_EQ(pcursor_, parameter_count_);
- return new (zone_) Signature<T>(return_count_, parameter_count_, buffer_);
+ return zone_->New<Signature<T>>(return_count_, parameter_count_, buffer_);
}
private:
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 0f03867331..3c8a108808 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -5,7 +5,7 @@
#include "src/codegen/source-position-table.h"
#include "src/base/export-template.h"
-#include "src/heap/off-thread-factory-inl.h"
+#include "src/heap/local-factory-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
@@ -49,10 +49,10 @@ void SubtractFromEntry(PositionTableEntry* value,
// Helper: Encode an integer.
template <typename T>
-void EncodeInt(std::vector<byte>* bytes, T value) {
+void EncodeInt(ZoneVector<byte>* bytes, T value) {
using unsigned_type = typename std::make_unsigned<T>::type;
// Zig-zag encoding.
- static const int kShift = sizeof(T) * kBitsPerByte - 1;
+ static constexpr int kShift = sizeof(T) * kBitsPerByte - 1;
value = ((static_cast<unsigned_type>(value) << 1) ^ (value >> kShift));
DCHECK_GE(value, 0);
unsigned_type encoded = static_cast<unsigned_type>(value);
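
EncodeInt above stores each delta with zig-zag encoding before emitting it as a variable-length sequence of bytes, so that small negative and small positive values both map to small unsigned values and therefore to few bytes. A standalone sketch of the mapping and its inverse (32-bit here for brevity; the builder templates it over the value type):

    #include <cassert>
    #include <cstdint>

    // Zig-zag: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... Magnitude, not sign,
    // determines how large the encoded value is.
    uint32_t ZigZagEncode(int32_t value) {
      return (static_cast<uint32_t>(value) << 1) ^
             static_cast<uint32_t>(value >> 31);
    }

    int32_t ZigZagDecode(uint32_t encoded) {
      return static_cast<int32_t>(encoded >> 1) ^ -static_cast<int32_t>(encoded & 1);
    }

    int main() {
      assert(ZigZagEncode(0) == 0);
      assert(ZigZagEncode(-1) == 1);
      assert(ZigZagEncode(1) == 2);
      assert(ZigZagEncode(-2) == 3);
      for (int32_t v : {0, 1, -1, 63, -64, (1 << 20), -(1 << 20)}) {
        assert(ZigZagDecode(ZigZagEncode(v)) == v);
      }
      return 0;
    }
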
@@ -67,7 +67,7 @@ void EncodeInt(std::vector<byte>* bytes, T value) {
}
// Encode a PositionTableEntry.
-void EncodeEntry(std::vector<byte>* bytes, const PositionTableEntry& entry) {
+void EncodeEntry(ZoneVector<byte>* bytes, const PositionTableEntry& entry) {
// We only accept ascending code offsets.
DCHECK_GE(entry.code_offset, 0);
// Since code_offset is not negative, we use sign to encode is_statement.
@@ -115,7 +115,7 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
}
#ifdef ENABLE_SLOW_DCHECKS
-void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries,
+void CheckTableEquals(const ZoneVector<PositionTableEntry>& raw_entries,
SourcePositionTableIterator* encoded) {
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
@@ -133,8 +133,14 @@ void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries,
} // namespace
SourcePositionTableBuilder::SourcePositionTableBuilder(
- SourcePositionTableBuilder::RecordingMode mode)
- : mode_(mode), previous_() {}
+ Zone* zone, SourcePositionTableBuilder::RecordingMode mode)
+ : mode_(mode),
+ bytes_(zone),
+#ifdef ENABLE_SLOW_DCHECKS
+ raw_entries_(zone),
+#endif
+ previous_() {
+}
void SourcePositionTableBuilder::AddPosition(size_t code_offset,
SourcePosition source_position,
@@ -183,7 +189,7 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Isolate* isolate);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
- OffThreadIsolate* isolate);
+ LocalIsolate* isolate);
OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
if (bytes_.empty()) return OwnedVector<byte>();
diff --git a/deps/v8/src/codegen/source-position-table.h b/deps/v8/src/codegen/source-position-table.h
index 024eca54fa..a42c6a44a3 100644
--- a/deps/v8/src/codegen/source-position-table.h
+++ b/deps/v8/src/codegen/source-position-table.h
@@ -49,7 +49,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
};
explicit SourcePositionTableBuilder(
- RecordingMode mode = RECORD_SOURCE_POSITIONS);
+ Zone* zone, RecordingMode mode = RECORD_SOURCE_POSITIONS);
void AddPosition(size_t code_offset, SourcePosition source_position,
bool is_statement);
@@ -66,9 +66,9 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
void AddEntry(const PositionTableEntry& entry);
RecordingMode mode_;
- std::vector<byte> bytes_;
+ ZoneVector<byte> bytes_;
#ifdef ENABLE_SLOW_DCHECKS
- std::vector<PositionTableEntry> raw_entries_;
+ ZoneVector<PositionTableEntry> raw_entries_;
#endif
PositionTableEntry previous_; // Previously written entry, to compute delta.
};
diff --git a/deps/v8/src/codegen/tick-counter.cc b/deps/v8/src/codegen/tick-counter.cc
index 2e72ae0e86..5172201f7e 100644
--- a/deps/v8/src/codegen/tick-counter.cc
+++ b/deps/v8/src/codegen/tick-counter.cc
@@ -6,18 +6,29 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
+#include "src/heap/local-heap.h"
namespace v8 {
namespace internal {
-void TickCounter::DoTick() {
+void TickCounter::TickAndMaybeEnterSafepoint() {
++ticks_;
// Magical number to detect performance bugs or compiler divergence.
// Selected as being roughly 10x of what's needed frequently.
constexpr size_t kMaxTicks = 100000000;
USE(kMaxTicks);
DCHECK_LT(ticks_, kMaxTicks);
+
+ if (local_heap_) local_heap_->Safepoint();
+}
+
+void TickCounter::AttachLocalHeap(LocalHeap* local_heap) {
+ DCHECK_NULL(local_heap_);
+ local_heap_ = local_heap;
+ DCHECK_NOT_NULL(local_heap_);
}
+void TickCounter::DetachLocalHeap() { local_heap_ = nullptr; }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/tick-counter.h b/deps/v8/src/codegen/tick-counter.h
index 8d6c966bb0..3dbb404cde 100644
--- a/deps/v8/src/codegen/tick-counter.h
+++ b/deps/v8/src/codegen/tick-counter.h
@@ -10,16 +10,23 @@
namespace v8 {
namespace internal {
-// A deterministic correlate of time, used to detect performance or
-// divergence bugs in Turbofan. DoTick() should be called frequently
-// thoughout the compilation.
+class LocalHeap;
+
+// Provides a tick counter that doubles as a safepoint check: generating a
+// tick also makes the current thread enter a safepoint if one was requested.
+// The tick is used as a deterministic correlate of time to detect performance
+// or divergence bugs in Turbofan. TickAndMaybeEnterSafepoint() should be
+// called frequently throughout the compilation.
class TickCounter {
public:
- void DoTick();
+ void TickAndMaybeEnterSafepoint();
+ void AttachLocalHeap(LocalHeap* local_heap);
+ void DetachLocalHeap();
size_t CurrentTicks() const { return ticks_; }
private:
size_t ticks_ = 0;
+ LocalHeap* local_heap_ = nullptr;
};
} // namespace internal
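
With these changes the tick counter doubles as a safepoint check for background compilation: the owner of a LocalHeap attaches it, the compiler calls TickAndMaybeEnterSafepoint() from its hot loops, and each call both advances the deterministic tick and gives the heap a chance to park the thread at a safepoint. A hedged standalone sketch of that shape (LocalHeapStub is an invented stand-in for illustration, not V8's LocalHeap):

    #include <cassert>
    #include <cstddef>

    // Stand-in for v8::internal::LocalHeap; the real Safepoint() may block the
    // thread until a requested safepoint is over.
    struct LocalHeapStub {
      int safepoint_checks = 0;
      void Safepoint() { ++safepoint_checks; }
    };

    class TickCounterSketch {
     public:
      void TickAndMaybeEnterSafepoint() {
        ++ticks_;
        if (local_heap_ != nullptr) local_heap_->Safepoint();
      }
      void AttachLocalHeap(LocalHeapStub* heap) { local_heap_ = heap; }
      void DetachLocalHeap() { local_heap_ = nullptr; }
      size_t CurrentTicks() const { return ticks_; }

     private:
      size_t ticks_ = 0;
      LocalHeapStub* local_heap_ = nullptr;
    };

    int main() {
      TickCounterSketch counter;
      counter.TickAndMaybeEnterSafepoint();  // no heap attached: only a tick
      LocalHeapStub heap;
      counter.AttachLocalHeap(&heap);
      for (int i = 0; i < 3; ++i) counter.TickAndMaybeEnterSafepoint();
      counter.DetachLocalHeap();
      assert(counter.CurrentTicks() == 4);
      assert(heap.safepoint_checks == 3);
      return 0;
    }
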
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index 83a05d0d44..ba1f609bcf 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -20,7 +20,7 @@ namespace compiler {
class Node;
-}
+} // namespace compiler
struct UntaggedT {};
@@ -191,6 +191,11 @@ constexpr bool IsMachineRepresentationOf(MachineRepresentation r) {
}
template <class T>
+constexpr MachineRepresentation PhiMachineRepresentationOf =
+ std::is_base_of<Word32T, T>::value ? MachineRepresentation::kWord32
+ : MachineRepresentationOf<T>::value;
+
+template <class T>
struct is_valid_type_tag {
static const bool value = std::is_base_of<Object, T>::value ||
std::is_base_of<UntaggedT, T>::value ||
@@ -355,7 +360,7 @@ class TNode {
return *this;
}
- bool is_null() { return node_ == nullptr; }
+ bool is_null() const { return node_ == nullptr; }
operator compiler::Node*() const { return node_; }
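
The PhiMachineRepresentationOf addition in the earlier hunk collapses every Word32T subtype to MachineRepresentation::kWord32 and otherwise falls back to the per-type MachineRepresentationOf trait. A hedged standalone sketch of that pattern with invented stand-in tags and representations (not the real V8 types):

    #include <type_traits>

    enum class Rep { kWord32, kTagged };

    struct Word32Tag {};
    struct Int32Tag : Word32Tag {};
    struct TaggedTag {};

    // Per-type trait, playing the role of MachineRepresentationOf<T>.
    template <class T> struct RepresentationOf;
    template <> struct RepresentationOf<Int32Tag> {
      static constexpr Rep value = Rep::kWord32;
    };
    template <> struct RepresentationOf<TaggedTag> {
      static constexpr Rep value = Rep::kTagged;
    };

    // Variable template: every Word32Tag subtype maps to kWord32, everything
    // else defers to the trait, mirroring PhiMachineRepresentationOf<T>.
    template <class T>
    constexpr Rep PhiRepresentationOf =
        std::is_base_of<Word32Tag, T>::value ? Rep::kWord32
                                             : RepresentationOf<T>::value;

    static_assert(PhiRepresentationOf<Int32Tag> == Rep::kWord32, "");
    static_assert(PhiRepresentationOf<TaggedTag> == Rep::kTagged, "");

    int main() { return 0; }
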
diff --git a/deps/v8/src/codegen/unoptimized-compilation-info.h b/deps/v8/src/codegen/unoptimized-compilation-info.h
index bb431dc98d..3cdb94158b 100644
--- a/deps/v8/src/codegen/unoptimized-compilation-info.h
+++ b/deps/v8/src/codegen/unoptimized-compilation-info.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
DCHECK_NOT_NULL(literal);
literal_ = literal;
}
+ void ClearLiteral() { literal_ = nullptr; }
DeclarationScope* scope() const;
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 287de802be..c1e2ec9808 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -132,168 +132,53 @@ uint32_t RelocInfo::wasm_call_tag() const {
// -----------------------------------------------------------------------------
// Implementation of Operand
-namespace {
-class OperandBuilder {
- public:
- OperandBuilder(Register base, int32_t disp) {
- if (base == rsp || base == r12) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(times_1, rsp, base);
- }
-
- if (disp == 0 && base != rbp && base != r13) {
- set_modrm(0, base);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
- }
- }
-
- OperandBuilder(Register base, Register index, ScaleFactor scale,
- int32_t disp) {
- DCHECK(index != rsp);
- set_sib(scale, index, base);
- if (disp == 0 && base != rbp && base != r13) {
- // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
- // possibly set by set_sib.
- set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, rsp);
- set_disp8(disp);
- } else {
- set_modrm(2, rsp);
- set_disp32(disp);
- }
- }
-
- OperandBuilder(Register index, ScaleFactor scale, int32_t disp) {
- DCHECK(index != rsp);
- set_modrm(0, rsp);
- set_sib(scale, index, rbp);
- set_disp32(disp);
- }
-
- OperandBuilder(Label* label, int addend) {
- data_.addend = addend;
- DCHECK_NOT_NULL(label);
- DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
- set_modrm(0, rbp);
- set_disp64(reinterpret_cast<intptr_t>(label));
+Operand::Operand(Operand operand, int32_t offset) {
+ DCHECK_GE(operand.data().len, 1);
+ // Operand encodes REX ModR/M [SIB] [Disp].
+ byte modrm = operand.data().buf[0];
+ DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
+ bool has_sib = ((modrm & 0x07) == 0x04);
+ byte mode = modrm & 0xC0;
+ int disp_offset = has_sib ? 2 : 1;
+ int base_reg = (has_sib ? operand.data().buf[1] : modrm) & 0x07;
+ // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+ // displacement.
+ bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
+ int32_t disp_value = 0;
+ if (mode == 0x80 || is_baseless) {
+ // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
+ disp_value = ReadUnalignedValue<int32_t>(
+ reinterpret_cast<Address>(&operand.data().buf[disp_offset]));
+ } else if (mode == 0x40) {
+ // Mode 1: Byte displacement.
+ disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
}
- OperandBuilder(Operand operand, int32_t offset) {
- DCHECK_GE(operand.data().len, 1);
- // Operand encodes REX ModR/M [SIB] [Disp].
- byte modrm = operand.data().buf[0];
- DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
- bool has_sib = ((modrm & 0x07) == 0x04);
- byte mode = modrm & 0xC0;
- int disp_offset = has_sib ? 2 : 1;
- int base_reg = (has_sib ? operand.data().buf[1] : modrm) & 0x07;
- // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
- // displacement.
- bool is_baseless =
- (mode == 0) && (base_reg == 0x05); // No base or RIP base.
- int32_t disp_value = 0;
- if (mode == 0x80 || is_baseless) {
- // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = ReadUnalignedValue<int32_t>(
- reinterpret_cast<Address>(&operand.data().buf[disp_offset]));
- } else if (mode == 0x40) {
- // Mode 1: Byte displacement.
- disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
- }
-
- // Write new operand with same registers, but with modified displacement.
- DCHECK(offset >= 0 ? disp_value + offset > disp_value
- : disp_value + offset < disp_value); // No overflow.
- disp_value += offset;
- data_.rex = operand.data().rex;
- if (!is_int8(disp_value) || is_baseless) {
- // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
- data_.len = disp_offset + 4;
- WriteUnalignedValue(reinterpret_cast<Address>(&data_.buf[disp_offset]),
- disp_value);
- } else if (disp_value != 0 || (base_reg == 0x05)) {
- // Need 8 bits of displacement.
- data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
- data_.len = disp_offset + 1;
- data_.buf[disp_offset] = static_cast<byte>(disp_value);
- } else {
- // Need no displacement.
- data_.buf[0] = (modrm & 0x3F); // Mode 0.
- data_.len = disp_offset;
- }
- if (has_sib) {
- data_.buf[1] = operand.data().buf[1];
- }
- }
-
- void set_modrm(int mod, Register rm_reg) {
- DCHECK(is_uint2(mod));
- data_.buf[0] = mod << 6 | rm_reg.low_bits();
- // Set REX.B to the high bit of rm.code().
- data_.rex |= rm_reg.high_bit();
- }
-
- void set_sib(ScaleFactor scale, Register index, Register base) {
- DCHECK_EQ(data_.len, 1);
- DCHECK(is_uint2(scale));
- // Use SIB with no index register only for base rsp or r12. Otherwise we
- // would skip the SIB byte entirely.
- DCHECK(index != rsp || base == rsp || base == r12);
- data_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
- data_.rex |= index.high_bit() << 1 | base.high_bit();
- data_.len = 2;
- }
-
- void set_disp8(int disp) {
- DCHECK(is_int8(disp));
- DCHECK(data_.len == 1 || data_.len == 2);
- int8_t* p = reinterpret_cast<int8_t*>(&data_.buf[data_.len]);
- *p = disp;
- data_.len += sizeof(int8_t);
- }
-
- void set_disp32(int disp) {
- DCHECK(data_.len == 1 || data_.len == 2);
- Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
- WriteUnalignedValue(p, disp);
- data_.len += sizeof(int32_t);
+ // Write new operand with same registers, but with modified displacement.
+ DCHECK(offset >= 0 ? disp_value + offset > disp_value
+ : disp_value + offset < disp_value); // No overflow.
+ disp_value += offset;
+ data_.rex = operand.data().rex;
+ if (!is_int8(disp_value) || is_baseless) {
+ // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
+ data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
+ data_.len = disp_offset + 4;
+ WriteUnalignedValue(reinterpret_cast<Address>(&data_.buf[disp_offset]),
+ disp_value);
+ } else if (disp_value != 0 || (base_reg == 0x05)) {
+ // Need 8 bits of displacement.
+ data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
+ data_.len = disp_offset + 1;
+ data_.buf[disp_offset] = static_cast<byte>(disp_value);
+ } else {
+ // Need no displacement.
+ data_.buf[0] = (modrm & 0x3F); // Mode 0.
+ data_.len = disp_offset;
}
-
- void set_disp64(int64_t disp) {
- DCHECK_EQ(1, data_.len);
- Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
- WriteUnalignedValue(p, disp);
- data_.len += sizeof(disp);
+ if (has_sib) {
+ data_.buf[1] = operand.data().buf[1];
}
-
- const Operand::Data& data() const { return data_; }
-
- private:
- Operand::Data data_;
-};
-} // namespace
-
-Operand::Operand(Register base, int32_t disp)
- : data_(OperandBuilder(base, disp).data()) {}
-
-Operand::Operand(Register base, Register index, ScaleFactor scale, int32_t disp)
- : data_(OperandBuilder(base, index, scale, disp).data()) {}
-
-Operand::Operand(Register index, ScaleFactor scale, int32_t disp)
- : data_(OperandBuilder(index, scale, disp).data()) {}
-
-Operand::Operand(Label* label, int addend)
- : data_(OperandBuilder(label, addend).data()) {}
-
-Operand::Operand(Operand operand, int32_t offset)
- : data_(OperandBuilder(operand, offset).data()) {}
+}
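
The rewritten Operand(Operand, offset) constructor works directly on the encoded ModR/M [SIB] [disp] bytes: the top two ModR/M bits pick the displacement width (mode 0 = none, 1 = 8-bit, 2 = 32-bit), an r/m field of 0b100 means a SIB byte follows, and mode 0 with base 0b101 (rbp/r13, or RIP-relative) always carries a 32-bit displacement. A standalone sketch of just that decoding step:

    #include <cassert>
    #include <cstdint>

    // How many displacement bytes follow, given the ModR/M byte (and the SIB
    // base when a SIB byte is present), mirroring the checks in the constructor.
    int DisplacementSize(uint8_t modrm, uint8_t sib_base) {
      assert(modrm < 0xC0);                      // mode 3 (register target) excluded
      uint8_t mode = modrm & 0xC0;
      bool has_sib = (modrm & 0x07) == 0x04;
      uint8_t base_reg = (has_sib ? sib_base : modrm) & 0x07;
      bool is_baseless = mode == 0 && base_reg == 0x05;  // rbp/r13 or RIP base
      if (mode == 0x80 || is_baseless) return 4;  // 32-bit displacement
      if (mode == 0x40) return 1;                 // 8-bit displacement
      return 0;                                   // no displacement byte
    }

    int main() {
      assert(DisplacementSize(0x00, 0) == 0);     // [rax]
      assert(DisplacementSize(0x45, 0) == 1);     // [rbp + disp8]
      assert(DisplacementSize(0x05, 0) == 4);     // mode 0, base 101: disp32
      assert(DisplacementSize(0x84, 0x24) == 4);  // [rsp + disp32] via SIB
      return 0;
    }

The constructor then re-encodes with the smallest displacement width the new value needs, which is why data_.len may grow or shrink relative to the source operand.
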
bool Operand::AddressUsesRegister(Register reg) const {
int code = reg.code();
@@ -3424,6 +3309,20 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(static_cast<byte>(mode) | 0x8);
}
+void Assembler::roundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x08);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
+void Assembler::roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x09);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
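
roundps/roundpd (and the vround* AVX forms added further down) emit the SSE4.1 opcode followed by an immediate of mode | 0x8. In the ROUNDPS/ROUNDPD encoding the low two immediate bits select the rounding mode, bit 2 would defer to MXCSR.RC, and bit 3 suppresses the precision (inexact) exception, which is what ORing in 0x8 sets. A small sketch of composing that immediate (the enum values are assumed to match the assembler's RoundingMode):

    #include <cassert>
    #include <cstdint>

    // imm8 for ROUNDPS/ROUNDPD/ROUNDSS/ROUNDSD:
    //   bits 1:0  rounding mode
    //   bit  2    use MXCSR.RC instead of bits 1:0 (left clear here)
    //   bit  3    suppress the precision (inexact) exception
    enum RoundingMode : uint8_t {
      kRoundToNearest = 0x0,
      kRoundDown = 0x1,
      kRoundUp = 0x2,
      kRoundToZero = 0x3
    };

    uint8_t RoundingImmediate(RoundingMode mode) {
      return static_cast<uint8_t>(mode) | 0x8;  // always mask the precision exception
    }

    int main() {
      assert(RoundingImmediate(kRoundToNearest) == 0x8);
      assert(RoundingImmediate(kRoundToZero) == 0xB);
      return 0;
    }
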
void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3443,8 +3342,8 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
void Assembler::pmovmskb(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
emit(0x66);
+ emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xD7);
emit_sse_operand(dst, src);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 24eb976578..ac0c66ae5d 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -173,13 +173,48 @@ class V8_EXPORT_PRIVATE Operand {
};
// [base + disp/r]
- Operand(Register base, int32_t disp);
+ V8_INLINE Operand(Register base, int32_t disp) {
+ if (base == rsp || base == r12) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(times_1, rsp, base);
+ }
+
+ if (disp == 0 && base != rbp && base != r13) {
+ set_modrm(0, base);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
+ }
// [base + index*scale + disp/r]
- Operand(Register base, Register index, ScaleFactor scale, int32_t disp);
+ V8_INLINE Operand(Register base, Register index, ScaleFactor scale,
+ int32_t disp) {
+ DCHECK(index != rsp);
+ set_sib(scale, index, base);
+ if (disp == 0 && base != rbp && base != r13) {
+ // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+ // possibly set by set_sib.
+ set_modrm(0, rsp);
+ } else if (is_int8(disp)) {
+ set_modrm(1, rsp);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, rsp);
+ set_disp32(disp);
+ }
+ }
// [index*scale + disp/r]
- Operand(Register index, ScaleFactor scale, int32_t disp);
+ V8_INLINE Operand(Register index, ScaleFactor scale, int32_t disp) {
+ DCHECK(index != rsp);
+ set_modrm(0, rsp);
+ set_sib(scale, index, rbp);
+ set_disp32(disp);
+ }
// Offset from existing memory operand.
// Offset is added to existing displacement as 32-bit signed values and
@@ -187,25 +222,64 @@ class V8_EXPORT_PRIVATE Operand {
Operand(Operand base, int32_t offset);
// [rip + disp/r]
- explicit Operand(Label* label, int addend = 0);
+ V8_INLINE explicit Operand(Label* label, int addend = 0) {
+ data_.addend = addend;
+ DCHECK_NOT_NULL(label);
+ DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
+ set_modrm(0, rbp);
+ set_disp64(reinterpret_cast<intptr_t>(label));
+ }
Operand(const Operand&) V8_NOEXCEPT = default;
+ const Data& data() const { return data_; }
+
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
bool AddressUsesRegister(Register reg) const;
- // Queries related to the size of the generated instruction.
- // Whether the generated instruction will have a REX prefix.
- bool requires_rex() const { return data_.rex != 0; }
- // Size of the ModR/M, SIB and displacement parts of the generated
- // instruction.
- int operand_size() const { return data_.len; }
+ private:
+ V8_INLINE void set_modrm(int mod, Register rm_reg) {
+ DCHECK(is_uint2(mod));
+ data_.buf[0] = mod << 6 | rm_reg.low_bits();
+ // Set REX.B to the high bit of rm.code().
+ data_.rex |= rm_reg.high_bit();
+ }
- const Data& data() const { return data_; }
+ V8_INLINE void set_sib(ScaleFactor scale, Register index, Register base) {
+ DCHECK_EQ(data_.len, 1);
+ DCHECK(is_uint2(scale));
+ // Use SIB with no index register only for base rsp or r12. Otherwise we
+ // would skip the SIB byte entirely.
+ DCHECK(index != rsp || base == rsp || base == r12);
+ data_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
+ data_.rex |= index.high_bit() << 1 | base.high_bit();
+ data_.len = 2;
+ }
- private:
- const Data data_;
+ V8_INLINE void set_disp8(int disp) {
+ DCHECK(is_int8(disp));
+ DCHECK(data_.len == 1 || data_.len == 2);
+ int8_t* p = reinterpret_cast<int8_t*>(&data_.buf[data_.len]);
+ *p = disp;
+ data_.len += sizeof(int8_t);
+ }
+
+ V8_INLINE void set_disp32(int disp) {
+ DCHECK(data_.len == 1 || data_.len == 2);
+ Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
+ WriteUnalignedValue(p, disp);
+ data_.len += sizeof(int32_t);
+ }
+
+ V8_INLINE void set_disp64(int64_t disp) {
+ DCHECK_EQ(1, data_.len);
+ Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
+ WriteUnalignedValue(p, disp);
+ data_.len += sizeof(disp);
+ }
+
+ Data data_;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
@@ -1141,6 +1215,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
void cmpps(XMMRegister dst, Operand src, int8_t cmp);
@@ -1358,6 +1434,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
+ void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x08, dst, xmm0, src, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
+ void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x09, dst, xmm0, src, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
@@ -1562,6 +1646,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
emit(imm8);
}
+ void vpalignr(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
+ vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(imm8);
+ }
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
index 6b9754efca..d86089ca1d 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
@@ -48,11 +48,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
- return rdi;
-}
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return rax; }
-
const Register LoadDescriptor::ReceiverRegister() { return rdx; }
const Register LoadDescriptor::NameRegister() { return rcx; }
const Register LoadDescriptor::SlotRegister() { return rax; }
@@ -129,6 +124,16 @@ void CallWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rbx : the object to spread
+ // rdx : the feedback slot
+ Register registers[] = {rdi, rax, rbx, rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -137,6 +142,16 @@ void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rdi : the target to call
+ // rbx : the arguments list
+ // rdx : the feedback slot
+ // rax : the feedback vector
+ Register registers[] = {rdi, rbx, rdx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments (on the stack, not including receiver)
@@ -168,6 +183,16 @@ void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the feedback slot
+ Register registers[] = {rdi, rdx, rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -177,6 +202,16 @@ void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the arguments list
+ // rax : the feedback slot
+ Register registers[] = {rdi, rdx, rbx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@@ -193,11 +228,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, rax};
@@ -289,7 +319,7 @@ void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, // kLeft
rax, // kRight
- rdi, // Slot
+ rdi, // kSlot
rbx}; // kMaybeFeedbackVector
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -307,7 +337,7 @@ void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, // kLeft
rax, // kRight
- rdi, // Slot
+ rdi, // kSlot
rbx}; // kMaybeFeedbackVector
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 7d6fdc5eb3..faa96b7d3f 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -1353,6 +1353,12 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
+void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
+ Move(dst, low);
+ movq(kScratchRegister, high);
+ Pinsrq(dst, kScratchRegister, int8_t{1});
+}
+
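
The new Move(XMMRegister, uint64_t high, uint64_t low) builds a 128-bit constant from two 64-bit halves: the existing Move writes the low half, then Pinsrq inserts the high half into lane 1. A trivial sketch of the resulting lane layout, with a plain struct standing in for the XMM register:

    #include <cassert>
    #include <cstdint>

    struct Xmm128 {
      uint64_t lane[2];  // lane[0] = bits 0..63, lane[1] = bits 64..127
    };

    // Mirrors Move(dst, high, low): low half first, then the high half into
    // lane 1 (what Pinsrq(dst, kScratchRegister, 1) achieves).
    Xmm128 MakeU128(uint64_t high, uint64_t low) {
      Xmm128 dst{};
      dst.lane[0] = low;
      dst.lane[1] = high;
      return dst;
    }

    int main() {
      Xmm128 v = MakeU128(0x0102030405060708u, 0x1112131415161718u);
      assert(v.lane[0] == 0x1112131415161718u);
      assert(v.lane[1] == 0x0102030405060708u);
      return 0;
    }
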
// ----------------------------------------------------------------------------
void MacroAssembler::Absps(XMMRegister dst) {
@@ -2756,10 +2762,10 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
andq(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
- testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+ testb(Operand(scratch, BasicMemoryChunk::kFlagsOffset),
Immediate(static_cast<uint8_t>(mask)));
} else {
- testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ testl(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
}
j(cc, condition_met, condition_met_distance);
}
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 8382bf5a28..995f2565cc 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -174,8 +174,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Cmpneqpd, cmpneqpd)
AVX_OP(Cmpnltpd, cmpnltpd)
AVX_OP(Cmpnlepd, cmpnlepd)
- AVX_OP(Roundss, roundss)
- AVX_OP(Roundsd, roundsd)
AVX_OP(Sqrtss, sqrtss)
AVX_OP(Sqrtsd, sqrtsd)
AVX_OP(Sqrtps, sqrtps)
@@ -204,6 +202,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Psrlw, psrlw)
AVX_OP(Psrld, psrld)
AVX_OP(Psrlq, psrlq)
+ AVX_OP(Pmaddwd, pmaddwd)
AVX_OP(Paddb, paddb)
AVX_OP(Paddw, paddw)
AVX_OP(Paddd, paddd)
@@ -283,6 +282,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
AVX_OP_SSE4_1(Pextrq, pextrq)
+ AVX_OP_SSE4_1(Roundps, roundps)
+ AVX_OP_SSE4_1(Roundpd, roundpd)
+ AVX_OP_SSE4_1(Roundss, roundss)
+ AVX_OP_SSE4_1(Roundsd, roundsd)
AVX_OP_SSE4_2(Pcmpgtq, pcmpgtq)
#undef AVX_OP
@@ -429,6 +432,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(XMMRegister dst, uint64_t src);
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+ void Move(XMMRegister dst, uint64_t high, uint64_t low);
// Move if the registers are not identical.
void Move(Register target, Register source);
@@ -442,7 +446,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Address ptr, RelocInfo::Mode rmode) {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
- DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
+ DCHECK(rmode == RelocInfo::NONE || rmode > RelocInfo::LAST_GCED_ENUM);
movq(dst, Immediate64(ptr, rmode));
}
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index 74ec16d6a2..52107ed6b9 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -57,6 +57,7 @@
V(packssdw, 66, 0F, 6B) \
V(punpcklqdq, 66, 0F, 6C) \
V(punpckhqdq, 66, 0F, 6D) \
+ V(pmaddwd, 66, 0F, F5) \
V(paddb, 66, 0F, FC) \
V(paddw, 66, 0F, FD) \
V(paddd, 66, 0F, FE) \