Diffstat (limited to 'deps/v8/src/codegen')
-rw-r--r--  deps/v8/src/codegen/OWNERS | 2
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm-inl.h | 2
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.cc | 10
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.h | 12
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.cc | 2
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.h | 2
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64-inl.h | 4
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.cc | 8
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.h | 12
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 2
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 2
-rw-r--r--  deps/v8/src/codegen/arm64/register-arm64.h | 2
-rw-r--r--  deps/v8/src/codegen/assembler.h | 7
-rw-r--r--  deps/v8/src/codegen/code-reference.cc | 24
-rw-r--r--  deps/v8/src/codegen/code-reference.h | 21
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.cc | 226
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.h | 51
-rw-r--r--  deps/v8/src/codegen/compilation-cache.cc | 14
-rw-r--r--  deps/v8/src/codegen/compiler.cc | 813
-rw-r--r--  deps/v8/src/codegen/compiler.h | 89
-rw-r--r--  deps/v8/src/codegen/constant-pool.h | 8
-rw-r--r--  deps/v8/src/codegen/cpu-features.h | 2
-rw-r--r--  deps/v8/src/codegen/external-reference-table.cc | 8
-rw-r--r--  deps/v8/src/codegen/external-reference.cc | 49
-rw-r--r--  deps/v8/src/codegen/external-reference.h | 34
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32-inl.h | 6
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.cc | 55
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.h | 199
-rw-r--r--  deps/v8/src/codegen/ia32/fma-instr.h | 58
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 4
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.h | 2
-rw-r--r--  deps/v8/src/codegen/loong64/assembler-loong64.cc | 6
-rw-r--r--  deps/v8/src/codegen/loong64/assembler-loong64.h | 11
-rw-r--r--  deps/v8/src/codegen/loong64/macro-assembler-loong64.cc | 25
-rw-r--r--  deps/v8/src/codegen/loong64/macro-assembler-loong64.h | 8
-rw-r--r--  deps/v8/src/codegen/machine-type.h | 4
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.cc | 12
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.h | 15
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 25
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.h | 6
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.cc | 8
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.h | 15
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 26
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.h | 6
-rw-r--r--  deps/v8/src/codegen/pending-optimization-table.cc | 7
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc-inl.h | 2
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.cc | 17
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.h | 16
-rw-r--r--  deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h | 14
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 51
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 6
-rw-r--r--  deps/v8/src/codegen/reloc-info.cc | 6
-rw-r--r--  deps/v8/src/codegen/reloc-info.h | 6
-rw-r--r--  deps/v8/src/codegen/riscv64/assembler-riscv64.cc | 285
-rw-r--r--  deps/v8/src/codegen/riscv64/assembler-riscv64.h | 121
-rw-r--r--  deps/v8/src/codegen/riscv64/constants-riscv64.h | 97
-rw-r--r--  deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc | 101
-rw-r--r--  deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h | 13
-rw-r--r--  deps/v8/src/codegen/riscv64/register-riscv64.h | 14
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390-inl.h | 2
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.cc | 6
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.h | 10
-rw-r--r--  deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h | 20
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc | 43
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.h | 4
-rw-r--r--  deps/v8/src/codegen/safepoint-table.cc | 340
-rw-r--r--  deps/v8/src/codegen/safepoint-table.h | 244
-rw-r--r--  deps/v8/src/codegen/script-details.h | 2
-rw-r--r--  deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc | 95
-rw-r--r--  deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h | 11
-rw-r--r--  deps/v8/src/codegen/turbo-assembler.cc | 12
-rw-r--r--  deps/v8/src/codegen/turbo-assembler.h | 1
-rw-r--r--  deps/v8/src/codegen/unoptimized-compilation-info.cc | 5
-rw-r--r--  deps/v8/src/codegen/unoptimized-compilation-info.h | 8
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64-inl.h | 4
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.cc | 109
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.h | 47
-rw-r--r--  deps/v8/src/codegen/x64/fma-instr.h | 42
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.cc | 111
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.h | 13
80 files changed, 2181 insertions(+), 1601 deletions(-)
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index a3c3ffdba6..bf654f6789 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -10,3 +10,5 @@ mslekova@chromium.org
mvstanton@chromium.org
nicohartmann@chromium.org
zhin@chromium.org
+
+per-file compiler.*=marja@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index 0ee81b2f94..9080b3e0b3 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -195,7 +195,7 @@ Operand::Operand(const ExternalReference& f)
value_.immediate = static_cast<int32_t>(f.address());
}
-Operand::Operand(Smi value) : rmode_(RelocInfo::NONE) {
+Operand::Operand(Smi value) : rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 38d691007f..e434cac32d 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -1132,7 +1132,7 @@ bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
if (RelocInfo::IsOnlyForSerializer(rmode)) {
if (assembler->predictable_code_size()) return true;
return assembler->options().record_reloc_info_for_serialization;
- } else if (RelocInfo::IsNone(rmode)) {
+ } else if (RelocInfo::IsNoInfo(rmode)) {
return false;
}
return true;
@@ -1464,7 +1464,7 @@ int Assembler::branch_offset(Label* L) {
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
const bool b_imm_check = is_int24(imm24);
@@ -1478,7 +1478,7 @@ void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
}
void Assembler::bl(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
const bool bl_imm_check = is_int24(imm24);
@@ -5226,7 +5226,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
// blocked before using dd.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -5240,7 +5240,7 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
// blocked before using dq.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index a7d224a094..4cce50f795 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -87,7 +87,7 @@ class V8_EXPORT_PRIVATE Operand {
public:
// immediate
V8_INLINE explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rmode_(rmode) {
value_.immediate = immediate;
}
@@ -405,9 +405,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Branch instructions
void b(int branch_offset, Condition cond = al,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO);
void bl(int branch_offset, Condition cond = al,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO);
void blx(int branch_offset); // v5 and above
void blx(Register target, Condition cond = al); // v5 and above
void bx(Register target, Condition cond = al); // v5 and above, plus v4t
@@ -1095,9 +1095,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// called before any use of db/dd/dq/dp to ensure that constant pools
// are not emitted as part of the tables generated.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dd(data, rmode);
}
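
Note: the db/dd/dq/dp data directives above now default their rmode to NO_INFO. A hedged usage sketch, using the identifiers as declared in this header (not a standalone program):

    void EmitConstants(Assembler* masm) {
      masm->db(0xFF);          // one raw byte, never relocated
      masm->dd(0x12345678u);   // 32-bit word; rmode defaults to NO_INFO,
                               // so no relocation entry is recorded
      masm->dq(uint64_t{0x1122334455667788},
               RelocInfo::LITERAL_CONSTANT);  // explicit rmode satisfies the
                                              // IsLiteralConstant DCHECK and
                                              // records reloc info
    }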
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 5c46c64b3e..95eb8795e9 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -2022,7 +2022,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 73efa12002..e43aec485f 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -800,7 +800,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index c5a1d4fd8a..40b9a94dd8 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -192,7 +192,7 @@ inline VRegister CPURegister::Q() const {
// Default initializer is for int types
template <typename T>
struct ImmediateInitializer {
- static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
+ static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NO_INFO; }
static inline int64_t immediate_for(T t) {
STATIC_ASSERT(sizeof(T) <= 8);
STATIC_ASSERT(std::is_integral<T>::value || std::is_enum<T>::value);
@@ -202,7 +202,7 @@ struct ImmediateInitializer {
template <>
struct ImmediateInitializer<Smi> {
- static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NONE; }
+ static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NO_INFO; }
static inline int64_t immediate_for(Smi t) {
return static_cast<int64_t>(t.ptr());
}
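
Note: ImmediateInitializer is a trait that selects, per immediate type, the reloc mode and the raw bits to encode; after this change both the integral default and the Smi specialization report NO_INFO. A simplified standalone sketch of the pattern (not V8's actual definitions):

    #include <cstdint>

    enum class Mode { NO_INFO, FULL_EMBEDDED_OBJECT };

    template <typename T>
    struct ImmediateInitializer {  // default: plain integral immediates
      static Mode rmode_for(T) { return Mode::NO_INFO; }
      static int64_t immediate_for(T t) {
        static_assert(sizeof(T) <= 8, "must fit in a 64-bit immediate");
        return static_cast<int64_t>(t);
      }
    };

    struct Smi { uintptr_t ptr; };  // stand-in for V8's tagged Smi

    template <>
    struct ImmediateInitializer<Smi> {  // specialization: tagged small integer
      static Mode rmode_for(Smi) { return Mode::NO_INFO; }
      static int64_t immediate_for(Smi t) { return static_cast<int64_t>(t.ptr); }
    };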
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 627c7ae021..fd5cd326ec 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -314,7 +314,7 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
return assembler->options().record_reloc_info_for_serialization;
}
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
// Assembler
@@ -4375,13 +4375,15 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {
BlockPoolsScope no_pool_before_b_instr(this);
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
+ if (!RelocInfo::IsNoInfo(rmode))
+ RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
b(offset);
}
void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
BlockPoolsScope no_pool_before_bl_instr(this);
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
+ if (!RelocInfo::IsNoInfo(rmode))
+ RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
bl(offset);
}
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index dac90f8058..df8fadf1f1 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -2065,27 +2065,27 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Required by V8.
void db(uint8_t data) { dc8(data); }
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
BlockPoolsScope no_pool_scope(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
dc32(data);
}
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
BlockPoolsScope no_pool_scope(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
dc64(data);
}
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
BlockPoolsScope no_pool_scope(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index bcf2e4574a..58920c343a 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -1655,7 +1655,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
Ldr(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Br(kOffHeapTrampolineRegister);
}
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 165d702c31..7c972bd307 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -1911,7 +1911,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index ae6c4c9200..29a4212aac 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -547,8 +547,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 50711046e6..6519520278 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -39,6 +39,7 @@
#include <memory>
#include <unordered_map>
+#include "src/base/macros.h"
#include "src/base/memory.h"
#include "src/codegen/code-comments.h"
#include "src/codegen/cpu-features.h"
@@ -64,7 +65,7 @@ using base::WriteUnalignedValue;
// Forward declarations.
class EmbeddedData;
-class InstructionStream;
+class OffHeapInstructionStream;
class Isolate;
class SCTableReference;
class SourcePosition;
@@ -387,7 +388,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
void RequestHeapObject(HeapObjectRequest request);
bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
- DCHECK(!RelocInfo::IsNone(rmode));
+ DCHECK(!RelocInfo::IsNoInfo(rmode));
if (options().disable_reloc_info_for_patching) return false;
if (RelocInfo::IsOnlyForSerializer(rmode) &&
!options().record_reloc_info_for_serialization && !FLAG_debug_code) {
@@ -470,7 +471,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
#ifdef V8_CODE_COMMENTS
#define ASM_CODE_COMMENT(asm) ASM_CODE_COMMENT_STRING(asm, __func__)
#define ASM_CODE_COMMENT_STRING(asm, comment) \
- AssemblerBase::CodeComment asm_code_comment(asm, comment)
+ AssemblerBase::CodeComment UNIQUE_IDENTIFIER(asm_code_comment)(asm, comment)
#else
#define ASM_CODE_COMMENT(asm)
#define ASM_CODE_COMMENT_STRING(asm, ...)
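
Note: wrapping the scope variable's name in UNIQUE_IDENTIFIER lets two ASM_CODE_COMMENT_STRING uses coexist in one block; previously both expanded to the same asm_code_comment variable and the second declaration failed to compile. A standalone sketch of the mechanism, assuming UNIQUE_IDENTIFIER is the usual __COUNTER__-based token paste (an assumption; see src/base/macros.h, newly included above):

    #define CONCAT_(a, b) a##b
    #define CONCAT(a, b) CONCAT_(a, b)
    #define UNIQUE_IDENTIFIER(base) CONCAT(base, __COUNTER__)

    struct CodeComment {
      explicit CodeComment(const char*) {}  // stands in for V8's RAII comment
    };

    void TwoCommentsOneScope() {
      CodeComment UNIQUE_IDENTIFIER(asm_code_comment)("first");   // ...comment0
      CodeComment UNIQUE_IDENTIFIER(asm_code_comment)("second");  // ...comment1
      // With a fixed name, the second declaration would be a redefinition.
    }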
diff --git a/deps/v8/src/codegen/code-reference.cc b/deps/v8/src/codegen/code-reference.cc
index 0c550fa0d3..27ff425a2f 100644
--- a/deps/v8/src/codegen/code-reference.cc
+++ b/deps/v8/src/codegen/code-reference.cc
@@ -86,26 +86,26 @@ struct CodeDescOps {
ret CodeReference::method() const { \
DCHECK(!is_null()); \
switch (kind_) { \
- case JS: \
+ case Kind::JS: \
return JSOps{js_code_}.method(); \
- case WASM: \
+ case Kind::WASM: \
return WasmOps{wasm_code_}.method(); \
- case CODE_DESC: \
+ case Kind::CODE_DESC: \
return CodeDescOps{code_desc_}.method(); \
default: \
UNREACHABLE(); \
} \
}
#else
-#define DISPATCH(ret, method) \
- ret CodeReference::method() const { \
- DCHECK(!is_null()); \
- DCHECK(kind_ == JS || kind_ == CODE_DESC); \
- if (kind_ == JS) { \
- return JSOps{js_code_}.method(); \
- } else { \
- return CodeDescOps{code_desc_}.method(); \
- } \
+#define DISPATCH(ret, method) \
+ ret CodeReference::method() const { \
+ DCHECK(!is_null()); \
+ DCHECK(kind_ == Kind::JS || kind_ == Kind::CODE_DESC); \
+ if (kind_ == Kind::JS) { \
+ return JSOps{js_code_}.method(); \
+ } else { \
+ return CodeDescOps{code_desc_}.method(); \
+ } \
}
#endif // V8_ENABLE_WEBASSEMBLY
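
Note: the DISPATCH macro stamps out one forwarding accessor per method, switching on the kind tag; the change here only qualifies the enumerators with their new Kind:: scope. A condensed standalone sketch of the pattern (two variants instead of three, illustrative names):

    struct JSOps   { int instruction_start() const { return 1; } };
    struct DescOps { int instruction_start() const { return 2; } };

    struct CodeReference {
      enum class Kind { JS, CODE_DESC } kind_;
      const JSOps* js_;
      const DescOps* desc_;

    #define DISPATCH(ret, method)                                    \
      ret method() const {                                           \
        return kind_ == Kind::JS ? js_->method() : desc_->method();  \
      }
      DISPATCH(int, instruction_start)
    #undef DISPATCH
    };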
diff --git a/deps/v8/src/codegen/code-reference.h b/deps/v8/src/codegen/code-reference.h
index 8ff3581689..9b54b6074e 100644
--- a/deps/v8/src/codegen/code-reference.h
+++ b/deps/v8/src/codegen/code-reference.h
@@ -20,12 +20,13 @@ class WasmCode;
class CodeReference {
public:
- CodeReference() : kind_(NONE), null_(nullptr) {}
+ CodeReference() : kind_(Kind::NONE), null_(nullptr) {}
explicit CodeReference(const wasm::WasmCode* wasm_code)
- : kind_(WASM), wasm_code_(wasm_code) {}
+ : kind_(Kind::WASM), wasm_code_(wasm_code) {}
explicit CodeReference(const CodeDesc* code_desc)
- : kind_(CODE_DESC), code_desc_(code_desc) {}
- explicit CodeReference(Handle<Code> js_code) : kind_(JS), js_code_(js_code) {}
+ : kind_(Kind::CODE_DESC), code_desc_(code_desc) {}
+ explicit CodeReference(Handle<Code> js_code)
+ : kind_(Kind::JS), js_code_(js_code) {}
Address constant_pool() const;
Address instruction_start() const;
@@ -37,22 +38,22 @@ class CodeReference {
Address code_comments() const;
int code_comments_size() const;
- bool is_null() const { return kind_ == NONE; }
- bool is_js() const { return kind_ == JS; }
- bool is_wasm_code() const { return kind_ == WASM; }
+ bool is_null() const { return kind_ == Kind::NONE; }
+ bool is_js() const { return kind_ == Kind::JS; }
+ bool is_wasm_code() const { return kind_ == Kind::WASM; }
Handle<Code> as_js_code() const {
- DCHECK_EQ(JS, kind_);
+ DCHECK_EQ(Kind::JS, kind_);
return js_code_;
}
const wasm::WasmCode* as_wasm_code() const {
- DCHECK_EQ(WASM, kind_);
+ DCHECK_EQ(Kind::WASM, kind_);
return wasm_code_;
}
private:
- enum { NONE, JS, WASM, CODE_DESC } kind_;
+ enum class Kind { NONE, JS, WASM, CODE_DESC } kind_;
union {
std::nullptr_t null_;
const wasm::WasmCode* wasm_code_;
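
Note: switching the discriminant from an anonymous unscoped enum to enum class Kind keeps NONE/JS/WASM/CODE_DESC out of the enclosing scope and removes implicit conversion to int; the cost is the Kind:: qualification seen throughout this file and code-reference.cc above. A minimal standalone sketch of what the scoped form forbids:

    enum class Kind { NONE, JS, WASM, CODE_DESC };

    int main() {
      Kind k = Kind::JS;
      // int i = k;          // error: no implicit conversion to int
      // bool b = (k == 0);  // error: must compare against Kind values
      return k == Kind::JS ? 0 : 1;
    }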
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 4a9c06bdd8..db50f7d3e4 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -22,6 +22,7 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-generator.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -1539,16 +1540,21 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(TNode<Object> value,
}
}
-#ifdef V8_CAGED_POINTERS
-
-TNode<CagedPtrT> CodeStubAssembler::LoadCagedPointerFromObject(
+TNode<RawPtrT> CodeStubAssembler::LoadCagedPointerFromObject(
TNode<HeapObject> object, TNode<IntPtrT> field_offset) {
- return LoadObjectField<CagedPtrT>(object, field_offset);
+#ifdef V8_CAGED_POINTERS
+ return ReinterpretCast<RawPtrT>(
+ LoadObjectField<CagedPtrT>(object, field_offset));
+#else
+ return LoadObjectField<RawPtrT>(object, field_offset);
+#endif // V8_CAGED_POINTERS
}
void CodeStubAssembler::StoreCagedPointerToObject(TNode<HeapObject> object,
TNode<IntPtrT> offset,
- TNode<CagedPtrT> pointer) {
+ TNode<RawPtrT> pointer) {
+#ifdef V8_CAGED_POINTERS
+ TNode<CagedPtrT> caged_pointer = ReinterpretCast<CagedPtrT>(pointer);
#ifdef DEBUG
// Verify pointer points into the cage.
TNode<ExternalReference> cage_base_address =
@@ -1557,13 +1563,26 @@ void CodeStubAssembler::StoreCagedPointerToObject(TNode<HeapObject> object,
ExternalConstant(ExternalReference::virtual_memory_cage_end_address());
TNode<UintPtrT> cage_base = Load<UintPtrT>(cage_base_address);
TNode<UintPtrT> cage_end = Load<UintPtrT>(cage_end_address);
- CSA_CHECK(this, UintPtrGreaterThanOrEqual(pointer, cage_base));
- CSA_CHECK(this, UintPtrLessThan(pointer, cage_end));
-#endif
- StoreObjectFieldNoWriteBarrier<CagedPtrT>(object, offset, pointer);
+ CSA_DCHECK(this, UintPtrGreaterThanOrEqual(caged_pointer, cage_base));
+ CSA_DCHECK(this, UintPtrLessThan(caged_pointer, cage_end));
+#endif // DEBUG
+ StoreObjectFieldNoWriteBarrier<CagedPtrT>(object, offset, caged_pointer);
+#else
+ StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer);
+#endif // V8_CAGED_POINTERS
}
+TNode<RawPtrT> CodeStubAssembler::EmptyBackingStoreBufferConstant() {
+#ifdef V8_CAGED_POINTERS
+ // TODO(chromium:1218005) consider creating a LoadCagedPointerConstant() if
+ // more of these constants are required later on.
+ TNode<ExternalReference> empty_backing_store_buffer =
+ ExternalConstant(ExternalReference::empty_backing_store_buffer());
+ return Load<RawPtrT>(empty_backing_store_buffer);
+#else
+ return ReinterpretCast<RawPtrT>(IntPtrConstant(0));
#endif // V8_CAGED_POINTERS
+}
TNode<ExternalPointerT> CodeStubAssembler::ChangeUint32ToExternalPointer(
TNode<Uint32T> value) {
@@ -1679,6 +1698,11 @@ TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
return LoadFullTagged(frame_pointer, IntPtrConstant(offset));
}
+TNode<Uint8T> CodeStubAssembler::LoadUint8Ptr(TNode<RawPtrT> ptr,
+ TNode<IntPtrT> offset) {
+ return Load<Uint8T>(IntPtrAdd(ReinterpretCast<IntPtrT>(ptr), offset));
+}
+
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<HeapObject> object, int offset) {
// Please use LoadMap(object) instead.
@@ -2892,8 +2916,10 @@ TNode<BoolT> CodeStubAssembler::IsGeneratorFunction(
SharedFunctionInfo::kFlagsOffset));
// See IsGeneratorFunction(FunctionKind kind).
- return IsInRange(function_kind, FunctionKind::kAsyncConciseGeneratorMethod,
- FunctionKind::kConciseGeneratorMethod);
+ return IsInRange(
+ function_kind,
+ static_cast<uint32_t>(FunctionKind::kAsyncConciseGeneratorMethod),
+ static_cast<uint32_t>(FunctionKind::kConciseGeneratorMethod));
}
TNode<BoolT> CodeStubAssembler::IsJSFunctionWithPrototypeSlot(
@@ -6142,6 +6168,20 @@ void CodeStubAssembler::ThrowTypeError(TNode<Context> context,
Unreachable();
}
+TNode<HeapObject> CodeStubAssembler::GetPendingMessage() {
+ TNode<ExternalReference> pending_message = ExternalConstant(
+ ExternalReference::address_of_pending_message(isolate()));
+ return UncheckedCast<HeapObject>(LoadFullTagged(pending_message));
+}
+void CodeStubAssembler::SetPendingMessage(TNode<HeapObject> message) {
+ CSA_DCHECK(this, Word32Or(IsTheHole(message),
+ InstanceTypeEqual(LoadInstanceType(message),
+ JS_MESSAGE_OBJECT_TYPE)));
+ TNode<ExternalReference> pending_message = ExternalConstant(
+ ExternalReference::address_of_pending_message(isolate()));
+ StoreFullTaggedNoWriteBarrier(pending_message, message);
+}
+
TNode<BoolT> CodeStubAssembler::InstanceTypeEqual(TNode<Int32T> instance_type,
int type) {
return Word32Equal(instance_type, Int32Constant(type));
@@ -6362,8 +6402,8 @@ TNode<BoolT> CodeStubAssembler::IsSeqOneByteStringInstanceType(
CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type,
- Int32Constant(kStringRepresentationMask | kStringEncodingMask)),
- Int32Constant(kSeqStringTag | kOneByteStringTag));
+ Int32Constant(kStringRepresentationAndEncodingMask)),
+ Int32Constant(kSeqOneByteStringTag));
}
TNode<BoolT> CodeStubAssembler::IsConsStringInstanceType(
@@ -8089,6 +8129,25 @@ TNode<RawPtr<Uint16T>> CodeStubAssembler::ExternalTwoByteStringGetChars(
std::make_pair(MachineType::AnyTagged(), string)));
}
+TNode<RawPtr<Uint8T>> CodeStubAssembler::IntlAsciiCollationWeightsL1() {
+#ifdef V8_INTL_SUPPORT
+ TNode<RawPtrT> ptr =
+ ExternalConstant(ExternalReference::intl_ascii_collation_weights_l1());
+ return ReinterpretCast<RawPtr<Uint8T>>(ptr);
+#else
+ UNREACHABLE();
+#endif
+}
+TNode<RawPtr<Uint8T>> CodeStubAssembler::IntlAsciiCollationWeightsL3() {
+#ifdef V8_INTL_SUPPORT
+ TNode<RawPtrT> ptr =
+ ExternalConstant(ExternalReference::intl_ascii_collation_weights_l3());
+ return ReinterpretCast<RawPtr<Uint8T>>(ptr);
+#else
+ UNREACHABLE();
+#endif
+}
+
void CodeStubAssembler::TryInternalizeString(
TNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index,
Label* if_internalized, TVariable<Name>* var_internalized,
@@ -8561,7 +8620,9 @@ TNode<Object> CodeStubAssembler::BasicLoadNumberDictionaryElement(
TNode<Uint32T> details = LoadDetailsByKeyIndex(dictionary, index);
TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
// TODO(jkummerow): Support accessors without missing?
- GotoIfNot(Word32Equal(kind, Int32Constant(kData)), not_data);
+ GotoIfNot(
+ Word32Equal(kind, Int32Constant(static_cast<int>(PropertyKind::kData))),
+ not_data);
// Finally, load the value.
return LoadValueByKeyIndex(dictionary, index);
}
@@ -8607,7 +8668,7 @@ void CodeStubAssembler::InsertEntry<NameDictionary>(
StoreValueByKeyIndex<NameDictionary>(dictionary, index, value);
// Prepare details of the new property.
- PropertyDetails d(kData, NONE,
+ PropertyDetails d(PropertyKind::kData, NONE,
PropertyDetails::kConstIfDictConstnessTracking);
enum_index =
@@ -8677,10 +8738,10 @@ template <>
void CodeStubAssembler::Add(TNode<SwissNameDictionary> dictionary,
TNode<Name> key, TNode<Object> value,
Label* bailout) {
- PropertyDetails d(kData, NONE,
+ PropertyDetails d(PropertyKind::kData, NONE,
PropertyDetails::kConstIfDictConstnessTracking);
- PropertyDetails d_dont_enum(kData, DONT_ENUM,
+ PropertyDetails d_dont_enum(PropertyKind::kData, DONT_ENUM,
PropertyDetails::kConstIfDictConstnessTracking);
TNode<Uint8T> details_byte_enum =
UncheckedCast<Uint8T>(Uint32Constant(d.ToByte()));
@@ -9517,7 +9578,9 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
Label done(this), if_accessor_info(this, Label::kDeferred);
TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
- GotoIf(Word32Equal(kind, Int32Constant(kData)), &done);
+ GotoIf(
+ Word32Equal(kind, Int32Constant(static_cast<int>(PropertyKind::kData))),
+ &done);
// Accessor case.
GotoIfNot(IsAccessorPair(CAST(value)), &if_accessor_info);
@@ -11399,7 +11462,7 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
// Store an empty fixed array for the code dependency.
StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
- RootIndex::kEmptyWeakFixedArray);
+ DependentCode::kEmptyDependentCode);
// Link the object to the allocation site list
TNode<ExternalReference> site_list = ExternalConstant(
@@ -13830,8 +13893,8 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer) {
- return LoadObjectField<RawPtrT>(array_buffer,
- JSArrayBuffer::kBackingStoreOffset);
+ return LoadCagedPointerFromObject(array_buffer,
+ JSArrayBuffer::kBackingStoreOffset);
}
TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
@@ -13858,7 +13921,7 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLengthAndCheckDetached(
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(typed_array);
Label variable_length(this), fixed_length(this), end(this);
- Branch(IsVariableLengthTypedArray(typed_array), &variable_length,
+ Branch(IsVariableLengthJSArrayBufferView(typed_array), &variable_length,
&fixed_length);
BIND(&variable_length);
{
@@ -13881,36 +13944,55 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLengthAndCheckDetached(
// ES #sec-integerindexedobjectlength
TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
- TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss) {
+ TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer,
+ Label* detached_or_out_of_bounds) {
+ // byte_length already takes array's offset into account.
+ TNode<UintPtrT> byte_length = LoadVariableLengthJSArrayBufferViewByteLength(
+ array, buffer, detached_or_out_of_bounds);
+ TNode<IntPtrT> element_size =
+ RabGsabElementsKindToElementByteSize(LoadElementsKind(array));
+ return Unsigned(IntPtrDiv(Signed(byte_length), element_size));
+}
+
+TNode<UintPtrT>
+CodeStubAssembler::LoadVariableLengthJSArrayBufferViewByteLength(
+ TNode<JSArrayBufferView> array, TNode<JSArrayBuffer> buffer,
+ Label* detached_or_out_of_bounds) {
Label is_gsab(this), is_rab(this), end(this);
TVARIABLE(UintPtrT, result);
+ TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
Branch(IsSharedArrayBuffer(buffer), &is_gsab, &is_rab);
BIND(&is_gsab);
{
- // Non-length-tracking GSAB-backed TypedArrays shouldn't end up here.
- CSA_DCHECK(this, IsLengthTrackingTypedArray(array));
+ // Non-length-tracking GSAB-backed ArrayBufferViews shouldn't end up here.
+ CSA_DCHECK(this, IsLengthTrackingJSArrayBufferView(array));
// Read the byte length from the BackingStore.
- const TNode<ExternalReference> length_function = ExternalConstant(
- ExternalReference::length_tracking_gsab_backed_typed_array_length());
+ const TNode<ExternalReference> byte_length_function =
+ ExternalConstant(ExternalReference::gsab_byte_length());
TNode<ExternalReference> isolate_ptr =
ExternalConstant(ExternalReference::isolate_address(isolate()));
- result = UncheckedCast<UintPtrT>(
- CallCFunction(length_function, MachineType::UintPtr(),
+ TNode<UintPtrT> buffer_byte_length = UncheckedCast<UintPtrT>(
+ CallCFunction(byte_length_function, MachineType::UintPtr(),
std::make_pair(MachineType::Pointer(), isolate_ptr),
- std::make_pair(MachineType::AnyTagged(), array)));
+ std::make_pair(MachineType::AnyTagged(), buffer)));
+ // Since the SharedArrayBuffer can't shrink, and we've managed to create
+ // this JSArrayBufferDataView without throwing an exception, we know that
+ // buffer_byte_length >= array_byte_offset.
+ CSA_CHECK(this,
+ UintPtrGreaterThanOrEqual(buffer_byte_length, array_byte_offset));
+ result = UintPtrSub(buffer_byte_length, array_byte_offset);
Goto(&end);
}
BIND(&is_rab);
{
- GotoIf(IsDetachedBuffer(buffer), miss);
+ GotoIf(IsDetachedBuffer(buffer), detached_or_out_of_bounds);
TNode<UintPtrT> buffer_byte_length = LoadJSArrayBufferByteLength(buffer);
- TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
Label is_length_tracking(this), not_length_tracking(this);
- Branch(IsLengthTrackingTypedArray(array), &is_length_tracking,
+ Branch(IsLengthTrackingJSArrayBufferView(array), &is_length_tracking,
&not_length_tracking);
BIND(&is_length_tracking);
@@ -13918,16 +14000,8 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
// The backing RAB might have been shrunk so that the start of the
// TypedArray is already out of bounds.
GotoIfNot(UintPtrLessThanOrEqual(array_byte_offset, buffer_byte_length),
- miss);
- // length = (buffer_byte_length - byte_offset) / element_size
- // Conversion to signed is OK since buffer_byte_length <
- // JSArrayBuffer::kMaxByteLength.
- TNode<IntPtrT> element_size =
- RabGsabElementsKindToElementByteSize(LoadElementsKind(array));
- TNode<IntPtrT> length =
- IntPtrDiv(Signed(UintPtrSub(buffer_byte_length, array_byte_offset)),
- element_size);
- result = Unsigned(length);
+ detached_or_out_of_bounds);
+ result = UintPtrSub(buffer_byte_length, array_byte_offset);
Goto(&end);
}
@@ -13940,8 +14014,8 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
GotoIfNot(UintPtrGreaterThanOrEqual(
buffer_byte_length,
UintPtrAdd(array_byte_offset, array_byte_length)),
- miss);
- result = LoadJSTypedArrayLength(array);
+ detached_or_out_of_bounds);
+ result = array_byte_length;
Goto(&end);
}
}
@@ -13949,13 +14023,13 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
return result.value();
}
-void CodeStubAssembler::IsJSTypedArrayDetachedOrOutOfBounds(
- TNode<JSTypedArray> array, Label* detached_or_oob,
+void CodeStubAssembler::IsJSArrayBufferViewDetachedOrOutOfBounds(
+ TNode<JSArrayBufferView> array, Label* detached_or_oob,
Label* not_detached_nor_oob) {
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
GotoIf(IsDetachedBuffer(buffer), detached_or_oob);
- GotoIfNot(IsVariableLengthTypedArray(array), not_detached_nor_oob);
+ GotoIfNot(IsVariableLengthJSArrayBufferView(array), not_detached_nor_oob);
GotoIf(IsSharedArrayBuffer(buffer), not_detached_nor_oob);
{
@@ -13963,7 +14037,7 @@ void CodeStubAssembler::IsJSTypedArrayDetachedOrOutOfBounds(
TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
Label length_tracking(this), not_length_tracking(this);
- Branch(IsLengthTrackingTypedArray(array), &length_tracking,
+ Branch(IsLengthTrackingJSArrayBufferView(array), &length_tracking,
&not_length_tracking);
BIND(&length_tracking);
@@ -14066,10 +14140,10 @@ TNode<JSArrayBuffer> CodeStubAssembler::GetTypedArrayBuffer(
Label call_runtime(this), done(this);
TVARIABLE(Object, var_result);
+ GotoIf(IsOnHeapTypedArray(array), &call_runtime);
+
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
GotoIf(IsDetachedBuffer(buffer), &call_runtime);
- TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStorePtr(buffer);
- GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
var_result = buffer;
Goto(&done);
@@ -14332,24 +14406,30 @@ TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
return Word32NotEqual(flags, Int32Constant(0));
}
-TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
+TNode<CodeT> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
TNode<IntPtrT> offset =
ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
- return CAST(BitcastWordToTagged(Load<RawPtrT>(
- ExternalConstant(ExternalReference::builtins_address(isolate())),
- offset)));
+ TNode<ExternalReference> table = ExternalConstant(
+#ifdef V8_EXTERNAL_CODE_SPACE
+ ExternalReference::builtins_code_data_container_table(isolate())
+#else
+ ExternalReference::builtins_table(isolate())
+#endif // V8_EXTERNAL_CODE_SPACE
+ ); // NOLINT(whitespace/parens)
+
+ return CAST(BitcastWordToTagged(Load<RawPtrT>(table, offset)));
}
-TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
+TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
TNode<SharedFunctionInfo> shared_info, TVariable<Uint16T>* data_type_out,
Label* if_compile_lazy) {
TNode<Object> sfi_data =
LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
- TVARIABLE(Code, sfi_code);
+ TVARIABLE(CodeT, sfi_code);
Label done(this);
Label check_instance_type(this);
@@ -14378,6 +14458,8 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
CODET_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
+ UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE,
+ UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
#if V8_ENABLE_WEBASSEMBLY
WASM_CAPI_FUNCTION_DATA_TYPE,
@@ -14389,16 +14471,17 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
Label check_is_bytecode_array(this);
Label check_is_baseline_data(this);
Label check_is_asm_wasm_data(this);
- Label check_is_uncompiled_data_without_preparse_data(this);
- Label check_is_uncompiled_data_with_preparse_data(this);
+ Label check_is_uncompiled_data(this);
Label check_is_function_template_info(this);
Label check_is_interpreter_data(this);
Label check_is_wasm_function_data(this);
Label* case_labels[] = {
&check_is_bytecode_array,
&check_is_baseline_data,
- &check_is_uncompiled_data_without_preparse_data,
- &check_is_uncompiled_data_with_preparse_data,
+ &check_is_uncompiled_data,
+ &check_is_uncompiled_data,
+ &check_is_uncompiled_data,
+ &check_is_uncompiled_data,
&check_is_function_template_info,
#if V8_ENABLE_WEBASSEMBLY
&check_is_wasm_function_data,
@@ -14413,28 +14496,26 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBytecodeArray: Interpret bytecode
BIND(&check_is_bytecode_array);
- sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
+ sfi_code = HeapConstant(BUILTIN_CODET(isolate(), InterpreterEntryTrampoline));
Goto(&done);
// IsBaselineData: Execute baseline code
BIND(&check_is_baseline_data);
{
TNode<CodeT> baseline_code = CAST(sfi_data);
- sfi_code = FromCodeT(baseline_code);
+ sfi_code = baseline_code;
Goto(&done);
}
// IsUncompiledDataWithPreparseData | IsUncompiledDataWithoutPreparseData:
// Compile lazy
- BIND(&check_is_uncompiled_data_with_preparse_data);
- Goto(&check_is_uncompiled_data_without_preparse_data);
- BIND(&check_is_uncompiled_data_without_preparse_data);
- sfi_code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+ BIND(&check_is_uncompiled_data);
+ sfi_code = HeapConstant(BUILTIN_CODET(isolate(), CompileLazy));
Goto(if_compile_lazy ? if_compile_lazy : &done);
// IsFunctionTemplateInfo: API call
BIND(&check_is_function_template_info);
- sfi_code = HeapConstant(BUILTIN_CODE(isolate(), HandleApiCall));
+ sfi_code = HeapConstant(BUILTIN_CODET(isolate(), HandleApiCall));
Goto(&done);
// IsInterpreterData: Interpret bytecode
@@ -14445,7 +14526,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
{
TNode<CodeT> trampoline =
LoadInterpreterDataInterpreterTrampoline(CAST(sfi_data));
- sfi_code = FromCodeT(trampoline);
+ sfi_code = trampoline;
}
Goto(&done);
@@ -14458,7 +14539,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsAsmWasmData: Instantiate using AsmWasmData
BIND(&check_is_asm_wasm_data);
- sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
+ sfi_code = HeapConstant(BUILTIN_CODET(isolate(), InstantiateAsmJs));
Goto(&done);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -14482,8 +14563,7 @@ TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeT> code) {
TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
TNode<Context> context) {
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- const TNode<Code> code = GetSharedFunctionInfoCode(shared_info);
+ const TNode<CodeT> code = GetSharedFunctionInfoCode(shared_info);
// TODO(ishell): All the callers of this function pass map loaded from
// Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
@@ -14502,7 +14582,7 @@ TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
- StoreObjectField(fun, JSFunction::kCodeOffset, ToCodeT(code));
+ StoreObjectField(fun, JSFunction::kCodeOffset, code);
return CAST(fun);
}
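
Note: in the GSAB branch of LoadVariableLengthJSArrayBufferViewByteLength above, a byte-length C call replaces the old length-in-elements one, and the view's byte offset is subtracted afterwards; the CSA_CHECK rests on the invariant stated in the new comment. A standalone sketch of just that arithmetic and its invariant:

    #include <cassert>
    #include <cstdint>

    // Growable SharedArrayBuffers never shrink, and the view was constructed
    // in bounds, so offset <= buffer length holds for the view's lifetime.
    uintptr_t GsabViewByteLength(uintptr_t buffer_byte_length,
                                 uintptr_t array_byte_offset) {
      assert(buffer_byte_length >= array_byte_offset);  // the CSA_CHECK above
      return buffer_byte_length - array_byte_offset;    // cannot wrap
    }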
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 4d16af8a3d..109bd9cfa4 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -9,6 +9,7 @@
#include "src/base/macros.h"
#include "src/codegen/bailout-reason.h"
+#include "src/codegen/tnode.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/compiler/code-assembler.h"
@@ -1042,32 +1043,29 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
-#ifdef V8_CAGED_POINTERS
-
//
// Caged pointer related functionality.
//
// Load a caged pointer value from an object.
- TNode<CagedPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
- int offset) {
+ TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
+ int offset) {
return LoadCagedPointerFromObject(object, IntPtrConstant(offset));
}
- TNode<CagedPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
- TNode<IntPtrT> offset);
+ TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
+ TNode<IntPtrT> offset);
// Stored a caged pointer value to an object.
void StoreCagedPointerToObject(TNode<HeapObject> object, int offset,
- TNode<CagedPtrT> pointer) {
+ TNode<RawPtrT> pointer) {
StoreCagedPointerToObject(object, IntPtrConstant(offset), pointer);
}
void StoreCagedPointerToObject(TNode<HeapObject> object,
- TNode<IntPtrT> offset,
- TNode<CagedPtrT> pointer);
+ TNode<IntPtrT> offset, TNode<RawPtrT> pointer);
-#endif // V8_CAGED_POINTERS
+ TNode<RawPtrT> EmptyBackingStoreBufferConstant();
//
// ExternalPointerT-related functionality.
@@ -1147,14 +1145,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<RawPtrT> LoadJSTypedArrayExternalPointerPtr(
TNode<JSTypedArray> holder) {
- return LoadObjectField<RawPtrT>(holder,
- JSTypedArray::kExternalPointerOffset);
+ return LoadCagedPointerFromObject(holder,
+ JSTypedArray::kExternalPointerOffset);
}
void StoreJSTypedArrayExternalPointerPtr(TNode<JSTypedArray> holder,
TNode<RawPtrT> value) {
- StoreObjectFieldNoWriteBarrier<RawPtrT>(
- holder, JSTypedArray::kExternalPointerOffset, value);
+ StoreCagedPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
+ value);
}
// Load value from current parent frame by given offset in bytes.
@@ -1178,6 +1176,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> LoadBufferIntptr(TNode<RawPtrT> buffer, int offset) {
return LoadBufferData<IntPtrT>(buffer, offset);
}
+ TNode<Uint8T> LoadUint8Ptr(TNode<RawPtrT> ptr, TNode<IntPtrT> offset);
+
// Load a field from an object on the heap.
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<Object>>::value &&
@@ -2461,6 +2461,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
base::Optional<TNode<Object>> arg1 = base::nullopt,
base::Optional<TNode<Object>> arg2 = base::nullopt);
+ TNode<HeapObject> GetPendingMessage();
+ void SetPendingMessage(TNode<HeapObject> message);
+
// Type checks.
// Check whether the map is for an object with special properties, such as a
// JSProxy or an object with interceptors.
@@ -2937,6 +2940,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<RawPtr<Uint16T>> ExternalTwoByteStringGetChars(
TNode<ExternalTwoByteString> string);
+ TNode<RawPtr<Uint8T>> IntlAsciiCollationWeightsL1();
+ TNode<RawPtr<Uint8T>> IntlAsciiCollationWeightsL3();
+
// Performs a hash computation and string table lookup for the given string,
// and jumps to:
// - |if_index| if the string is an array index like "123"; |var_index|
@@ -3603,15 +3609,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Helper for length tracking JSTypedArrays and JSTypedArrays backed by
// ResizableArrayBuffer.
TNode<UintPtrT> LoadVariableLengthJSTypedArrayLength(
- TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss);
+ TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer,
+ Label* detached_or_out_of_bounds);
// Helper for length tracking JSTypedArrays and JSTypedArrays backed by
// ResizableArrayBuffer.
TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
TNode<Context> context, TNode<JSTypedArray> array,
TNode<JSArrayBuffer> buffer);
- void IsJSTypedArrayDetachedOrOutOfBounds(TNode<JSTypedArray> array,
- Label* detached_or_oob,
- Label* not_detached_nor_oob);
+ TNode<UintPtrT> LoadVariableLengthJSArrayBufferViewByteLength(
+ TNode<JSArrayBufferView> array, TNode<JSArrayBuffer> buffer,
+ Label* detached_or_out_of_bounds);
+
+ void IsJSArrayBufferViewDetachedOrOutOfBounds(TNode<JSArrayBufferView> array,
+ Label* detached_or_oob,
+ Label* not_detached_nor_oob);
TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
TNode<Int32T> elementsKind);
@@ -3629,7 +3640,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ElementsKind kind = HOLEY_ELEMENTS);
// Load a builtin's code from the builtin array in the isolate.
- TNode<Code> LoadBuiltin(TNode<Smi> builtin_id);
+ TNode<CodeT> LoadBuiltin(TNode<Smi> builtin_id);
// Figure out the SFI's code object using its data field.
// If |data_type_out| is provided, the instance type of the function data will
@@ -3637,7 +3648,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// data_type_out will be set to 0.
// If |if_compile_lazy| is provided then the execution will go to the given
// label in case of an CompileLazy code object.
- TNode<Code> GetSharedFunctionInfoCode(
+ TNode<CodeT> GetSharedFunctionInfoCode(
TNode<SharedFunctionInfo> shared_info,
TVariable<Uint16T>* data_type_out = nullptr,
Label* if_compile_lazy = nullptr);
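
Note: after these header changes the caged-pointer accessors exist unconditionally and traffic in TNode<RawPtrT>; only the implementation (in the code-stub-assembler.cc hunks above) branches on V8_CAGED_POINTERS. A standalone sketch of that interface shape, where DecodeCagedPointer is a hypothetical stand-in, not a V8 function:

    #include <cstdint>
    #include <cstring>

    using RawPtr = uintptr_t;

    RawPtr DecodeCagedPointer(const uint8_t* object, int offset);  // hypothetical

    // One unconditional signature; the cage is an implementation detail.
    RawPtr LoadPointerField(const uint8_t* object, int offset) {
    #ifdef V8_CAGED_POINTERS
      return DecodeCagedPointer(object, offset);  // caged build: decode CagedPtr
    #else
      RawPtr raw;                                 // uncaged build: raw load
      std::memcpy(&raw, object + offset, sizeof(raw));
      return raw;
    #endif
    }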
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 861bd2904f..725f054c4e 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -136,12 +136,16 @@ bool HasOrigin(Isolate* isolate, Handle<SharedFunctionInfo> function_info,
return false;
}
- Handle<FixedArray> host_defined_options;
- if (!script_details.host_defined_options.ToHandle(&host_defined_options)) {
- host_defined_options = isolate->factory()->empty_fixed_array();
+ // TODO(cbruni, chromium:1244145): Remove once migrated to the context
+ Handle<Object> maybe_host_defined_options;
+ if (!script_details.host_defined_options.ToHandle(
+ &maybe_host_defined_options)) {
+ maybe_host_defined_options = isolate->factory()->empty_fixed_array();
}
-
- Handle<FixedArray> script_options(script->host_defined_options(), isolate);
+ Handle<FixedArray> host_defined_options =
+ Handle<FixedArray>::cast(maybe_host_defined_options);
+ Handle<FixedArray> script_options(
+ FixedArray::cast(script->host_defined_options()), isolate);
int length = host_defined_options->length();
if (length != script_options->length()) return false;
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index b7eafaf0d9..d603298897 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -36,7 +36,9 @@
#include "src/execution/local-isolate.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/handles.h"
#include "src/handles/maybe-handles.h"
+#include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/local-factory-inl.h"
#include "src/heap/local-heap-inl.h"
@@ -551,7 +553,7 @@ void InstallInterpreterTrampolineCopy(
INTERPRETER_DATA_TYPE, AllocationType::kOld));
interpreter_data->set_bytecode_array(*bytecode_array);
- interpreter_data->set_interpreter_trampoline(*code);
+ interpreter_data->set_interpreter_trampoline(ToCodeT(*code));
shared_info->set_interpreter_data(*interpreter_data);
@@ -637,16 +639,18 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
SharedFunctionInfo shared_info) {
DCHECK_EQ(shared_info.language_mode(), literal->language_mode());
+ // These fields are all initialised in ParseInfo from the SharedFunctionInfo,
+ // and then set back on the literal after parse. Hence, they should already
+ // match.
+ DCHECK_EQ(shared_info.requires_instance_members_initializer(),
+ literal->requires_instance_members_initializer());
+ DCHECK_EQ(shared_info.class_scope_has_private_brand(),
+ literal->class_scope_has_private_brand());
+ DCHECK_EQ(shared_info.has_static_private_methods_or_accessors(),
+ literal->has_static_private_methods_or_accessors());
+
shared_info.set_has_duplicate_parameters(literal->has_duplicate_parameters());
shared_info.UpdateAndFinalizeExpectedNofPropertiesFromEstimate(literal);
- if (literal->dont_optimize_reason() != BailoutReason::kNoReason) {
- shared_info.DisableOptimization(literal->dont_optimize_reason());
- }
-
- shared_info.set_class_scope_has_private_brand(
- literal->class_scope_has_private_brand());
- shared_info.set_has_static_private_methods_or_accessors(
- literal->has_static_private_methods_or_accessors());
shared_info.SetScopeInfo(*literal->scope()->scope_info());
}
@@ -683,7 +687,7 @@ CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
std::unique_ptr<UnoptimizedCompilationJob>
ExecuteSingleUnoptimizedCompilationJob(
- ParseInfo* parse_info, FunctionLiteral* literal,
+ ParseInfo* parse_info, FunctionLiteral* literal, Handle<Script> script,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals,
LocalIsolate* local_isolate) {
@@ -703,7 +707,8 @@ ExecuteSingleUnoptimizedCompilationJob(
#endif
std::unique_ptr<UnoptimizedCompilationJob> job(
interpreter::Interpreter::NewCompilationJob(
- parse_info, literal, allocator, eager_inner_literals, local_isolate));
+ parse_info, literal, script, allocator, eager_inner_literals,
+ local_isolate));
if (job->ExecuteJob() != CompilationJob::SUCCEEDED) {
// Compilation failed, return null.
@@ -713,33 +718,6 @@ ExecuteSingleUnoptimizedCompilationJob(
return job;
}
-bool RecursivelyExecuteUnoptimizedCompilationJobs(
- ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator,
- UnoptimizedCompilationJobList* function_jobs) {
- std::vector<FunctionLiteral*> eager_inner_literals;
-
- // We need to pass nullptr here because we are on the background
- // thread but don't have a LocalIsolate.
- DCHECK_NULL(LocalHeap::Current());
- std::unique_ptr<UnoptimizedCompilationJob> job =
- ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, allocator,
- &eager_inner_literals, nullptr);
-
- if (!job) return false;
-
- // Recursively compile eager inner literals.
- for (FunctionLiteral* inner_literal : eager_inner_literals) {
- if (!RecursivelyExecuteUnoptimizedCompilationJobs(
- parse_info, inner_literal, allocator, function_jobs)) {
- return false;
- }
- }
-
- function_jobs->emplace_front(std::move(job));
- return true;
-}
-
template <typename IsolateT>
bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
IsolateT* isolate, Handle<SharedFunctionInfo> outer_shared_info,
@@ -754,16 +732,28 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
std::vector<FunctionLiteral*> functions_to_compile;
functions_to_compile.push_back(parse_info->literal());
+ bool is_first = true;
while (!functions_to_compile.empty()) {
FunctionLiteral* literal = functions_to_compile.back();
functions_to_compile.pop_back();
- Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfo(literal, script, isolate);
+ Handle<SharedFunctionInfo> shared_info;
+ if (is_first) {
+ // We get the first SharedFunctionInfo directly as outer_shared_info
+ // rather than with Compiler::GetSharedFunctionInfo, to support
+ // placeholder SharedFunctionInfos that aren't on the script's SFI list.
+ DCHECK_EQ(literal->function_literal_id(),
+ outer_shared_info->function_literal_id());
+ shared_info = outer_shared_info;
+ is_first = false;
+ } else {
+ shared_info = Compiler::GetSharedFunctionInfo(literal, script, isolate);
+ }
+
if (shared_info->is_compiled()) continue;
std::unique_ptr<UnoptimizedCompilationJob> job =
- ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, allocator,
- &functions_to_compile,
+ ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, script,
+ allocator, &functions_to_compile,
isolate->AsLocalIsolate());
if (!job) return false;
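
Note: this hunk, together with the RecursivelyExecuteUnoptimizedCompilationJobs deletion above, drives compilation of eager inner functions from an explicit worklist instead of recursion. A standalone sketch of the shape of that transformation (names are illustrative, not V8's):

    #include <vector>

    struct FunctionLiteral;
    // Compiles one function; appends any eagerly-compiled inner functions.
    bool CompileOne(FunctionLiteral* f, std::vector<FunctionLiteral*>* out);

    bool CompileAllIteratively(FunctionLiteral* outer) {
      std::vector<FunctionLiteral*> worklist{outer};
      while (!worklist.empty()) {              // native stack usage stays
        FunctionLiteral* f = worklist.back();  // bounded, unlike recursion
        worklist.pop_back();
        if (!CompileOne(f, &worklist)) return false;
      }
      return true;
    }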
@@ -809,44 +799,6 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
return true;
}
-bool FinalizeAllUnoptimizedCompilationJobs(
- ParseInfo* parse_info, Isolate* isolate, Handle<Script> script,
- UnoptimizedCompilationJobList* compilation_jobs,
- FinalizeUnoptimizedCompilationDataList*
- finalize_unoptimized_compilation_data_list) {
- DCHECK(AllowCompilation::IsAllowed(isolate));
- DCHECK(!compilation_jobs->empty());
-
- // TODO(rmcilroy): Clear native context in debug once AsmJS generates doesn't
- // rely on accessing native context during finalization.
-
- // Allocate scope infos for the literal.
- DeclarationScope::AllocateScopeInfos(parse_info, isolate);
-
- // Finalize the functions' compilation jobs.
- for (auto&& job : *compilation_jobs) {
- FunctionLiteral* literal = job->compilation_info()->literal();
- Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfo(literal, script, isolate);
- // The inner function might be compiled already if compiling for debug.
- if (shared_info->is_compiled()) continue;
- UpdateSharedFunctionFlagsAfterCompilation(literal, *shared_info);
- if (FinalizeSingleUnoptimizedCompilationJob(
- job.get(), shared_info, isolate,
- finalize_unoptimized_compilation_data_list) !=
- CompilationJob::SUCCEEDED) {
- return false;
- }
- }
-
- // Report any warnings generated during compilation.
- if (parse_info->pending_error_handler()->has_pending_warnings()) {
- parse_info->pending_error_handler()->PrepareWarnings(isolate);
- }
-
- return true;
-}
-
bool FinalizeDeferredUnoptimizedCompilationJobs(
Isolate* isolate, Handle<Script> script,
DeferredFinalizationJobDataList* deferred_jobs,
@@ -1072,9 +1024,9 @@ Handle<Code> ContinuationForConcurrentOptimization(
}
return handle(function->code(), isolate);
} else if (function->shared().HasBaselineCode()) {
- Code baseline_code = function->shared().baseline_code(kAcquireLoad);
+ CodeT baseline_code = function->shared().baseline_code(kAcquireLoad);
function->set_code(baseline_code);
- return handle(baseline_code, isolate);
+ return handle(FromCodeT(baseline_code), isolate);
}
DCHECK(function->ActiveTierIsIgnition());
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
@@ -1102,7 +1054,7 @@ MaybeHandle<Code> GetOptimizedCode(
if (function->HasOptimizationMarker()) function->ClearOptimizationMarker();
if (shared->optimization_disabled() &&
- shared->disable_optimization_reason() == BailoutReason::kNeverOptimize) {
+ shared->disabled_optimization_reason() == BailoutReason::kNeverOptimize) {
return {};
}
@@ -1211,7 +1163,12 @@ bool PreparePendingException(IsolateT* isolate, ParseInfo* parse_info) {
bool FailWithPreparedPendingException(
Isolate* isolate, Handle<Script> script,
- const PendingCompilationErrorHandler* pending_error_handler) {
+ const PendingCompilationErrorHandler* pending_error_handler,
+ Compiler::ClearExceptionFlag flag = Compiler::KEEP_EXCEPTION) {
+ if (flag == Compiler::CLEAR_EXCEPTION) {
+ return FailAndClearPendingException(isolate);
+ }
+
if (!isolate->has_pending_exception()) {
if (pending_error_handler->has_pending_error()) {
pending_error_handler->ReportErrors(isolate, script);
@@ -1225,13 +1182,9 @@ bool FailWithPreparedPendingException(
bool FailWithPendingException(Isolate* isolate, Handle<Script> script,
ParseInfo* parse_info,
Compiler::ClearExceptionFlag flag) {
- if (flag == Compiler::CLEAR_EXCEPTION) {
- return FailAndClearPendingException(isolate);
- }
-
PreparePendingException(isolate, parse_info);
- return FailWithPreparedPendingException(isolate, script,
- parse_info->pending_error_handler());
+ return FailWithPreparedPendingException(
+ isolate, script, parse_info->pending_error_handler(), flag);
}
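
The CLEAR_EXCEPTION check moves from FailWithPendingException into FailWithPreparedPendingException so that background-finalization paths, which prepare the exception on the worker and call the prepared variant directly (see FinalizeFunction below), honor the same flag. A sketch of the two call shapes after this change, using the signatures from these hunks:

    // Main-thread path: prepare the exception, then fail with the flag.
    PreparePendingException(isolate, parse_info);
    FailWithPreparedPendingException(isolate, script,
                                     parse_info->pending_error_handler(),
                                     Compiler::CLEAR_EXCEPTION);
    // Background path: the worker already prepared the exception, so the
    // main thread runs only the prepared variant (default KEEP_EXCEPTION).
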
void FinalizeUnoptimizedCompilation(
@@ -1293,24 +1246,6 @@ void FinalizeUnoptimizedScriptCompilation(
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- UnoptimizedCompileState::ParallelTasks* parallel_tasks =
- compile_state->parallel_tasks();
- if (parallel_tasks) {
- LazyCompileDispatcher* dispatcher = parallel_tasks->dispatcher();
- for (auto& it : *parallel_tasks) {
- FunctionLiteral* literal = it.first;
- LazyCompileDispatcher::JobId job_id = it.second;
- MaybeHandle<SharedFunctionInfo> maybe_shared_for_task =
- Script::FindSharedFunctionInfo(script, isolate, literal);
- Handle<SharedFunctionInfo> shared_for_task;
- if (maybe_shared_for_task.ToHandle(&shared_for_task)) {
- dispatcher->RegisterSharedFunctionInfo(job_id, *shared_for_task);
- } else {
- dispatcher->AbortJob(job_id);
- }
- }
- }
-
if (isolate->NeedsSourcePositionsForProfiling()) {
Script::InitLineEnds(isolate, script);
}
@@ -1373,8 +1308,6 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
parse_info->flags().is_eval() ? "V8.CompileEval" : "V8.Compile");
- // Prepare and execute compilation of the outer-most function.
-
// Create the SharedFunctionInfo and add it to the script's list.
Handle<SharedFunctionInfo> shared_info =
CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
@@ -1382,6 +1315,7 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
FinalizeUnoptimizedCompilationDataList
finalize_unoptimized_compilation_data_list;
+ // Prepare and execute compilation of the outer-most function.
if (!IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
isolate, shared_info, script, parse_info, isolate->allocator(),
is_compiled_scope, &finalize_unoptimized_compilation_data_list,
@@ -1418,57 +1352,6 @@ RuntimeCallCounterId RuntimeCallCounterIdForCompileBackground(
}
#endif // V8_RUNTIME_CALL_STATS
-MaybeHandle<SharedFunctionInfo> CompileAndFinalizeOnBackgroundThread(
- ParseInfo* parse_info, AccountingAllocator* allocator,
- Handle<Script> script, LocalIsolate* isolate,
- FinalizeUnoptimizedCompilationDataList*
- finalize_unoptimized_compilation_data_list,
- DeferredFinalizationJobDataList* jobs_to_retry_finalization_on_main_thread,
- IsCompiledScope* is_compiled_scope) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompileCodeBackground");
- RCS_SCOPE(parse_info->runtime_call_stats(),
- RuntimeCallCounterIdForCompileBackground(parse_info));
-
- Handle<SharedFunctionInfo> shared_info =
- CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
-
- if (!IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
- isolate, shared_info, script, parse_info, allocator,
- is_compiled_scope, finalize_unoptimized_compilation_data_list,
- jobs_to_retry_finalization_on_main_thread)) {
- return kNullMaybeHandle;
- }
-
- // Character stream shouldn't be used again.
- parse_info->ResetCharacterStream();
-
- return shared_info;
-}
-
-// TODO(leszeks): Remove this once off-thread finalization is always on.
-void CompileOnBackgroundThread(ParseInfo* parse_info,
- AccountingAllocator* allocator,
- UnoptimizedCompilationJobList* jobs) {
- DisallowHeapAccess no_heap_access;
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompileCodeBackground");
- RCS_SCOPE(parse_info->runtime_call_stats(),
- RuntimeCallCounterIdForCompileBackground(parse_info));
-
- // Generate the unoptimized bytecode or asm-js data.
- DCHECK(jobs->empty());
-
- bool success = RecursivelyExecuteUnoptimizedCompilationJobs(
- parse_info, parse_info->literal(), allocator, jobs);
-
- USE(success);
- DCHECK_EQ(success, !jobs->empty());
-
- // Character stream shouldn't be used again.
- parse_info->ResetCharacterStream();
-}
-
} // namespace
CompilationHandleScope::~CompilationHandleScope() {
@@ -1494,204 +1377,327 @@ DeferredFinalizationJobData::DeferredFinalizationJobData(
BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
Isolate* isolate, ScriptType type)
- : flags_(UnoptimizedCompileFlags::ForToplevelCompile(
+ : isolate_for_local_isolate_(isolate),
+ flags_(UnoptimizedCompileFlags::ForToplevelCompile(
isolate, true, construct_language_mode(FLAG_use_strict),
REPLMode::kNo, type, FLAG_lazy_streaming)),
- compile_state_(isolate),
- info_(std::make_unique<ParseInfo>(isolate, flags_, &compile_state_)),
- isolate_for_local_isolate_(isolate),
- start_position_(0),
- end_position_(0),
- function_literal_id_(kFunctionLiteralIdTopLevel),
+ character_stream_(ScannerStream::For(streamed_data->source_stream.get(),
+ streamed_data->encoding)),
stack_size_(i::FLAG_stack_size),
worker_thread_runtime_call_stats_(
isolate->counters()->worker_thread_runtime_call_stats()),
timer_(isolate->counters()->compile_script_on_background()),
- language_mode_(info_->language_mode()) {
+ start_position_(0),
+ end_position_(0),
+ function_literal_id_(kFunctionLiteralIdTopLevel),
+ language_mode_(flags_.outer_language_mode()) {
VMState<PARSER> state(isolate);
- // Prepare the data for the internalization phase and compilation phase, which
- // will happen in the main thread after parsing.
-
LOG(isolate, ScriptEvent(Logger::ScriptEventType::kStreamingCompile,
- info_->flags().script_id()));
-
- std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
- streamed_data->source_stream.get(), streamed_data->encoding));
- info_->set_character_stream(std::move(stream));
+ flags_.script_id()));
}
BackgroundCompileTask::BackgroundCompileTask(
- const ParseInfo* outer_parse_info, const AstRawString* function_name,
- const FunctionLiteral* function_literal,
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ std::unique_ptr<Utf16CharacterStream> character_stream,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
TimedHistogram* timer, int max_stack_size)
- : flags_(UnoptimizedCompileFlags::ForToplevelFunction(
- outer_parse_info->flags(), function_literal)),
- compile_state_(*outer_parse_info->state()),
- info_(ParseInfo::ForToplevelFunction(flags_, &compile_state_,
- function_literal, function_name)),
- isolate_for_local_isolate_(nullptr),
- start_position_(function_literal->start_position()),
- end_position_(function_literal->end_position()),
- function_literal_id_(function_literal->function_literal_id()),
+ : isolate_for_local_isolate_(isolate),
+ // TODO(leszeks): Create this from parent compile flags, to avoid
+ // accessing the Isolate.
+ flags_(
+ UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info)),
+ character_stream_(std::move(character_stream)),
stack_size_(max_stack_size),
worker_thread_runtime_call_stats_(worker_thread_runtime_stats),
timer_(timer),
- language_mode_(info_->language_mode()) {
- DCHECK_EQ(outer_parse_info->parameters_end_pos(), kNoSourcePosition);
- DCHECK_NULL(outer_parse_info->extension());
+ input_shared_info_(shared_info),
+ start_position_(shared_info->StartPosition()),
+ end_position_(shared_info->EndPosition()),
+ function_literal_id_(shared_info->function_literal_id()),
+ language_mode_(flags_.outer_language_mode()) {
+ DCHECK(!shared_info->is_toplevel());
- DCHECK(!function_literal->is_toplevel());
+ character_stream_->Seek(start_position_);
- // Clone the character stream so both can be accessed independently.
- std::unique_ptr<Utf16CharacterStream> character_stream =
- outer_parse_info->character_stream()->Clone();
- character_stream->Seek(start_position_);
- info_->set_character_stream(std::move(character_stream));
-
- // Get preparsed scope data from the function literal.
- if (function_literal->produced_preparse_data()) {
- ZonePreparseData* serialized_data =
- function_literal->produced_preparse_data()->Serialize(info_->zone());
- info_->set_consumed_preparse_data(
- ConsumedPreparseData::For(info_->zone(), serialized_data));
- }
+ // Turn the input SharedFunctionInfo into a persistent handle we can
+ // transfer to the background thread.
+ persistent_handles_ = std::make_unique<PersistentHandles>(isolate);
+ input_shared_info_ = persistent_handles_->NewHandle(shared_info);
}
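
This constructor establishes the persistent-handle hand-off that lets the task touch |shared_info| off the main thread: handles minted from a PersistentHandles container survive outside a HandleScope, and the container is later attached to the worker's LocalIsolate heap in Run(). A condensed sketch built from the calls in this diff:

    // Main thread: park the input SFI in a transferable handle container.
    auto persistent_handles = std::make_unique<PersistentHandles>(isolate);
    Handle<SharedFunctionInfo> input_sfi =
        persistent_handles->NewHandle(shared_info);
    // Worker thread, inside Run():
    //   local_isolate->heap()->AttachPersistentHandles(
    //       std::move(persistent_handles));
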
BackgroundCompileTask::~BackgroundCompileTask() = default;
namespace {
-// A scope object that ensures a parse info's runtime call stats and stack limit
-// are set correctly during worker-thread compile, and restores it after going
-// out of scope.
-class V8_NODISCARD OffThreadParseInfoScope {
- public:
- OffThreadParseInfoScope(
- ParseInfo* parse_info,
- WorkerThreadRuntimeCallStats* worker_thread_runtime_stats, int stack_size)
- : parse_info_(parse_info),
- original_stack_limit_(parse_info_->stack_limit()),
- original_runtime_call_stats_(parse_info_->runtime_call_stats()),
- worker_thread_scope_(worker_thread_runtime_stats) {
- parse_info_->SetPerThreadState(GetCurrentStackPosition() - stack_size * KB,
- worker_thread_scope_.Get());
+void SetScriptFieldsFromDetails(Isolate* isolate, Script script,
+ ScriptDetails script_details,
+ DisallowGarbageCollection* no_gc) {
+ Handle<Object> script_name;
+ if (script_details.name_obj.ToHandle(&script_name)) {
+ script.set_name(*script_name);
+ script.set_line_offset(script_details.line_offset);
+ script.set_column_offset(script_details.column_offset);
}
-
- OffThreadParseInfoScope(const OffThreadParseInfoScope&) = delete;
- OffThreadParseInfoScope& operator=(const OffThreadParseInfoScope&) = delete;
-
- ~OffThreadParseInfoScope() {
- DCHECK_NOT_NULL(parse_info_);
- parse_info_->SetPerThreadState(original_stack_limit_,
- original_runtime_call_stats_);
+ // The API can provide a source map URL, but a source map URL could also have
+ // been inferred by the parser from a magic comment. The latter takes
+ // precedence over the former, so we don't want to override the source mapping
+ // URL if it already exists.
+ Handle<Object> source_map_url;
+ if (script_details.source_map_url.ToHandle(&source_map_url) &&
+ script.source_mapping_url(isolate).IsUndefined(isolate)) {
+ script.set_source_mapping_url(*source_map_url);
}
-
- private:
- ParseInfo* parse_info_;
- uintptr_t original_stack_limit_;
- RuntimeCallStats* original_runtime_call_stats_;
- WorkerThreadRuntimeCallStatsScope worker_thread_scope_;
-};
+ Handle<Object> host_defined_options;
+ if (script_details.host_defined_options.ToHandle(&host_defined_options)) {
+ // TODO(cbruni, chromium:1244145): Remove once migrated to the context.
+ if (host_defined_options->IsFixedArray()) {
+ script.set_host_defined_options(FixedArray::cast(*host_defined_options));
+ }
+ }
+}
} // namespace
void BackgroundCompileTask::Run() {
+ WorkerThreadRuntimeCallStatsScope worker_thread_scope(
+ worker_thread_runtime_call_stats_);
+
+ LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground,
+ worker_thread_scope.Get());
+ UnparkedScope unparked_scope(&isolate);
+ LocalHandleScope handle_scope(&isolate);
+
+ ReusableUnoptimizedCompileState reusable_state(&isolate);
+
+ Run(&isolate, &reusable_state);
+}
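
Run() is now a thin wrapper that builds the per-thread machinery and forwards to the overload below. The split lets a caller that executes many tasks on one worker thread, the LazyCompileDispatcher being the presumed client (an assumption), pay for the LocalIsolate and ReusableUnoptimizedCompileState once. A hedged sketch of such a loop; 'tasks' and 'main_isolate' are illustrative names:

    LocalIsolate local_isolate(main_isolate, ThreadKind::kBackground);
    UnparkedScope unparked_scope(&local_isolate);
    LocalHandleScope handle_scope(&local_isolate);
    ReusableUnoptimizedCompileState reusable_state(&local_isolate);
    for (auto& task : tasks) {
      task->Run(&local_isolate, &reusable_state);  // setup amortized
    }
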
+
+void BackgroundCompileTask::Run(
+ LocalIsolate* isolate, ReusableUnoptimizedCompileState* reusable_state) {
TimedHistogramScope timer(timer_);
- base::Optional<OffThreadParseInfoScope> off_thread_scope(
- base::in_place, info_.get(), worker_thread_runtime_call_stats_,
- stack_size_);
+
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"BackgroundCompileTask::Run");
- RCS_SCOPE(info_->runtime_call_stats(),
- RuntimeCallCounterId::kCompileBackgroundCompileTask);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBackgroundCompileTask);
+
+ bool toplevel_script_compilation = flags_.is_toplevel();
+
+ ParseInfo info(isolate, flags_, &compile_state_, reusable_state,
+ GetCurrentStackPosition() - stack_size_ * KB);
+ info.set_character_stream(std::move(character_stream_));
+
+ if (toplevel_script_compilation) {
+ DCHECK_NULL(persistent_handles_);
+ DCHECK(input_shared_info_.is_null());
+
+ // We don't have the script source, origin, or details yet, so use default
+ // values for them. These will be fixed up during the main-thread merge.
+ Handle<Script> script = info.CreateScript(
+ isolate, isolate->factory()->empty_string(), kNullMaybeHandle,
+ ScriptOriginOptions(false, false, false, info.flags().is_module()));
+ script_ = isolate->heap()->NewPersistentHandle(script);
+ } else {
+ DCHECK_NOT_NULL(persistent_handles_);
+ isolate->heap()->AttachPersistentHandles(std::move(persistent_handles_));
+ Handle<SharedFunctionInfo> shared_info =
+ input_shared_info_.ToHandleChecked();
+ script_ = isolate->heap()->NewPersistentHandle(
+ Script::cast(shared_info->script()));
+ info.CheckFlagsForFunctionFromScript(*script_);
+
+ {
+ SharedStringAccessGuardIfNeeded access_guard(isolate);
+ info.set_function_name(info.ast_value_factory()->GetString(
+ shared_info->Name(), access_guard));
+ }
+
+ // Get preparsed scope data from the function literal.
+ if (shared_info->HasUncompiledDataWithPreparseData()) {
+ info.set_consumed_preparse_data(ConsumedPreparseData::For(
+ isolate, handle(shared_info->uncompiled_data_with_preparse_data()
+ .preparse_data(isolate),
+ isolate)));
+ }
+ }
// Update the character stream's runtime call stats.
- info_->character_stream()->set_runtime_call_stats(
- info_->runtime_call_stats());
+ info.character_stream()->set_runtime_call_stats(info.runtime_call_stats());
// Parser needs to stay alive for finalizing the parsing on the main
// thread.
- parser_.reset(new Parser(info_.get()));
- parser_->InitializeEmptyScopeChain(info_.get());
+ Parser parser(isolate, &info, script_);
+ if (flags().is_toplevel()) {
+ parser.InitializeEmptyScopeChain(&info);
+ } else {
+ // TODO(leszeks): Consider keeping Scope zones alive between compile tasks
+ // and passing the Scope for the FunctionLiteral through here directly
+ // without copying/deserializing.
+ Handle<SharedFunctionInfo> shared_info =
+ input_shared_info_.ToHandleChecked();
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info;
+ if (shared_info->HasOuterScopeInfo()) {
+ maybe_outer_scope_info =
+ handle(shared_info->GetOuterScopeInfo(), isolate);
+ }
+ parser.DeserializeScopeChain(
+ isolate, &info, maybe_outer_scope_info,
+ Scope::DeserializationMode::kIncludingVariables);
+ }
- parser_->ParseOnBackground(info_.get(), start_position_, end_position_,
- function_literal_id_);
+ parser.ParseOnBackground(isolate, &info, start_position_, end_position_,
+ function_literal_id_);
+ parser.UpdateStatistics(script_, &use_counts_, &total_preparse_skipped_);
// Save the language mode.
- language_mode_ = info_->language_mode();
+ language_mode_ = info.language_mode();
- if (!FLAG_finalize_streaming_on_background) {
- if (info_->literal() != nullptr) {
- CompileOnBackgroundThread(info_.get(), compile_state_.allocator(),
- &compilation_jobs_);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileCodeBackground");
+ RCS_SCOPE(info.runtime_call_stats(),
+ RuntimeCallCounterIdForCompileBackground(&info));
+
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ if (info.literal() != nullptr) {
+ Handle<SharedFunctionInfo> shared_info;
+ if (toplevel_script_compilation) {
+ shared_info = CreateTopLevelSharedFunctionInfo(&info, script_, isolate);
+ } else {
+ // Clone into a placeholder SFI for storing the results.
+ shared_info = isolate->factory()->CloneSharedFunctionInfo(
+ input_shared_info_.ToHandleChecked());
}
- } else {
- DCHECK(info_->flags().is_toplevel());
- {
- LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground);
- UnparkedScope unparked_scope(&isolate);
- LocalHandleScope handle_scope(&isolate);
-
- info_->ast_value_factory()->Internalize(&isolate);
-
- // We don't have the script source, origin, or details yet, so use default
- // values for them. These will be fixed up during the main-thread merge.
- Handle<Script> script = info_->CreateScript(
- &isolate, isolate.factory()->empty_string(), kNullMaybeHandle,
- ScriptOriginOptions(false, false, false, info_->flags().is_module()));
-
- parser_->UpdateStatistics(script, use_counts_, &total_preparse_skipped_);
- parser_->HandleSourceURLComments(&isolate, script);
-
- MaybeHandle<SharedFunctionInfo> maybe_result;
- if (info_->literal() != nullptr) {
- maybe_result = CompileAndFinalizeOnBackgroundThread(
- info_.get(), compile_state_.allocator(), script, &isolate,
- &finalize_unoptimized_compilation_data_,
- &jobs_to_retry_finalization_on_main_thread_, &is_compiled_scope_);
- } else {
- DCHECK(compile_state_.pending_error_handler()->has_pending_error());
- PreparePendingException(&isolate, info_.get());
- }
+ if (IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
+ isolate, shared_info, script_, &info, reusable_state->allocator(),
+ &is_compiled_scope_, &finalize_unoptimized_compilation_data_,
+ &jobs_to_retry_finalization_on_main_thread_)) {
+ maybe_result = shared_info;
+ }
+ }
- outer_function_sfi_ =
- isolate.heap()->NewPersistentMaybeHandle(maybe_result);
- script_ = isolate.heap()->NewPersistentHandle(script);
+ if (maybe_result.is_null()) {
+ PreparePendingException(isolate, &info);
+ }
- persistent_handles_ = isolate.heap()->DetachPersistentHandles();
- }
+ outer_function_sfi_ = isolate->heap()->NewPersistentMaybeHandle(maybe_result);
+ DCHECK(isolate->heap()->ContainsPersistentHandle(script_.location()));
+ persistent_handles_ = isolate->heap()->DetachPersistentHandles();
- {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.FinalizeCodeBackground.ReleaseParser");
- DCHECK_EQ(language_mode_, info_->language_mode());
- off_thread_scope.reset();
- parser_.reset();
- info_.reset();
- }
+ // Make sure the language mode didn't change.
+ DCHECK_EQ(language_mode_, info.language_mode());
+}
+
+MaybeHandle<SharedFunctionInfo> BackgroundCompileTask::FinalizeScript(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details) {
+ ScriptOriginOptions origin_options = script_details.origin_options;
+
+ DCHECK(flags_.is_toplevel());
+ DCHECK_EQ(flags_.is_module(), origin_options.IsModule());
+
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+
+ // We might not have been able to finalize all jobs on the background
+ // thread (e.g. asm.js jobs), so finalize those deferred jobs now.
+ if (FinalizeDeferredUnoptimizedCompilationJobs(
+ isolate, script_, &jobs_to_retry_finalization_on_main_thread_,
+ compile_state_.pending_error_handler(),
+ &finalize_unoptimized_compilation_data_)) {
+ maybe_result = outer_function_sfi_;
}
+
+ script_->set_source(*source);
+ script_->set_origin_options(origin_options);
+
+ // The one post-hoc fix-up: Add the script to the script list.
+ Handle<WeakArrayList> scripts = isolate->factory()->script_list();
+ scripts =
+ WeakArrayList::Append(isolate, scripts, MaybeObjectHandle::Weak(script_));
+ isolate->heap()->SetRootScriptList(*scripts);
+
+ // Set the script fields after finalization, to keep this path the same
+ // between main-thread and off-thread finalization.
+ {
+ DisallowGarbageCollection no_gc;
+ SetScriptFieldsFromDetails(isolate, *script_, script_details, &no_gc);
+ LOG(isolate, ScriptDetails(*script_));
+ }
+
+ ReportStatistics(isolate);
+
+ Handle<SharedFunctionInfo> result;
+ if (!maybe_result.ToHandle(&result)) {
+ FailWithPreparedPendingException(isolate, script_,
+ compile_state_.pending_error_handler());
+ return kNullMaybeHandle;
+ }
+
+ FinalizeUnoptimizedScriptCompilation(isolate, script_, flags_,
+ &compile_state_,
+ finalize_unoptimized_compilation_data_);
+
+ return handle(*result, isolate);
}
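
FinalizeScript absorbs the finalization block that GetSharedFunctionInfoForStreamedScript deletes further down, so the whole streaming flow now reads as a sketch like the following (ScriptStreamingData owning the task is an assumption based on the constructor above):

    // Worker thread: parse, compile, and background-finalize.
    BackgroundCompileTask* task = streamed_data->task.get();  // assumed field
    task->Run();
    // Main thread, once the real source string is available: fix up the
    // placeholder Script and publish the SFI.
    MaybeHandle<SharedFunctionInfo> maybe_sfi =
        task->FinalizeScript(isolate, source, script_details);
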
-MaybeHandle<SharedFunctionInfo> BackgroundCompileTask::GetOuterFunctionSfi(
- Isolate* isolate) {
- // outer_function_sfi_ is a persistent Handle, tied to the lifetime of the
- // persistent_handles_ member, so create a new Handle to let it outlive
- // the BackgroundCompileTask.
+bool BackgroundCompileTask::FinalizeFunction(
+ Isolate* isolate, Compiler::ClearExceptionFlag flag) {
+ DCHECK(!flags_.is_toplevel());
+
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ Handle<SharedFunctionInfo> input_shared_info =
+ input_shared_info_.ToHandleChecked();
+
+ // The UncompiledData on the input SharedFunctionInfo will have a pointer to
+ // the LazyCompileDispatcher Job that launched this task, which will now be
+ // considered complete, so clear that pointer regardless of whether
+ // finalization succeeds.
+ input_shared_info->ClearUncompiledDataJobPointer();
+
+ // We might not have been able to finalize all jobs on the background
+ // thread (e.g. asm.js jobs), so finalize those deferred jobs now.
+ if (FinalizeDeferredUnoptimizedCompilationJobs(
+ isolate, script_, &jobs_to_retry_finalization_on_main_thread_,
+ compile_state_.pending_error_handler(),
+ &finalize_unoptimized_compilation_data_)) {
+ maybe_result = outer_function_sfi_;
+ }
+
+ ReportStatistics(isolate);
+
Handle<SharedFunctionInfo> result;
- if (outer_function_sfi_.ToHandle(&result)) {
- return handle(*result, isolate);
+ if (!maybe_result.ToHandle(&result)) {
+ FailWithPreparedPendingException(
+ isolate, script_, compile_state_.pending_error_handler(), flag);
+ return false;
}
- return kNullMaybeHandle;
+
+ FinalizeUnoptimizedCompilation(isolate, script_, flags_, &compile_state_,
+ finalize_unoptimized_compilation_data_);
+
+ // Move the compiled data from the placeholder SFI back to the real SFI.
+ input_shared_info->CopyFrom(*result);
+
+ return true;
}
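
FinalizeFunction completes the placeholder scheme set up in Run(): the worker compiles into a clone of the input SharedFunctionInfo, and the real SFI is only mutated on the main thread after finalization succeeds. Reduced to its two ends, with the calls exactly as they appear in this diff:

    // Worker (in Run()): compile into a throwaway clone.
    Handle<SharedFunctionInfo> placeholder =
        isolate->factory()->CloneSharedFunctionInfo(input_shared_info);
    // Main thread (here), only on success:
    input_shared_info->CopyFrom(*placeholder);
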
-Handle<Script> BackgroundCompileTask::GetScript(Isolate* isolate) {
- // script_ is a persistent Handle, tied to the lifetime of the
- // persistent_handles_ member, so create a new Handle to let it outlive
- // the BackgroundCompileTask.
- return handle(*script_, isolate);
+void BackgroundCompileTask::AbortFunction() {
+ // The UncompiledData on the input SharedFunctionInfo will have a pointer to
+ // the LazyCompileDispatcher Job that launched this task, which is about to be
+ // deleted, so clear that pointer to prevent the SharedFunctionInfo from
+ // pointing to deallocated memory.
+ input_shared_info_.ToHandleChecked()->ClearUncompiledDataJobPointer();
+}
+
+void BackgroundCompileTask::ReportStatistics(Isolate* isolate) {
+ // Update use-counts.
+ for (auto feature : use_counts_) {
+ isolate->CountUsage(feature);
+ }
+ if (total_preparse_skipped_ > 0) {
+ isolate->counters()->total_preparse_skipped()->Increment(
+ total_preparse_skipped_);
+ }
}
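
ReportStatistics pairs with the compiler.h change below, which replaces the fixed int use_counts_[kUseCounterFeatureCount] array with a base::SmallVector of observed features: the parser records one entry per occurrence (the recording side is an assumption) and the main thread replays them. A sketch:

    base::SmallVector<v8::Isolate::UseCounterFeature, 8> use_counts;
    // Background (assumed): use_counts.emplace_back(feature);
    // Main thread: replay every recorded occurrence.
    for (auto feature : use_counts) isolate->CountUsage(feature);
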
BackgroundDeserializeTask::BackgroundDeserializeTask(
@@ -1777,9 +1783,13 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info);
flags.set_collect_source_positions(true);
+ // Prevent parallel tasks from being spawned by this job.
+ flags.set_post_parallel_compile_tasks_for_eager_toplevel(false);
+ flags.set_post_parallel_compile_tasks_for_lazy(false);
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
// Parse and update ParseInfo with the results. Don't update parsing
// statistics since we've already parsed the code before.
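
This is the ParseInfo construction idiom that recurs throughout the rest of this file: UnoptimizedCompileState loses its Isolate argument, and the reusable per-isolate pieces (the allocator is visible in this diff; any further contents are an assumption) move into ReusableUnoptimizedCompileState:

    // Before: UnoptimizedCompileState compile_state(isolate);
    //         ParseInfo parse_info(isolate, flags, &compile_state);
    // After: the reusable half can be shared across compilations.
    UnoptimizedCompileState compile_state;
    ReusableUnoptimizedCompileState reusable_state(isolate);
    ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
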
@@ -1830,7 +1840,8 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
// static
bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
ClearExceptionFlag flag,
- IsCompiledScope* is_compiled_scope) {
+ IsCompiledScope* is_compiled_scope,
+ CreateSourcePositions create_source_positions_flag) {
// We should never reach here if the function is already compiled.
DCHECK(!shared_info->is_compiled());
DCHECK(!is_compiled_scope->is_compiled());
@@ -1851,9 +1862,13 @@ bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
// Set up parse info.
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info);
+ if (create_source_positions_flag == CreateSourcePositions::kYes) {
+ flags.set_collect_source_positions(true);
+ }
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
// Check if the compiler dispatcher has shared_info enqueued for compile.
LazyCompileDispatcher* dispatcher = isolate->lazy_compile_dispatcher();
@@ -1927,7 +1942,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
}
DCHECK(is_compiled_scope->is_compiled());
- Handle<Code> code = handle(shared_info->GetCode(), isolate);
+ Handle<Code> code = handle(FromCodeT(shared_info->GetCode()), isolate);
// Initialize the feedback cell for this JSFunction and reset the interrupt
// budget for feedback vector allocation even if there is a closure feedback
@@ -2011,7 +2026,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
// report these somehow, or silently ignore them?
return false;
}
- shared->set_baseline_code(*code, kReleaseStore);
+ shared->set_baseline_code(ToCodeT(*code), kReleaseStore);
if (V8_LIKELY(FLAG_use_osr)) {
// Arm back edges for OSR
@@ -2045,7 +2060,7 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
// Baseline code needs a feedback vector.
JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
- Code baseline_code = shared->baseline_code(kAcquireLoad);
+ CodeT baseline_code = shared->baseline_code(kAcquireLoad);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
function->set_code(baseline_code);
@@ -2061,45 +2076,19 @@ MaybeHandle<SharedFunctionInfo> Compiler::CompileToplevel(
}
// static
-bool Compiler::FinalizeBackgroundCompileTask(
- BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate, ClearExceptionFlag flag) {
- DCHECK(!FLAG_finalize_streaming_on_background);
-
+bool Compiler::FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
+ Isolate* isolate,
+ ClearExceptionFlag flag) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.FinalizeBackgroundCompileTask");
RCS_SCOPE(isolate,
RuntimeCallCounterId::kCompileFinalizeBackgroundCompileTask);
- HandleScope scope(isolate);
- ParseInfo* parse_info = task->info();
- DCHECK(!parse_info->flags().is_toplevel());
- DCHECK(!shared_info->is_compiled());
-
- Handle<Script> script(Script::cast(shared_info->script()), isolate);
- parse_info->CheckFlagsForFunctionFromScript(*script);
-
- task->parser()->UpdateStatistics(isolate, script);
- task->parser()->HandleSourceURLComments(isolate, script);
- if (task->compilation_jobs()->empty()) {
- // Parsing or compile failed on background thread - report error messages.
- return FailWithPendingException(isolate, script, parse_info, flag);
- }
+ HandleScope scope(isolate);
- // Parsing has succeeded - finalize compilation.
- parse_info->ast_value_factory()->Internalize(isolate);
- if (!FinalizeAllUnoptimizedCompilationJobs(
- parse_info, isolate, script, task->compilation_jobs(),
- task->finalize_unoptimized_compilation_data())) {
- // Finalization failed - throw an exception.
- return FailWithPendingException(isolate, script, parse_info, flag);
- }
- FinalizeUnoptimizedCompilation(
- isolate, script, parse_info->flags(), parse_info->state(),
- *task->finalize_unoptimized_compilation_data());
+ if (!task->FinalizeFunction(isolate, flag)) return false;
DCHECK(!isolate->has_pending_exception());
- DCHECK(shared_info->is_compiled());
return true;
}
@@ -2204,8 +2193,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
DCHECK(!flags.is_module());
flags.set_parse_restriction(restriction);
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
parse_info.set_parameters_end_pos(parameters_end_pos);
MaybeHandle<ScopeInfo> maybe_outer_scope_info;
@@ -2650,30 +2640,6 @@ struct ScriptCompileTimerScope {
}
};
-void SetScriptFieldsFromDetails(Isolate* isolate, Script script,
- ScriptDetails script_details,
- DisallowGarbageCollection* no_gc) {
- Handle<Object> script_name;
- if (script_details.name_obj.ToHandle(&script_name)) {
- script.set_name(*script_name);
- script.set_line_offset(script_details.line_offset);
- script.set_column_offset(script_details.column_offset);
- }
- // The API can provide a source map URL, but a source map URL could also have
- // been inferred by the parser from a magic comment. The latter takes
- // preference over the former, so we don't want to override the source mapping
- // URL if it already exists.
- Handle<Object> source_map_url;
- if (script_details.source_map_url.ToHandle(&source_map_url) &&
- script.source_mapping_url(isolate).IsUndefined(isolate)) {
- script.set_source_mapping_url(*source_map_url);
- }
- Handle<FixedArray> host_defined_options;
- if (script_details.host_defined_options.ToHandle(&host_defined_options)) {
- script.set_host_defined_options(*host_defined_options);
- }
-}
-
Handle<Script> NewScript(
Isolate* isolate, ParseInfo* parse_info, Handle<String> source,
ScriptDetails script_details, NativesFlag natives,
@@ -2693,8 +2659,9 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnMainThread(
const ScriptDetails& script_details, NativesFlag natives,
v8::Extension* extension, Isolate* isolate,
IsCompiledScope* is_compiled_scope) {
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
parse_info.set_extension(extension);
Handle<Script> script =
@@ -2867,7 +2834,8 @@ MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScriptImpl(
if (V8_UNLIKELY(
i::FLAG_experimental_web_snapshots &&
- (source->IsExternalOneByteString() || source->IsSeqOneByteString()) &&
+ (source->IsExternalOneByteString() || source->IsSeqOneByteString() ||
+ source->IsExternalTwoByteString() || source->IsSeqTwoByteString()) &&
source_length > 4)) {
// Experimental: Treat the script as a web snapshot if it starts with the
// magic byte sequence. TODO(v8:11525): Remove this once proper embedder
@@ -3080,8 +3048,9 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
flags.set_collect_source_positions(true);
// flags.set_eager(compile_options == ScriptCompiler::kEagerCompile);
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
MaybeHandle<ScopeInfo> maybe_outer_scope_info;
if (!context->IsNativeContext()) {
@@ -3123,8 +3092,7 @@ MaybeHandle<SharedFunctionInfo>
Compiler::GetSharedFunctionInfoForStreamedScript(
Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, ScriptStreamingData* streaming_data) {
- ScriptOriginOptions origin_options = script_details.origin_options;
- DCHECK(!origin_options.IsWasm());
+ DCHECK(!script_details.origin_options.IsWasm());
ScriptCompileTimerScope compile_timer(
isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
@@ -3153,94 +3121,15 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
if (maybe_result.is_null()) {
// No cache entry found, finalize compilation of the script and add it to
// the isolate cache.
- DCHECK_EQ(task->flags().is_module(), origin_options.IsModule());
-
- Handle<Script> script;
- if (FLAG_finalize_streaming_on_background) {
- RCS_SCOPE(isolate,
- RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.OffThreadFinalization.Publish");
-
- script = task->GetScript(isolate);
-
- // We might not have been able to finalize all jobs on the background
- // thread (e.g. asm.js jobs), so finalize those deferred jobs now.
- if (FinalizeDeferredUnoptimizedCompilationJobs(
- isolate, script,
- task->jobs_to_retry_finalization_on_main_thread(),
- task->compile_state()->pending_error_handler(),
- task->finalize_unoptimized_compilation_data())) {
- maybe_result = task->GetOuterFunctionSfi(isolate);
- }
-
- script->set_source(*source);
- script->set_origin_options(origin_options);
-
- // The one post-hoc fix-up: Add the script to the script list.
- Handle<WeakArrayList> scripts = isolate->factory()->script_list();
- scripts = WeakArrayList::Append(isolate, scripts,
- MaybeObjectHandle::Weak(script));
- isolate->heap()->SetRootScriptList(*scripts);
-
- for (int i = 0;
- i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount); ++i) {
- v8::Isolate::UseCounterFeature feature =
- static_cast<v8::Isolate::UseCounterFeature>(i);
- isolate->CountUsage(feature, task->use_count(feature));
- }
- isolate->counters()->total_preparse_skipped()->Increment(
- task->total_preparse_skipped());
- } else {
- ParseInfo* parse_info = task->info();
- DCHECK_EQ(parse_info->flags().is_module(), origin_options.IsModule());
- DCHECK(parse_info->flags().is_toplevel());
-
- script = parse_info->CreateScript(isolate, source, kNullMaybeHandle,
- origin_options);
-
- task->parser()->UpdateStatistics(isolate, script);
- task->parser()->HandleSourceURLComments(isolate, script);
-
- if (!task->compilation_jobs()->empty()) {
- // Off-thread parse & compile has succeeded - finalize compilation.
- DCHECK_NOT_NULL(parse_info->literal());
-
- parse_info->ast_value_factory()->Internalize(isolate);
-
- Handle<SharedFunctionInfo> shared_info =
- CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
- if (FinalizeAllUnoptimizedCompilationJobs(
- parse_info, isolate, script, task->compilation_jobs(),
- task->finalize_unoptimized_compilation_data())) {
- maybe_result = shared_info;
- }
- }
-
- if (maybe_result.is_null()) {
- // Compilation failed - prepare to throw an exception after script
- // fields have been set.
- PreparePendingException(isolate, parse_info);
- }
- }
+ RCS_SCOPE(isolate,
+ RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish");
- // Set the script fields after finalization, to keep this path the same
- // between main-thread and off-thread finalization.
- {
- DisallowGarbageCollection no_gc;
- SetScriptFieldsFromDetails(isolate, *script, script_details, &no_gc);
- LOG(isolate, ScriptDetails(*script));
- }
+ maybe_result = task->FinalizeScript(isolate, source, script_details);
Handle<SharedFunctionInfo> result;
- if (!maybe_result.ToHandle(&result)) {
- FailWithPreparedPendingException(
- isolate, script, task->compile_state()->pending_error_handler());
- } else {
- FinalizeUnoptimizedScriptCompilation(
- isolate, script, task->flags(), task->compile_state(),
- *task->finalize_unoptimized_compilation_data());
-
+ if (maybe_result.ToHandle(&result)) {
// Add compiled code to the isolate cache.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.StreamingFinalization.AddToCache");
@@ -3252,7 +3141,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
"V8.StreamingFinalization.Release");
streaming_data->Release();
return maybe_result;
-}
+}
// static
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForWebSnapshot(
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 5298d139ff..f49bd727bc 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -8,7 +8,9 @@
#include <forward_list>
#include <memory>
+#include "src/ast/ast-value-factory.h"
#include "src/base/platform/elapsed-timer.h"
+#include "src/base/small-vector.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
@@ -69,7 +71,9 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag,
- IsCompiledScope* is_compiled_scope);
+ IsCompiledScope* is_compiled_scope,
+ CreateSourcePositions create_source_positions_flag =
+ CreateSourcePositions::kNo);
static bool Compile(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
@@ -104,9 +108,9 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
Isolate* isolate);
// Finalize and install code from previously run background compile task.
- static bool FinalizeBackgroundCompileTask(
- BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate, ClearExceptionFlag flag);
+ static bool FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
+ Isolate* isolate,
+ ClearExceptionFlag flag);
// Finalize and install optimized code from previously run job.
static bool FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
@@ -495,8 +499,7 @@ using DeferredFinalizationJobDataList =
class V8_EXPORT_PRIVATE BackgroundCompileTask {
public:
// Creates a new task that when run will parse and compile the streamed
- // script associated with |data| and can be finalized with
- // Compiler::GetSharedFunctionInfoForStreamedScript.
+ // script associated with |data| and can be finalized with FinalizeScript.
// Note: does not take ownership of |data|.
BackgroundCompileTask(ScriptStreamingData* data, Isolate* isolate,
v8::ScriptType type);
@@ -504,83 +507,61 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
BackgroundCompileTask& operator=(const BackgroundCompileTask&) = delete;
~BackgroundCompileTask();
- // Creates a new task that when run will parse and compile the
- // |function_literal| and can be finalized with
+ // Creates a new task that when run will parse and compile the non-top-level
+ // |shared_info| and can be finalized with FinalizeFunction in
// Compiler::FinalizeBackgroundCompileTask.
BackgroundCompileTask(
- const ParseInfo* outer_parse_info, const AstRawString* function_name,
- const FunctionLiteral* function_literal,
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ std::unique_ptr<Utf16CharacterStream> character_stream,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
TimedHistogram* timer, int max_stack_size);
void Run();
+ void Run(LocalIsolate* isolate,
+ ReusableUnoptimizedCompileState* reusable_state);
- ParseInfo* info() {
- DCHECK_NOT_NULL(info_);
- return info_.get();
- }
- Parser* parser() { return parser_.get(); }
- UnoptimizedCompilationJobList* compilation_jobs() {
- return &compilation_jobs_;
- }
- UnoptimizedCompileFlags flags() const { return flags_; }
- UnoptimizedCompileState* compile_state() { return &compile_state_; }
- LanguageMode language_mode() { return language_mode_; }
- FinalizeUnoptimizedCompilationDataList*
- finalize_unoptimized_compilation_data() {
- return &finalize_unoptimized_compilation_data_;
- }
+ MaybeHandle<SharedFunctionInfo> FinalizeScript(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details);
- int use_count(v8::Isolate::UseCounterFeature feature) const {
- return use_counts_[static_cast<int>(feature)];
- }
- int total_preparse_skipped() const { return total_preparse_skipped_; }
+ bool FinalizeFunction(Isolate* isolate, Compiler::ClearExceptionFlag flag);
- // Jobs which could not be finalized in the background task, and need to be
- // finalized on the main thread.
- DeferredFinalizationJobDataList* jobs_to_retry_finalization_on_main_thread() {
- return &jobs_to_retry_finalization_on_main_thread_;
- }
+ void AbortFunction();
- // Getters for the off-thread finalization results, that create main-thread
- // handles to the objects.
- MaybeHandle<SharedFunctionInfo> GetOuterFunctionSfi(Isolate* isolate);
- Handle<Script> GetScript(Isolate* isolate);
+ UnoptimizedCompileFlags flags() const { return flags_; }
+ LanguageMode language_mode() const { return language_mode_; }
private:
- // Data needed for parsing, and data needed to be passed between threads
- // between parsing and compilation. These need to be initialized before the
- // compilation starts.
+ void ReportStatistics(Isolate* isolate);
+
+ void ClearFunctionJobPointer();
+
+ // Data needed for parsing and compilation. These need to be initialized
+ // before the compilation starts.
+ Isolate* isolate_for_local_isolate_;
UnoptimizedCompileFlags flags_;
UnoptimizedCompileState compile_state_;
- std::unique_ptr<ParseInfo> info_;
- std::unique_ptr<Parser> parser_;
-
- // Data needed for finalizing compilation after background compilation.
- UnoptimizedCompilationJobList compilation_jobs_;
+ std::unique_ptr<Utf16CharacterStream> character_stream_;
+ int stack_size_;
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
+ TimedHistogram* timer_;
// Data needed for merging onto the main thread after background finalization.
- // TODO(leszeks): When these are available, the above fields are not. We
- // should add some stricter type-safety or DCHECKs to ensure that the user of
- // the task knows this.
- Isolate* isolate_for_local_isolate_;
std::unique_ptr<PersistentHandles> persistent_handles_;
MaybeHandle<SharedFunctionInfo> outer_function_sfi_;
Handle<Script> script_;
IsCompiledScope is_compiled_scope_;
FinalizeUnoptimizedCompilationDataList finalize_unoptimized_compilation_data_;
DeferredFinalizationJobDataList jobs_to_retry_finalization_on_main_thread_;
- int use_counts_[v8::Isolate::kUseCounterFeatureCount] = {0};
+ base::SmallVector<v8::Isolate::UseCounterFeature, 8> use_counts_;
int total_preparse_skipped_ = 0;
// Single function data for top-level function compilation.
+ MaybeHandle<SharedFunctionInfo> input_shared_info_;
int start_position_;
int end_position_;
int function_literal_id_;
- int stack_size_;
- WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
- TimedHistogram* timer_;
LanguageMode language_mode_;
};
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index b2d890c6f4..76956381a2 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -24,13 +24,13 @@ class ConstantPoolEntry {
public:
ConstantPoolEntry() = default;
ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: position_(position),
merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
value_(value),
rmode_(rmode) {}
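
RelocInfo::NONE is renamed RelocInfo::NO_INFO throughout this commit (with IsNone becoming IsNoInfo in the assembler hunks below); the motivation is not stated in the diff, so take "NONE was too generic an identifier" as an assumption. Call sites are otherwise unchanged:

    // rmode defaults to RelocInfo::NO_INFO (formerly RelocInfo::NONE).
    ConstantPoolEntry plain(/*position=*/0, intptr_t{42}, /*sharing_ok=*/true);
    ConstantPoolEntry tagged(/*position=*/4, intptr_t{42}, /*sharing_ok=*/false,
                             RelocInfo::FULL_EMBEDDED_OBJECT);
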
ConstantPoolEntry(int position, base::Double value,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: position_(position),
merged_index_(SHARING_ALLOWED),
value64_(value.AsUint64()),
@@ -168,11 +168,11 @@ class ConstantPoolBuilder {
class ConstantPoolKey {
public:
explicit ConstantPoolKey(uint64_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: is_value32_(false), value64_(value), rmode_(rmode) {}
explicit ConstantPoolKey(uint32_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: is_value32_(true), value32_(value), rmode_(rmode) {}
uint64_t value64() const {
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index 3cdae6d4c8..e80d560fd1 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -26,7 +26,7 @@ enum CpuFeature {
BMI2,
LZCNT,
POPCNT,
- ATOM,
+ INTEL_ATOM,
#elif V8_TARGET_ARCH_ARM
// - Standard configurations. The baseline is ARMv6+VFPv2.
diff --git a/deps/v8/src/codegen/external-reference-table.cc b/deps/v8/src/codegen/external-reference-table.cc
index 0a22fbdd75..d07f021a8b 100644
--- a/deps/v8/src/codegen/external-reference-table.cc
+++ b/deps/v8/src/codegen/external-reference-table.cc
@@ -290,9 +290,11 @@ void ExternalReferenceTable::AddStubCache(Isolate* isolate, int* index) {
}
Address ExternalReferenceTable::GetStatsCounterAddress(StatsCounter* counter) {
- int* address = counter->Enabled()
- ? counter->GetInternalPointer()
- : reinterpret_cast<int*>(&dummy_stats_counter_);
+ if (!counter->Enabled()) {
+ return reinterpret_cast<Address>(&dummy_stats_counter_);
+ }
+ std::atomic<int>* address = counter->GetInternalPointer();
+ STATIC_ASSERT(sizeof(address) == sizeof(Address));
return reinterpret_cast<Address>(address);
}
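
The early return keeps disabled counters pointed at the shared dummy slot, while enabled counters now expose a std::atomic<int>*; the STATIC_ASSERT pins the one property the reinterpret_cast relies on, namely that the pointer is exactly Address-sized. Restated as a standalone invariant:

    // Generated code loads through this pointer as a raw Address, so the
    // cast from pointer to Address must be lossless.
    STATIC_ASSERT(sizeof(std::atomic<int>*) == sizeof(Address));
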
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 1981e29911..075eaf8c09 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -4,6 +4,7 @@
#include "src/codegen/external-reference.h"
+#include "include/v8-fast-api-calls.h"
#include "src/api/api.h"
#include "src/base/ieee754.h"
#include "src/codegen/cpu-features.h"
@@ -11,10 +12,11 @@
#include "src/date/date.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/encoded-c-signature.h"
#include "src/execution/isolate-utils.h"
#include "src/execution/isolate.h"
#include "src/execution/microtask-queue.h"
-#include "src/execution/simulator-base.h"
+#include "src/execution/simulator.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/ic/stub-cache.h"
@@ -173,8 +175,18 @@ static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
}
// static
+ExternalReference ExternalReference::Create(ApiFunction* fun, Type type) {
+ return ExternalReference(Redirect(fun->address(), type));
+}
+
+// static
ExternalReference ExternalReference::Create(
- ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL) {
+ Isolate* isolate, ApiFunction* fun, Type type, Address* c_functions,
+ const CFunctionInfo* const* c_signatures, unsigned num_functions) {
+#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ isolate->simulator_data()->RegisterFunctionsAndSignatures(
+ c_functions, c_signatures, num_functions);
+#endif // V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
return ExternalReference(Redirect(fun->address(), type));
}
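
This overload backs fast API calls with overloads (its declaration and rationale appear in external-reference.h below): on simulator builds every callable target has to be registered with its encoded C signature before generated code may call it, while on regular builds it degenerates to the plain Redirect. A hedged call-site sketch; the FAST_C_CALL type name and the arrays are assumptions:

    // c_functions[i] holds the address of overload i, c_signatures[i] its
    // CFunctionInfo; num_functions is the overload count.
    ExternalReference ref = ExternalReference::Create(
        isolate, &api_function, ExternalReference::FAST_C_CALL,
        c_functions, c_signatures, num_functions);
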
@@ -198,16 +210,23 @@ ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
return ExternalReference(isolate);
}
-ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
+ExternalReference ExternalReference::builtins_table(Isolate* isolate) {
return ExternalReference(isolate->builtin_table());
}
+#ifdef V8_EXTERNAL_CODE_SPACE
+ExternalReference ExternalReference::builtins_code_data_container_table(
+ Isolate* isolate) {
+ return ExternalReference(isolate->builtin_code_data_container_table());
+}
+#endif // V8_EXTERNAL_CODE_SPACE
+
ExternalReference ExternalReference::handle_scope_implementer_address(
Isolate* isolate) {
return ExternalReference(isolate->handle_scope_implementer_address());
}
-#ifdef V8_VIRTUAL_MEMORY_CAGE
+#ifdef V8_CAGED_POINTERS
ExternalReference ExternalReference::virtual_memory_cage_base_address() {
return ExternalReference(GetProcessWideVirtualMemoryCage()->base_address());
}
@@ -215,7 +234,13 @@ ExternalReference ExternalReference::virtual_memory_cage_base_address() {
ExternalReference ExternalReference::virtual_memory_cage_end_address() {
return ExternalReference(GetProcessWideVirtualMemoryCage()->end_address());
}
-#endif
+
+ExternalReference ExternalReference::empty_backing_store_buffer() {
+ return ExternalReference(GetProcessWideVirtualMemoryCage()
+ ->constants()
+ .empty_backing_store_buffer_address());
+}
+#endif // V8_CAGED_POINTERS
#ifdef V8_HEAP_SANDBOX
ExternalReference ExternalReference::external_pointer_table_address(
@@ -871,8 +896,7 @@ ExternalReference ExternalReference::search_string_raw() {
FUNCTION_REFERENCE(jsarray_array_join_concat_to_sequential_string,
JSArray::ArrayJoinConcatToSequentialString)
-FUNCTION_REFERENCE(length_tracking_gsab_backed_typed_array_length,
- JSTypedArray::LengthTrackingGsabBackedTypedArrayLength)
+FUNCTION_REFERENCE(gsab_byte_length, JSArrayBuffer::GsabByteLength)
ExternalReference ExternalReference::search_string_raw_one_one() {
return search_string_raw<const uint8_t, const uint8_t>();
@@ -1001,6 +1025,17 @@ ExternalReference ExternalReference::intl_to_latin1_lower_table() {
uint8_t* ptr = const_cast<uint8_t*>(Intl::ToLatin1LowerTable());
return ExternalReference(reinterpret_cast<Address>(ptr));
}
+
+ExternalReference ExternalReference::intl_ascii_collation_weights_l1() {
+ uint8_t* ptr = const_cast<uint8_t*>(Intl::AsciiCollationWeightsL1());
+ return ExternalReference(reinterpret_cast<Address>(ptr));
+}
+
+ExternalReference ExternalReference::intl_ascii_collation_weights_l3() {
+ uint8_t* ptr = const_cast<uint8_t*>(Intl::AsciiCollationWeightsL3());
+ return ExternalReference(reinterpret_cast<Address>(ptr));
+}
+
#endif // V8_INTL_SUPPORT
// Explicit instantiations for all combinations of 1- and 2-byte strings.
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index d7cffa966b..a0c27d207e 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -11,6 +11,7 @@
namespace v8 {
class ApiFunction;
+class CFunctionInfo;
namespace internal {
@@ -24,7 +25,7 @@ class StatsCounter;
#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(V) \
V(isolate_address, "isolate") \
- V(builtins_address, "builtins") \
+ V(builtins_table, "builtins_table") \
V(handle_scope_implementer_address, \
"Isolate::handle_scope_implementer_address") \
V(address_of_interpreter_entry_trampoline_instruction_start, \
@@ -78,8 +79,16 @@ class StatsCounter;
V(thread_in_wasm_flag_address_address, \
"Isolate::thread_in_wasm_flag_address_address") \
V(javascript_execution_assert, "javascript_execution_assert") \
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_EXTERNAL_CODE_SPACE(V) \
EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V)
+#ifdef V8_EXTERNAL_CODE_SPACE
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_EXTERNAL_CODE_SPACE(V) \
+ V(builtins_code_data_container_table, "builtins_code_data_container_table")
+#else
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_EXTERNAL_CODE_SPACE(V)
+#endif // V8_EXTERNAL_CODE_SPACE
+
#ifdef V8_HEAP_SANDBOX
#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V) \
V(external_pointer_table_address, \
@@ -126,6 +135,7 @@ class StatsCounter;
V(f64_mod_wrapper_function, "f64_mod_wrapper") \
V(get_date_field_function, "JSDate::GetField") \
V(get_or_create_hash_raw, "get_or_create_hash_raw") \
+ V(gsab_byte_length, "GsabByteLength") \
V(ieee754_acos_function, "base::ieee754::acos") \
V(ieee754_acosh_function, "base::ieee754::acosh") \
V(ieee754_asin_function, "base::ieee754::asin") \
@@ -155,8 +165,6 @@ class StatsCounter;
V(jsarray_array_join_concat_to_sequential_string, \
"jsarray_array_join_concat_to_sequential_string") \
V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
- V(length_tracking_gsab_backed_typed_array_length, \
- "LengthTrackingGsabBackedTypedArrayLength") \
V(libc_memchr_function, "libc_memchr") \
V(libc_memcpy_function, "libc_memcpy") \
V(libc_memmove_function, "libc_memmove") \
@@ -303,18 +311,21 @@ class StatsCounter;
#ifdef V8_INTL_SUPPORT
#define EXTERNAL_REFERENCE_LIST_INTL(V) \
V(intl_convert_one_byte_to_lower, "intl_convert_one_byte_to_lower") \
- V(intl_to_latin1_lower_table, "intl_to_latin1_lower_table")
+ V(intl_to_latin1_lower_table, "intl_to_latin1_lower_table") \
+ V(intl_ascii_collation_weights_l1, "Intl::AsciiCollationWeightsL1") \
+ V(intl_ascii_collation_weights_l3, "Intl::AsciiCollationWeightsL3")
#else
#define EXTERNAL_REFERENCE_LIST_INTL(V)
#endif // V8_INTL_SUPPORT
-#ifdef V8_VIRTUAL_MEMORY_CAGE
+#ifdef V8_CAGED_POINTERS
#define EXTERNAL_REFERENCE_LIST_VIRTUAL_MEMORY_CAGE(V) \
V(virtual_memory_cage_base_address, "V8VirtualMemoryCage::base()") \
- V(virtual_memory_cage_end_address, "V8VirtualMemoryCage::end()")
+ V(virtual_memory_cage_end_address, "V8VirtualMemoryCage::end()") \
+ V(empty_backing_store_buffer, "EmptyBackingStoreBuffer()")
#else
#define EXTERNAL_REFERENCE_LIST_VIRTUAL_MEMORY_CAGE(V)
-#endif // V8_VIRTUAL_MEMORY_CAGE
+#endif // V8_CAGED_POINTERS
#ifdef V8_HEAP_SANDBOX
#define EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V) \
@@ -398,6 +409,15 @@ class ExternalReference {
static ExternalReference Create(StatsCounter* counter);
static V8_EXPORT_PRIVATE ExternalReference Create(ApiFunction* ptr,
Type type);
+ // The following version is used by JSCallReducer in the compiler
+ // to create a reference for a fast API call, with one or more
+ // overloads. In simulator builds, it additionally "registers"
+ // the overloads with the simulator to ensure it maintains a
+ // mapping of callable Addresses to a function signature, encoding
+ // GP and FP arguments.
+ static V8_EXPORT_PRIVATE ExternalReference
+ Create(Isolate* isolate, ApiFunction* ptr, Type type, Address* c_functions,
+ const CFunctionInfo* const* c_signatures, unsigned num_functions);
static ExternalReference Create(const Runtime::Function* f);
static ExternalReference Create(IsolateAddressId id, Isolate* isolate);
static ExternalReference Create(Runtime::FunctionId id);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index 2d2b368c7b..607c0aca6e 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -179,7 +179,7 @@ void Assembler::emit(Handle<HeapObject> handle) {
}
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
RecordRelocInfo(rmode);
}
emit(x);
@@ -195,7 +195,7 @@ void Assembler::emit(const Immediate& x) {
emit_code_relative_offset(label);
return;
}
- if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
+ if (!RelocInfo::IsNoInfo(x.rmode_)) RecordRelocInfo(x.rmode_);
if (x.is_heap_object_request()) {
RequestHeapObject(x.heap_object_request());
emit(0);
@@ -221,7 +221,7 @@ void Assembler::emit_b(Immediate x) {
}
void Assembler::emit_w(const Immediate& x) {
- DCHECK(RelocInfo::IsNone(x.rmode_));
+ DCHECK(RelocInfo::IsNoInfo(x.rmode_));
uint16_t value = static_cast<uint16_t>(x.immediate());
WriteUnalignedValue(reinterpret_cast<Address>(pc_), value);
pc_ += sizeof(uint16_t);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index e14d16c00a..389640e89a 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -153,9 +153,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.has_lzcnt() && FLAG_enable_lzcnt) SetSupported(LZCNT);
if (cpu.has_popcnt() && FLAG_enable_popcnt) SetSupported(POPCNT);
if (strcmp(FLAG_mcpu, "auto") == 0) {
- if (cpu.is_atom()) SetSupported(ATOM);
+ if (cpu.is_atom()) SetSupported(INTEL_ATOM);
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
- SetSupported(ATOM);
+ SetSupported(INTEL_ATOM);
}
// Ensure that supported cpu features make sense. E.g. it is wrong to support
@@ -188,7 +188,7 @@ void CpuFeatures::PrintFeatures() {
CpuFeatures::IsSupported(AVX2), CpuFeatures::IsSupported(FMA3),
CpuFeatures::IsSupported(BMI1), CpuFeatures::IsSupported(BMI2),
CpuFeatures::IsSupported(LZCNT), CpuFeatures::IsSupported(POPCNT),
- CpuFeatures::IsSupported(ATOM));
+ CpuFeatures::IsSupported(INTEL_ATOM));
}
// -----------------------------------------------------------------------------
@@ -235,11 +235,11 @@ uint32_t RelocInfo::wasm_call_tag() const {
Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
// [base + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && base != ebp) {
+ if (disp == 0 && RelocInfo::IsNoInfo(rmode) && base != ebp) {
// [base]
set_modrm(0, base);
if (base == esp) set_sib(times_1, esp, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
+ } else if (is_int8(disp) && RelocInfo::IsNoInfo(rmode)) {
// [base + disp8]
set_modrm(1, base);
if (base == esp) set_sib(times_1, esp, base);
@@ -256,11 +256,11 @@ Operand::Operand(Register base, Register index, ScaleFactor scale, int32_t disp,
RelocInfo::Mode rmode) {
DCHECK(index != esp); // illegal addressing mode
// [base + index*scale + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && base != ebp) {
+ if (disp == 0 && RelocInfo::IsNoInfo(rmode) && base != ebp) {
// [base + index*scale]
set_modrm(0, esp);
set_sib(scale, index, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
+ } else if (is_int8(disp) && RelocInfo::IsNoInfo(rmode)) {
// [base + index*scale + disp8]
set_modrm(1, esp);
set_sib(scale, index, base);
@@ -2861,23 +2861,6 @@ void Assembler::pd(byte opcode, XMMRegister dst, Operand src) {
}
// AVX instructions
-void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
- Operand src2) {
- DCHECK(IsEnabled(FMA3));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(src1, kLIG, k66, k0F38, kW1);
- EMIT(op);
- emit_sse_operand(dst, src2);
-}
-
-void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
- Operand src2) {
- DCHECK(IsEnabled(FMA3));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(src1, kLIG, k66, k0F38, kW0);
- EMIT(op);
- emit_sse_operand(dst, src2);
-}
void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF3, k0F, kWIG);
@@ -3222,19 +3205,31 @@ void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w,
CpuFeature feature) {
+ vinstr(op, dst, src1, src2, kL128, pp, m, w, feature);
+}
+
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ vinstr(op, dst, src1, src2, kL128, pp, m, w, feature);
+}
+
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, VectorLength l, SIMDPrefix pp,
+ LeadingOpcode m, VexW w, CpuFeature feature) {
DCHECK(IsEnabled(feature));
EnsureSpace ensure_space(this);
- emit_vex_prefix(src1, kL128, pp, m, w);
+ emit_vex_prefix(src1, l, pp, m, w);
EMIT(op);
emit_sse_operand(dst, src2);
}
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w,
CpuFeature feature) {
DCHECK(IsEnabled(feature));
EnsureSpace ensure_space(this);
- emit_vex_prefix(src1, kL128, pp, m, w);
+ emit_vex_prefix(src1, l, pp, m, w);
EMIT(op);
emit_sse_operand(dst, src2);
}
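
The hunk above layers the vinstr overloads: the pre-existing signatures become thin forwarders that default to kL128, and the new overloads take an explicit VectorLength so callers such as the FMA emitters can choose the encoding per instruction. A minimal, self-contained sketch of that forwarding pattern (hypothetical types, not V8's):

  #include <cstdio>

  enum VectorLength { kL128, kL256, kLIG };

  struct Emitter {
    // Legacy entry point: existing call sites keep the 128-bit default.
    void vinstr(int op) { vinstr(op, kL128); }
    // New entry point: the vector length is an explicit parameter.
    void vinstr(int op, VectorLength l) {
      std::printf("VEX.l=%d opcode=0x%02x\n", static_cast<int>(l), op);
    }
  };

  int main() {
    Emitter e;
    e.vinstr(0x99);        // forwards with kL128
    e.vinstr(0x99, kLIG);  // explicit length
  }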
@@ -3383,7 +3378,7 @@ void Assembler::emit_operand(int code, Operand adr) {
for (unsigned i = 1; i < length; i++) EMIT(adr.encoded_bytes()[i]);
// Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode())) {
+ if (length >= sizeof(int32_t) && !RelocInfo::IsNoInfo(adr.rmode())) {
pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
RecordRelocInfo(adr.rmode());
if (adr.rmode() == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
@@ -3417,7 +3412,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3427,7 +3422,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
RecordRelocInfo(rmode);
}
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index bdf2007485..8c5f20a112 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -42,6 +42,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/ia32/constants-ia32.h"
+#include "src/codegen/ia32/fma-instr.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/ia32/sse-instr.h"
#include "src/codegen/label.h"
@@ -105,7 +106,7 @@ enum RoundingMode {
class Immediate {
public:
// Calls where x is an Address (uintptr_t) resolve to this overload.
- inline explicit Immediate(int x, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ inline explicit Immediate(int x, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
value_.immediate = x;
rmode_ = rmode;
}
@@ -156,19 +157,21 @@ class Immediate {
return bit_cast<ExternalReference>(immediate());
}
- bool is_zero() const { return RelocInfo::IsNone(rmode_) && immediate() == 0; }
+ bool is_zero() const {
+ return RelocInfo::IsNoInfo(rmode_) && immediate() == 0;
+ }
bool is_int8() const {
- return RelocInfo::IsNone(rmode_) && i::is_int8(immediate());
+ return RelocInfo::IsNoInfo(rmode_) && i::is_int8(immediate());
}
bool is_uint8() const {
- return RelocInfo::IsNone(rmode_) && i::is_uint8(immediate());
+ return RelocInfo::IsNoInfo(rmode_) && i::is_uint8(immediate());
}
bool is_int16() const {
- return RelocInfo::IsNone(rmode_) && i::is_int16(immediate());
+ return RelocInfo::IsNoInfo(rmode_) && i::is_int16(immediate());
}
bool is_uint16() const {
- return RelocInfo::IsNone(rmode_) && i::is_uint16(immediate());
+ return RelocInfo::IsNoInfo(rmode_) && i::is_uint16(immediate());
}
RelocInfo::Mode rmode() const { return rmode_; }
@@ -233,7 +236,7 @@ class V8_EXPORT_PRIVATE Operand {
// [base + disp/r]
explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO);
// [rip + disp/r]
explicit Operand(Label* label) {
@@ -243,11 +246,11 @@ class V8_EXPORT_PRIVATE Operand {
// [base + index*scale + disp/r]
explicit Operand(Register base, Register index, ScaleFactor scale,
- int32_t disp, RelocInfo::Mode rmode = RelocInfo::NONE);
+ int32_t disp, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
// [index*scale + disp/r]
explicit Operand(Register index, ScaleFactor scale, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO);
static Operand JumpTable(Register index, ScaleFactor scale, Label* table) {
return Operand(index, scale, reinterpret_cast<int32_t>(table),
@@ -300,7 +303,7 @@ class V8_EXPORT_PRIVATE Operand {
// The number of bytes in buf_.
uint8_t len_ = 0;
// Only valid if len_ > 4.
- RelocInfo::Mode rmode_ = RelocInfo::NONE;
+ RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
@@ -1071,154 +1074,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
// AVX instructions
- void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd132sd(dst, src1, Operand(src2));
- }
- void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd213sd(dst, src1, Operand(src2));
- }
- void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd231sd(dst, src1, Operand(src2));
- }
- void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0x99, dst, src1, src2);
- }
- void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xa9, dst, src1, src2);
- }
- void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xb9, dst, src1, src2);
- }
- void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub132sd(dst, src1, Operand(src2));
- }
- void vfmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub213sd(dst, src1, Operand(src2));
- }
- void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub231sd(dst, src1, Operand(src2));
- }
- void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0x9b, dst, src1, src2);
- }
- void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xab, dst, src1, src2);
- }
- void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xbb, dst, src1, src2);
- }
- void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd132sd(dst, src1, Operand(src2));
- }
- void vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd213sd(dst, src1, Operand(src2));
- }
- void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd231sd(dst, src1, Operand(src2));
- }
- void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0x9d, dst, src1, src2);
- }
- void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xad, dst, src1, src2);
- }
- void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xbd, dst, src1, src2);
- }
- void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub132sd(dst, src1, Operand(src2));
- }
- void vfnmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub213sd(dst, src1, Operand(src2));
- }
- void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub231sd(dst, src1, Operand(src2));
- }
- void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0x9f, dst, src1, src2);
- }
- void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xaf, dst, src1, src2);
- }
- void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xbf, dst, src1, src2);
- }
- void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
-
- void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd132ss(dst, src1, Operand(src2));
- }
- void vfmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd213ss(dst, src1, Operand(src2));
- }
- void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd231ss(dst, src1, Operand(src2));
- }
- void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0x99, dst, src1, src2);
- }
- void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xa9, dst, src1, src2);
- }
- void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xb9, dst, src1, src2);
- }
- void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub132ss(dst, src1, Operand(src2));
- }
- void vfmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub213ss(dst, src1, Operand(src2));
- }
- void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub231ss(dst, src1, Operand(src2));
- }
- void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0x9b, dst, src1, src2);
- }
- void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xab, dst, src1, src2);
- }
- void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xbb, dst, src1, src2);
- }
- void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd132ss(dst, src1, Operand(src2));
- }
- void vfnmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd213ss(dst, src1, Operand(src2));
- }
- void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd231ss(dst, src1, Operand(src2));
- }
- void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0x9d, dst, src1, src2);
- }
- void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xad, dst, src1, src2);
- }
- void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xbd, dst, src1, src2);
- }
- void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub132ss(dst, src1, Operand(src2));
- }
- void vfnmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub213ss(dst, src1, Operand(src2));
- }
- void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub231ss(dst, src1, Operand(src2));
- }
- void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0x9f, dst, src1, src2);
- }
- void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xaf, dst, src1, src2);
- }
- void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xbf, dst, src1, src2);
- }
- void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
-
void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vaddss(dst, src1, Operand(src2));
}
@@ -1755,6 +1610,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
AVX2_BROADCAST_LIST(AVX2_INSTRUCTION)
#undef AVX2_INSTRUCTION
+#define FMA(instr, length, prefix, escape1, escape2, extension, opcode) \
+ void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vinstr(0x##opcode, dst, src1, src2, k##length, k##prefix, \
+ k##escape1##escape2, k##extension, FMA3); \
+ } \
+ void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vinstr(0x##opcode, dst, src1, src2, k##length, k##prefix, \
+ k##escape1##escape2, k##extension, FMA3); \
+ }
+ FMA_INSTRUCTION_LIST(FMA)
+#undef FMA
+
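
Each row of FMA_INSTRUCTION_LIST is stamped into a register/register and a register/memory emitter by the FMA macro above. For example, the vfmadd132sd row (L128, 66, 0F, 38, W1, 99) expands to:

  void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
    vinstr(0x99, dst, src1, src2, kL128, k66, k0F38, kW1, FMA3);
  }
  void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
    vinstr(0x99, dst, src1, src2, kL128, k66, k0F38, kW1, FMA3);
  }

which supersedes the hand-written vfmadd132sd/vfmasd(0x99, ...) overloads deleted earlier in this header, with the VEX fields now spelled out per row instead of being fixed inside vfmasd/vfmass.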
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal
@@ -1774,9 +1641,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dd(data, rmode);
}
void dd(Label* label);
@@ -1883,9 +1750,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature = AVX);
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature = AVX);
// Most BMI instructions are similar.
void bmi1(byte op, Register reg, Register vreg, Operand rm);
void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
+ void fma_instr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w);
+ void fma_instr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w);
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
diff --git a/deps/v8/src/codegen/ia32/fma-instr.h b/deps/v8/src/codegen/ia32/fma-instr.h
new file mode 100644
index 0000000000..ab8746aec8
--- /dev/null
+++ b/deps/v8/src/codegen/ia32/fma-instr.h
@@ -0,0 +1,58 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#ifndef V8_CODEGEN_IA32_FMA_INSTR_H_
+#define V8_CODEGEN_IA32_FMA_INSTR_H_
+
+#define FMA_SD_INSTRUCTION_LIST(V) \
+ V(vfmadd132sd, L128, 66, 0F, 38, W1, 99) \
+ V(vfmadd213sd, L128, 66, 0F, 38, W1, a9) \
+ V(vfmadd231sd, L128, 66, 0F, 38, W1, b9) \
+ V(vfmsub132sd, L128, 66, 0F, 38, W1, 9b) \
+ V(vfmsub213sd, L128, 66, 0F, 38, W1, ab) \
+ V(vfmsub231sd, L128, 66, 0F, 38, W1, bb) \
+ V(vfnmadd132sd, L128, 66, 0F, 38, W1, 9d) \
+ V(vfnmadd213sd, L128, 66, 0F, 38, W1, ad) \
+ V(vfnmadd231sd, L128, 66, 0F, 38, W1, bd) \
+ V(vfnmsub132sd, L128, 66, 0F, 38, W1, 9f) \
+ V(vfnmsub213sd, L128, 66, 0F, 38, W1, af) \
+ V(vfnmsub231sd, L128, 66, 0F, 38, W1, bf)
+
+#define FMA_SS_INSTRUCTION_LIST(V) \
+ V(vfmadd132ss, LIG, 66, 0F, 38, W0, 99) \
+ V(vfmadd213ss, LIG, 66, 0F, 38, W0, a9) \
+ V(vfmadd231ss, LIG, 66, 0F, 38, W0, b9) \
+ V(vfmsub132ss, LIG, 66, 0F, 38, W0, 9b) \
+ V(vfmsub213ss, LIG, 66, 0F, 38, W0, ab) \
+ V(vfmsub231ss, LIG, 66, 0F, 38, W0, bb) \
+ V(vfnmadd132ss, LIG, 66, 0F, 38, W0, 9d) \
+ V(vfnmadd213ss, LIG, 66, 0F, 38, W0, ad) \
+ V(vfnmadd231ss, LIG, 66, 0F, 38, W0, bd) \
+ V(vfnmsub132ss, LIG, 66, 0F, 38, W0, 9f) \
+ V(vfnmsub213ss, LIG, 66, 0F, 38, W0, af) \
+ V(vfnmsub231ss, LIG, 66, 0F, 38, W0, bf)
+
+#define FMA_PS_INSTRUCTION_LIST(V) \
+ V(vfmadd132ps, L128, 66, 0F, 38, W0, 98) \
+ V(vfmadd213ps, L128, 66, 0F, 38, W0, a8) \
+ V(vfmadd231ps, L128, 66, 0F, 38, W0, b8) \
+ V(vfnmadd132ps, L128, 66, 0F, 38, W0, 9c) \
+ V(vfnmadd213ps, L128, 66, 0F, 38, W0, ac) \
+ V(vfnmadd231ps, L128, 66, 0F, 38, W0, bc)
+
+#define FMA_PD_INSTRUCTION_LIST(V) \
+ V(vfmadd132pd, L128, 66, 0F, 38, W1, 98) \
+ V(vfmadd213pd, L128, 66, 0F, 38, W1, a8) \
+ V(vfmadd231pd, L128, 66, 0F, 38, W1, b8) \
+ V(vfnmadd132pd, L128, 66, 0F, 38, W1, 9c) \
+ V(vfnmadd213pd, L128, 66, 0F, 38, W1, ac) \
+ V(vfnmadd231pd, L128, 66, 0F, 38, W1, bc)
+
+#define FMA_INSTRUCTION_LIST(V) \
+ FMA_SD_INSTRUCTION_LIST(V) \
+ FMA_SS_INSTRUCTION_LIST(V) \
+ FMA_PS_INSTRUCTION_LIST(V) \
+ FMA_PD_INSTRUCTION_LIST(V)
+
+#endif // V8_CODEGEN_IA32_FMA_INSTR_H_
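
The new header follows the same X-macro convention as the sse-instr.h lists already included by assembler-ia32.h: the includer defines a one-row macro and applies the list to it, as the FMA macro above does. A hypothetical, self-contained illustration of the pattern (names and the main function are demo-only):

  #include <cstdio>

  #define DEMO_FMA_LIST(V) \
    V(vfmadd132sd, 0x99)   \
    V(vfmsub132sd, 0x9b)

  int main() {
  #define PRINT(name, opcode) std::printf(#name " -> opcode 0x%x\n", opcode);
    DEMO_FMA_LIST(PRINT)  // stamps one printf per row
  #undef PRINT
  }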
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 5a60679853..b4824736b9 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -326,7 +326,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
- // We don't allow a GC during a store buffer overflow so there is no need to
+ // We don't allow a GC in a write barrier slow path so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
int bytes = 0;
@@ -1212,7 +1212,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
jmp(entry, RelocInfo::OFF_HEAP_TARGET);
}
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 89dd2dbcfd..e1b7e15363 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -622,7 +622,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// Utilities
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.cc b/deps/v8/src/codegen/loong64/assembler-loong64.cc
index 131fff9a6a..d212bec035 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.cc
@@ -672,7 +672,7 @@ int Assembler::BranchOffset(Instr instr) {
// instruction space. There is no guarantee that the relocated location can be
// similarly encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
void Assembler::GenB(Opcode opcode, Register rj, int32_t si21) {
@@ -2168,7 +2168,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -2181,7 +2181,7 @@ void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.h b/deps/v8/src/codegen/loong64/assembler-loong64.h
index 5264258d93..63fe001d22 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.h
@@ -33,7 +33,7 @@ class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -43,7 +43,8 @@ class Operand {
}
V8_INLINE explicit Operand(const char* s);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value)
+ : rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -738,9 +739,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dq(data, rmode);
}
void dd(Label* label);
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
index 6577b194c4..cccfa6294c 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -865,7 +865,7 @@ void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
Register scratch) {
- DCHECK(sa >= 1 && sa <= 31);
+ DCHECK(sa >= 1 && sa <= 63);
if (sa <= 4) {
alsl_d(rd, rj, rk, sa);
} else {
@@ -2677,9 +2677,9 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
BranchShort(&skip, NegateCondition(cond), rj, rk);
}
intptr_t offset_diff = target - pc_offset();
- if (RelocInfo::IsNone(rmode) && is_int28(offset_diff)) {
+ if (RelocInfo::IsNoInfo(rmode) && is_int28(offset_diff)) {
bl(offset_diff >> 2);
- } else if (RelocInfo::IsNone(rmode) && is_int38(offset_diff)) {
+ } else if (RelocInfo::IsNoInfo(rmode) && is_int38(offset_diff)) {
pcaddu18i(t7, static_cast<int32_t>(offset_diff) >> 18);
jirl(ra, t7, (offset_diff & 0x3ffff) >> 2);
} else {
@@ -3348,7 +3348,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -3745,6 +3745,23 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
+void MacroAssembler::AssertCallableFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
+ Push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotACallableFunction, t8,
+ Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
+ FIRST_CALLABLE_JS_FUNCTION_TYPE));
+ Pop(object);
+ }
+}
+
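+// AssertCallableFunction relies on the usual biased unsigned-range check:
+// GetInstanceTypeRange subtracts FIRST_CALLABLE_JS_FUNCTION_TYPE from the
+// instance type, so a single unsigned lower-or-same compare against the range
+// width covers both bounds. A plain C++ sketch of the check (illustrative
+// only; V8's instance-type constants are assumed):
+//
+//   bool IsCallableJSFunctionType(uint16_t type, uint16_t first,
+//                                 uint16_t last) {
+//     // Types below `first` wrap to large unsigned values after the
+//     // subtraction, so one compare rejects both out-of-range directions.
+//     return static_cast<uint16_t>(type - first) <=
+//            static_cast<uint16_t>(last - first);
+//   }
+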
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
index 866a74f81c..3d82b87a47 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -963,7 +963,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
@@ -1024,6 +1024,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a callable JSFunction, enabled via
+ // --debug-code.
+ void AssertCallableFunction(Register object);
+
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
@@ -1062,7 +1066,7 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
UseScratchRegisterScope scope(this);
Register scratch = scope.Acquire();
- BlockTrampolinePoolFor((3 + case_count) * kInstrSize);
+ BlockTrampolinePoolFor(3 + case_count);
pcaddi(scratch, 3);
alsl_d(scratch, index, scratch, kInstrSizeLog2);
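
The GenerateSwitchTable change reads as a units fix: on the MIPS-family ports, BlockTrampolinePoolFor takes an instruction count and scales by kInstrSize itself (assumed here from those implementations), so multiplying by kInstrSize at the call site over-blocked the pool by a factor of kInstrSize. The intended reservation, modeled in C++:

  #include <cstddef>

  constexpr size_t kInstrSize = 4;  // fixed-width LoongArch instructions

  // Assumed semantics: the pool stays blocked for this many bytes.
  constexpr size_t BlockedBytes(size_t case_count) {
    return (3 + case_count) * kInstrSize;  // 3 fixed instrs + table slots
  }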
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index b3f8ef56b5..981ac9783f 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -40,7 +40,9 @@ enum class MachineRepresentation : uint8_t {
kTagged, // (uncompressed) Object (Smi or HeapObject)
kCompressedPointer, // (compressed) HeapObject
kCompressed, // (compressed) Object (Smi or HeapObject)
- kCagedPointer, // Guaranteed to point into the virtual memory cage.
+ // A 64-bit pointer encoded in a way (e.g. as offset) that guarantees it will
+ // point into the virtual memory cage.
+ kCagedPointer,
// FP and SIMD representations must be last, and in order of increasing size.
kFloat32,
kFloat64,
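
A hypothetical sketch of the offset-style encoding the reworded comment describes (V8's actual scheme may differ in detail): if only a cage-relative offset is ever stored, every decoded pointer lands inside the cage by construction.

  #include <cstdint>

  struct VirtualMemoryCage {
    uintptr_t base;
    uint64_t size;  // assumed to be a power of two for the mask below

    uintptr_t Decode(uint64_t encoded_offset) const {
      return base + (encoded_offset & (size - 1));  // cannot escape the cage
    }
  };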
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index e1ba6e511f..267281396a 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -1133,7 +1133,7 @@ int Assembler::BranchOffset(Instr instr) {
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
@@ -3591,7 +3591,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3602,7 +3602,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3677,7 +3677,7 @@ void Assembler::CheckTrampolinePool() {
bc(&after_pool);
nop();
} else {
- GenPCRelativeJump(t8, t9, 0, RelocInfo::NONE,
+ GenPCRelativeJump(t8, t9, 0, RelocInfo::NO_INFO,
BranchDelaySlot::PROTECT);
}
}
@@ -3799,7 +3799,7 @@ void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
// or when changing imm32 that lui/ori pair loads.
or_(tf, ra, zero_reg);
nal(); // Relative place of nal instruction determines kLongBranchPCOffset.
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
RecordRelocInfo(rmode);
}
lui(ts, (imm32 & kHiMask) >> kLuiShift);
@@ -3817,7 +3817,7 @@ void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
RelocInfo::Mode rmode,
BranchDelaySlot bdslot) {
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
RecordRelocInfo(rmode);
}
// Order of these instructions is relied upon when patching them
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 2ca7e9b363..0acee5e39d 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -63,7 +63,7 @@ class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -73,7 +73,8 @@ class Operand {
}
V8_INLINE explicit Operand(const char* s);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value)
+ : rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -1399,9 +1400,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dd(data, rmode);
}
void dd(Label* label);
@@ -1515,6 +1516,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline int UnboundLabelsCount() { return unbound_labels_count_; }
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
protected:
// Load Scaled Address instruction.
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
@@ -1570,8 +1573,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
bool has_exception() const { return internal_trampoline_exception_; }
- bool is_trampoline_emitted() const { return trampoline_emitted_; }
-
// Temporarily block automatic assembly buffer growth.
void StartBlockGrowBuffer() {
DCHECK(!block_buffer_growth_);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 4c76a1c1ec..ea4639c37c 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -4047,7 +4047,7 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm32;
imm32 = branch_long_offset(L);
- GenPCRelativeJump(t8, t9, imm32, RelocInfo::NONE, bdslot);
+ GenPCRelativeJump(t8, t9, imm32, RelocInfo::NO_INFO, bdslot);
}
}
@@ -4057,7 +4057,7 @@ void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) {
} else {
// Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
- GenPCRelativeJump(t8, t9, offset, RelocInfo::NONE, bdslot);
+ GenPCRelativeJump(t8, t9, offset, RelocInfo::NO_INFO, bdslot);
}
}
@@ -4070,7 +4070,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm32;
imm32 = branch_long_offset(L);
- GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NONE, bdslot);
+ GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NO_INFO, bdslot);
}
}
@@ -4704,7 +4704,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -5084,6 +5084,23 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
+void MacroAssembler::AssertCallableFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
+ push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotACallableFunction, t8,
+ Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
+ FIRST_CALLABLE_JS_FUNCTION_TYPE));
+ pop(object);
+ }
+}
+
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index eaed98dfe6..f2491fcf19 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -1074,7 +1074,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
@@ -1132,6 +1132,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a callable JSFunction, enabled via
+ // --debug-code.
+ void AssertCallableFunction(Register object);
+
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index a82bd5511e..9f5b34e956 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -265,7 +265,7 @@ const Instr kLwSwOffsetMask = kImm16Mask;
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(at.bit()) {
+ scratch_register_list_(at.bit() | s0.bit()) {
if (CpuFeatures::IsSupported(MIPS_SIMD)) {
EnableCpuFeature(MIPS_SIMD);
}
@@ -1061,7 +1061,7 @@ int Assembler::BranchOffset(Instr instr) {
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
@@ -3790,7 +3790,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3801,7 +3801,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index 80f282c696..f17d47e990 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -63,7 +63,7 @@ class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -73,7 +73,8 @@ class Operand {
}
V8_INLINE explicit Operand(const char* s);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value)
+ : rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -1459,9 +1460,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dq(data, rmode);
}
void dd(Label* label);
@@ -1562,6 +1563,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline int UnboundLabelsCount() { return unbound_labels_count_; }
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
protected:
// Load Scaled Address instructions.
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
@@ -1618,8 +1621,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
bool has_exception() const { return internal_trampoline_exception_; }
- bool is_trampoline_emitted() const { return trampoline_emitted_; }
-
// Temporarily block automatic assembly buffer growth.
void StartBlockGrowBuffer() {
DCHECK(!block_buffer_growth_);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index f580aed7c8..291d6d5b6a 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -1097,13 +1097,16 @@ void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
- DCHECK(sa >= 1 && sa <= 31);
+ DCHECK(sa >= 1 && sa <= 63);
if (kArchVariant == kMips64r6 && sa <= 4) {
dlsa(rd, rt, rs, sa - 1);
} else {
Register tmp = rd == rt ? scratch : rd;
DCHECK(tmp != rt);
- dsll(tmp, rs, sa);
+ if (sa <= 31)
+ dsll(tmp, rs, sa);
+ else
+ dsll32(tmp, rs, sa - 32);
Daddu(rd, rt, tmp);
}
}
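
The widened DCHECK works because of the dsll/dsll32 split: MIPS64's dsll immediate holds only 5 bits, and dsll32 shifts by its immediate plus 32, so together they cover shift amounts 1 through 63. A quick C++ model of the selection:

  #include <cassert>
  #include <cstdint>

  uint64_t DlsaShift(uint64_t rs, unsigned sa) {
    assert(sa >= 1 && sa <= 63);
    if (sa <= 31) return rs << sa;   // dsll
    return (rs << 32) << (sa - 32);  // dsll32 encodes sa - 32
  }
  // Dlsa then computes rd = rt + DlsaShift(rs, sa).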
@@ -5230,7 +5233,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -5630,6 +5633,23 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
+void MacroAssembler::AssertCallableFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
+ push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotACallableFunction, t8,
+ Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
+ FIRST_CALLABLE_JS_FUNCTION_TYPE));
+ pop(object);
+ }
+}
+
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index b1956867b4..bcb11adf69 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -1149,7 +1149,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
@@ -1211,6 +1211,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a callable JSFunction, enabled via
+ // --debug-code.
+ void AssertCallableFunction(Register object);
+
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc
index 6f51cc43af..5e88b1e456 100644
--- a/deps/v8/src/codegen/pending-optimization-table.cc
+++ b/deps/v8/src/codegen/pending-optimization-table.cc
@@ -30,6 +30,11 @@ void PendingOptimizationTable::PreparedForOptimization(
if (allow_heuristic_optimization) {
status |= FunctionStatus::kAllowHeuristicOptimization;
}
+ Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
+
+ IsCompiledScope is_compiled_scope;
+ SharedFunctionInfo::EnsureBytecodeArrayAvailable(isolate, shared_info,
+ &is_compiled_scope);
Handle<ObjectHashTable> table =
isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()
@@ -38,7 +43,7 @@ void PendingOptimizationTable::PreparedForOptimization(
isolate->heap()->pending_optimize_for_test_bytecode()),
isolate);
Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
- handle(function->shared().GetBytecodeArray(isolate), isolate),
+ handle(shared_info->GetBytecodeArray(isolate), isolate),
handle(Smi::FromInt(status), isolate), AllocationType::kYoung);
table =
ObjectHashTable::Put(table, handle(function->shared(), isolate), tuple);
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
index 9274c502a8..364b20e596 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -252,7 +252,7 @@ void RelocInfo::WipeOut() {
}
}
-Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
+Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NO_INFO) {}
void Assembler::UntrackBranch() {
DCHECK(!trampoline_emitted_);
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index ccb144dc61..b65fe2e729 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -1185,7 +1185,7 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
if (assembler != nullptr && assembler->predictable_code_size()) return true;
return assembler->options().record_reloc_info_for_serialization;
- } else if (RelocInfo::IsNone(rmode_)) {
+ } else if (RelocInfo::IsNoInfo(rmode_)) {
return false;
}
return true;
@@ -1322,6 +1322,15 @@ void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
}
}
+void Assembler::patch_wasm_cpi_return_address(Register dst, int pc_offset,
+ int return_address_offset) {
+ DCHECK(is_int16(return_address_offset));
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + pc_offset, kInstrSize + kGap));
+ patching_assembler.addi(dst, dst, Operand(return_address_offset));
+}
+
void Assembler::mov_label_offset(Register dst, Label* label) {
int position = link(label);
if (label->is_bound()) {
@@ -1978,7 +1987,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -1989,7 +1998,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -2000,7 +2009,7 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
void Assembler::dp(uintptr_t data, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 654c856d7d..ea82539afb 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -67,7 +67,7 @@ class V8_EXPORT_PRIVATE Operand {
public:
// immediate
V8_INLINE explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rmode_(rmode) {
value_.immediate = immediate;
}
@@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE Operand {
value_.immediate = static_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
// rm
@@ -968,6 +968,10 @@ class Assembler : public AssemblerBase {
void bitwise_mov32(Register dst, int32_t value);
void bitwise_add32(Register dst, Register src, int32_t value);
+ // Patch the offset to the return address after CallCFunction.
+ void patch_wasm_cpi_return_address(Register dst, int pc_offset,
+ int return_address_offset);
+
// Load the position of the label relative to the generated code object
// pointer in a register.
void mov_label_offset(Register dst, Label* label);
@@ -1206,9 +1210,9 @@ class Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
// Read/patch instructions
Instr instr_at(int pos) {
@@ -1303,7 +1307,7 @@ class Assembler : public AssemblerBase {
ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
intptr_t value) {
bool sharing_ok =
- RelocInfo::IsNone(rmode) ||
+ RelocInfo::IsNoInfo(rmode) ||
(!options().record_reloc_info_for_serialization &&
RelocInfo::IsShareableRelocMode(rmode) &&
!is_constant_pool_entry_sharing_blocked() &&
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
index adc36e2407..f3359d3ca8 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
@@ -121,7 +121,7 @@ constexpr auto CallTrampolineDescriptor::registers() {
// static
constexpr auto CallVarargsDescriptor::registers() {
- // r3 : number of arguments (on the stack, not including receiver)
+ // r3 : number of arguments (on the stack)
// r4 : the target to call
// r7 : arguments list length (untagged)
// r5 : arguments list (FixedArray)
@@ -139,13 +139,13 @@ constexpr auto CallForwardVarargsDescriptor::registers() {
// static
constexpr auto CallFunctionTemplateDescriptor::registers() {
// r4 : function template info
- // r5 : number of arguments (on the stack, not including receiver)
+ // r5 : number of arguments (on the stack)
return RegisterArray(r4, r5);
}
// static
constexpr auto CallWithSpreadDescriptor::registers() {
- // r3 : number of arguments (on the stack, not including receiver)
+ // r3 : number of arguments (on the stack)
// r4 : the target to call
// r5 : the object to spread
return RegisterArray(r4, r3, r5);
@@ -160,7 +160,7 @@ constexpr auto CallWithArrayLikeDescriptor::registers() {
// static
constexpr auto ConstructVarargsDescriptor::registers() {
- // r3 : number of arguments (on the stack, not including receiver)
+ // r3 : number of arguments (on the stack)
// r4 : the target to call
// r6 : the new target
// r7 : arguments list length (untagged)
@@ -179,7 +179,7 @@ constexpr auto ConstructForwardVarargsDescriptor::registers() {
// static
constexpr auto ConstructWithSpreadDescriptor::registers() {
- // r3 : number of arguments (on the stack, not including receiver)
+ // r3 : number of arguments (on the stack)
// r4 : the target to call
// r6 : the new target
// r5 : the object to spread
@@ -241,7 +241,7 @@ constexpr auto InterpreterDispatchDescriptor::registers() {
// static
constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
- return RegisterArray(r3, // argument count (not including receiver)
+ return RegisterArray(r3, // argument count
r5, // address of first argument
                       r4);  // the target callable to be called
}
@@ -249,7 +249,7 @@ constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
// static
constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
return RegisterArray(
- r3, // argument count (not including receiver)
+ r3, // argument count
r7, // address of the first argument
r4, // constructor to call
r6, // new target
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index bc3cea67f1..724cedc1c2 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -1493,20 +1493,27 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// Underapplication. Move the arguments already in the stack, including the
// receiver and the return address.
{
- Label copy;
+ Label copy, skip;
Register src = r9, dest = r8;
addi(src, sp, Operand(-kSystemPointerSize));
ShiftLeftU64(r0, expected_parameter_count, Operand(kSystemPointerSizeLog2));
sub(sp, sp, r0);
// Update stack pointer.
addi(dest, sp, Operand(-kSystemPointerSize));
- addi(r0, actual_parameter_count, Operand(1));
+ if (!kJSArgcIncludesReceiver) {
+ addi(r0, actual_parameter_count, Operand(1));
+ } else {
+ mr(r0, actual_parameter_count);
+ cmpi(r0, Operand::Zero());
+ ble(&skip);
+ }
mtctr(r0);
bind(&copy);
LoadU64WithUpdate(r0, MemOperand(src, kSystemPointerSize));
StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize));
bdnz(&copy);
+ bind(&skip);
}
// Fill remaining expected arguments with undefined values.
@@ -2013,7 +2020,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -3572,21 +3579,37 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
addi(sp, sp, Operand(2 * kSimd128Size));
}
-void TurboAssembler::ByteReverseU16(Register dst, Register val) {
- subi(sp, sp, Operand(kSystemPointerSize));
- sth(val, MemOperand(sp));
- lhbrx(dst, MemOperand(r0, sp));
- addi(sp, sp, Operand(kSystemPointerSize));
+void TurboAssembler::ByteReverseU16(Register dst, Register val,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ brh(dst, val);
+ ZeroExtHalfWord(dst, dst);
+ return;
+ }
+ rlwinm(scratch, val, 8, 16, 23);
+ rlwinm(dst, val, 24, 24, 31);
+ orx(dst, scratch, dst);
+ ZeroExtHalfWord(dst, dst);
}
-void TurboAssembler::ByteReverseU32(Register dst, Register val) {
- subi(sp, sp, Operand(kSystemPointerSize));
- stw(val, MemOperand(sp));
- lwbrx(dst, MemOperand(r0, sp));
- addi(sp, sp, Operand(kSystemPointerSize));
+void TurboAssembler::ByteReverseU32(Register dst, Register val,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ brw(dst, val);
+ ZeroExtWord32(dst, dst);
+ return;
+ }
+ rotlwi(scratch, val, 8);
+ rlwimi(scratch, val, 24, 0, 7);
+ rlwimi(scratch, val, 24, 16, 23);
+ ZeroExtWord32(dst, scratch);
}
void TurboAssembler::ByteReverseU64(Register dst, Register val) {
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ brd(dst, val);
+ return;
+ }
subi(sp, sp, Operand(kSystemPointerSize));
std(val, MemOperand(sp));
ldbrx(dst, MemOperand(r0, sp));
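
On pre-Power10 cores the new rlwinm/rlwimi sequences compute the byte swaps entirely in registers, replacing the old store-then-lhbrx/lwbrx round trip through the stack (Power10's brh/brw/brd do it in one instruction). What the sequences compute, written out in C++:

  #include <cstdint>

  uint32_t ByteReverseU16(uint32_t v) {  // result zero-extended, as above
    return ((v & 0xff) << 8) | ((v >> 8) & 0xff);
  }

  uint32_t ByteReverseU32(uint32_t v) {
    return (v << 24) | ((v & 0xff00) << 8) |
           ((v >> 8) & 0xff00) | (v >> 24);
  }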
@@ -3819,7 +3842,7 @@ void TurboAssembler::ReverseBitsU64(Register dst, Register src,
void TurboAssembler::ReverseBitsU32(Register dst, Register src,
Register scratch1, Register scratch2) {
- ByteReverseU32(dst, src);
+ ByteReverseU32(dst, src, scratch1);
for (int i = 4; i < 8; i++) {
ReverseBitsInSingleByteU64(dst, dst, scratch1, scratch2, i);
}
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index febedfe3ba..200015bd85 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -612,8 +612,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Simd128Register scratch);
void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch);
- void ByteReverseU16(Register dst, Register val);
- void ByteReverseU32(Register dst, Register val);
+ void ByteReverseU16(Register dst, Register val, Register scratch);
+ void ByteReverseU32(Register dst, Register val, Register scratch);
void ByteReverseU64(Register dst, Register val);
// Before calling a C-function from generated code, align arguments on stack.
@@ -1261,7 +1261,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 1985ff28bc..6057eca4a1 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -394,7 +394,7 @@ bool RelocInfo::RequiresRelocation(Code code) {
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
- case NONE:
+ case NO_INFO:
return "no reloc";
case COMPRESSED_EMBEDDED_OBJECT:
return "compressed embedded object";
@@ -522,7 +522,7 @@ void RelocInfo::Verify(Isolate* isolate) {
Address addr = target_off_heap_target();
CHECK_NE(addr, kNullAddress);
CHECK(Builtins::IsBuiltinId(
- InstructionStream::TryLookupCode(isolate, addr)));
+ OffHeapInstructionStream::TryLookupCode(isolate, addr)));
break;
}
case RUNTIME_ENTRY:
@@ -537,7 +537,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case VENEER_POOL:
case WASM_CALL:
case WASM_STUB_CALL:
- case NONE:
+ case NO_INFO:
break;
case NUMBER_OF_MODES:
case PC_JUMP:
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index cb1a04860d..b92907fbf0 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -54,7 +54,7 @@ class RelocInfo {
// Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
// and IsShareableRelocMode predicates below).
- NONE, // Never recorded value. Most common one, hence value 0.
+ NO_INFO, // Never recorded value. Most common one, hence value 0.
CODE_TARGET,
RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
@@ -132,7 +132,7 @@ class RelocInfo {
return mode <= LAST_GCED_ENUM;
}
static constexpr bool IsShareableRelocMode(Mode mode) {
- return mode == RelocInfo::NONE ||
+ return mode == RelocInfo::NO_INFO ||
mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
}
static constexpr bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
@@ -191,7 +191,7 @@ class RelocInfo {
static constexpr bool IsOffHeapTarget(Mode mode) {
return mode == OFF_HEAP_TARGET;
}
- static constexpr bool IsNone(Mode mode) { return mode == NONE; }
+ static constexpr bool IsNoInfo(Mode mode) { return mode == NO_INFO; }
static bool IsOnlyForSerializer(Mode mode) {
#ifdef V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index dce8f468ce..9304f012d0 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -798,7 +798,7 @@ int Assembler::AuipcOffset(Instr instr) {
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
void Assembler::disassembleInstr(Instr instr) {
@@ -2461,6 +2461,27 @@ void Assembler::EBREAK() {
}
// RVV
+
+void Assembler::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void Assembler::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void Assembler::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void Assembler::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
void Assembler::vmv_vv(VRegister vd, VRegister vs1) {
GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
}
@@ -2536,6 +2557,15 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
}
+void Assembler::vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+ GenInstrV(VWADDUW_FUNCT6, OP_MVX, vd, rs1, vs2, mask);
+}
+
+void Assembler::vid_v(VRegister vd, MaskType mask) {
+ GenInstrV(VMUNARY0_FUNCT6, OP_MVV, vd, VID_V, v0, mask);
+}
+
#define DEFINE_OPIVV(name, funct6) \
void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask) { \
@@ -2548,6 +2578,12 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
}
+#define DEFINE_OPFRED(name, funct6) \
+ void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
#define DEFINE_OPIVX(name, funct6) \
void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
MaskType mask) { \
@@ -2561,11 +2597,19 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
}
#define DEFINE_OPMVV(name, funct6) \
- void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask) { \
GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
}
+// OPMVX forms route through the GenInstrV(funct6, opcode, vd, rs1, vs2,
+// mask) overload, which places the scalar operand in rs1.
+#define DEFINE_OPMVX(name, funct6) \
+ void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask); \
+ }
+
#define DEFINE_OPFVF(name, funct6) \
void Assembler::name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
MaskType mask) { \
@@ -2584,12 +2628,22 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
}
+// Vector Integer Extension
+#define DEFINE_OPMVV_VIE(name, vs1) \
+ void Assembler::name(VRegister vd, VRegister vs2, MaskType mask) { \
+ GenInstrV(VXUNARY0_FUNCT6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
void Assembler::vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask) {
GenInstrV(VMV_FUNCT6, OP_FVF, vd, fs1, v0, mask);
}
-void Assembler::vfmv_fs(FPURegister fd, VRegister vs2, MaskType mask) {
- GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, mask);
+void Assembler::vfmv_fs(FPURegister fd, VRegister vs2) {
+ GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, NoMask);
+}
+
+void Assembler::vfmv_sf(VRegister vd, FPURegister fs) {
+ GenInstrV(VRFUNARY0_FUNCT6, OP_FVF, vd, fs, v0, NoMask);
}
DEFINE_OPIVV(vadd, VADD_FUNCT6)
@@ -2597,6 +2651,23 @@ DEFINE_OPIVX(vadd, VADD_FUNCT6)
DEFINE_OPIVI(vadd, VADD_FUNCT6)
DEFINE_OPIVV(vsub, VSUB_FUNCT6)
DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
@@ -2664,14 +2735,16 @@ DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
DEFINE_OPIVV(vsll, VSLL_FUNCT6)
DEFINE_OPIVX(vsll, VSLL_FUNCT6)
DEFINE_OPIVI(vsll, VSLL_FUNCT6)
-DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
-DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
-DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
-DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
@@ -2688,6 +2761,8 @@ DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
+DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
+
DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
@@ -2721,6 +2796,14 @@ DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+// Vector Integer Extension
+DEFINE_OPMVV_VIE(vzext_vf8, 0b00010)
+DEFINE_OPMVV_VIE(vsext_vf8, 0b00011)
+DEFINE_OPMVV_VIE(vzext_vf4, 0b00100)
+DEFINE_OPMVV_VIE(vsext_vf4, 0b00101)
+DEFINE_OPMVV_VIE(vzext_vf2, 0b00110)
+DEFINE_OPMVV_VIE(vsext_vf2, 0b00111)
+
#undef DEFINE_OPIVI
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
@@ -2728,6 +2811,7 @@ DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
#undef DEFINE_OPFVF
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
+#undef DEFINE_OPMVV_VIE
void Assembler::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
TailAgnosticType tail, MaskAgnosticType mask) {
@@ -2767,19 +2851,7 @@ uint8_t vsew_switch(VSew vsew) {
case E32:
width = 0b110;
break;
- case E64:
- width = 0b111;
- break;
- case E128:
- width = 0b000;
- break;
- case E256:
- width = 0b101;
- break;
- case E512:
- width = 0b110;
- break;
- case E1024:
+ default:
width = 0b111;
break;
}
@@ -2788,308 +2860,259 @@ uint8_t vsew_switch(VSew vsew) {
void Assembler::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b000);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b000);
}
void Assembler::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b000);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b000);
}
void Assembler::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0);
+ GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0);
}
void Assembler::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b000);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b000);
}
void Assembler::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, IsMew, 0b000);
+ GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, 0, 0b000);
}
void Assembler::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0b000);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0b000);
}
void Assembler::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, IsMew, 0b000);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, 0, 0b000);
}
void Assembler::vlseg2(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b001);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b001);
}
void Assembler::vlseg3(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b010);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b010);
}
void Assembler::vlseg4(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b011);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b011);
}
void Assembler::vlseg5(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b100);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b100);
}
void Assembler::vlseg6(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b101);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b101);
}
void Assembler::vlseg7(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b110);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b110);
}
void Assembler::vlseg8(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b111);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b111);
}
void Assembler::vsseg2(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b001);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b001);
}
void Assembler::vsseg3(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b010);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b010);
}
void Assembler::vsseg4(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b011);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b011);
}
void Assembler::vsseg5(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b100);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b100);
}
void Assembler::vsseg6(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b101);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b101);
}
void Assembler::vsseg7(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b110);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b110);
}
void Assembler::vsseg8(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b111);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b111);
}
void Assembler::vlsseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}
void Assembler::vlsseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}
void Assembler::vlsseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}
void Assembler::vlsseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}
void Assembler::vlsseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}
void Assembler::vlsseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}
void Assembler::vlsseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}
void Assembler::vssseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}
void Assembler::vssseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}
void Assembler::vssseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}
void Assembler::vssseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}
void Assembler::vssseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}
void Assembler::vssseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}
void Assembler::vssseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}
void Assembler::vlxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}
void Assembler::vlxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}
void Assembler::vlxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}
void Assembler::vlxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}
void Assembler::vlxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}
void Assembler::vlxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}
void Assembler::vlxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}
void Assembler::vsxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}
void Assembler::vsxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}
void Assembler::vsxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}
void Assembler::vsxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}
void Assembler::vsxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}
void Assembler::vsxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}
void Assembler::vsxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}
// Privileged
@@ -3594,7 +3617,7 @@ void Assembler::db(uint8_t data) {
}
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3605,7 +3628,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
}
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3969,6 +3992,26 @@ void ConstantPool::Check(Emission force_emit, Jump require_jump,
SetNextCheckIn(ConstantPool::kCheckInterval);
}
+LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
+ uint8_t laneidx) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ *this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16);
+ break;
+ case MachineRepresentation::kWord16:
+ *this = LoadStoreLaneParams(laneidx, 16, kRvvVLEN / 8);
+ break;
+ case MachineRepresentation::kWord32:
+ *this = LoadStoreLaneParams(laneidx, 32, kRvvVLEN / 4);
+ break;
+ case MachineRepresentation::kWord64:
+ *this = LoadStoreLaneParams(laneidx, 64, kRvvVLEN / 2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
 // Pool entries are accessed with a pc-relative load, so the pool cannot be
 // more than 1 MB away. Since constant pool emission checks are interval
 // based, and we want to keep entries close to the code, we try to emit
 // every 64 KB.
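For context on the reduction emitters added above: an RVV .vs reduction
folds every element of vs2, seeded with element 0 of vs1, into element 0
of vd. A hedged sketch of a generated sequence (register choices are
illustrative, not taken from the patch):

  VU.set(kScratchReg, E32, m1);  // SEW = 32, LMUL = 1
  vredmaxu_vs(v0, v2, v1);       // v0[0] = max(v1[0], v2[0], ..., v2[n-1])
  vmv_xs(kScratchReg, v0);       // read the reduced lane back into a GPR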
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index 42bb92fd87..63e5dde19e 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -68,7 +68,7 @@ class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -78,7 +78,8 @@ class Operand {
}
V8_INLINE explicit Operand(const char* s);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value)
+ : rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -738,6 +739,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+ void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+
void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
@@ -747,7 +757,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
void vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask = NoMask);
- void vfmv_fs(FPURegister fd, VRegister vs2, MaskType mask = NoMask);
+ void vfmv_fs(FPURegister fd, VRegister vs2);
+ void vfmv_sf(VRegister vd, FPURegister fs);
+
+ void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask = NoMask);
+ void vid_v(VRegister vd, MaskType mask = Mask);
#define DEFINE_OPIVV(name, funct6) \
void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
@@ -762,7 +777,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
MaskType mask = NoMask);
#define DEFINE_OPMVV(name, funct6) \
- void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask = NoMask);
#define DEFINE_OPMVX(name, funct6) \
@@ -773,6 +788,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask = NoMask);
+#define DEFINE_OPFRED(name, funct6) \
+ void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
#define DEFINE_OPFVF(name, funct6) \
void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
MaskType mask = NoMask);
@@ -785,11 +804,31 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \
MaskType mask = NoMask);
+#define DEFINE_OPMVV_VIE(name) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask);
+
DEFINE_OPIVV(vadd, VADD_FUNCT6)
DEFINE_OPIVX(vadd, VADD_FUNCT6)
DEFINE_OPIVI(vadd, VADD_FUNCT6)
DEFINE_OPIVV(vsub, VSUB_FUNCT6)
DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+ DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+ DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+ DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+ DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+ DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
@@ -860,14 +899,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
DEFINE_OPIVV(vsll, VSLL_FUNCT6)
DEFINE_OPIVX(vsll, VSLL_FUNCT6)
DEFINE_OPIVI(vsll, VSLL_FUNCT6)
- DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
- DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
- DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
- DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+ DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+ DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
@@ -884,6 +925,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
DEFINE_OPFVV(vfmax, VMFMAX_FUNCT6)
DEFINE_OPFVV(vfmin, VMFMIN_FUNCT6)
+ DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
@@ -918,6 +960,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+ // Vector Integer Extension
+ DEFINE_OPMVV_VIE(vzext_vf8)
+ DEFINE_OPMVV_VIE(vsext_vf8)
+ DEFINE_OPMVV_VIE(vzext_vf4)
+ DEFINE_OPMVV_VIE(vsext_vf4)
+ DEFINE_OPMVV_VIE(vzext_vf2)
+ DEFINE_OPMVV_VIE(vsext_vf2)
+
#undef DEFINE_OPIVI
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
@@ -927,6 +977,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#undef DEFINE_OPFVF
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
+#undef DEFINE_OPMVV_VIE
+#undef DEFINE_OPFRED
#define DEFINE_VFUNARY(name, funct6, vs1) \
void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
@@ -937,17 +989,34 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_xu_f_v, VFUNARY0_FUNCT6, VFWCVT_XU_F_V)
+ DEFINE_VFUNARY(vfwcvt_x_f_v, VFUNARY0_FUNCT6, VFWCVT_X_F_V)
+ DEFINE_VFUNARY(vfwcvt_f_x_v, VFUNARY0_FUNCT6, VFWCVT_F_X_V)
+ DEFINE_VFUNARY(vfwcvt_f_xu_v, VFUNARY0_FUNCT6, VFWCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_f_f_v, VFUNARY0_FUNCT6, VFWCVT_F_F_V)
+
DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
+ DEFINE_VFUNARY(vfncvt_x_f_w, VFUNARY0_FUNCT6, VFNCVT_X_F_W)
+ DEFINE_VFUNARY(vfncvt_xu_f_w, VFUNARY0_FUNCT6, VFNCVT_XU_F_W)
DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
+ DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
#undef DEFINE_VFUNARY
- void vnot_vv(VRegister dst, VRegister src) { vxor_vi(dst, src, -1); }
+ void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vxor_vi(dst, src, -1, mask);
+ }
- void vneg_vv(VRegister dst, VRegister src) { vrsub_vx(dst, src, zero_reg); }
+ void vneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vrsub_vx(dst, src, zero_reg, mask);
+ }
- void vfneg_vv(VRegister dst, VRegister src) { vfsngjn_vv(dst, src, src); }
- void vfabs_vv(VRegister dst, VRegister src) { vfsngjx_vv(dst, src, src); }
+ void vfneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjn_vv(dst, src, src, mask);
+ }
+ void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjx_vv(dst, src, src, mask);
+ }
// Privileged
void uret();
void sret();
@@ -1130,9 +1199,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dq(data, rmode);
}
void dd(Label* label);
@@ -1247,6 +1316,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
+ void set(Register rd, int8_t sew, int8_t lmul) {
+ DCHECK_GE(sew, E8);
+ DCHECK_LE(sew, E64);
+ DCHECK_GE(lmul, m1);
+ DCHECK_LE(lmul, mf2);
+ set(rd, VSew(sew), Vlmul(lmul));
+ }
+
void set(RoundingMode mode) {
if (mode_ != mode) {
assm_->addi(kScratchReg, zero_reg, mode << kFcsrFrmShift);
@@ -1533,6 +1610,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
VRegister vs2, MaskType mask = NoMask);
void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, int8_t vs1,
VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs2,
+ MaskType mask = NoMask);
// OPMVV OPFVV
void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, VRegister vs1,
VRegister vs2, MaskType mask = NoMask);
@@ -1683,6 +1762,18 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
RegList old_available_;
};
+class LoadStoreLaneParams {
+ public:
+ int sz;
+ uint8_t laneidx;
+
+ LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);
+
+ private:
+ LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
+ : sz(sz), laneidx(laneidx % lanes) {}
+};
+
} // namespace internal
} // namespace v8
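Usage sketch for the new LoadStoreLaneParams helper (values follow the
constructor in assembler-riscv64.cc; the variable name is mine):

  LoadStoreLaneParams p(MachineRepresentation::kWord32, /*laneidx=*/2);
  // p.sz == 32; p.laneidx is the requested index wrapped to the lane
  // count (kRvvVLEN / 4 for the 32-bit case, per the constructor above).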
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index 173a5d0457..b5afe9b1df 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -712,6 +712,61 @@ enum Opcode : uint32_t {
RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),
+ VDIVU_FUNCT6 = 0b100000,
+ RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift),
+
+ VDIV_FUNCT6 = 0b100001,
+ RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift),
+
+ VREMU_FUNCT6 = 0b100010,
+ RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift),
+
+ VREM_FUNCT6 = 0b100011,
+ RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift),
+
+ VMULHU_FUNCT6 = 0b100100,
+ RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift),
+
+ VMUL_FUNCT6 = 0b100101,
+ RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VWMUL_FUNCT6 = 0b111011,
+ RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VWMULU_FUNCT6 = 0b111000,
+ RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift),
+
+ VMULHSU_FUNCT6 = 0b100110,
+ RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
+
+ VMULH_FUNCT6 = 0b100111,
+ RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift),
+
+ VWADD_FUNCT6 = 0b110001,
+ RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift),
+
+ VWADDU_FUNCT6 = 0b110000,
+ RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift),
+
+ VWADDUW_FUNCT6 = 0b110101,
+ RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
+
+ VCOMPRESS_FUNCT6 = 0b010111,
+ RO_V_VCOMPRESS_VV = OP_MVV | (VCOMPRESS_FUNCT6 << kRvvFunct6Shift),
+
VSADDU_FUNCT6 = 0b100000,
RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
@@ -829,11 +884,20 @@ enum Opcode : uint32_t {
RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ VSRA_FUNCT6 = 0b101001,
+ RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift),
+
VSLL_FUNCT6 = 0b100101,
RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ VSMUL_FUNCT6 = 0b100111,
+ RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift),
+
VADC_FUNCT6 = 0b010000,
RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
@@ -846,13 +910,23 @@ enum Opcode : uint32_t {
VWXUNARY0_FUNCT6 = 0b010000,
VRXUNARY0_FUNCT6 = 0b010000,
+ VMUNARY0_FUNCT6 = 0b010100,
RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VID_V = 0b10001,
+
+ VXUNARY0_FUNCT6 = 0b010010,
+ RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift),
VWFUNARY0_FUNCT6 = 0b010000,
RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift),
+ VRFUNARY0_FUNCT6 = 0b010000,
+ RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift),
+
VREDMAXU_FUNCT6 = 0b000110,
RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
VREDMAX_FUNCT6 = 0b000111,
@@ -872,9 +946,19 @@ enum Opcode : uint32_t {
VFCVT_X_F_V = 0b00001,
VFCVT_F_XU_V = 0b00010,
VFCVT_F_X_V = 0b00011,
+ VFWCVT_XU_F_V = 0b01000,
+ VFWCVT_X_F_V = 0b01001,
+ VFWCVT_F_XU_V = 0b01010,
+ VFWCVT_F_X_V = 0b01011,
+ VFWCVT_F_F_V = 0b01100,
VFNCVT_F_F_W = 0b10100,
+ VFNCVT_X_F_W = 0b10001,
+ VFNCVT_XU_F_W = 0b10000,
VFCLASS_V = 0b10000,
+ VFSQRT_V = 0b00000,
+ VFSQRT7_V = 0b00100,
+ VFREC7_V = 0b00101,
VFADD_FUNCT6 = 0b000000,
RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift),
@@ -918,6 +1002,9 @@ enum Opcode : uint32_t {
RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift),
RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+ VFREDMAX_FUNCT6 = 0b0001111,
+ RO_V_VFREDMAX_VV = OP_FVV | (VFREDMAX_FUNCT6 << kRvvFunct6Shift),
+
VFMIN_FUNCT6 = 0b000100,
RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift),
RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift),
@@ -1132,14 +1219,10 @@ enum FClassFlag {
V(E8) \
V(E16) \
V(E32) \
- V(E64) \
- V(E128) \
- V(E256) \
- V(E512) \
- V(E1024)
+ V(E64)
-enum VSew {
#define DEFINE_FLAG(name) name,
+enum VSew {
RVV_SEW(DEFINE_FLAG)
#undef DEFINE_FLAG
};
@@ -1785,7 +1868,7 @@ class InstructionGetters : public T {
RVV_LMUL(CAST_VLMUL)
default:
return "unknown";
-#undef CAST_VSEW
+#undef CAST_VLMUL
}
}
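All of the RO_V_* opcodes introduced above follow one composition rule:
take the base category (OP_MVV, OP_MVX, OP_IVV, ...) and OR in the
instruction's funct6 selector shifted into the funct6 field. A sketch
restating the pattern (MakeRvvOpcode is a hypothetical helper, not in
the patch):

  constexpr uint32_t MakeRvvOpcode(uint32_t base, uint32_t funct6) {
    return base | (funct6 << kRvvFunct6Shift);
  }
  static_assert(MakeRvvOpcode(OP_MVV, VDIV_FUNCT6) == RO_V_VDIV_VV,
                "matches the enum entries above");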
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 7f93187322..8b3b76da32 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -2159,11 +2159,25 @@ void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
// they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits),
// and JS round semantics specify that rounding of NaN (Infinity) returns NaN
  // (Infinity), so NaN and Infinity are considered rounded values too.
- li(scratch, 64 - kFloat32MantissaBits - kFloat32ExponentBits);
+ const int kFloatMantissaBits =
+ sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits;
+ const int kFloatExponentBits =
+ sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits;
+ const int kFloatExponentBias =
+ sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias;
+
+  // Extract the biased exponent: shift the exponent field up to the top
+  // with vsll, then logically shift it back down with vsrl (the scalar
+  // ExtractBits pattern of slli followed by srli).
+
+ li(scratch, 64 - kFloatMantissaBits - kFloatExponentBits);
vsll_vx(v_scratch, src, scratch);
- li(scratch, 64 - kFloat32ExponentBits);
+ li(scratch, 64 - kFloatExponentBits);
vsrl_vx(v_scratch, v_scratch, scratch);
- li(scratch, kFloat32ExponentBias + kFloat32MantissaBits);
+ li(scratch, kFloatExponentBias + kFloatMantissaBits);
vmslt_vx(v0, v_scratch, scratch);
VU.set(frm);
@@ -2205,6 +2219,26 @@ void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch,
RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN);
}
+void TurboAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RTZ);
+}
+
+void TurboAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RTZ);
+}
+
+void TurboAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RNE);
+}
+
+void TurboAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RNE);
+}
+
void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RDN);
@@ -3543,6 +3577,7 @@ void TurboAssembler::LoadAddress(Register dst, Label* target,
CHECK(is_int32(offset + 0x800));
int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
int32_t Lo12 = (int32_t)offset << 20 >> 20;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
auipc(dst, Hi20);
addi(dst, dst, Lo12);
} else {
@@ -3993,6 +4028,64 @@ void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
vsll_vi(v0, v0, 1);
vmerge_vx(dst, kScratchReg, dst);
}
+
+void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
+ MemOperand src) {
+ if (ts == 8) {
+ Lbu(kScratchReg2, src);
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ VU.set(kScratchReg, E8, m1);
+ vmerge_vx(dst, kScratchReg2, dst);
+ } else if (ts == 16) {
+ Lhu(kScratchReg2, src);
+ VU.set(kScratchReg, E16, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst, kScratchReg2, dst);
+ } else if (ts == 32) {
+ Lwu(kScratchReg2, src);
+ VU.set(kScratchReg, E32, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst, kScratchReg2, dst);
+ } else if (ts == 64) {
+ Ld(kScratchReg2, src);
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst, kScratchReg2, dst);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
+ MemOperand dst) {
+ if (sz == 8) {
+ VU.set(kScratchReg, E8, m1);
+ vslidedown_vi(kSimd128ScratchReg, src, laneidx);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sb(kScratchReg, dst);
+ } else if (sz == 16) {
+ VU.set(kScratchReg, E16, m1);
+ vslidedown_vi(kSimd128ScratchReg, src, laneidx);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sh(kScratchReg, dst);
+ } else if (sz == 32) {
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(kSimd128ScratchReg, src, laneidx);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sw(kScratchReg, dst);
+ } else {
+ DCHECK_EQ(sz, 64);
+ VU.set(kScratchReg, E64, m1);
+ vslidedown_vi(kSimd128ScratchReg, src, laneidx);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sd(kScratchReg, dst);
+ }
+}
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -4120,7 +4213,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
   // Ld an Address from the constant pool.
   // Record the value in the constant pool.
if (!FLAG_riscv_constant_pool) {
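The LoadLane/StoreLane helpers added above use a mask-and-merge idiom:
build a one-hot mask in v0, then vmerge the freshly loaded scalar into
only the selected lane (or slide the lane down to element 0 for a store).
An annotated restatement of the 32-bit load path (comments are mine):

  Lwu(kScratchReg2, src);             // fetch the replacement lane
  VU.set(kScratchReg, E32, m1);       // 32-bit elements, LMUL = 1
  li(kScratchReg, 0x1 << laneidx);    // one-hot bit selects the lane
  vmv_sx(v0, kScratchReg);            // install the mask in v0
  vmerge_vx(dst, kScratchReg2, dst);  // dst[i] = mask[i] ? scalar : dst[i]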
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 9e43eaf8aa..89d88f7af2 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -854,6 +854,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
VRegister v_scratch);
void Floor_d(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch);
+ void Trunc_f(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+ void Trunc_d(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+ void Round_f(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+ void Round_d(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
   // Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
@@ -953,6 +961,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Vlmul lmul);
void WasmRvvS128const(VRegister dst, const uint8_t imms[16]);
+ void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
+ void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
+
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
@@ -1187,7 +1198,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
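The four new vector rounding helpers differ only in the rounding mode they
hand to RoundHelper: RTZ (toward zero) for Trunc and RNE (nearest, ties to
even) for Round, alongside the existing RDN (toward minus infinity) for
Floor. Hedged usage sketch:

  Trunc_d(dst, src, scratch, v_scratch);  // RoundHelper<double>(..., RTZ)
  Round_f(dst, src, scratch, v_scratch);  // RoundHelper<float>(..., RNE)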
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index 14c993512f..2b1e4d3d65 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -55,8 +55,13 @@ namespace internal {
V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
+#define UNALLOCATABLE_VECTOR_REGISTERS(V) \
+ V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \
+ V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
+ V(v24) V(v25)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
V(ft9) V(ft10) V(ft11) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
V(fa6) V(fa7)
@@ -374,8 +379,9 @@ constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = fa0;
-constexpr VRegister kSimd128ScratchReg = v27;
-constexpr VRegister kSimd128ScratchReg2 = v26;
+constexpr VRegister kSimd128ScratchReg = v26;
+constexpr VRegister kSimd128ScratchReg2 = v27;
+constexpr VRegister kSimd128ScratchReg3 = v8;
constexpr VRegister kSimd128RegZero = v25;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
diff --git a/deps/v8/src/codegen/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h
index b3d0ffa1da..8170c02204 100644
--- a/deps/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h
@@ -247,7 +247,7 @@ void RelocInfo::WipeOut() {
}
// Operand constructors
-Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
+Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NO_INFO) {}
// Fetch the 32bit value from the FIXED_SEQUENCE IIHF / IILF
Address Assembler::target_address_at(Address pc, Address constant_pool) {
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 8457e7c536..1283c87317 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -802,7 +802,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -813,7 +813,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -824,7 +824,7 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
void Assembler::dp(uintptr_t data, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index 86fd0190b9..cfc65f70d4 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -93,7 +93,7 @@ class V8_EXPORT_PRIVATE Operand {
public:
// immediate
V8_INLINE explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rmode_(rmode) {
value_.immediate = immediate;
}
@@ -103,7 +103,7 @@ class V8_EXPORT_PRIVATE Operand {
value_.immediate = static_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -1312,9 +1312,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
// Read/patch instructions
SixByteInstr instr_at(int pos) {
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
index a51909b936..398637c40a 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
@@ -121,7 +121,7 @@ constexpr auto CallTrampolineDescriptor::registers() {
// static
constexpr auto CallVarargsDescriptor::registers() {
- // r2 : number of arguments (on the stack, not including receiver)
+ // r2 : number of arguments (on the stack)
// r3 : the target to call
// r6 : arguments list length (untagged)
// r4 : arguments list (FixedArray)
@@ -139,13 +139,13 @@ constexpr auto CallForwardVarargsDescriptor::registers() {
// static
constexpr auto CallFunctionTemplateDescriptor::registers() {
// r3 : function template info
- // r4 : number of arguments (on the stack, not including receiver)
+ // r4 : number of arguments (on the stack)
return RegisterArray(r3, r4);
}
// static
constexpr auto CallWithSpreadDescriptor::registers() {
- // r2: number of arguments (on the stack, not including receiver)
+ // r2: number of arguments (on the stack)
// r3 : the target to call
// r4 : the object to spread
return RegisterArray(r3, r2, r4);
@@ -160,7 +160,7 @@ constexpr auto CallWithArrayLikeDescriptor::registers() {
// static
constexpr auto ConstructVarargsDescriptor::registers() {
- // r2 : number of arguments (on the stack, not including receiver)
+ // r2 : number of arguments (on the stack)
// r3 : the target to call
// r5 : the new target
// r6 : arguments list length (untagged)
@@ -179,7 +179,7 @@ constexpr auto ConstructForwardVarargsDescriptor::registers() {
// static
constexpr auto ConstructWithSpreadDescriptor::registers() {
- // r2 : number of arguments (on the stack, not including receiver)
+ // r2 : number of arguments (on the stack)
// r3 : the target to call
// r5 : the new target
// r4 : the object to spread
@@ -211,8 +211,7 @@ constexpr auto CompareDescriptor::registers() { return RegisterArray(r3, r2); }
// static
constexpr auto Compare_BaselineDescriptor::registers() {
- // TODO(v8:11421): Implement on this platform.
- return DefaultRegisterArray();
+ return RegisterArray(r3, r2, r4);
}
// static
@@ -220,8 +219,7 @@ constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r3, r2); }
// static
constexpr auto BinaryOp_BaselineDescriptor::registers() {
- // TODO(v8:11421): Implement on this platform.
- return DefaultRegisterArray();
+ return RegisterArray(r3, r2, r4);
}
// static
@@ -241,7 +239,7 @@ constexpr auto InterpreterDispatchDescriptor::registers() {
// static
constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
- return RegisterArray(r2, // argument count (not including receiver)
+ return RegisterArray(r2, // argument count
r4, // address of first argument
                        r3); // the target callable to be called
}
@@ -249,7 +247,7 @@ constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
// static
constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
return RegisterArray(
- r2, // argument count (not including receiver)
+ r2, // argument count
r6, // address of the first argument
r3, // constructor to call
r5, // new target
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 06e98fe9d9..7080e89eec 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -496,6 +496,13 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
Call(ip);
}
+void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this,
+ CommentForOffHeapTrampoline("tail call", builtin));
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ b(ip);
+}
+
void TurboAssembler::Drop(int count) {
if (count > 0) {
int total = count * kSystemPointerSize;
@@ -1578,7 +1585,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Clear top frame.
Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
- StoreU64(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);
+ StoreU64(MemOperand(ip), Operand(0, RelocInfo::NO_INFO), r0);
// Restore current context from top and clear it in debug mode.
Move(ip,
@@ -1691,7 +1698,11 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
lay(dest, MemOperand(dest, kSystemPointerSize));
SubS64(num, num, Operand(1));
bind(&check);
- b(ge, &copy);
+ if (kJSArgcIncludesReceiver) {
+ b(gt, &copy);
+ } else {
+ b(ge, &copy);
+ }
}
// Fill remaining expected arguments with undefined values.
@@ -2013,7 +2024,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -2456,7 +2467,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) {
value = src.immediate();
}
- if (src.rmode() != RelocInfo::NONE) {
+ if (src.rmode() != RelocInfo::NO_INFO) {
// some form of relocation needed
RecordRelocInfo(src.rmode(), value);
}
@@ -2464,7 +2475,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) {
int32_t hi_32 = static_cast<int32_t>(value >> 32);
int32_t lo_32 = static_cast<int32_t>(value);
- if (src.rmode() == RelocInfo::NONE) {
+ if (src.rmode() == RelocInfo::NO_INFO) {
if (hi_32 == 0) {
if (is_uint16(lo_32)) {
llill(dst, Operand(lo_32));
@@ -3431,7 +3442,7 @@ void TurboAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); }
// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
void TurboAssembler::CmpS32(Register dst, const Operand& opnd) {
- if (opnd.rmode() == RelocInfo::NONE) {
+ if (opnd.rmode() == RelocInfo::NO_INFO) {
intptr_t value = opnd.immediate();
if (is_int16(value))
chi(dst, opnd);
@@ -3447,7 +3458,7 @@ void TurboAssembler::CmpS32(Register dst, const Operand& opnd) {
// Compare Pointer Sized Register vs Immediate
// This helper will set up proper relocation entries if required.
void TurboAssembler::CmpS64(Register dst, const Operand& opnd) {
- if (opnd.rmode() == RelocInfo::NONE) {
+ if (opnd.rmode() == RelocInfo::NO_INFO) {
cgfi(dst, opnd);
} else {
mov(r0, opnd); // Need to generate 64-bit relocation
@@ -3619,7 +3630,7 @@ void TurboAssembler::StoreU64(Register src, const MemOperand& mem,
void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
- DCHECK_EQ(opnd.rmode(), RelocInfo::NONE);
+ DCHECK_EQ(opnd.rmode(), RelocInfo::NO_INFO);
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
@@ -5553,6 +5564,22 @@ STORE_LANE_LIST(STORE_LANE)
#undef CAN_LOAD_STORE_REVERSE
#undef IS_BIG_ENDIAN
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ LoadU64(destination, MemOperand(kRootRegister, offset));
+}
+
#undef kScratchDoubleReg
} // namespace internal
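The new LoadStackLimit above brings s390 in line with the other ports: instead of materializing the external reference, it loads the limit through the root register. Below is a minimal standalone sketch of the offset arithmetic it relies on; the isolate layout and names are illustrative, not the V8 API:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Pretend isolate: the root register points at isolate_root, and the
      // jslimit field lives at a fixed offset in the same allocation.
      alignas(8) uint8_t isolate[256] = {};
      uintptr_t isolate_root = reinterpret_cast<uintptr_t>(isolate);
      uintptr_t jslimit_address = isolate_root + 64;  // illustrative offset

      // RootRegisterOffsetForExternalReference boils down to this subtraction.
      intptr_t offset = static_cast<intptr_t>(jslimit_address - isolate_root);
      assert(offset == 64);

      // LoadU64(destination, MemOperand(kRootRegister, offset)) then reads:
      *reinterpret_cast<uint64_t*>(jslimit_address) = 0x1000;
      uint64_t destination =
          *reinterpret_cast<uint64_t*>(isolate_root + offset);
      printf("stack limit: 0x%llx\n", (unsigned long long)destination);
    }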
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index 2a799f80f8..aa2e0ef5b8 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -45,6 +45,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
using TurboAssemblerBase::TurboAssemblerBase;
void CallBuiltin(Builtin builtin);
+ void TailCallBuiltin(Builtin builtin);
void AtomicCmpExchangeHelper(Register addr, Register output,
Register old_value, Register new_value,
int start, int end, int shift_amount, int offset,
@@ -1267,6 +1268,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
+ void LoadStackLimit(Register destination, StackLimitKind kind);
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
// TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1351,7 +1353,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 67a17d5f0e..1d08a3b4d7 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -4,6 +4,8 @@
#include "src/codegen/safepoint-table.h"
+#include <iomanip>
+
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -20,98 +22,136 @@ namespace internal {
SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code)
: SafepointTable(code.InstructionStart(isolate, pc),
- code.SafepointTableAddress(), true) {}
+ code.SafepointTableAddress()) {}
#if V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(const wasm::WasmCode* code)
- : SafepointTable(code->instruction_start(),
- code->instruction_start() + code->safepoint_table_offset(),
- false) {}
+ : SafepointTable(
+ code->instruction_start(),
+ code->instruction_start() + code->safepoint_table_offset()) {}
#endif // V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(Address instruction_start,
- Address safepoint_table_address, bool has_deopt)
+ Address safepoint_table_address)
: instruction_start_(instruction_start),
- has_deopt_(has_deopt),
safepoint_table_address_(safepoint_table_address),
- length_(ReadLength(safepoint_table_address)),
- entry_size_(ReadEntrySize(safepoint_table_address)) {}
-
-unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
- for (unsigned i = 0; i < length(); i++) {
- if (GetTrampolinePcOffset(i) == static_cast<int>(pc_offset)) {
- return GetPcOffset(i);
- } else if (GetPcOffset(i) == pc_offset) {
- return pc_offset;
+ length_(base::Memory<int>(safepoint_table_address + kLengthOffset)),
+ entry_configuration_(base::Memory<uint32_t>(safepoint_table_address +
+ kEntryConfigurationOffset)) {}
+
+int SafepointTable::find_return_pc(int pc_offset) {
+ for (int i = 0; i < length(); i++) {
+ SafepointEntry entry = GetEntry(i);
+ if (entry.trampoline_pc() == pc_offset || entry.pc() == pc_offset) {
+ return entry.pc();
}
}
UNREACHABLE();
}
SafepointEntry SafepointTable::FindEntry(Address pc) const {
- unsigned pc_offset = static_cast<unsigned>(pc - instruction_start_);
- // We use kMaxUInt32 as sentinel value, so check that we don't hit that.
- DCHECK_NE(kMaxUInt32, pc_offset);
- unsigned len = length();
- CHECK_GT(len, 0);
- // If pc == kMaxUInt32, then this entry covers all call sites in the function.
- if (len == 1 && GetPcOffset(0) == kMaxUInt32) return GetEntry(0);
- for (unsigned i = 0; i < len; i++) {
- // TODO(kasperl): Replace the linear search with binary search.
- if (GetPcOffset(i) == pc_offset ||
- (has_deopt_ &&
- GetTrampolinePcOffset(i) == static_cast<int>(pc_offset))) {
- return GetEntry(i);
+ int pc_offset = static_cast<int>(pc - instruction_start_);
+
+ // Check if the PC is pointing at a trampoline.
+ if (has_deopt_data()) {
+ int candidate = -1;
+ for (int i = 0; i < length_; ++i) {
+ int trampoline_pc = GetEntry(i).trampoline_pc();
+ if (trampoline_pc != -1 && trampoline_pc <= pc_offset) candidate = i;
+ if (trampoline_pc > pc_offset) break;
+ }
+ if (candidate != -1) return GetEntry(candidate);
+ }
+
+ for (int i = 0; i < length_; ++i) {
+ SafepointEntry entry = GetEntry(i);
+ if (i == length_ - 1 || GetEntry(i + 1).pc() > pc_offset) {
+ DCHECK_LE(entry.pc(), pc_offset);
+ return entry;
}
}
UNREACHABLE();
}
-void SafepointTable::PrintEntry(unsigned index, std::ostream& os) const {
- disasm::NameConverter converter;
- SafepointEntry entry = GetEntry(index);
- uint8_t* bits = entry.bits();
+void SafepointTable::Print(std::ostream& os) const {
+ os << "Safepoints (entries = " << length_ << ", byte size = " << byte_size()
+ << ")\n";
+
+ for (int index = 0; index < length_; index++) {
+ SafepointEntry entry = GetEntry(index);
+ os << reinterpret_cast<const void*>(instruction_start_ + entry.pc()) << " "
+ << std::setw(6) << std::hex << entry.pc() << std::dec;
+
+ if (!entry.tagged_slots().empty()) {
+ os << " slots (sp->fp): ";
+ for (uint8_t bits : entry.tagged_slots()) {
+ for (int bit = 0; bit < kBitsPerByte; ++bit) {
+ os << ((bits >> bit) & 1);
+ }
+ }
+ }
- // Print the stack slot bits.
- if (entry_size_ > 0) {
- for (uint32_t i = 0; i < entry_size_; ++i) {
- for (int bit = 0; bit < kBitsPerByte; ++bit) {
- os << ((bits[i] & (1 << bit)) ? "1" : "0");
+ if (entry.tagged_register_indexes() != 0) {
+ os << " registers: ";
+ uint32_t register_bits = entry.tagged_register_indexes();
+ int bits = 32 - base::bits::CountLeadingZeros32(register_bits);
+ for (int j = bits - 1; j >= 0; --j) {
+ os << ((register_bits >> j) & 1);
}
}
+
+ if (entry.has_deoptimization_index()) {
+ os << " deopt " << std::setw(6) << entry.deoptimization_index()
+ << " trampoline: " << std::setw(6) << std::hex
+ << entry.trampoline_pc();
+ }
+ os << "\n";
}
}
Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler) {
- deoptimization_info_.push_back(
- DeoptimizationInfo(zone_, assembler->pc_offset_for_safepoint()));
- DeoptimizationInfo& new_info = deoptimization_info_.back();
- return Safepoint(new_info.stack_indexes, &new_info.register_indexes);
-}
-
-unsigned SafepointTableBuilder::GetCodeOffset() const {
- DCHECK(emitted_);
- return offset_;
+ entries_.push_back(EntryBuilder(zone_, assembler->pc_offset_for_safepoint()));
+ EntryBuilder& new_entry = entries_.back();
+ return Safepoint(new_entry.stack_indexes, &new_entry.register_indexes);
}
int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
int start,
- unsigned deopt_index) {
+ int deopt_index) {
+ DCHECK_NE(SafepointEntry::kNoTrampolinePC, trampoline);
+ DCHECK_NE(SafepointEntry::kNoDeoptIndex, deopt_index);
+ auto it = entries_.Find(start);
+ DCHECK(std::any_of(it, entries_.end(),
+ [pc](auto& entry) { return entry.pc == pc; }));
int index = start;
- for (auto it = deoptimization_info_.Find(start);
- it != deoptimization_info_.end(); it++, index++) {
- if (static_cast<int>(it->pc) == pc) {
- it->trampoline = trampoline;
- it->deopt_index = deopt_index;
- return index;
+ while (it->pc != pc) ++it, ++index;
+ it->trampoline = trampoline;
+ it->deopt_index = deopt_index;
+ return index;
+}
+
+void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
+#ifdef DEBUG
+ int last_pc = -1;
+ int last_trampoline = -1;
+ for (const EntryBuilder& entry : entries_) {
+ // Entries are ordered by PC.
+ DCHECK_LT(last_pc, entry.pc);
+ last_pc = entry.pc;
+ // Trampoline PCs are increasing, and larger than regular PCs.
+ if (entry.trampoline != SafepointEntry::kNoTrampolinePC) {
+ DCHECK_LT(last_trampoline, entry.trampoline);
+ DCHECK_LT(entries_.back().pc, entry.trampoline);
+ last_trampoline = entry.trampoline;
}
+ // An entry has either both a trampoline and a deopt index, or neither.
+ DCHECK_EQ(entry.trampoline == SafepointEntry::kNoTrampolinePC,
+ entry.deopt_index == SafepointEntry::kNoDeoptIndex);
}
- UNREACHABLE();
-}
+#endif // DEBUG
-void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
RemoveDuplicates();
- TrimEntries(&bits_per_entry);
+ TrimEntries(&tagged_slots_size);
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
// We cannot emit a const pool within the safepoint table.
@@ -123,89 +163,139 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
assembler->RecordComment(";;; Safepoint table.");
offset_ = assembler->pc_offset();
- // Compute the number of bytes per safepoint entry.
- int bytes_per_entry =
- RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;
+ // Compute the required sizes of the fields.
+ int used_register_indexes = 0;
+ STATIC_ASSERT(SafepointEntry::kNoTrampolinePC == -1);
+ int max_pc = -1;
+ STATIC_ASSERT(SafepointEntry::kNoDeoptIndex == -1);
+ int max_deopt_index = -1;
+ for (const EntryBuilder& entry : entries_) {
+ used_register_indexes |= entry.register_indexes;
+ max_pc = std::max(max_pc, std::max(entry.pc, entry.trampoline));
+ max_deopt_index = std::max(max_deopt_index, entry.deopt_index);
+ }
+
+ // Derive the field widths (in bytes) and flags of the entry configuration
+ // from the values collected above.
+ auto value_to_bytes = [](int value) {
+ DCHECK_LE(0, value);
+ if (value == 0) return 0;
+ if (value <= 0xff) return 1;
+ if (value <= 0xffff) return 2;
+ if (value <= 0xffffff) return 3;
+ return 4;
+ };
+ bool has_deopt_data = max_deopt_index != -1;
+ int register_indexes_size = value_to_bytes(used_register_indexes);
+ // Add 1 so all values are non-negative.
+ int pc_size = value_to_bytes(max_pc + 1);
+ int deopt_index_size = value_to_bytes(max_deopt_index + 1);
+ int tagged_slots_bytes =
+ (tagged_slots_size + kBitsPerByte - 1) / kBitsPerByte;
+
+ // Add a CHECK to ensure we never overflow the space in the bitfield, even for
+ // huge functions which might not be covered by tests.
+ CHECK(SafepointTable::RegisterIndexesSizeField::is_valid(
+ register_indexes_size) &&
+ SafepointTable::PcSizeField::is_valid(pc_size) &&
+ SafepointTable::DeoptIndexSizeField::is_valid(deopt_index_size) &&
+ SafepointTable::TaggedSlotsBytesField::is_valid(tagged_slots_bytes));
+
+ uint32_t entry_configuration =
+ SafepointTable::HasDeoptDataField::encode(has_deopt_data) |
+ SafepointTable::RegisterIndexesSizeField::encode(register_indexes_size) |
+ SafepointTable::PcSizeField::encode(pc_size) |
+ SafepointTable::DeoptIndexSizeField::encode(deopt_index_size) |
+ SafepointTable::TaggedSlotsBytesField::encode(tagged_slots_bytes);
// Emit the table header.
STATIC_ASSERT(SafepointTable::kLengthOffset == 0 * kIntSize);
- STATIC_ASSERT(SafepointTable::kEntrySizeOffset == 1 * kIntSize);
+ STATIC_ASSERT(SafepointTable::kEntryConfigurationOffset == 1 * kIntSize);
STATIC_ASSERT(SafepointTable::kHeaderSize == 2 * kIntSize);
- int length = static_cast<int>(deoptimization_info_.size());
+ int length = static_cast<int>(entries_.size());
assembler->dd(length);
- assembler->dd(bytes_per_entry);
-
- // Emit sorted table of pc offsets together with additional info (i.e. the
- // deoptimization index or arguments count) and trampoline offsets.
- STATIC_ASSERT(SafepointTable::kPcOffset == 0 * kIntSize);
- STATIC_ASSERT(SafepointTable::kEncodedInfoOffset == 1 * kIntSize);
- STATIC_ASSERT(SafepointTable::kTrampolinePcOffset == 2 * kIntSize);
- STATIC_ASSERT(SafepointTable::kFixedEntrySize == 3 * kIntSize);
- for (const DeoptimizationInfo& info : deoptimization_info_) {
- assembler->dd(info.pc);
- if (info.register_indexes) {
- // We emit the register indexes in the same bits as the deopt_index.
- // Register indexes and deopt_index should not exist at the same time.
- DCHECK_EQ(info.deopt_index,
- static_cast<uint32_t>(Safepoint::kNoDeoptimizationIndex));
- assembler->dd(info.register_indexes);
- } else {
- assembler->dd(info.deopt_index);
+ assembler->dd(entry_configuration);
+
+ auto emit_bytes = [assembler](int value, int bytes) {
+ DCHECK_LE(0, value);
+ for (; bytes > 0; --bytes, value >>= 8) assembler->db(value);
+ DCHECK_EQ(0, value);
+ };
+ // Emit entries, sorted by pc offsets.
+ for (const EntryBuilder& entry : entries_) {
+ emit_bytes(entry.pc, pc_size);
+ if (has_deopt_data) {
+ // Add 1 so all values are non-negative.
+ emit_bytes(entry.deopt_index + 1, deopt_index_size);
+ emit_bytes(entry.trampoline + 1, pc_size);
}
- assembler->dd(info.trampoline);
+ emit_bytes(entry.register_indexes, register_indexes_size);
}
- // Emit table of bitmaps.
- ZoneVector<uint8_t> bits(bytes_per_entry, 0, zone_);
- for (const DeoptimizationInfo& info : deoptimization_info_) {
- ZoneChunkList<int>* indexes = info.stack_indexes;
+ // Emit bitmaps of tagged stack slots.
+ ZoneVector<uint8_t> bits(tagged_slots_bytes, 0, zone_);
+ for (const EntryBuilder& entry : entries_) {
std::fill(bits.begin(), bits.end(), 0);
// Run through the indexes and build a bitmap.
- for (int idx : *indexes) {
- DCHECK_GT(bits_per_entry, idx);
- int index = bits_per_entry - 1 - idx;
+ for (int idx : *entry.stack_indexes) {
+ DCHECK_GT(tagged_slots_size, idx);
+ int index = tagged_slots_size - 1 - idx;
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
- bits[byte_index] |= (1U << bit_index);
+ bits[byte_index] |= (1u << bit_index);
}
// Emit the bitmap for the current entry.
- for (int k = 0; k < bytes_per_entry; k++) {
- assembler->db(bits[k]);
- }
+ for (uint8_t byte : bits) assembler->db(byte);
}
- emitted_ = true;
}
void SafepointTableBuilder::RemoveDuplicates() {
- // If the table contains more than one entry, and all entries are identical
- // (except for the pc), replace the whole table by a single entry with pc =
- // kMaxUInt32. This especially compacts the table for wasm code without tagged
- // pointers and without deoptimization info.
-
- if (deoptimization_info_.size() < 2) return;
-
- // Check that all entries (1, size] are identical to entry 0.
- const DeoptimizationInfo& first_info = deoptimization_info_.front();
- for (auto it = deoptimization_info_.Find(1); it != deoptimization_info_.end();
- it++) {
- if (!IsIdenticalExceptForPc(first_info, *it)) return;
+ // Remove any duplicate entries, i.e. succeeding entries that are identical
+ // except for the PC. During lookup, we then find the last remaining entry
+ // whose PC is not larger than the PC at hand, i.e. the first entry of the
+ // original group of duplicates.
+
+ if (entries_.size() < 2) return;
+
+ auto is_identical_except_for_pc = [](const EntryBuilder& entry1,
+ const EntryBuilder& entry2) {
+ if (entry1.deopt_index != entry2.deopt_index) return false;
+ DCHECK_EQ(entry1.trampoline, entry2.trampoline);
+
+ ZoneChunkList<int>* indexes1 = entry1.stack_indexes;
+ ZoneChunkList<int>* indexes2 = entry2.stack_indexes;
+ if (indexes1->size() != indexes2->size()) return false;
+ if (!std::equal(indexes1->begin(), indexes1->end(), indexes2->begin())) {
+ return false;
+ }
+
+ if (entry1.register_indexes != entry2.register_indexes) return false;
+
+ return true;
+ };
+
+ auto remaining_it = entries_.begin();
+ size_t remaining = 0;
+
+ for (auto it = entries_.begin(), end = entries_.end(); it != end;
+ ++remaining_it, ++remaining) {
+ if (remaining_it != it) *remaining_it = *it;
+ // Merge identical entries.
+ do {
+ ++it;
+ } while (it != end && is_identical_except_for_pc(*it, *remaining_it));
}
- // If we get here, all entries were identical. Rewind the list to just one
- // entry, and set the pc to kMaxUInt32.
- deoptimization_info_.Rewind(1);
- deoptimization_info_.front().pc = kMaxUInt32;
+ entries_.Rewind(remaining);
}
-void SafepointTableBuilder::TrimEntries(int* bits_per_entry) {
- int min_index = *bits_per_entry;
+void SafepointTableBuilder::TrimEntries(int* tagged_slots_size) {
+ int min_index = *tagged_slots_size;
if (min_index == 0) return; // Early exit: nothing to trim.
- for (auto& info : deoptimization_info_) {
- for (int idx : *info.stack_indexes) {
- DCHECK_GT(*bits_per_entry, idx); // Validity check.
+ for (auto& entry : entries_) {
+ for (int idx : *entry.stack_indexes) {
+ DCHECK_GT(*tagged_slots_size, idx); // Validity check.
if (idx >= min_index) continue;
if (idx == 0) return; // Early exit: nothing to trim.
min_index = idx;
@@ -213,29 +303,13 @@ void SafepointTableBuilder::TrimEntries(int* bits_per_entry) {
}
DCHECK_LT(0, min_index);
- *bits_per_entry -= min_index;
- for (auto& info : deoptimization_info_) {
- for (int& idx : *info.stack_indexes) {
+ *tagged_slots_size -= min_index;
+ for (auto& entry : entries_) {
+ for (int& idx : *entry.stack_indexes) {
idx -= min_index;
}
}
}
-bool SafepointTableBuilder::IsIdenticalExceptForPc(
- const DeoptimizationInfo& info1, const DeoptimizationInfo& info2) const {
- if (info1.deopt_index != info2.deopt_index) return false;
-
- ZoneChunkList<int>* indexes1 = info1.stack_indexes;
- ZoneChunkList<int>* indexes2 = info2.stack_indexes;
- if (indexes1->size() != indexes2->size()) return false;
- if (!std::equal(indexes1->begin(), indexes1->end(), indexes2->begin())) {
- return false;
- }
-
- if (info1.register_indexes != info2.register_indexes) return false;
-
- return true;
-}
-
} // namespace internal
} // namespace v8
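The safepoint table body is now variable-width: Emit picks a byte count per field, writes values little-endian, and biases deopt index and trampoline PC by +1 so that the -1 sentinels encode as 0; SafepointTable::read_bytes reverses this. A standalone sketch of the round trip (mirroring the logic above, not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static void emit_bytes(std::vector<uint8_t>* out, int value, int bytes) {
      assert(value >= 0);
      for (; bytes > 0; --bytes, value >>= 8) out->push_back(value & 0xff);
      assert(value == 0);  // the chosen width must fit the value
    }

    static int read_bytes(const uint8_t** ptr, int bytes) {
      uint32_t result = 0;
      for (int b = 0; b < bytes; ++b, ++*ptr) {
        result |= uint32_t{**ptr} << (8 * b);
      }
      return static_cast<int>(result);
    }

    int main() {
      std::vector<uint8_t> table;
      // An entry without deopt data, in a table whose other entries have some.
      int pc = 300, deopt_index = -1, trampoline = -1;
      emit_bytes(&table, pc, 2);               // pc_size == 2 since 300 > 0xff
      emit_bytes(&table, deopt_index + 1, 1);  // biased: -1 encodes as 0
      emit_bytes(&table, trampoline + 1, 2);
      const uint8_t* p = table.data();
      assert(read_bytes(&p, 2) == 300);
      assert(read_bytes(&p, 1) - 1 == -1);  // kNoDeoptIndex
      assert(read_bytes(&p, 2) - 1 == -1);  // kNoTrampolinePC
    }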
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 07bbcaf9a0..4201d5fc2f 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_SAFEPOINT_TABLE_H_
#define V8_CODEGEN_SAFEPOINT_TABLE_H_
+#include "src/base/bit-field.h"
#include "src/base/iterator.h"
#include "src/base/memory.h"
#include "src/common/assert-scope.h"
@@ -22,72 +23,64 @@ class WasmCode;
class SafepointEntry {
public:
+ static constexpr int kNoDeoptIndex = -1;
+ static constexpr int kNoTrampolinePC = -1;
+
SafepointEntry() = default;
- SafepointEntry(unsigned deopt_index, uint8_t* bits, uint8_t* bits_end,
- int trampoline_pc)
- : deopt_index_(deopt_index),
- bits_(bits),
- bits_end_(bits_end),
+ SafepointEntry(int pc, int deopt_index, uint32_t tagged_register_indexes,
+ base::Vector<uint8_t> tagged_slots, int trampoline_pc)
+ : pc_(pc),
+ deopt_index_(deopt_index),
+ tagged_register_indexes_(tagged_register_indexes),
+ tagged_slots_(tagged_slots),
trampoline_pc_(trampoline_pc) {
DCHECK(is_valid());
}
- bool is_valid() const { return bits_ != nullptr; }
+ bool is_valid() const { return tagged_slots_.begin() != nullptr; }
- bool Equals(const SafepointEntry& other) const {
- return deopt_index_ == other.deopt_index_ && bits_ == other.bits_;
+ bool operator==(const SafepointEntry& other) const {
+ return pc_ == other.pc_ && deopt_index_ == other.deopt_index_ &&
+ tagged_register_indexes_ == other.tagged_register_indexes_ &&
+ tagged_slots_ == other.tagged_slots_ &&
+ trampoline_pc_ == other.trampoline_pc_;
}
void Reset() {
- deopt_index_ = 0;
- bits_ = nullptr;
- bits_end_ = nullptr;
+ *this = SafepointEntry{};
+ DCHECK(!is_valid());
}
- int trampoline_pc() { return trampoline_pc_; }
+ int pc() const { return pc_; }
- static const unsigned kNoDeoptIndex = kMaxUInt32;
- static constexpr int kNoTrampolinePC = -1;
+ int trampoline_pc() const { return trampoline_pc_; }
- int deoptimization_index() const {
- DCHECK(is_valid() && has_deoptimization_index());
- return deopt_index_;
- }
-
- uint32_t register_bits() const {
- // The register bits use the same field as the deopt_index_.
- DCHECK(is_valid());
- return deopt_index_;
- }
-
- bool has_register_bits() const {
- // The register bits use the same field as the deopt_index_.
+ bool has_deoptimization_index() const {
DCHECK(is_valid());
return deopt_index_ != kNoDeoptIndex;
}
- bool has_deoptimization_index() const {
- DCHECK(is_valid());
- return deopt_index_ != kNoDeoptIndex;
+ int deoptimization_index() const {
+ DCHECK(is_valid() && has_deoptimization_index());
+ return deopt_index_;
}
- uint8_t* bits() const {
+ uint32_t tagged_register_indexes() const {
DCHECK(is_valid());
- return bits_;
+ return tagged_register_indexes_;
}
- base::iterator_range<uint8_t*> iterate_bits() const {
- return base::make_iterator_range(bits_, bits_end_);
+ base::Vector<const uint8_t> tagged_slots() const {
+ DCHECK(is_valid());
+ return tagged_slots_;
}
- size_t entry_size() const { return bits_end_ - bits_; }
-
private:
- uint32_t deopt_index_ = 0;
- uint8_t* bits_ = nullptr;
- uint8_t* bits_end_ = nullptr;
- // It needs to be an integer as it is -1 for eager deoptimizations.
+ int pc_ = -1;
+ int deopt_index_ = kNoDeoptIndex;
+ uint32_t tagged_register_indexes_ = 0;
+ base::Vector<uint8_t> tagged_slots_;
int trampoline_pc_ = kNoTrampolinePC;
};
@@ -103,89 +96,101 @@ class SafepointTable {
SafepointTable(const SafepointTable&) = delete;
SafepointTable& operator=(const SafepointTable&) = delete;
- int size() const {
- return kHeaderSize + (length_ * (kFixedEntrySize + entry_size_));
- }
- unsigned length() const { return length_; }
- unsigned entry_size() const { return entry_size_; }
+ int length() const { return length_; }
- unsigned GetPcOffset(unsigned index) const {
- DCHECK(index < length_);
- return base::Memory<uint32_t>(GetPcOffsetLocation(index));
+ int byte_size() const {
+ return kHeaderSize + length_ * (entry_size() + tagged_slots_bytes());
}
- int GetTrampolinePcOffset(unsigned index) const {
- DCHECK(index < length_);
- return base::Memory<int>(GetTrampolineLocation(index));
- }
-
- unsigned find_return_pc(unsigned pc_offset);
-
- SafepointEntry GetEntry(unsigned index) const {
- DCHECK(index < length_);
- unsigned deopt_index =
- base::Memory<uint32_t>(GetEncodedInfoLocation(index));
- uint8_t* bits = &base::Memory<uint8_t>(entries() + (index * entry_size_));
- int trampoline_pc = has_deopt_
- ? base::Memory<int>(GetTrampolineLocation(index))
- : SafepointEntry::kNoTrampolinePC;
- return SafepointEntry(deopt_index, bits, bits + entry_size_, trampoline_pc);
+ int find_return_pc(int pc_offset);
+
+ SafepointEntry GetEntry(int index) const {
+ DCHECK_GT(length_, index);
+ Address entry_ptr =
+ safepoint_table_address_ + kHeaderSize + index * entry_size();
+
+ int pc = read_bytes(&entry_ptr, pc_size());
+ int deopt_index = SafepointEntry::kNoDeoptIndex;
+ int trampoline_pc = SafepointEntry::kNoTrampolinePC;
+ if (has_deopt_data()) {
+ deopt_index = read_bytes(&entry_ptr, deopt_index_size()) - 1;
+ trampoline_pc = read_bytes(&entry_ptr, pc_size()) - 1;
+ DCHECK(deopt_index >= 0 || deopt_index == SafepointEntry::kNoDeoptIndex);
+ DCHECK(trampoline_pc >= 0 ||
+ trampoline_pc == SafepointEntry::kNoTrampolinePC);
+ }
+ int tagged_register_indexes =
+ read_bytes(&entry_ptr, register_indexes_size());
+
+ // The tagged slot bits start after the vector of entries (i.e. at the pc
+ // offset of the non-existing entry after the last one).
+ uint8_t* tagged_slots_start = reinterpret_cast<uint8_t*>(
+ safepoint_table_address_ + kHeaderSize + length_ * entry_size());
+ base::Vector<uint8_t> tagged_slots(
+ tagged_slots_start + index * tagged_slots_bytes(),
+ tagged_slots_bytes());
+
+ return SafepointEntry(pc, deopt_index, tagged_register_indexes,
+ tagged_slots, trampoline_pc);
}
// Returns the entry for the given pc.
SafepointEntry FindEntry(Address pc) const;
- void PrintEntry(unsigned index, std::ostream& os) const;
+ void Print(std::ostream&) const;
private:
- SafepointTable(Address instruction_start, Address safepoint_table_address,
- bool has_deopt);
-
- static const uint8_t kNoRegisters = 0xFF;
-
// Layout information.
- static const int kLengthOffset = 0;
- static const int kEntrySizeOffset = kLengthOffset + kIntSize;
- static const int kHeaderSize = kEntrySizeOffset + kIntSize;
- static const int kPcOffset = 0;
- static const int kEncodedInfoOffset = kPcOffset + kIntSize;
- static const int kTrampolinePcOffset = kEncodedInfoOffset + kIntSize;
- static const int kFixedEntrySize = kTrampolinePcOffset + kIntSize;
-
- static uint32_t ReadLength(Address table) {
- return base::Memory<uint32_t>(table + kLengthOffset);
+ static constexpr int kLengthOffset = 0;
+ static constexpr int kEntryConfigurationOffset = kLengthOffset + kIntSize;
+ static constexpr int kHeaderSize = kEntryConfigurationOffset + kUInt32Size;
+
+ using HasDeoptDataField = base::BitField<bool, 0, 1>;
+ using RegisterIndexesSizeField = HasDeoptDataField::Next<int, 3>;
+ using PcSizeField = RegisterIndexesSizeField::Next<int, 3>;
+ using DeoptIndexSizeField = PcSizeField::Next<int, 3>;
+ // In 22 bits, we can encode up to 4M bytes, corresponding to 32M frame slots,
+ // which is 128MB on 32-bit and 256MB on 64-bit systems. The stack size is
+ // limited to a bit below 1MB anyway (see FLAG_stack_size).
+ using TaggedSlotsBytesField = DeoptIndexSizeField::Next<int, 22>;
+
+ SafepointTable(Address instruction_start, Address safepoint_table_address);
+
+ int entry_size() const {
+ int deopt_data_size = has_deopt_data() ? pc_size() + deopt_index_size() : 0;
+ return pc_size() + deopt_data_size + register_indexes_size();
}
- static uint32_t ReadEntrySize(Address table) {
- return base::Memory<uint32_t>(table + kEntrySizeOffset);
- }
- Address pc_and_deoptimization_indexes() const {
- return safepoint_table_address_ + kHeaderSize;
+
+ int tagged_slots_bytes() const {
+ return TaggedSlotsBytesField::decode(entry_configuration_);
}
- Address entries() const {
- return safepoint_table_address_ + kHeaderSize + (length_ * kFixedEntrySize);
+ bool has_deopt_data() const {
+ return HasDeoptDataField::decode(entry_configuration_);
}
-
- Address GetPcOffsetLocation(unsigned index) const {
- return pc_and_deoptimization_indexes() + (index * kFixedEntrySize);
+ int pc_size() const { return PcSizeField::decode(entry_configuration_); }
+ int deopt_index_size() const {
+ return DeoptIndexSizeField::decode(entry_configuration_);
}
-
- Address GetEncodedInfoLocation(unsigned index) const {
- return GetPcOffsetLocation(index) + kEncodedInfoOffset;
+ int register_indexes_size() const {
+ return RegisterIndexesSizeField::decode(entry_configuration_);
}
- Address GetTrampolineLocation(unsigned index) const {
- return GetPcOffsetLocation(index) + kTrampolinePcOffset;
+ static int read_bytes(Address* ptr, int bytes) {
+ uint32_t result = 0;
+ for (int b = 0; b < bytes; ++b, ++*ptr) {
+ result |= uint32_t{*reinterpret_cast<uint8_t*>(*ptr)} << (8 * b);
+ }
+ return static_cast<int>(result);
}
DISALLOW_GARBAGE_COLLECTION(no_gc_)
const Address instruction_start_;
- const bool has_deopt_;
// Safepoint table layout.
const Address safepoint_table_address_;
- const uint32_t length_;
- const uint32_t entry_size_;
+ const int length_;
+ const uint32_t entry_configuration_;
friend class SafepointTableBuilder;
friend class SafepointEntry;
@@ -193,13 +198,11 @@ class SafepointTable {
class Safepoint {
public:
- static const int kNoDeoptimizationIndex = SafepointEntry::kNoDeoptIndex;
-
void DefinePointerSlot(int index) { stack_indexes_->push_back(index); }
void DefineRegister(int reg_code) {
// Make sure the recorded index is always less than 31, so that we don't
- // generate {kNoDeoptimizationIndex} by accident.
+ // generate {kNoDeoptIndex} by accident.
DCHECK_LT(reg_code, 31);
*register_indexes_ |= 1u << reg_code;
}
@@ -215,16 +218,18 @@ class Safepoint {
class SafepointTableBuilder {
public:
- explicit SafepointTableBuilder(Zone* zone)
- : deoptimization_info_(zone),
- emitted_(false),
- zone_(zone) {}
+ explicit SafepointTableBuilder(Zone* zone) : entries_(zone), zone_(zone) {}
SafepointTableBuilder(const SafepointTableBuilder&) = delete;
SafepointTableBuilder& operator=(const SafepointTableBuilder&) = delete;
+ bool emitted() const { return offset_ != -1; }
+
// Get the offset of the emitted safepoint table in the code.
- unsigned GetCodeOffset() const;
+ int GetCodeOffset() const {
+ DCHECK(emitted());
+ return offset_;
+ }
// Define a new safepoint for the current position in the body.
Safepoint DefineSafepoint(Assembler* assembler);
@@ -238,41 +243,36 @@ class SafepointTableBuilder {
// table contains the trampoline PC {trampoline} that replaced the
// return PC {pc} on the stack.
int UpdateDeoptimizationInfo(int pc, int trampoline, int start,
- unsigned deopt_index);
+ int deopt_index);
private:
- struct DeoptimizationInfo {
- unsigned pc;
- unsigned deopt_index;
+ struct EntryBuilder {
+ int pc;
+ int deopt_index;
int trampoline;
ZoneChunkList<int>* stack_indexes;
uint32_t register_indexes;
- DeoptimizationInfo(Zone* zone, unsigned pc)
+ EntryBuilder(Zone* zone, int pc)
: pc(pc),
- deopt_index(Safepoint::kNoDeoptimizationIndex),
- trampoline(-1),
+ deopt_index(SafepointEntry::kNoDeoptIndex),
+ trampoline(SafepointEntry::kNoTrampolinePC),
stack_indexes(zone->New<ZoneChunkList<int>>(
zone, ZoneChunkList<int>::StartMode::kSmall)),
register_indexes(0) {}
};
- // Compares all fields of a {DeoptimizationInfo} except {pc} and {trampoline}.
- bool IsIdenticalExceptForPc(const DeoptimizationInfo&,
- const DeoptimizationInfo&) const;
-
- // If all entries are identical, replace them by 1 entry with pc = kMaxUInt32.
+ // Remove consecutive identical entries.
void RemoveDuplicates();
// Try to trim entries by removing trailing zeros (and shrinking
// {bits_per_entry}).
void TrimEntries(int* bits_per_entry);
- ZoneChunkList<DeoptimizationInfo> deoptimization_info_;
+ ZoneChunkList<EntryBuilder> entries_;
- unsigned offset_;
- bool emitted_;
+ int offset_ = -1;
- Zone* zone_;
+ Zone* const zone_;
};
} // namespace internal
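The entry_configuration word packs all layout decisions into one uint32_t. base::BitField is V8-internal, but the Next<> chain above fixes the layout: bit 0 is the deopt flag, then three 3-bit byte-width fields, then 22 bits for the bitmap width. A plain-bit-ops sketch under that assumption:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t Encode(bool has_deopt, uint32_t reg_size,
                              uint32_t pc_size, uint32_t deopt_size,
                              uint32_t slots_bytes) {
      return (has_deopt ? 1u : 0u) | reg_size << 1 | pc_size << 4 |
             deopt_size << 7 | slots_bytes << 10;
    }

    int main() {
      uint32_t config = Encode(true, 1, 2, 2, 8);
      assert((config & 1) == 1);         // HasDeoptDataField
      assert(((config >> 1) & 7) == 1);  // RegisterIndexesSizeField
      assert(((config >> 4) & 7) == 2);  // PcSizeField
      assert(((config >> 7) & 7) == 2);  // DeoptIndexSizeField
      assert((config >> 10) == 8);       // TaggedSlotsBytesField (22 bits)
      // entry_size() for this configuration:
      // pc (2) + deopt index (2) + trampoline pc (2) + register indexes (1).
      assert(2 + (2 + 2) + 1 == 7);
    }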
diff --git a/deps/v8/src/codegen/script-details.h b/deps/v8/src/codegen/script-details.h
index e342e132d7..5317ae989e 100644
--- a/deps/v8/src/codegen/script-details.h
+++ b/deps/v8/src/codegen/script-details.h
@@ -29,7 +29,7 @@ struct ScriptDetails {
int column_offset;
MaybeHandle<Object> name_obj;
MaybeHandle<Object> source_map_url;
- MaybeHandle<FixedArray> host_defined_options;
+ MaybeHandle<Object> host_defined_options;
REPLMode repl_mode;
const ScriptOriginOptions origin_options;
};
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index bbfe894443..4dd54fd6f0 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -1205,6 +1205,101 @@ void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
}
}
+// Helper macro defining the qfma macro-assembler operation. It handles every
+// possible case of register aliasing to minimize the number of instructions.
+#define QFMA(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmovups(dst, src1); \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vadd##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ if (dst == src1) { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ add##ps_or_pd(dst, tmp); \
+ } else if (dst == src2) { \
+ DCHECK_NE(src2, src1); \
+ mul##ps_or_pd(src2, src3); \
+ add##ps_or_pd(src2, src1); \
+ } else if (dst == src3) { \
+ DCHECK_NE(src3, src1); \
+ mul##ps_or_pd(src3, src2); \
+ add##ps_or_pd(src3, src1); \
+ } else { \
+ movaps(dst, src2); \
+ mul##ps_or_pd(dst, src3); \
+ add##ps_or_pd(dst, src1); \
+ } \
+ }
+
+// Helper macro defining the qfms macro-assembler operation. It handles every
+// possible case of register aliasing to minimize the number of instructions.
+#define QFMS(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfnmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfnmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmovups(dst, src1); \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vsub##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ if (dst != src1) { \
+ movaps(dst, src1); \
+ } \
+ sub##ps_or_pd(dst, tmp); \
+ }
+
+void SharedTurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(ps)
+}
+
+void SharedTurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(ps)
+}
+
+void SharedTurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(pd);
+}
+
+void SharedTurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(pd);
+}
+
+#undef QFMA
+#undef QFMS
+
} // namespace internal
} // namespace v8
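The QFMA/QFMS helpers moved here compute dst = src1 + src2 * src3 and dst = src1 - src2 * src3 respectively. Note the three paths are not bit-identical: FMA3 fuses the multiply-add into a single rounding, while the AVX and SSE fallbacks round the product first. A standalone illustration, using std::fma as a stand-in for the fused path:

    #include <cmath>
    #include <cstdio>

    int main() {
      float b = 1.0f + 0x1p-12f;      // 1 + 2^-12
      float s1 = -(1.0f + 0x1p-11f);  // -(1 + 2^-11)
      // b * b == 1 + 2^-11 + 2^-24 exactly; only the fused path keeps the
      // trailing 2^-24 term across the addition.
      float fused = std::fma(b, b, s1);  // == 2^-24
      float unfused = s1 + b * b;        // == 0: b*b rounds to 1 + 2^-11 first
      printf("fused=%a unfused=%a\n", fused, unfused);
    }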
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index 325dfea7d0..abe1d6200a 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -476,6 +476,15 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void S128Load32Splat(XMMRegister dst, Operand src);
void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+ void F64x2Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F64x2Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+
protected:
template <typename Op>
using AvxFn = void (Assembler::*)(XMMRegister, XMMRegister, Op, uint8_t);
@@ -900,7 +909,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
vpshufb(dst, tmp1, dst);
vpshufb(tmp2, tmp1, tmp2);
vpaddb(dst, dst, tmp2);
- } else if (CpuFeatures::IsSupported(ATOM)) {
+ } else if (CpuFeatures::IsSupported(INTEL_ATOM)) {
// Pre-Goldmont low-power Intel microarchitectures have very slow
// PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
// algorithm on these processors. ATOM CPU feature captures exactly
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index 09c4559813..e1546f71ca 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -50,6 +50,12 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
// Roots are loaded relative to the root register.
LoadRoot(destination, root_index);
+ } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
+ isolate()->builtins()->IsBuiltinCodeDataContainerHandle(
+ object, &builtin)) {
+ // Similar to roots, builtins may be loaded from the builtins table.
+ LoadRootRelative(destination,
+ RootRegisterOffsetForBuiltinCodeDataContainer(builtin));
} else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin)) {
// Similar to roots, builtins may be loaded from the builtins table.
LoadRootRelative(destination, RootRegisterOffsetForBuiltin(builtin));
@@ -101,6 +107,12 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
}
// static
+int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltinCodeDataContainer(
+ Builtin builtin) {
+ return IsolateData::BuiltinCodeDataContainerSlotOffset(builtin);
+}
+
+// static
intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
Isolate* isolate, const ExternalReference& reference) {
return static_cast<intptr_t>(reference.address() - isolate->isolate_root());
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index 2f2deadaac..7403aa1bfd 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -80,6 +80,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index);
static int32_t RootRegisterOffsetForBuiltin(Builtin builtin);
+ static int32_t RootRegisterOffsetForBuiltinCodeDataContainer(Builtin builtin);
// Returns the root-relative offset to reference.address().
static intptr_t RootRegisterOffsetForExternalReference(
diff --git a/deps/v8/src/codegen/unoptimized-compilation-info.cc b/deps/v8/src/codegen/unoptimized-compilation-info.cc
index 08cd818188..d0bc2d159d 100644
--- a/deps/v8/src/codegen/unoptimized-compilation-info.cc
+++ b/deps/v8/src/codegen/unoptimized-compilation-info.cc
@@ -18,7 +18,10 @@ namespace internal {
UnoptimizedCompilationInfo::UnoptimizedCompilationInfo(Zone* zone,
ParseInfo* parse_info,
FunctionLiteral* literal)
- : flags_(parse_info->flags()), feedback_vector_spec_(zone) {
+ : flags_(parse_info->flags()),
+ dispatcher_(parse_info->dispatcher()),
+ character_stream_(parse_info->character_stream()),
+ feedback_vector_spec_(zone) {
// NOTE: The parse_info passed here represents the global information gathered
// during parsing, but does not represent specific details of the actual
// function literal being compiled for this OptimizedCompilationInfo. As such,
diff --git a/deps/v8/src/codegen/unoptimized-compilation-info.h b/deps/v8/src/codegen/unoptimized-compilation-info.h
index 3cdb94158b..b7fb1e8de6 100644
--- a/deps/v8/src/codegen/unoptimized-compilation-info.h
+++ b/deps/v8/src/codegen/unoptimized-compilation-info.h
@@ -35,6 +35,10 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
FunctionLiteral* literal);
const UnoptimizedCompileFlags& flags() const { return flags_; }
+ LazyCompileDispatcher* dispatcher() { return dispatcher_; }
+ const Utf16CharacterStream* character_stream() const {
+ return character_stream_;
+ }
// Accessors for the input data of the function being compiled.
@@ -86,6 +90,10 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
// Compilation flags.
const UnoptimizedCompileFlags flags_;
+ // For dispatching eager compilation of lazily compiled functions.
+ LazyCompileDispatcher* dispatcher_;
+ const Utf16CharacterStream* character_stream_;
+
// The root AST node of the function literal being compiled.
FunctionLiteral* literal_;
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 8e451e641e..cb2f67850a 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -45,14 +45,14 @@ void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
}
void Assembler::emit(Immediate x) {
- if (!RelocInfo::IsNone(x.rmode_)) {
+ if (!RelocInfo::IsNoInfo(x.rmode_)) {
RecordRelocInfo(x.rmode_);
}
emitl(x.value_);
}
void Assembler::emit(Immediate64 x) {
- if (!RelocInfo::IsNone(x.rmode_)) {
+ if (!RelocInfo::IsNoInfo(x.rmode_)) {
RecordRelocInfo(x.rmode_);
}
emitq(static_cast<uint64_t>(x.value_));
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index fe0403b80e..0fdeee7685 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -103,9 +103,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.has_lzcnt() && FLAG_enable_lzcnt) SetSupported(LZCNT);
if (cpu.has_popcnt() && FLAG_enable_popcnt) SetSupported(POPCNT);
if (strcmp(FLAG_mcpu, "auto") == 0) {
- if (cpu.is_atom()) SetSupported(ATOM);
+ if (cpu.is_atom()) SetSupported(INTEL_ATOM);
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
- SetSupported(ATOM);
+ SetSupported(INTEL_ATOM);
}
// Ensure that supported cpu features make sense. E.g. it is wrong to support
@@ -141,7 +141,7 @@ void CpuFeatures::PrintFeatures() {
CpuFeatures::IsSupported(AVX2), CpuFeatures::IsSupported(FMA3),
CpuFeatures::IsSupported(BMI1), CpuFeatures::IsSupported(BMI2),
CpuFeatures::IsSupported(LZCNT), CpuFeatures::IsSupported(POPCNT),
- CpuFeatures::IsSupported(ATOM));
+ CpuFeatures::IsSupported(INTEL_ATOM));
}
// -----------------------------------------------------------------------------
@@ -276,7 +276,7 @@ bool ConstPool::TryRecordEntry(intptr_t data, RelocInfo::Mode mode) {
// Currently, partial constant pool only handles the following kinds of
// RelocInfo.
- if (mode != RelocInfo::NONE && mode != RelocInfo::EXTERNAL_REFERENCE &&
+ if (mode != RelocInfo::NO_INFO && mode != RelocInfo::EXTERNAL_REFERENCE &&
mode != RelocInfo::OFF_HEAP_TARGET)
return false;
@@ -330,7 +330,8 @@ void Assembler::PatchConstPool() {
bool Assembler::UseConstPoolFor(RelocInfo::Mode rmode) {
if (!FLAG_partial_constant_pool) return false;
- return (rmode == RelocInfo::NONE || rmode == RelocInfo::EXTERNAL_REFERENCE ||
+ return (rmode == RelocInfo::NO_INFO ||
+ rmode == RelocInfo::EXTERNAL_REFERENCE ||
rmode == RelocInfo::OFF_HEAP_TARGET);
}
@@ -703,7 +704,7 @@ void Assembler::immediate_arithmetic_op(byte subcode, Register dst,
Immediate src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
- if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
+ if (is_int8(src.value_) && RelocInfo::IsNoInfo(src.rmode_)) {
emit(0x83);
emit_modrm(subcode, dst);
emit(src.value_);
@@ -721,7 +722,7 @@ void Assembler::immediate_arithmetic_op(byte subcode, Operand dst,
Immediate src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
- if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
+ if (is_int8(src.value_) && RelocInfo::IsNoInfo(src.rmode_)) {
emit(0x83);
emit_operand(subcode, dst);
emit(src.value_);
@@ -1020,7 +1021,7 @@ void Assembler::near_jmp(intptr_t disp, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
emit(0xE9);
DCHECK(is_int32(disp));
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode);
emitl(static_cast<int32_t>(disp));
}
@@ -3416,30 +3417,33 @@ void Assembler::pmovmskb(Register dst, XMMRegister src) {
}
// AVX instructions
-
-void Assembler::vmovddup(XMMRegister dst, XMMRegister src) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kL128, kF2, k0F, kWIG);
- emit(0x12);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::vmovddup(XMMRegister dst, Operand src) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kL128, kF2, k0F, kWIG);
- emit(0x12);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::vmovshdup(XMMRegister dst, XMMRegister src) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG);
- emit(0x16);
- emit_sse_operand(dst, src);
-}
+#define VMOV_DUP(SIMDRegister, length) \
+ void Assembler::vmovddup(SIMDRegister dst, SIMDRegister src) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, xmm0, src, k##length, kF2, k0F, kWIG); \
+ emit(0x12); \
+ emit_sse_operand(dst, src); \
+ } \
+ \
+ void Assembler::vmovddup(SIMDRegister dst, Operand src) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, xmm0, src, k##length, kF2, k0F, kWIG); \
+ emit(0x12); \
+ emit_sse_operand(dst, src); \
+ } \
+ \
+ void Assembler::vmovshdup(SIMDRegister dst, SIMDRegister src) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, xmm0, src, k##length, kF3, k0F, kWIG); \
+ emit(0x16); \
+ emit_sse_operand(dst, src); \
+ }
+VMOV_DUP(XMMRegister, L128)
+VMOV_DUP(YMMRegister, L256)
+#undef VMOV_DUP
#define BROADCASTSS(SIMDRegister, length) \
void Assembler::vbroadcastss(SIMDRegister dst, Operand src) { \
@@ -3737,22 +3741,27 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
emit(imm8);
}
-void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
- emit(op);
- emit_sse_operand(dst, src2);
-}
-
-void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
- emit(op);
- emit_sse_operand(dst, src2);
-}
+#define VPD(SIMDRegister, length) \
+ void Assembler::vpd(byte op, SIMDRegister dst, SIMDRegister src1, \
+ SIMDRegister src2) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, src1, src2, k##length, k66, k0F, kWIG); \
+ emit(op); \
+ emit_sse_operand(dst, src2); \
+ } \
+ \
+ void Assembler::vpd(byte op, SIMDRegister dst, SIMDRegister src1, \
+ Operand src2) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, src1, src2, k##length, k66, k0F, kWIG); \
+ emit(op); \
+ emit_sse_operand(dst, src2); \
+ }
+VPD(XMMRegister, L128)
+VPD(YMMRegister, L256)
+#undef VPD
void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
@@ -4362,7 +4371,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -4372,7 +4381,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 41ba5f4ac1..2c89157979 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -130,7 +130,7 @@ class Immediate {
private:
const int32_t value_;
- const RelocInfo::Mode rmode_ = RelocInfo::NONE;
+ const RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
friend class Assembler;
};
@@ -148,7 +148,7 @@ class Immediate64 {
private:
const int64_t value_;
- const RelocInfo::Mode rmode_ = RelocInfo::NONE;
+ const RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
friend class Assembler;
};
@@ -1156,6 +1156,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
void v##instruction(XMMRegister dst, Operand src) { \
vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
+ } \
+ void v##instruction(YMMRegister dst, YMMRegister src) { \
+ vinstr(0x##opcode, dst, ymm0, src, k##prefix, k##escape1##escape2, kW0); \
+ } \
+ void v##instruction(YMMRegister dst, Operand src) { \
+ vinstr(0x##opcode, dst, ymm0, src, k##prefix, k##escape1##escape2, kW0); \
}
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSSE3_UNOP_AVX_INSTRUCTION)
@@ -1167,6 +1173,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The mask operand is encoded in bits[7:4] of the immediate byte.
emit(mask.code() << 4);
}
+ void vpblendvb(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ YMMRegister mask) {
+ vinstr(0x4C, dst, src1, src2, k66, k0F3A, kW0, AVX2);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
void vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask) {
@@ -1174,6 +1186,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The mask operand is encoded in bits[7:4] of the immediate byte.
emit(mask.code() << 4);
}
+ void vblendvps(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ YMMRegister mask) {
+ vinstr(0x4A, dst, src1, src2, k66, k0F3A, kW0, AVX);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
void vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask) {
@@ -1181,6 +1199,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The mask operand is encoded in bits[7:4] of the immediate byte.
emit(mask.code() << 4);
}
+ void vblendvpd(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ YMMRegister mask) {
+ vinstr(0x4B, dst, src1, src2, k66, k0F3A, kW0, AVX);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
#define DECLARE_SSE4_PMOV_AVX_INSTRUCTION(instruction, prefix, escape1, \
escape2, opcode) \
@@ -1329,7 +1353,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// AVX instruction
void vmovddup(XMMRegister dst, XMMRegister src);
void vmovddup(XMMRegister dst, Operand src);
+ void vmovddup(YMMRegister dst, YMMRegister src);
+ void vmovddup(YMMRegister dst, Operand src);
void vmovshdup(XMMRegister dst, XMMRegister src);
+ void vmovshdup(YMMRegister dst, YMMRegister src);
void vbroadcastss(XMMRegister dst, Operand src);
void vbroadcastss(XMMRegister dst, XMMRegister src);
void vbroadcastss(YMMRegister dst, Operand src);
@@ -1569,13 +1596,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
+ void vmovaps(YMMRegister dst, YMMRegister src) { vps(0x28, dst, ymm0, src); }
void vmovaps(XMMRegister dst, Operand src) { vps(0x28, dst, xmm0, src); }
+ void vmovaps(YMMRegister dst, Operand src) { vps(0x28, dst, ymm0, src); }
void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
+ void vmovups(YMMRegister dst, YMMRegister src) { vps(0x10, dst, ymm0, src); }
void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
+ void vmovups(YMMRegister dst, Operand src) { vps(0x10, dst, ymm0, src); }
void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
+ void vmovups(Operand dst, YMMRegister src) { vps(0x11, src, ymm0, dst); }
void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
+ void vmovapd(YMMRegister dst, YMMRegister src) { vpd(0x28, dst, ymm0, src); }
void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
+ void vmovupd(YMMRegister dst, Operand src) { vpd(0x10, dst, ymm0, src); }
void vmovupd(Operand dst, XMMRegister src) { vpd(0x11, src, xmm0, dst); }
+ void vmovupd(Operand dst, YMMRegister src) { vpd(0x11, src, ymm0, dst); }
void vmovmskps(Register dst, XMMRegister src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vps(0x50, idst, xmm0, src);
@@ -1775,7 +1810,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
byte imm8);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vpd(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+ void vpd(byte op, YMMRegister dst, YMMRegister src1, Operand src2);
// AVX2 instructions
#define AVX2_INSTRUCTION(instr, prefix, escape1, escape2, opcode) \
@@ -1945,9 +1982,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dq(data, rmode);
}
void dq(Label* label);
diff --git a/deps/v8/src/codegen/x64/fma-instr.h b/deps/v8/src/codegen/x64/fma-instr.h
index c607429e33..83165cc670 100644
--- a/deps/v8/src/codegen/x64/fma-instr.h
+++ b/deps/v8/src/codegen/x64/fma-instr.h
@@ -5,7 +5,7 @@
#ifndef V8_CODEGEN_X64_FMA_INSTR_H_
#define V8_CODEGEN_X64_FMA_INSTR_H_
-#define FMA_INSTRUCTION_LIST(V) \
+#define FMA_SD_INSTRUCTION_LIST(V) \
V(vfmadd132sd, L128, 66, 0F, 38, W1, 99) \
V(vfmadd213sd, L128, 66, 0F, 38, W1, a9) \
V(vfmadd231sd, L128, 66, 0F, 38, W1, b9) \
@@ -17,25 +17,31 @@
V(vfnmadd231sd, L128, 66, 0F, 38, W1, bd) \
V(vfnmsub132sd, L128, 66, 0F, 38, W1, 9f) \
V(vfnmsub213sd, L128, 66, 0F, 38, W1, af) \
- V(vfnmsub231sd, L128, 66, 0F, 38, W1, bf) \
- V(vfmadd132ss, LIG, 66, 0F, 38, W0, 99) \
- V(vfmadd213ss, LIG, 66, 0F, 38, W0, a9) \
- V(vfmadd231ss, LIG, 66, 0F, 38, W0, b9) \
- V(vfmsub132ss, LIG, 66, 0F, 38, W0, 9b) \
- V(vfmsub213ss, LIG, 66, 0F, 38, W0, ab) \
- V(vfmsub231ss, LIG, 66, 0F, 38, W0, bb) \
- V(vfnmadd132ss, LIG, 66, 0F, 38, W0, 9d) \
- V(vfnmadd213ss, LIG, 66, 0F, 38, W0, ad) \
- V(vfnmadd231ss, LIG, 66, 0F, 38, W0, bd) \
- V(vfnmsub132ss, LIG, 66, 0F, 38, W0, 9f) \
- V(vfnmsub213ss, LIG, 66, 0F, 38, W0, af) \
- V(vfnmsub231ss, LIG, 66, 0F, 38, W0, bf) \
+ V(vfnmsub231sd, L128, 66, 0F, 38, W1, bf)
+
+#define FMA_SS_INSTRUCTION_LIST(V) \
+ V(vfmadd132ss, LIG, 66, 0F, 38, W0, 99) \
+ V(vfmadd213ss, LIG, 66, 0F, 38, W0, a9) \
+ V(vfmadd231ss, LIG, 66, 0F, 38, W0, b9) \
+ V(vfmsub132ss, LIG, 66, 0F, 38, W0, 9b) \
+ V(vfmsub213ss, LIG, 66, 0F, 38, W0, ab) \
+ V(vfmsub231ss, LIG, 66, 0F, 38, W0, bb) \
+ V(vfnmadd132ss, LIG, 66, 0F, 38, W0, 9d) \
+ V(vfnmadd213ss, LIG, 66, 0F, 38, W0, ad) \
+ V(vfnmadd231ss, LIG, 66, 0F, 38, W0, bd) \
+ V(vfnmsub132ss, LIG, 66, 0F, 38, W0, 9f) \
+ V(vfnmsub213ss, LIG, 66, 0F, 38, W0, af) \
+ V(vfnmsub231ss, LIG, 66, 0F, 38, W0, bf)
+
+#define FMA_PS_INSTRUCTION_LIST(V) \
V(vfmadd132ps, L128, 66, 0F, 38, W0, 98) \
V(vfmadd213ps, L128, 66, 0F, 38, W0, a8) \
V(vfmadd231ps, L128, 66, 0F, 38, W0, b8) \
V(vfnmadd132ps, L128, 66, 0F, 38, W0, 9c) \
V(vfnmadd213ps, L128, 66, 0F, 38, W0, ac) \
- V(vfnmadd231ps, L128, 66, 0F, 38, W0, bc) \
+ V(vfnmadd231ps, L128, 66, 0F, 38, W0, bc)
+
+#define FMA_PD_INSTRUCTION_LIST(V) \
V(vfmadd132pd, L128, 66, 0F, 38, W1, 98) \
V(vfmadd213pd, L128, 66, 0F, 38, W1, a8) \
V(vfmadd231pd, L128, 66, 0F, 38, W1, b8) \
@@ -43,4 +49,10 @@
V(vfnmadd213pd, L128, 66, 0F, 38, W1, ac) \
V(vfnmadd231pd, L128, 66, 0F, 38, W1, bc)
+#define FMA_INSTRUCTION_LIST(V) \
+ FMA_SD_INSTRUCTION_LIST(V) \
+ FMA_SS_INSTRUCTION_LIST(V) \
+ FMA_PS_INSTRUCTION_LIST(V) \
+ FMA_PD_INSTRUCTION_LIST(V)
+
#endif // V8_CODEGEN_X64_FMA_INSTR_H_
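Splitting FMA_INSTRUCTION_LIST into SD/SS/PS/PD sublists keeps existing consumers working (the combined list is reassembled above) while letting new consumers expand only one encoding class. Each consumer supplies its own V macro per row; a standalone sketch of the pattern, with a trimmed stand-in for the real list:

    #include <cstdio>

    // Trimmed stand-in for the PS list in fma-instr.h.
    #define DEMO_FMA_PS_INSTRUCTION_LIST(V)    \
      V(vfmadd132ps, L128, 66, 0F, 38, W0, 98) \
      V(vfmadd213ps, L128, 66, 0F, 38, W0, a8)

    // A consumer defines V to expand one row; here it just prints the row.
    #define PRINT_ROW(name, length, prefix, escape1, escape2, w, opcode) \
      printf("%-13s opcode 0x%s\n", #name, #opcode);

    int main() { DEMO_FMA_PS_INSTRUCTION_LIST(PRINT_ROW) }

    #undef PRINT_ROW
    #undef DEMO_FMA_PS_INSTRUCTION_LIST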
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index c8c5903410..6ac8017ca8 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -371,8 +371,8 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// turned on to provoke errors.
if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
- Move(value, kZapValue, RelocInfo::NONE);
- Move(slot_address, kZapValue, RelocInfo::NONE);
+ Move(value, kZapValue, RelocInfo::NO_INFO);
+ Move(slot_address, kZapValue, RelocInfo::NO_INFO);
}
}
@@ -669,8 +669,8 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
// turned on to provoke errors.
if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
- Move(slot_address, kZapValue, RelocInfo::NONE);
- Move(value, kZapValue, RelocInfo::NONE);
+ Move(slot_address, kZapValue, RelocInfo::NO_INFO);
+ Move(value, kZapValue, RelocInfo::NO_INFO);
}
}
@@ -819,7 +819,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
- // We don't allow a GC during a store buffer overflow so there is no need to
+ // We don't allow a GC in a write barrier slow path so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
int bytes = 0;
@@ -906,99 +906,6 @@ void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
}
}
-// Helper macro to define qfma macro-assembler. This takes care of every
-// possible case of register aliasing to minimize the number of instructions.
-#define QFMA(ps_or_pd) \
- if (CpuFeatures::IsSupported(FMA3)) { \
- CpuFeatureScope fma3_scope(this, FMA3); \
- if (dst == src1) { \
- vfmadd231##ps_or_pd(dst, src2, src3); \
- } else if (dst == src2) { \
- vfmadd132##ps_or_pd(dst, src1, src3); \
- } else if (dst == src3) { \
- vfmadd213##ps_or_pd(dst, src2, src1); \
- } else { \
- vmovups(dst, src1); \
- vfmadd231##ps_or_pd(dst, src2, src3); \
- } \
- } else if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(this, AVX); \
- vmul##ps_or_pd(tmp, src2, src3); \
- vadd##ps_or_pd(dst, src1, tmp); \
- } else { \
- if (dst == src1) { \
- movaps(tmp, src2); \
- mul##ps_or_pd(tmp, src3); \
- add##ps_or_pd(dst, tmp); \
- } else if (dst == src2) { \
- DCHECK_NE(src2, src1); \
- mul##ps_or_pd(src2, src3); \
- add##ps_or_pd(src2, src1); \
- } else if (dst == src3) { \
- DCHECK_NE(src3, src1); \
- mul##ps_or_pd(src3, src2); \
- add##ps_or_pd(src3, src1); \
- } else { \
- movaps(dst, src2); \
- mul##ps_or_pd(dst, src3); \
- add##ps_or_pd(dst, src1); \
- } \
- }
-
-// Helper macro defining the qfms macro-assembler helper. It takes care of every
-// possible case of register aliasing to minimize the number of instructions.
-#define QFMS(ps_or_pd) \
- if (CpuFeatures::IsSupported(FMA3)) { \
- CpuFeatureScope fma3_scope(this, FMA3); \
- if (dst == src1) { \
- vfnmadd231##ps_or_pd(dst, src2, src3); \
- } else if (dst == src2) { \
- vfnmadd132##ps_or_pd(dst, src1, src3); \
- } else if (dst == src3) { \
- vfnmadd213##ps_or_pd(dst, src2, src1); \
- } else { \
- vmovups(dst, src1); \
- vfnmadd231##ps_or_pd(dst, src2, src3); \
- } \
- } else if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(this, AVX); \
- vmul##ps_or_pd(tmp, src2, src3); \
- vsub##ps_or_pd(dst, src1, tmp); \
- } else { \
- movaps(tmp, src2); \
- mul##ps_or_pd(tmp, src3); \
- if (dst != src1) { \
- movaps(dst, src1); \
- } \
- sub##ps_or_pd(dst, tmp); \
- }
-
-void TurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
- QFMA(ps)
-}
-
-void TurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
- QFMS(ps)
-}
-
-void TurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
- QFMA(pd);
-}
-
-void TurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
- QFMS(pd);
-}
-
-#undef QFMOP
-
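The QFMA/QFMS helpers removed here (together with the stray `#undef QFMOP`,
which names the old macro rather than the QFMA/QFMS actually defined) back the
Wasm SIMD qfma/qfms operations with three dispatch tiers: FMA3 picks whichever
of the 132/213/231 encodings matches the register aliasing, AVX falls back to
a separate multiply plus add/sub, and bare SSE shuffles through `tmp`. Per
lane they compute the following — a scalar reference sketch with hypothetical
function names:

  // qfma: dst = src1 + src2 * src3. Under FMA3 this is one fused operation
  // with a single rounding step; the AVX/SSE fallbacks round twice.
  float QfmaLane(float src1, float src2, float src3) { return src1 + src2 * src3; }
  // qfms: dst = src1 - src2 * src3 (hence vfnmadd, the negated multiply-add).
  float QfmsLane(float src1, float src2, float src3) { return src1 - src2 * src3; }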
void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1561,7 +1468,7 @@ void TurboAssembler::Move(Register dst, Smi source) {
if (value == 0) {
xorl(dst, dst);
} else if (SmiValuesAre32Bits() || value < 0) {
- Move(dst, source.ptr(), RelocInfo::NONE);
+ Move(dst, source.ptr(), RelocInfo::NO_INFO);
} else {
uint32_t uvalue = static_cast<uint32_t>(source.ptr());
Move(dst, uvalue);
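Three fast paths survive in the Smi move: zero becomes a self-xor, values
needing all 64 bits (32-bit Smis, or negatives whose tagged form sign-extends)
take the reloc-aware 64-bit move, and the rest rely on movl zero-extending on
x64. A worked example, assuming 31-bit Smi tagging (tagged = value << 1):

  // Smi::FromInt(5).ptr() == 10, which fits in 32 bits, so the final branch
  // emits a plain 32-bit move whose implicit zero-extension yields the
  // correct 64-bit tagged word:
  //   movl eax, 0xa
  Move(rax, Smi::FromInt(5));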
@@ -1596,7 +1503,7 @@ void TurboAssembler::Move(Register dst, Register src) {
void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
void TurboAssembler::Move(Register dst, Immediate src) {
- if (src.rmode() == RelocInfo::Mode::NONE) {
+ if (src.rmode() == RelocInfo::Mode::NO_INFO) {
Move(dst, src.value());
} else {
movl(dst, src);
@@ -1920,7 +1827,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
j(cc, code_object, rmode);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
jmp(kOffHeapTrampolineRegister);
}
@@ -1931,7 +1838,7 @@ void TurboAssembler::Call(ExternalReference ext) {
}
void TurboAssembler::Call(Operand op) {
- if (!CpuFeatures::IsSupported(ATOM)) {
+ if (!CpuFeatures::IsSupported(INTEL_ATOM)) {
call(op);
} else {
movq(kScratchRegister, op);
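The CPU feature enumerator is renamed ATOM -> INTEL_ATOM; the workaround is
unchanged. On Atom the memory-indirect call is split into a load plus a
register-indirect call (kScratchRegister is r10 on x64):

  // default path:        INTEL_ATOM path:
  //   call [op]            movq r10, [op]
  //                        call r10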
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index da57d4629a..262162ded0 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -177,15 +177,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
uint32_t* load_pc_offset = nullptr);
- void F64x2Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
- void F64x2Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
- void F32x4Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
- void F32x4Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
-
void Lzcntq(Register dst, Register src);
void Lzcntq(Register dst, Operand src);
void Lzcntl(Register dst, Register src);
@@ -335,7 +326,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Move(Register dst, Address ptr, RelocInfo::Mode rmode) {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
- DCHECK(rmode == RelocInfo::NONE || rmode > RelocInfo::LAST_GCED_ENUM);
+ DCHECK(rmode == RelocInfo::NO_INFO || rmode > RelocInfo::LAST_GCED_ENUM);
movq(dst, Immediate64(ptr, rmode));
}
@@ -784,7 +775,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void PopQuad(Operand dst);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// Compare object type for heap object.
// Always use unsigned comparisons: above and below, not less and greater.