author     Michaël Zasso <targos@protonmail.com>    2020-05-05 09:19:02 +0200
committer  Michaël Zasso <targos@protonmail.com>    2020-05-12 16:12:13 +0200
commit     1d6adf7432defeb39b751a19c68335e8afb0d8ee (patch)
tree       7ab67931110b8d9db770d774c7a6d0d14c976c15 /deps/v8/src/interpreter
parent     aee36a04475a20c13663d1037aa6f175ff368bc7 (diff)
deps: update V8 to 8.3.110.9
PR-URL: https://github.com/nodejs/node/pull/32831
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/interpreter')
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc         65
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h          17
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.cc          40
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.h           14
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc            460
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.h              41
-rw-r--r--  deps/v8/src/interpreter/bytecode-operands.h                2
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-optimizer.cc     7
-rw-r--r--  deps/v8/src/interpreter/bytecode-register.cc              15
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h                       10
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.cc         37
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.h          15
-rw-r--r--  deps/v8/src/interpreter/control-flow-builders.cc          31
-rw-r--r--  deps/v8/src/interpreter/control-flow-builders.h           26
-rw-r--r--  deps/v8/src/interpreter/handler-table-builder.cc           7
-rw-r--r--  deps/v8/src/interpreter/handler-table-builder.h            3
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc          35
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.h            3
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc          137
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc                     51
-rw-r--r--  deps/v8/src/interpreter/interpreter.h                       3
21 files changed, 668 insertions(+), 351 deletions(-)
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 1c61776cdf..cc65545138 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-jump-table.h"
@@ -81,7 +82,9 @@ Register BytecodeArrayBuilder::Local(int index) const {
return Register(index);
}
-Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
+template <typename LocalIsolate>
+Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(
+ LocalIsolate* isolate) {
DCHECK(RemainderOfBlockIsDead());
DCHECK(!bytecode_generated_);
bytecode_generated_ = true;
@@ -99,19 +102,35 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
isolate, register_count, parameter_count(), handler_table);
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(
+ Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(
+ OffThreadIsolate* isolate);
+
#ifdef DEBUG
-int BytecodeArrayBuilder::CheckBytecodeMatches(Handle<BytecodeArray> bytecode) {
+int BytecodeArrayBuilder::CheckBytecodeMatches(BytecodeArray bytecode) {
+ DisallowHeapAllocation no_gc;
return bytecode_array_writer_.CheckBytecodeMatches(bytecode);
}
#endif
+template <typename LocalIsolate>
Handle<ByteArray> BytecodeArrayBuilder::ToSourcePositionTable(
- Isolate* isolate) {
+ LocalIsolate* isolate) {
DCHECK(RemainderOfBlockIsDead());
return bytecode_array_writer_.ToSourcePositionTable(isolate);
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> BytecodeArrayBuilder::ToSourcePositionTable(
+ Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> BytecodeArrayBuilder::ToSourcePositionTable(
+ OffThreadIsolate* isolate);
+
BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
Bytecode bytecode) {
BytecodeSourceInfo source_position;
@@ -1237,32 +1256,26 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfJSReceiver(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(
- BytecodeLoopHeader* loop_header, int loop_depth) {
- OutputJumpLoop(loop_header, loop_depth);
- return *this;
-}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnSmiNoFeedback(
- BytecodeJumpTable* jump_table) {
- OutputSwitchOnSmiNoFeedback(jump_table);
- return *this;
-}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
+ BytecodeLoopHeader* loop_header, int loop_depth, int position) {
if (position != kNoSourcePosition) {
- // We need to attach a non-breakable source position to a stack
- // check, so we simply add it as expression position. There can be
- // a prior statement position from constructs like:
+ // We need to attach a non-breakable source position to JumpLoop for its
+ // implicit stack check, so we simply add it as expression position. There
+ // can be a prior statement position from constructs like:
//
// do var x; while (false);
//
// A Nop could be inserted for empty statements, but since no code
- // is associated with these positions, instead we force the stack
- // check's expression position which eliminates the empty
- // statement's position.
+ // is associated with these positions, instead we force the jump loop's
+ // expression position which eliminates the empty statement's position.
latest_source_info_.ForceExpressionPosition(position);
}
- OutputStackCheck();
+ OutputJumpLoop(loop_header, loop_depth);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnSmiNoFeedback(
+ BytecodeJumpTable* jump_table) {
+ OutputSwitchOnSmiNoFeedback(jump_table);
return *this;
}
@@ -1619,6 +1632,14 @@ uint32_t BytecodeArrayBuilder::GetOutputRegisterListOperand(
return static_cast<uint32_t>(reg_list.first_register().ToOperand());
}
+void BytecodeArrayBuilder::EmitFunctionStartSourcePosition(int position) {
+ bytecode_array_writer_.SetFunctionEntrySourcePosition(position);
+ // Force an expression position to make sure we have one. If the next bytecode
+ // overwrites it, it’s fine since it would mean we have a source position
+ // anyway.
+ latest_source_info_.ForceExpressionPosition(position);
+}
+
std::ostream& operator<<(std::ostream& os,
const BytecodeArrayBuilder::ToBooleanMode& mode) {
switch (mode) {
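
The hunks above turn ToBytecodeArray and ToSourcePositionTable into member templates over the isolate type, with explicit instantiations for Isolate and OffThreadIsolate so the definitions can stay in the .cc file. A minimal, self-contained sketch of that explicit-instantiation pattern follows; MainIsolate, OffThreadIsolate and ToArtifact here are illustrative stand-ins, not V8's types.

#include <iostream>
#include <string>

// Stand-in isolate flavours; only a tag string differs between them.
struct MainIsolate { static const char* Name() { return "main-thread"; } };
struct OffThreadIsolate { static const char* Name() { return "off-thread"; } };

class Builder {
 public:
  // One member-template definition serves both isolate types.
  template <typename LocalIsolate>
  std::string ToArtifact(LocalIsolate* isolate) const;
};

template <typename LocalIsolate>
std::string Builder::ToArtifact(LocalIsolate* isolate) const {
  (void)isolate;
  return std::string("artifact built on ") + LocalIsolate::Name();
}

// Explicit instantiations, analogous to the EXPORT_TEMPLATE_DEFINE lines,
// keep the template body out of the header while linking for both types.
template std::string Builder::ToArtifact(MainIsolate*) const;
template std::string Builder::ToArtifact(OffThreadIsolate*) const;

int main() {
  Builder b;
  MainIsolate main_isolate;
  OffThreadIsolate off_thread_isolate;
  std::cout << b.ToArtifact(&main_isolate) << "\n"
            << b.ToArtifact(&off_thread_isolate) << "\n";
}
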
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 39cd4fa6f6..cad4f473a2 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -7,6 +7,7 @@
#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
+#include "src/base/export-template.h"
#include "src/common/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-flags.h"
@@ -42,11 +43,15 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
SourcePositionTableBuilder::RecordingMode source_position_mode =
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
- Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate);
- Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> ToBytecodeArray(LocalIsolate* isolate);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> ToSourcePositionTable(LocalIsolate* isolate);
#ifdef DEBUG
- int CheckBytecodeMatches(Handle<BytecodeArray> bytecode);
+ int CheckBytecodeMatches(BytecodeArray bytecode);
#endif
// Get the number of parameters expected by function.
@@ -422,7 +427,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& Jump(BytecodeLabel* label);
BytecodeArrayBuilder& JumpLoop(BytecodeLoopHeader* loop_header,
- int loop_depth);
+ int loop_depth, int position);
BytecodeArrayBuilder& JumpIfTrue(ToBooleanMode mode, BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(ToBooleanMode mode, BytecodeLabel* label);
@@ -439,8 +444,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& SwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table);
- BytecodeArrayBuilder& StackCheck(int position);
-
// Sets the pending message to the value in the accumulator, and returns the
// previous pending message in the accumulator.
BytecodeArrayBuilder& SetPendingMessage();
@@ -549,6 +552,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
void OutputStarRaw(Register reg);
void OutputMovRaw(Register src, Register dest);
+ void EmitFunctionStartSourcePosition(int position);
+
// Accessors
BytecodeRegisterAllocator* register_allocator() {
return &register_allocator_;
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 3a459b4833..a1b9d9d5f6 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -5,6 +5,7 @@
#include "src/interpreter/bytecode-array-writer.h"
#include "src/api/api-inl.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-node.h"
@@ -36,8 +37,9 @@ BytecodeArrayWriter::BytecodeArrayWriter(
bytecodes_.reserve(512); // Derived via experimentation.
}
+template <typename LocalIsolate>
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
+ LocalIsolate* isolate, int register_count, int parameter_count,
Handle<ByteArray> handler_table) {
DCHECK_EQ(0, unbound_jumps_);
@@ -52,27 +54,45 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
return bytecode_array;
}
-Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(Isolate* isolate) {
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
+ Isolate* isolate, int register_count, int parameter_count,
+ Handle<ByteArray> handler_table);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
+ OffThreadIsolate* isolate, int register_count, int parameter_count,
+ Handle<ByteArray> handler_table);
+
+template <typename LocalIsolate>
+Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
+ LocalIsolate* isolate) {
DCHECK(!source_position_table_builder_.Lazy());
Handle<ByteArray> source_position_table =
source_position_table_builder_.Omit()
- ? ReadOnlyRoots(isolate).empty_byte_array_handle()
+ ? isolate->factory()->empty_byte_array()
: source_position_table_builder_.ToSourcePositionTable(isolate);
return source_position_table;
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
+ Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
+ OffThreadIsolate* isolate);
+
#ifdef DEBUG
-int BytecodeArrayWriter::CheckBytecodeMatches(Handle<BytecodeArray> bytecode) {
+int BytecodeArrayWriter::CheckBytecodeMatches(BytecodeArray bytecode) {
int mismatches = false;
int bytecode_size = static_cast<int>(bytecodes()->size());
const byte* bytecode_ptr = &bytecodes()->front();
- if (bytecode_size != bytecode->length()) mismatches = true;
+ if (bytecode_size != bytecode.length()) mismatches = true;
// If there's a mismatch only in the length of the bytecode (very unlikely)
// then the first mismatch will be the first extra bytecode.
- int first_mismatch = std::min(bytecode_size, bytecode->length());
+ int first_mismatch = std::min(bytecode_size, bytecode.length());
for (int i = 0; i < first_mismatch; ++i) {
- if (bytecode_ptr[i] != bytecode->get(i)) {
+ if (bytecode_ptr[i] != bytecode.get(i)) {
mismatches = true;
first_mismatch = i;
break;
@@ -187,6 +207,12 @@ void BytecodeArrayWriter::BindTryRegionEnd(
handler_table_builder->SetTryRegionEnd(handler_id, current_offset);
}
+void BytecodeArrayWriter::SetFunctionEntrySourcePosition(int position) {
+ bool is_statement = false;
+ source_position_table_builder_.AddPosition(
+ kFunctionEntryBytecodeOffset, SourcePosition(position), is_statement);
+}
+
void BytecodeArrayWriter::StartBasicBlock() {
InvalidateLastBytecode();
exit_seen_in_block_ = false;
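
CheckBytecodeMatches now receives the BytecodeArray directly rather than through a Handle and reports the offset of the first mismatching byte, or -1 when the sequences agree. A rough standalone sketch of that first-mismatch logic, using std::vector<uint8_t> in place of V8's types:

#include <algorithm>
#include <cstdint>
#include <vector>

// Returns -1 when the sequences match, otherwise the offset of the first
// differing byte; a pure length difference reports the first extra byte,
// mirroring the shape of BytecodeArrayWriter::CheckBytecodeMatches.
int FirstMismatch(const std::vector<uint8_t>& expected,
                  const std::vector<uint8_t>& actual) {
  bool mismatches = expected.size() != actual.size();
  size_t first_mismatch = std::min(expected.size(), actual.size());
  for (size_t i = 0; i < first_mismatch; ++i) {
    if (expected[i] != actual[i]) {
      mismatches = true;
      first_mismatch = i;
      break;
    }
  }
  return mismatches ? static_cast<int>(first_mismatch) : -1;
}

int main() {
  std::vector<uint8_t> a = {1, 2, 3};
  std::vector<uint8_t> b = {1, 9, 3};
  return (FirstMismatch(a, a) == -1 && FirstMismatch(a, b) == 1) ? 0 : 1;
}
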
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 22f0296aff..c1f4266e49 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -51,15 +51,21 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
void BindTryRegionEnd(HandlerTableBuilder* handler_table_builder,
int handler_id);
- Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int register_count,
- int parameter_count,
+ void SetFunctionEntrySourcePosition(int position);
+
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> ToBytecodeArray(LocalIsolate* isolate,
+ int register_count, int parameter_count,
Handle<ByteArray> handler_table);
- Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> ToSourcePositionTable(LocalIsolate* isolate);
#ifdef DEBUG
// Returns -1 if they match or the offset of the first mismatching byte.
- int CheckBytecodeMatches(Handle<BytecodeArray> bytecode);
+ int CheckBytecodeMatches(BytecodeArray bytecode);
#endif
bool RemainderOfBlockIsDead() const { return exit_seen_in_block_; }
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 196f9d8819..18a2fc9913 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -17,6 +17,7 @@
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/logging/log.h"
+#include "src/logging/off-thread-logger.h"
#include "src/objects/debug-objects.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
@@ -24,6 +25,7 @@
#include "src/objects/template-objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -409,10 +411,7 @@ class BytecodeGenerator::ControlScopeForIteration final
LoopBuilder* loop_builder)
: ControlScope(generator),
statement_(statement),
- loop_builder_(loop_builder) {
- generator->loop_depth_++;
- }
- ~ControlScopeForIteration() override { generator()->loop_depth_--; }
+ loop_builder_(loop_builder) {}
protected:
bool Execute(Command command, Statement* statement,
@@ -724,47 +723,72 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
DISALLOW_COPY_AND_ASSIGN(TestResultScope);
};
-// Used to build a list of global declaration initial value pairs.
-class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
+// Used to build a list of toplevel declaration data.
+class BytecodeGenerator::TopLevelDeclarationsBuilder final : public ZoneObject {
public:
+ template <typename LocalIsolate>
Handle<FixedArray> AllocateDeclarations(UnoptimizedCompilationInfo* info,
BytecodeGenerator* generator,
Handle<Script> script,
- Isolate* isolate) {
- int size = 0;
- for (Declaration* decl : *info->scope()->declarations()) {
- Variable* var = decl->var();
- if (!var->is_used()) continue;
- if (var->location() != VariableLocation::UNALLOCATED) continue;
- DCHECK_IMPLIES(decl->node_type() != AstNode::kVariableDeclaration,
- decl->node_type() == AstNode::kFunctionDeclaration);
- size += decl->node_type() == AstNode::kVariableDeclaration ? 1 : 2;
- }
-
+ LocalIsolate* isolate) {
DCHECK(has_constant_pool_entry_);
Handle<FixedArray> data =
- isolate->factory()->NewFixedArray(size, AllocationType::kOld);
+ isolate->factory()->NewFixedArray(entry_slots_, AllocationType::kOld);
int array_index = 0;
- for (Declaration* decl : *info->scope()->declarations()) {
- Variable* var = decl->var();
- if (!var->is_used()) continue;
- if (var->location() != VariableLocation::UNALLOCATED) continue;
- if (decl->node_type() == AstNode::kVariableDeclaration) {
- data->set(array_index++, *var->raw_name()->string().get<Factory>());
- } else {
- FunctionLiteral* f = static_cast<FunctionDeclaration*>(decl)->fun();
- Handle<Object> sfi(Compiler::GetSharedFunctionInfo(f, script, isolate));
- // Return a null handle if any initial values can't be created. Caller
- // will set stack overflow.
- if (sfi.is_null()) return Handle<FixedArray>();
- data->set(array_index++, *sfi);
- int literal_index = generator->GetCachedCreateClosureSlot(f);
- data->set(array_index++, Smi::FromInt(literal_index));
+ if (info->scope()->is_module_scope()) {
+ for (Declaration* decl : *info->scope()->declarations()) {
+ Variable* var = decl->var();
+ if (!var->is_used()) continue;
+ if (var->location() != VariableLocation::MODULE) continue;
+#ifdef DEBUG
+ int start = array_index;
+#endif
+ if (decl->IsFunctionDeclaration()) {
+ FunctionLiteral* f = static_cast<FunctionDeclaration*>(decl)->fun();
+ Handle<SharedFunctionInfo> sfi(
+ Compiler::GetSharedFunctionInfo(f, script, isolate));
+ // Return a null handle if any initial values can't be created. Caller
+ // will set stack overflow.
+ if (sfi.is_null()) return Handle<FixedArray>();
+ data->set(array_index++, *sfi);
+ int literal_index = generator->GetCachedCreateClosureSlot(f);
+ data->set(array_index++, Smi::FromInt(literal_index));
+ DCHECK(var->IsExport());
+ data->set(array_index++, Smi::FromInt(var->index()));
+ DCHECK_EQ(start + kModuleFunctionDeclarationSize, array_index);
+ } else if (var->IsExport() && var->binding_needs_init()) {
+ data->set(array_index++, Smi::FromInt(var->index()));
+ DCHECK_EQ(start + kModuleVariableDeclarationSize, array_index);
+ }
+ }
+ } else {
+ for (Declaration* decl : *info->scope()->declarations()) {
+ Variable* var = decl->var();
+ if (!var->is_used()) continue;
+ if (var->location() != VariableLocation::UNALLOCATED) continue;
+#ifdef DEBUG
+ int start = array_index;
+#endif
+ if (decl->IsVariableDeclaration()) {
+ data->set(array_index++, *var->raw_name()->string());
+ DCHECK_EQ(start + kGlobalVariableDeclarationSize, array_index);
+ } else {
+ FunctionLiteral* f = static_cast<FunctionDeclaration*>(decl)->fun();
+ Handle<SharedFunctionInfo> sfi(
+ Compiler::GetSharedFunctionInfo(f, script, isolate));
+ // Return a null handle if any initial values can't be created. Caller
+ // will set stack overflow.
+ if (sfi.is_null()) return Handle<FixedArray>();
+ data->set(array_index++, *sfi);
+ int literal_index = generator->GetCachedCreateClosureSlot(f);
+ data->set(array_index++, Smi::FromInt(literal_index));
+ DCHECK_EQ(start + kGlobalFunctionDeclarationSize, array_index);
+ }
}
}
-
+ DCHECK_EQ(array_index, data->length());
return data;
}
@@ -774,21 +798,37 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
}
void set_constant_pool_entry(size_t constant_pool_entry) {
- DCHECK(has_global_declaration());
+ DCHECK(has_top_level_declaration());
DCHECK(!has_constant_pool_entry_);
constant_pool_entry_ = constant_pool_entry;
has_constant_pool_entry_ = true;
}
- void record_global_declaration() { has_seen_global_declaration_ = true; }
- bool has_global_declaration() { return has_seen_global_declaration_; }
+ void record_global_variable_declaration() {
+ entry_slots_ += kGlobalVariableDeclarationSize;
+ }
+ void record_global_function_declaration() {
+ entry_slots_ += kGlobalFunctionDeclarationSize;
+ }
+ void record_module_variable_declaration() {
+ entry_slots_ += kModuleVariableDeclarationSize;
+ }
+ void record_module_function_declaration() {
+ entry_slots_ += kModuleFunctionDeclarationSize;
+ }
+ bool has_top_level_declaration() { return entry_slots_ > 0; }
bool processed() { return processed_; }
void mark_processed() { processed_ = true; }
private:
+ const int kGlobalVariableDeclarationSize = 1;
+ const int kGlobalFunctionDeclarationSize = 2;
+ const int kModuleVariableDeclarationSize = 1;
+ const int kModuleFunctionDeclarationSize = 3;
+
size_t constant_pool_entry_ = 0;
+ int entry_slots_ = 0;
bool has_constant_pool_entry_ = false;
- bool has_seen_global_declaration_ = false;
bool processed_ = false;
};
@@ -910,6 +950,36 @@ class BytecodeGenerator::OptionalChainNullLabelScope final {
BytecodeLabels* prev_;
};
+// LoopScope delimits the scope of {loop}, from its header to its final jump.
+// It should be constructed iff a (conceptual) back edge should be produced. In
+// the case of creating a LoopBuilder but never emitting the loop, it is valid
+// to skip the creation of LoopScope.
+class BytecodeGenerator::LoopScope final {
+ public:
+ explicit LoopScope(BytecodeGenerator* bytecode_generator, LoopBuilder* loop)
+ : bytecode_generator_(bytecode_generator),
+ parent_loop_scope_(bytecode_generator_->current_loop_scope()),
+ loop_builder_(loop) {
+ loop_builder_->LoopHeader();
+ bytecode_generator_->set_current_loop_scope(this);
+ bytecode_generator_->loop_depth_++;
+ }
+
+ ~LoopScope() {
+ bytecode_generator_->loop_depth_--;
+ bytecode_generator_->set_current_loop_scope(parent_loop_scope_);
+ DCHECK_GE(bytecode_generator_->loop_depth_, 0);
+ loop_builder_->JumpToHeader(
+ bytecode_generator_->loop_depth_,
+ parent_loop_scope_ ? parent_loop_scope_->loop_builder_ : nullptr);
+ }
+
+ private:
+ BytecodeGenerator* const bytecode_generator_;
+ LoopScope* const parent_loop_scope_;
+ LoopBuilder* const loop_builder_;
+};
+
namespace {
template <typename PropertyT>
@@ -983,7 +1053,7 @@ BytecodeGenerator::BytecodeGenerator(
current_scope_(info->scope()),
eager_inner_literals_(eager_inner_literals),
feedback_slot_cache_(new (zone()) FeedbackSlotCache(zone())),
- globals_builder_(new (zone()) GlobalDeclarationsBuilder()),
+ top_level_builder_(new (zone()) TopLevelDeclarationsBuilder()),
block_coverage_builder_(nullptr),
function_literals_(0, zone()),
native_function_literals_(0, zone()),
@@ -1000,6 +1070,7 @@ BytecodeGenerator::BytecodeGenerator(
generator_jump_table_(nullptr),
suspend_count_(0),
loop_depth_(0),
+ current_loop_scope_(nullptr),
catch_prediction_(HandlerTable::UNCAUGHT) {
DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
if (info->has_source_range_map()) {
@@ -1008,22 +1079,49 @@ BytecodeGenerator::BytecodeGenerator(
}
}
+namespace {
+
+template <typename Isolate>
+struct NullContextScopeHelper;
+
+template <>
+struct NullContextScopeHelper<Isolate> {
+ using Type = NullContextScope;
+};
+
+template <>
+struct NullContextScopeHelper<OffThreadIsolate> {
+ class DummyNullContextScope {
+ public:
+ explicit DummyNullContextScope(OffThreadIsolate*) {}
+ };
+ using Type = DummyNullContextScope;
+};
+
+template <typename Isolate>
+using NullContextScopeFor = typename NullContextScopeHelper<Isolate>::Type;
+
+} // namespace
+
+template <typename LocalIsolate>
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
- Isolate* isolate, Handle<Script> script) {
+ LocalIsolate* isolate, Handle<Script> script) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#ifdef DEBUG
// Unoptimized compilation should be context-independent. Verify that we don't
// access the native context by nulling it out during finalization.
- NullContextScope null_context_scope(isolate);
+ NullContextScopeFor<LocalIsolate> null_context_scope(isolate);
#endif
AllocateDeferredConstants(isolate, script);
if (block_coverage_builder_) {
- info()->set_coverage_info(
- isolate->factory()->NewCoverageInfo(block_coverage_builder_->slots()));
+ Handle<CoverageInfo> coverage_info =
+ isolate->factory()->NewCoverageInfo(block_coverage_builder_->slots());
+ info()->set_coverage_info(coverage_info);
if (FLAG_trace_block_coverage) {
- info()->coverage_info()->Print(info()->literal()->GetDebugName());
+ StdoutStream os;
+ coverage_info->CoverageInfoPrint(os, info()->literal()->GetDebugName());
}
}
@@ -1038,13 +1136,19 @@ Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
return bytecode_array;
}
+template Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
+ Isolate* isolate, Handle<Script> script);
+template Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
+ OffThreadIsolate* isolate, Handle<Script> script);
+
+template <typename LocalIsolate>
Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
- Isolate* isolate) {
+ LocalIsolate* isolate) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#ifdef DEBUG
// Unoptimized compilation should be context-independent. Verify that we don't
// access the native context by nulling it out during finalization.
- NullContextScope null_context_scope(isolate);
+ NullContextScopeFor<LocalIsolate> null_context_scope(isolate);
#endif
Handle<ByteArray> source_position_table =
@@ -1058,21 +1162,27 @@ Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
return source_position_table;
}
+template Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
+ Isolate* isolate);
+template Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
+ OffThreadIsolate* isolate);
+
#ifdef DEBUG
-int BytecodeGenerator::CheckBytecodeMatches(Handle<BytecodeArray> bytecode) {
+int BytecodeGenerator::CheckBytecodeMatches(BytecodeArray bytecode) {
return builder()->CheckBytecodeMatches(bytecode);
}
#endif
-void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
+template <typename LocalIsolate>
+void BytecodeGenerator::AllocateDeferredConstants(LocalIsolate* isolate,
Handle<Script> script) {
- if (globals_builder()->has_global_declaration()) {
+ if (top_level_builder()->has_top_level_declaration()) {
// Build global declaration pair array.
- Handle<FixedArray> declarations =
- globals_builder()->AllocateDeclarations(info(), this, script, isolate);
+ Handle<FixedArray> declarations = top_level_builder()->AllocateDeclarations(
+ info(), this, script, isolate);
if (declarations.is_null()) return SetStackOverflow();
builder()->SetDeferredConstantPoolEntry(
- globals_builder()->constant_pool_entry(), declarations);
+ top_level_builder()->constant_pool_entry(), declarations);
}
// Find or build shared function infos.
@@ -1087,6 +1197,9 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
// Find or build shared function infos for the native function templates.
for (std::pair<NativeFunctionLiteral*, size_t> literal :
native_function_literals_) {
+ // This should only happen for main-thread compilations.
+ DCHECK((std::is_same<Isolate, v8::internal::Isolate>::value));
+
NativeFunctionLiteral* expr = literal.first;
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
@@ -1142,6 +1255,18 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
}
}
+template void BytecodeGenerator::AllocateDeferredConstants(
+ Isolate* isolate, Handle<Script> script);
+template void BytecodeGenerator::AllocateDeferredConstants(
+ OffThreadIsolate* isolate, Handle<Script> script);
+
+namespace {
+bool NeedsContextInitialization(DeclarationScope* scope) {
+ return scope->NeedsContext() && !scope->is_script_scope() &&
+ !scope->is_module_scope();
+}
+} // namespace
+
void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
@@ -1159,14 +1284,14 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
AllocateTopLevelRegisters();
- // Perform a stack-check before the body.
- builder()->StackCheck(info()->literal()->start_position());
+ builder()->EmitFunctionStartSourcePosition(
+ info()->literal()->start_position());
if (info()->literal()->CanSuspend()) {
BuildGeneratorPrologue();
}
- if (closure_scope()->NeedsContext() && !closure_scope()->is_script_scope()) {
+ if (NeedsContextInitialization(closure_scope())) {
// Push a new inner context scope for the function.
BuildNewLocalActivationContext();
ContextScope local_function_context(this, closure_scope());
@@ -1223,6 +1348,8 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Visit declarations within the function scope.
if (closure_scope()->is_script_scope()) {
VisitGlobalDeclarations(closure_scope()->declarations());
+ } else if (closure_scope()->is_module_scope()) {
+ VisitModuleDeclarations(closure_scope()->declarations());
} else {
VisitDeclarations(closure_scope()->declarations());
}
@@ -1232,7 +1359,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// The derived constructor case is handled in VisitCallSuper.
if (IsBaseConstructor(function_kind())) {
- if (literal->requires_brand_initialization()) {
+ if (literal->class_scope_has_private_brand()) {
BuildPrivateBrandInitialization(builder()->Receiver());
}
@@ -1320,6 +1447,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED:
+ case VariableLocation::MODULE:
UNREACHABLE();
case VariableLocation::LOCAL:
if (variable->binding_needs_init()) {
@@ -1355,13 +1483,6 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
.CallRuntime(Runtime::kDeclareEvalVar, name);
break;
}
- case VariableLocation::MODULE:
- if (variable->IsExport() && variable->binding_needs_init()) {
- builder()->LoadTheHole();
- BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
- }
- // Nothing to do for imports.
- break;
}
}
@@ -1375,6 +1496,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED:
+ case VariableLocation::MODULE:
UNREACHABLE();
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
@@ -1400,12 +1522,6 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
Runtime::kDeclareEvalFunction, args);
break;
}
- case VariableLocation::MODULE:
- DCHECK_EQ(variable->mode(), VariableMode::kLet);
- DCHECK(variable->IsExport());
- VisitForAccumulatorValue(decl->fun());
- BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
- break;
}
DCHECK_IMPLIES(
eager_inner_literals_ != nullptr && decl->fun()->ShouldEagerCompile(),
@@ -1430,18 +1546,60 @@ void BytecodeGenerator::VisitModuleNamespaceImports() {
}
}
+void BytecodeGenerator::BuildDeclareCall(Runtime::FunctionId id) {
+ if (!top_level_builder()->has_top_level_declaration()) return;
+ DCHECK(!top_level_builder()->processed());
+
+ top_level_builder()->set_constant_pool_entry(
+ builder()->AllocateDeferredConstantPoolEntry());
+
+ // Emit code to declare globals.
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadConstantPoolEntry(top_level_builder()->constant_pool_entry())
+ .StoreAccumulatorInRegister(args[0])
+ .MoveRegister(Register::function_closure(), args[1])
+ .CallRuntime(id, args);
+
+ top_level_builder()->mark_processed();
+}
+
+void BytecodeGenerator::VisitModuleDeclarations(Declaration::List* decls) {
+ RegisterAllocationScope register_scope(this);
+ for (Declaration* decl : *decls) {
+ Variable* var = decl->var();
+ if (!var->is_used()) continue;
+ if (var->location() == VariableLocation::MODULE) {
+ if (decl->IsFunctionDeclaration()) {
+ DCHECK(var->IsExport());
+ FunctionDeclaration* f = static_cast<FunctionDeclaration*>(decl);
+ AddToEagerLiteralsIfEager(f->fun());
+ top_level_builder()->record_module_function_declaration();
+ } else if (var->IsExport() && var->binding_needs_init()) {
+ DCHECK(decl->IsVariableDeclaration());
+ top_level_builder()->record_module_variable_declaration();
+ }
+ } else {
+ RegisterAllocationScope register_scope(this);
+ Visit(decl);
+ }
+ }
+ BuildDeclareCall(Runtime::kDeclareModuleExports);
+}
+
void BytecodeGenerator::VisitGlobalDeclarations(Declaration::List* decls) {
RegisterAllocationScope register_scope(this);
- bool has_global_declaration = false;
for (Declaration* decl : *decls) {
Variable* var = decl->var();
DCHECK(var->is_used());
if (var->location() == VariableLocation::UNALLOCATED) {
// var or function.
- has_global_declaration = true;
if (decl->IsFunctionDeclaration()) {
+ top_level_builder()->record_global_function_declaration();
FunctionDeclaration* f = static_cast<FunctionDeclaration*>(decl);
AddToEagerLiteralsIfEager(f->fun());
+ } else {
+ top_level_builder()->record_global_variable_declaration();
}
} else {
// let or const. Handled in NewScriptContext.
@@ -1450,22 +1608,7 @@ void BytecodeGenerator::VisitGlobalDeclarations(Declaration::List* decls) {
}
}
- if (!has_global_declaration) return;
- globals_builder()->record_global_declaration();
- DCHECK(!globals_builder()->processed());
-
- globals_builder()->set_constant_pool_entry(
- builder()->AllocateDeferredConstantPoolEntry());
-
- // Emit code to declare globals.
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->LoadConstantPoolEntry(globals_builder()->constant_pool_entry())
- .StoreAccumulatorInRegister(args[0])
- .MoveRegister(Register::function_closure(), args[1])
- .CallRuntime(Runtime::kDeclareGlobals, args);
-
- globals_builder()->mark_processed();
+ BuildDeclareCall(Runtime::kDeclareGlobals);
}
void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
@@ -1717,7 +1860,6 @@ void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop_builder) {
loop_builder->LoopBody();
ControlScopeForIteration execution_control(this, stmt, loop_builder);
- builder()->StackCheck(stmt->position());
Visit(stmt->body());
loop_builder->BindContinueTarget();
}
@@ -1725,20 +1867,22 @@ void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
if (stmt->cond()->ToBooleanIsFalse()) {
+ // Since we know that the condition is false, we don't create a loop.
+ // Therefore, we don't create a LoopScope (and thus we don't create a header
+ // and a JumpToHeader). However, we still need to iterate once through the
+ // body.
VisitIterationBody(stmt, &loop_builder);
} else if (stmt->cond()->ToBooleanIsTrue()) {
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
- loop_builder.JumpToHeader(loop_depth_);
} else {
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_backbranch(zone());
VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(),
TestFallthrough::kThen);
loop_backbranch.Bind(builder());
- loop_builder.JumpToHeader(loop_depth_);
}
}
@@ -1750,7 +1894,7 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
return;
}
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
if (!stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_body(zone());
@@ -1759,22 +1903,21 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
loop_body.Bind(builder());
}
VisitIterationBody(stmt, &loop_builder);
- loop_builder.JumpToHeader(loop_depth_);
}
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
- LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
-
if (stmt->init() != nullptr) {
Visit(stmt->init());
}
+
+ LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
if (stmt->cond() && stmt->cond()->ToBooleanIsFalse()) {
// If the condition is known to be false there is no need to generate
// body, next or condition blocks. Init block should be generated.
return;
}
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_body(zone());
@@ -1787,7 +1930,6 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
builder()->SetStatementPosition(stmt->next());
Visit(stmt->next());
}
- loop_builder.JumpToHeader(loop_depth_);
}
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
@@ -1821,7 +1963,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The loop
{
LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->each());
builder()->ForInContinue(index, cache_length);
loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
@@ -1843,7 +1985,6 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
VisitIterationBody(stmt, &loop_builder);
builder()->ForInStep(index);
builder()->StoreAccumulatorInRegister(index);
- loop_builder.JumpToHeader(loop_depth_);
}
builder()->Bind(&subject_undefined_label);
}
@@ -1895,7 +2036,7 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Register next_result = register_allocator()->NewRegister();
LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
builder()->LoadTrue().StoreAccumulatorInRegister(done);
@@ -1927,8 +2068,6 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
BuildAssignment(lhs_data, Token::ASSIGN, LookupHoistingMode::kNormal);
VisitIterationBody(stmt, &loop_builder);
-
- loop_builder.JumpToHeader(loop_depth_);
},
// Finally block.
[&](Register iteration_continuation_token) {
@@ -2391,11 +2530,6 @@ void BytecodeGenerator::VisitNativeFunctionLiteral(
native_function_literals_.push_back(std::make_pair(expr, entry));
}
-void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
- VisitBlock(expr->block());
- VisitVariableProxy(expr->result());
-}
-
void BytecodeGenerator::VisitConditional(Conditional* expr) {
ConditionalControlFlowBuilder conditional_builder(
builder(), block_coverage_builder_, expr);
@@ -2747,7 +2881,7 @@ void BytecodeGenerator::BuildFillArrayWithIterator(
DCHECK(value.is_valid());
LoopBuilder loop_builder(builder(), nullptr, nullptr);
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
// Call the iterator's .next() method. Break from the loop if the `done`
// property is truthy, otherwise load the value from the iterator result and
@@ -2770,7 +2904,6 @@ void BytecodeGenerator::BuildFillArrayWithIterator(
.UnaryOperation(Token::INC, feedback_index(index_slot))
.StoreAccumulatorInRegister(index);
loop_builder.BindContinueTarget();
- loop_builder.JumpToHeader(loop_depth_);
}
void BytecodeGenerator::BuildCreateArrayLiteral(
@@ -2932,19 +3065,6 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
BuildCreateArrayLiteral(expr->values(), expr);
}
-void BytecodeGenerator::VisitStoreInArrayLiteral(StoreInArrayLiteral* expr) {
- builder()->SetExpressionAsStatementPosition(expr);
- RegisterAllocationScope register_scope(this);
- Register array = register_allocator()->NewRegister();
- Register index = register_allocator()->NewRegister();
- VisitForRegisterValue(expr->array(), array);
- VisitForRegisterValue(expr->index(), index);
- VisitForAccumulatorValue(expr->value());
- builder()->StoreInArrayLiteral(
- array, index,
- feedback_index(feedback_spec()->AddStoreInArrayLiteralICSlot()));
-}
-
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
builder()->SetExpressionPosition(proxy);
BuildVariableLoad(proxy->var(), proxy->hole_check_mode());
@@ -4247,8 +4367,8 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// - One for awaiting the iterator result yielded by the delegated
// iterator
- LoopBuilder loop(builder(), nullptr, nullptr);
- loop.LoopHeader();
+ LoopBuilder loop_builder(builder(), nullptr, nullptr);
+ LoopScope loop_scope(this, &loop_builder);
{
BytecodeLabels after_switch(zone());
@@ -4329,7 +4449,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
output, ast_string_constants()->done_string(),
feedback_index(feedback_spec()->AddLoadICSlot()));
- loop.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
+ loop_builder.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
// Suspend the current generator.
if (iterator_type == IteratorType::kNormal) {
@@ -4360,8 +4480,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
generator_object())
.StoreAccumulatorInRegister(resume_mode);
- loop.BindContinueTarget();
- loop.JumpToHeader(loop_depth_);
+ loop_builder.BindContinueTarget();
}
}
@@ -4553,16 +4672,37 @@ void BytecodeGenerator::BuildPrivateBrandCheck(Property* property,
DCHECK(IsPrivateMethodOrAccessorVariableMode(private_name->mode()));
ClassScope* scope = private_name->scope()->AsClassScope();
if (private_name->is_static()) {
- DCHECK_NOT_NULL(scope->class_variable());
// For static private methods, the only valid receiver is the class.
// Load the class constructor.
- BuildVariableLoadForAccumulatorValue(scope->class_variable(),
- HoleCheckMode::kElided);
- BytecodeLabel return_check;
- builder()->CompareReference(object).JumpIfTrue(
- ToBooleanMode::kAlreadyBoolean, &return_check);
- BuildInvalidPropertyAccess(tmpl, property);
- builder()->Bind(&return_check);
+ if (scope->class_variable() == nullptr) {
+      // If the static private method has not been used in source
+      // code (either explicitly or through the presence of eval), but is
+      // accessed by the debugger at runtime, reference to the class variable
+      // is not available since it was not context-allocated. Therefore we
+      // can't build a branch check, and throw a ReferenceError as if the
+      // method was optimized away.
+ // TODO(joyee): get a reference to the class constructor through
+ // something other than scope->class_variable() in this scenario.
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(Smi::FromEnum(
+ MessageTemplate::
+ kInvalidUnusedPrivateStaticMethodAccessedByDebugger))
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(private_name->raw_name())
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewError, args)
+ .Throw();
+ } else {
+ BuildVariableLoadForAccumulatorValue(scope->class_variable(),
+ HoleCheckMode::kElided);
+ BytecodeLabel return_check;
+ builder()->CompareReference(object).JumpIfTrue(
+ ToBooleanMode::kAlreadyBoolean, &return_check);
+ BuildInvalidPropertyAccess(tmpl, property);
+ builder()->Bind(&return_check);
+ }
} else {
BuildVariableLoadForAccumulatorValue(scope->brand(),
HoleCheckMode::kElided);
@@ -4901,7 +5041,7 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
Register instance = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(instance);
- if (info()->literal()->requires_brand_initialization()) {
+ if (info()->literal()->class_scope_has_private_brand()) {
BuildPrivateBrandInitialization(instance);
}
@@ -5999,37 +6139,23 @@ void BytecodeGenerator::BuildNewLocalActivationContext() {
DCHECK_EQ(current_scope(), closure_scope());
// Create the appropriate context.
- if (scope->is_module_scope()) {
- // We don't need to do anything for the outer script scope.
- DCHECK(scope->outer_scope()->is_script_scope());
-
- // A JSFunction representing a module is called with the module object as
- // its sole argument.
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->MoveRegister(builder()->Parameter(0), args[0])
- .LoadLiteral(scope)
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kPushModuleContext, args);
- } else {
- DCHECK(scope->is_function_scope() || scope->is_eval_scope());
- int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (slot_count <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- switch (scope->scope_type()) {
- case EVAL_SCOPE:
- builder()->CreateEvalContext(scope, slot_count);
- break;
- case FUNCTION_SCOPE:
- builder()->CreateFunctionContext(scope, slot_count);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- Register arg = register_allocator()->NewRegister();
- builder()->LoadLiteral(scope).StoreAccumulatorInRegister(arg).CallRuntime(
- Runtime::kNewFunctionContext, arg);
+ DCHECK(scope->is_function_scope() || scope->is_eval_scope());
+ int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (slot_count <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
+ switch (scope->scope_type()) {
+ case EVAL_SCOPE:
+ builder()->CreateEvalContext(scope, slot_count);
+ break;
+ case FUNCTION_SCOPE:
+ builder()->CreateFunctionContext(scope, slot_count);
+ break;
+ default:
+ UNREACHABLE();
}
+ } else {
+ Register arg = register_allocator()->NewRegister();
+ builder()->LoadLiteral(scope).StoreAccumulatorInRegister(arg).CallRuntime(
+ Runtime::kNewFunctionContext, arg);
}
}
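
The broadest change in bytecode-generator.cc is the LoopScope RAII helper: its constructor emits the loop header and increments loop_depth_, and its destructor decrements the depth and emits the back-edge jump, so the individual statement visitors no longer call LoopHeader()/JumpToHeader() and manage the depth by hand. A hedged, standalone sketch of that RAII shape; Generator and its Emit methods below are illustrative stand-ins, not V8's API.

#include <iostream>

// Illustrative stand-in for the generator state touched by LoopScope.
struct Generator {
  int loop_depth = 0;
  void EmitLoopHeader() {
    std::cout << "LoopHeader at depth " << loop_depth << "\n";
  }
  void EmitJumpToHeader(int depth) {
    std::cout << "JumpLoop back-edge at depth " << depth << "\n";
  }
};

// Header on construction, back-edge on destruction: a visitor only needs to
// create one LoopScope per loop it actually emits.
class LoopScope {
 public:
  explicit LoopScope(Generator* gen) : gen_(gen) {
    gen_->EmitLoopHeader();
    ++gen_->loop_depth;
  }
  ~LoopScope() {
    --gen_->loop_depth;
    gen_->EmitJumpToHeader(gen_->loop_depth);
  }

 private:
  Generator* const gen_;
};

int main() {
  Generator gen;
  {
    LoopScope outer(&gen);    // e.g. the while statement
    {
      LoopScope inner(&gen);  // e.g. a nested for-of
    }
  }
}
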
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 15b33bcac3..aa461d523c 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -24,7 +24,7 @@ enum class SourceRangeKind;
namespace interpreter {
-class GlobalDeclarationsBuilder;
+class TopLevelDeclarationsBuilder;
class LoopBuilder;
class BlockCoverageBuilder;
class BytecodeJumpTable;
@@ -37,12 +37,14 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
std::vector<FunctionLiteral*>* eager_inner_literals);
void GenerateBytecode(uintptr_t stack_limit);
- Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate,
+ template <typename LocalIsolate>
+ Handle<BytecodeArray> FinalizeBytecode(LocalIsolate* isolate,
Handle<Script> script);
- Handle<ByteArray> FinalizeSourcePositionTable(Isolate* isolate);
+ template <typename LocalIsolate>
+ Handle<ByteArray> FinalizeSourcePositionTable(LocalIsolate* isolate);
#ifdef DEBUG
- int CheckBytecodeMatches(Handle<BytecodeArray> bytecode);
+ int CheckBytecodeMatches(BytecodeArray bytecode);
#endif
#define DECLARE_VISIT(type) void Visit##type(type* node);
@@ -50,11 +52,13 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
#undef DECLARE_VISIT
// Visiting function for declarations list and statements are overridden.
+ void VisitModuleDeclarations(Declaration::List* declarations);
void VisitGlobalDeclarations(Declaration::List* declarations);
void VisitDeclarations(Declaration::List* declarations);
void VisitStatements(const ZonePtrList<Statement>* statments);
private:
+ class AccumulatorPreservingScope;
class ContextScope;
class ControlScope;
class ControlScopeForBreakable;
@@ -63,17 +67,17 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class ControlScopeForTryCatch;
class ControlScopeForTryFinally;
class CurrentScope;
- class ExpressionResultScope;
class EffectResultScope;
+ class ExpressionResultScope;
class FeedbackSlotCache;
- class GlobalDeclarationsBuilder;
class IteratorRecord;
+ class LoopScope;
class NaryCodeCoverageSlots;
+ class OptionalChainNullLabelScope;
class RegisterAllocationScope;
- class AccumulatorPreservingScope;
class TestResultScope;
+ class TopLevelDeclarationsBuilder;
class ValueResultScope;
- class OptionalChainNullLabelScope;
using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
@@ -160,7 +164,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
};
void GenerateBytecodeBody();
- void AllocateDeferredConstants(Isolate* isolate, Handle<Script> script);
+ template <typename LocalIsolate>
+ void AllocateDeferredConstants(LocalIsolate* isolate, Handle<Script> script);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -221,6 +226,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildThisVariableLoad();
+ void BuildDeclareCall(Runtime::FunctionId id);
+
Expression* GetDestructuringDefaultValue(Expression** target);
void BuildDestructuringArrayAssignment(
ArrayLiteral* pattern, Token::Value op,
@@ -462,9 +469,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
return builder()->register_allocator();
}
- GlobalDeclarationsBuilder* globals_builder() {
- DCHECK_NOT_NULL(globals_builder_);
- return globals_builder_;
+ TopLevelDeclarationsBuilder* top_level_builder() {
+ DCHECK_NOT_NULL(top_level_builder_);
+ return top_level_builder_;
}
inline LanguageMode language_mode() const;
inline FunctionKind function_kind() const;
@@ -482,6 +489,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
catch_prediction_ = value;
}
+ LoopScope* current_loop_scope() const { return current_loop_scope_; }
+ void set_current_loop_scope(LoopScope* loop_scope) {
+ current_loop_scope_ = loop_scope;
+ }
+
Zone* zone_;
BytecodeArrayBuilder builder_;
UnoptimizedCompilationInfo* info_;
@@ -494,7 +506,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
FeedbackSlotCache* feedback_slot_cache_;
- GlobalDeclarationsBuilder* globals_builder_;
+ TopLevelDeclarationsBuilder* top_level_builder_;
BlockCoverageBuilder* block_coverage_builder_;
ZoneVector<std::pair<FunctionLiteral*, size_t>> function_literals_;
ZoneVector<std::pair<NativeFunctionLiteral*, size_t>>
@@ -518,8 +530,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
BytecodeJumpTable* generator_jump_table_;
int suspend_count_;
+ // TODO(solanes): assess if we can move loop_depth_ into LoopScope.
int loop_depth_;
+ LoopScope* current_loop_scope_;
+
HandlerTable::CatchPrediction catch_prediction_;
};
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index a5d2e6e50a..4f953341d4 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -37,7 +37,7 @@ namespace interpreter {
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort) \
- V(NativeContextIndex, OperandTypeInfo::kScalableUnsignedByte)
+ V(NativeContextIndex, OperandTypeInfo::kFixedUnsignedByte)
// Carefully ordered for operand type range checks below.
#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index d75e45967b..ea6814db81 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -233,8 +233,13 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
// a vector of register metadata.
// There is at least one parameter, which is the JS receiver.
DCHECK_NE(parameter_count, 0);
+#ifdef V8_REVERSE_JSARGS
+ int first_slot_index = parameter_count - 1;
+#else
+ int first_slot_index = 0;
+#endif
register_info_table_offset_ =
- -Register::FromParameterIndex(0, parameter_count).index();
+ -Register::FromParameterIndex(first_slot_index, parameter_count).index();
// Initialize register map for parameters, locals, and the
// accumulator.
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index 56f6297016..4e56c3b411 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -8,10 +8,17 @@ namespace v8 {
namespace internal {
namespace interpreter {
+#ifdef V8_REVERSE_JSARGS
+static const int kFirstParamRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kFirstParamFromFp) /
+ kSystemPointerSize;
+#else
static const int kLastParamRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kLastParamFromFp) /
kSystemPointerSize;
+#endif
static const int kFunctionClosureRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
StandardFrameConstants::kFunctionOffset) /
@@ -36,14 +43,22 @@ static const int kCallerPCOffsetRegisterIndex =
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
DCHECK_LT(index, parameter_count);
+#ifdef V8_REVERSE_JSARGS
+ int register_index = kFirstParamRegisterIndex - index;
+#else
int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
+#endif
DCHECK_LT(register_index, 0);
return Register(register_index);
}
int Register::ToParameterIndex(int parameter_count) const {
DCHECK(is_parameter());
+#ifdef V8_REVERSE_JSARGS
+ return kFirstParamRegisterIndex - index();
+#else
return index() - kLastParamRegisterIndex + parameter_count - 1;
+#endif
}
Register Register::function_closure() {
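
bytecode-register.cc now derives parameter registers from the first parameter's frame slot when V8_REVERSE_JSARGS is defined, rather than from the last parameter's slot. A small sketch of the two mappings; the base indices below are made-up values, only the arithmetic follows the diff.

#include <cassert>

// Hypothetical frame-relative bases; in V8 these come from
// InterpreterFrameConstants. Only the formulas mirror the diff.
constexpr int kFirstParamRegisterIndex = -3;  // V8_REVERSE_JSARGS layout
constexpr int kLastParamRegisterIndex = -3;   // original layout

int FromParameterIndexReversed(int index) {
  return kFirstParamRegisterIndex - index;
}

int FromParameterIndexOriginal(int index, int parameter_count) {
  return kLastParamRegisterIndex - parameter_count + index + 1;
}

int main() {
  // With three parameters, the receiver (index 0) lands on a different
  // register depending on the argument order in the frame.
  assert(FromParameterIndexReversed(0) == -3);
  assert(FromParameterIndexOriginal(0, 3) == -5);
  return 0;
}
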
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 80f9e4d311..0b638418d1 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -333,9 +333,6 @@ namespace interpreter {
OperandType::kRegPair, OperandType::kIdx) \
V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg) \
\
- /* Perform a stack guard check */ \
- V(StackCheck, AccumulatorUse::kNone) \
- \
/* Update the pending message */ \
V(SetPendingMessage, AccumulatorUse::kReadWrite) \
\
@@ -644,10 +641,11 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
}
// Return true if |bytecode| is a jump without effects,
- // e.g. any jump excluding those that include type coercion like
- // JumpIfTrueToBoolean.
+ // e.g. any jump excluding those that include type coercion like
+ // JumpIfTrueToBoolean, and JumpLoop due to having an implicit StackCheck.
static constexpr bool IsJumpWithoutEffects(Bytecode bytecode) {
- return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
+ return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode) &&
+ bytecode != Bytecode::kJumpLoop;
}
// Returns true if the bytecode is a switch.
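
With the dedicated StackCheck bytecode gone, JumpLoop carries the stack guard implicitly, so IsJumpWithoutEffects has to exclude it. A toy version of the predicate over a made-up enum; only the shape of the check mirrors the diff.

// Made-up subset of jump bytecodes for illustration.
enum class Bytecode { kJump, kJumpIfToBooleanTrue, kJumpLoop };

constexpr bool IsJump(Bytecode) { return true; }  // every value above is a jump
constexpr bool IsJumpIfToBoolean(Bytecode b) {
  return b == Bytecode::kJumpIfToBooleanTrue;
}

// JumpLoop is excluded because it now embeds an implicit stack check.
constexpr bool IsJumpWithoutEffects(Bytecode b) {
  return IsJump(b) && !IsJumpIfToBoolean(b) && b != Bytecode::kJumpLoop;
}

static_assert(IsJumpWithoutEffects(Bytecode::kJump), "plain jumps are effect-free");
static_assert(!IsJumpWithoutEffects(Bytecode::kJumpLoop), "JumpLoop has effects");

int main() { return 0; }
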
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index f36556f1d4..feb3abab95 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -13,6 +13,7 @@
#include "src/ast/scopes.h"
#include "src/base/functional.h"
#include "src/execution/isolate.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -64,8 +65,9 @@ const ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
}
#if DEBUG
+template <typename LocalIsolate>
void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
- Isolate* isolate) const {
+ LocalIsolate* isolate) const {
std::set<Smi> smis;
std::set<double> heap_numbers;
std::set<const AstRawString*> strings;
@@ -166,8 +168,9 @@ ConstantArrayBuilder::ConstantArraySlice* ConstantArrayBuilder::IndexToSlice(
UNREACHABLE();
}
+template <typename LocalIsolate>
MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
- Isolate* isolate) const {
+ LocalIsolate* isolate) const {
const ConstantArraySlice* slice = IndexToSlice(index);
DCHECK_LT(index, slice->capacity());
if (index < slice->start_index() + slice->size()) {
@@ -177,7 +180,15 @@ MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
return MaybeHandle<Object>();
}
-Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
+ Isolate* isolate) const;
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ MaybeHandle<Object> ConstantArrayBuilder::At(
+ size_t index, OffThreadIsolate* isolate) const;
+
+template <typename LocalIsolate>
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(LocalIsolate* isolate) {
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArrayWithHoles(
static_cast<int>(size()), AllocationType::kOld);
int array_index = 0;
@@ -207,6 +218,12 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
return fixed_array;
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(
+ OffThreadIsolate* isolate);
+
size_t ConstantArrayBuilder::Insert(Smi smi) {
auto entry = smi_map_.find(smi);
if (entry == smi_map_.end()) {
@@ -362,7 +379,9 @@ void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
OperandSizeToSlice(operand_size)->Unreserve();
}
-Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
+template <typename LocalIsolate>
+Handle<Object> ConstantArrayBuilder::Entry::ToHandle(
+ LocalIsolate* isolate) const {
switch (tag_) {
case Tag::kDeferred:
// We shouldn't have any deferred entries by now.
@@ -376,9 +395,10 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
// TODO(leszeks): There's probably a better value we could use here.
return isolate->factory()->the_hole_value();
case Tag::kRawString:
- return raw_string_->string().get<Factory>();
+ return raw_string_->string();
case Tag::kHeapNumber:
- return isolate->factory()->NewNumber<AllocationType::kOld>(heap_number_);
+ return isolate->factory()->template NewNumber<AllocationType::kOld>(
+ heap_number_);
case Tag::kBigInt:
// This should never fail: the parser will never create a BigInt
// literal that cannot be allocated.
@@ -394,6 +414,11 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
UNREACHABLE();
}
+template Handle<Object> ConstantArrayBuilder::Entry::ToHandle(
+ Isolate* isolate) const;
+template Handle<Object> ConstantArrayBuilder::Entry::ToHandle(
+ OffThreadIsolate* isolate) const;
+
} // namespace interpreter
} // namespace internal
} // namespace v8
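One subtlety above is the new `->template NewNumber<...>` spelling in Entry::ToHandle: once the isolate type is a template parameter, `isolate->factory()` is a dependent expression, and calling a member template through it requires the `template` disambiguator. A self-contained sketch of the rule, with invented stand-in types rather than V8's Isolate and Factory:

struct FactorySketch {
  template <int kAllocation>
  double NewNumber(double value) const { return value + kAllocation; }
};

struct IsolateSketch {
  FactorySketch* factory() { return &factory_; }
  FactorySketch factory_;
};

template <typename LocalIsolate>
double MakeNumber(LocalIsolate* isolate, double value) {
  // Without `template`, the `<` after NewNumber would parse as a comparison,
  // because isolate->factory() has a dependent type inside this template.
  return isolate->factory()->template NewNumber</*kAllocation=*/1>(value);
}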
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 968a0cadd5..3376f9a2cc 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -7,6 +7,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/common/globals.h"
+#include "src/handles/handles.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects/smi.h"
#include "src/utils/identity-map.h"
@@ -52,12 +53,16 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
explicit ConstantArrayBuilder(Zone* zone);
// Generate a fixed array of constant handles based on inserted objects.
- Handle<FixedArray> ToFixedArray(Isolate* isolate);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<FixedArray> ToFixedArray(LocalIsolate* isolate);
// Returns the object, as a handle in |isolate|, that is in the constant pool
// array at index |index|. Returns null if there is no handle at this index.
// Only expected to be used in tests.
- MaybeHandle<Object> At(size_t index, Isolate* isolate) const;
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ MaybeHandle<Object> At(size_t index, LocalIsolate* isolate) const;
// Returns the number of elements in the array.
size_t size() const;
@@ -150,7 +155,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
smi_ = smi;
}
- Handle<Object> ToHandle(Isolate* isolate) const;
+ template <typename LocalIsolate>
+ Handle<Object> ToHandle(LocalIsolate* isolate) const;
private:
explicit Entry(Tag tag) : tag_(tag) {}
@@ -199,7 +205,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
const Entry& At(size_t index) const;
#if DEBUG
- void CheckAllElementsAreUnique(Isolate* isolate) const;
+ template <typename LocalIsolate>
+ void CheckAllElementsAreUnique(LocalIsolate* isolate) const;
#endif
inline size_t available() const { return capacity() - reserved() - size(); }
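Stripped of the DLL-export plumbing, the EXPORT_TEMPLATE_DECLARE / EXPORT_TEMPLATE_DEFINE pair used for ToFixedArray and At is ordinary explicit instantiation: declarations in the header, definitions next to the template body in the .cc, so the body stays out of the header while both isolate types still link. A bare-bones sketch of the pattern with invented names:

// --- builder_sketch.h ---
struct MainIsolateSketch {};
struct OffThreadIsolateSketch {};

class BuilderSketch {
 public:
  template <typename LocalIsolate>
  int ToFixedArray(LocalIsolate* isolate);  // declared only; body in the .cc
};

// Instantiation declarations: callers may use these two specializations but
// must not instantiate them themselves.
extern template int BuilderSketch::ToFixedArray(MainIsolateSketch* isolate);
extern template int BuilderSketch::ToFixedArray(OffThreadIsolateSketch* isolate);

// --- builder_sketch.cc ---
// template <typename LocalIsolate>
// int BuilderSketch::ToFixedArray(LocalIsolate* isolate) { return 0; }
// template int BuilderSketch::ToFixedArray(MainIsolateSketch* isolate);
// template int BuilderSketch::ToFixedArray(OffThreadIsolateSketch* isolate);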
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 3fa1274f82..7062550d30 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -47,6 +47,7 @@ void BreakableControlFlowBuilder::EmitJumpIfNull(BytecodeLabels* sites) {
LoopBuilder::~LoopBuilder() {
DCHECK(continue_labels_.empty() || continue_labels_.is_bound());
+ DCHECK(end_labels_.empty() || end_labels_.is_bound());
}
void LoopBuilder::LoopHeader() {
@@ -54,7 +55,8 @@ void LoopBuilder::LoopHeader() {
// requirements of bytecode basic blocks. The only entry into a loop
  // must be the loop header. Break and continue labels must therefore not
  // be bound before the loop header itself is bound.
- DCHECK(break_labels_.empty() && continue_labels_.empty());
+ DCHECK(break_labels_.empty() && continue_labels_.empty() &&
+ end_labels_.empty());
builder()->Bind(&loop_header_);
}
@@ -64,17 +66,30 @@ void LoopBuilder::LoopBody() {
}
}
-void LoopBuilder::JumpToHeader(int loop_depth) {
- // Pass the proper loop nesting level to the backwards branch, to trigger
- // on-stack replacement when armed for the given loop nesting depth.
- int level = Min(loop_depth, AbstractCode::kMaxLoopNestingMarker - 1);
- // Loop must have closed form, i.e. all loop elements are within the loop,
- // the loop header precedes the body and next elements in the loop.
- builder()->JumpLoop(&loop_header_, level);
+void LoopBuilder::JumpToHeader(int loop_depth, LoopBuilder* const parent_loop) {
+ BindLoopEnd();
+ if (parent_loop &&
+ loop_header_.offset() == parent_loop->loop_header_.offset()) {
+ // TurboFan can't cope with multiple loops that have the same loop header
+ // bytecode offset. If we have an inner loop with the same header offset
+  // as its parent loop, we do not create a JumpLoop bytecode. Instead, we
+  // Jump to our parent's JumpToHeader which, in turn, is either a JumpLoop
+  // or, iff the parent is itself a nested inner loop, another such Jump.
+ parent_loop->JumpToLoopEnd();
+ } else {
+ // Pass the proper loop nesting level to the backwards branch, to trigger
+ // on-stack replacement when armed for the given loop nesting depth.
+ int level = Min(loop_depth, AbstractCode::kMaxLoopNestingMarker - 1);
+ // Loop must have closed form, i.e. all loop elements are within the loop,
+ // the loop header precedes the body and next elements in the loop.
+ builder()->JumpLoop(&loop_header_, level, source_position_);
+ }
}
void LoopBuilder::BindContinueTarget() { continue_labels_.Bind(builder()); }
+void LoopBuilder::BindLoopEnd() { end_labels_.Bind(builder()); }
+
SwitchBuilder::~SwitchBuilder() {
#ifdef DEBUG
for (auto site : case_sites_) {
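The new JumpToHeader logic is easiest to see in isolation: when an inner loop's header lands on the same bytecode offset as its parent's (nothing was emitted between the two headers), the inner loop gives up its own back edge and jumps forward to the parent's end label instead, so TurboFan never sees two JumpLoops targeting one header. A simplified stand-alone model of the decision, with an invented LoopSketch type in place of LoopBuilder:

#include <algorithm>

struct LoopSketch {
  int header_offset = 0;
  LoopSketch* parent = nullptr;

  // Mirrors LoopBuilder::JumpToHeader: either delegate the back edge to the
  // enclosing loop or emit our own JumpLoop with a clamped nesting level.
  void JumpToHeader(int loop_depth, int max_nesting_marker) {
    if (parent && header_offset == parent->header_offset) {
      parent->JumpToLoopEnd();  // plain forward Jump; the parent owns the JumpLoop
    } else {
      int level = std::min(loop_depth, max_nesting_marker - 1);
      EmitJumpLoop(level);      // backward branch carrying the OSR nesting level
    }
  }
  void JumpToLoopEnd() { /* emit a Jump to this loop's end label */ }
  void EmitJumpLoop(int level) { (void)level; /* emit the JumpLoop bytecode */ }
};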
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index d4f2d11e7c..ac68947e1a 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -9,6 +9,7 @@
#include "src/ast/ast-source-ranges.h"
#include "src/interpreter/block-coverage-builder.h"
+#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecode-label.h"
#include "src/zone/zone-containers.h"
@@ -79,7 +80,6 @@ class V8_EXPORT_PRIVATE BreakableControlFlowBuilder
BlockCoverageBuilder* block_coverage_builder_;
};
-
// Class to track control flow for block statements (which can break in JS).
class V8_EXPORT_PRIVATE BlockBuilder final
: public BreakableControlFlowBuilder {
@@ -91,7 +91,6 @@ class V8_EXPORT_PRIVATE BlockBuilder final
statement) {}
};
-
// A class to help with co-ordinating break and continue statements with
// their loop.
class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
@@ -99,18 +98,20 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
LoopBuilder(BytecodeArrayBuilder* builder,
BlockCoverageBuilder* block_coverage_builder, AstNode* node)
: BreakableControlFlowBuilder(builder, block_coverage_builder, node),
- continue_labels_(builder->zone()) {
+ continue_labels_(builder->zone()),
+ end_labels_(builder->zone()) {
if (block_coverage_builder_ != nullptr) {
block_coverage_body_slot_ =
block_coverage_builder_->AllocateBlockCoverageSlot(
node, SourceRangeKind::kBody);
}
+ source_position_ = node ? node->position() : kNoSourcePosition;
}
~LoopBuilder() override;
void LoopHeader();
void LoopBody();
- void JumpToHeader(int loop_depth);
+ void JumpToHeader(int loop_depth, LoopBuilder* const parent_loop);
void BindContinueTarget();
// This method is called when visiting continue statements in the AST.
@@ -121,15 +122,28 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
void ContinueIfNull() { EmitJumpIfNull(&continue_labels_); }
private:
+  // Called by an inner loop on its parent: emits a Jump to the parent's end
+  // label, where either the parent's JumpLoop sits or, iff the parent shares
+  // its header offset with its own parent too, a further Jump outward.
+ void JumpToLoopEnd() { EmitJump(&end_labels_); }
+ void BindLoopEnd();
+
BytecodeLoopHeader loop_header_;
// Unbound labels that identify jumps for continue statements in the code and
// jumps from checking the loop condition to the header for do-while loops.
BytecodeLabels continue_labels_;
+ // Unbound labels that identify jumps for nested inner loops which share the
+ // same header offset as this loop. Said inner loops will Jump to our end
+ // label, which could be a JumpLoop or, iff we are a nested inner loop too, a
+ // Jump to our parent's end label.
+ BytecodeLabels end_labels_;
+
int block_coverage_body_slot_;
-};
+ int source_position_;
+};
// A class to help with co-ordinating break statements with their switch.
class V8_EXPORT_PRIVATE SwitchBuilder final
@@ -165,7 +179,6 @@ class V8_EXPORT_PRIVATE SwitchBuilder final
ZoneVector<BytecodeLabel> case_sites_;
};
-
// A class to help with co-ordinating control flow in try-catch statements.
class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
public:
@@ -194,7 +207,6 @@ class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
TryCatchStatement* statement_;
};
-
// A class to help with co-ordinating control flow in try-finally statements.
class V8_EXPORT_PRIVATE TryFinallyBuilder final : public ControlFlowBuilder {
public:
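Taken together, the LoopBuilder additions above imply roughly the following call order per loop. This is a hedged sketch that assumes the declarations in this header and in bytecode-array-builder.h; the real sequencing lives in bytecode-generator.cc and differs per loop kind:

// Sketch of one while-style loop; parent_loop is the enclosing LoopBuilder,
// or nullptr at the outermost level.
void GenerateWhileLoopSketch(BytecodeArrayBuilder* builder, AstNode* node,
                             LoopBuilder* parent_loop, int loop_depth) {
  LoopBuilder loop(builder, /*block_coverage_builder=*/nullptr, node);
  loop.LoopHeader();            // the only entry point; binds loop_header_
  // ... emit the condition and break out of the loop when it is falsy ...
  loop.LoopBody();              // records body coverage, if enabled
  // ... emit the body; `continue` statements target continue_labels_ ...
  loop.BindContinueTarget();
  loop.JumpToHeader(loop_depth, parent_loop);  // back edge, or Jump to parent
}  // ~LoopBuilder() checks that continue and end labels were bound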
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
index 831d31d09f..91c6e819c1 100644
--- a/deps/v8/src/interpreter/handler-table-builder.cc
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -15,7 +15,8 @@ namespace interpreter {
HandlerTableBuilder::HandlerTableBuilder(Zone* zone) : entries_(zone) {}
-Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
+template <typename LocalIsolate>
+Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(LocalIsolate* isolate) {
int handler_table_size = static_cast<int>(entries_.size());
Handle<ByteArray> table_byte_array = isolate->factory()->NewByteArray(
HandlerTable::LengthForRange(handler_table_size), AllocationType::kOld);
@@ -31,6 +32,10 @@ Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
return table_byte_array;
}
+template Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(
+ Isolate* isolate);
+template Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(
+ OffThreadIsolate* isolate);
int HandlerTableBuilder::NewHandlerEntry() {
int handler_id = static_cast<int>(entries_.size());
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 66b8d1f937..9bf2b17258 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -28,7 +28,8 @@ class V8_EXPORT_PRIVATE HandlerTableBuilder final {
// Builds the actual handler table by copying the current values into a heap
// object. Any further mutations to the builder won't be reflected.
- Handle<ByteArray> ToHandlerTable(Isolate* isolate);
+ template <typename LocalIsolate>
+ Handle<ByteArray> ToHandlerTable(LocalIsolate* isolate);
  // Creates a new handler table entry and returns a {handler_id} identifying
  // the entry, so that it can be referenced by the setter functions below.
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 971642344b..eaea1c91dd 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -122,8 +122,8 @@ void InterpreterAssembler::SaveBytecodeOffset() {
IntPtrConstant(payload_offset),
TruncateIntPtrToInt32(bytecode_offset));
} else {
- StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
- IntPtrConstant(store_offset), SmiTag(bytecode_offset));
+ StoreFullTaggedNoWriteBarrier(base, IntPtrConstant(store_offset),
+ SmiTag(bytecode_offset));
}
}
@@ -265,11 +265,9 @@ TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
#if V8_TARGET_LITTLE_ENDIAN
index += 4;
#endif
- return ChangeInt32ToIntPtr(
- Load(MachineType::Int32(), base, IntPtrConstant(index)));
+ return ChangeInt32ToIntPtr(Load<Int32T>(base, IntPtrConstant(index)));
} else {
- return SmiToIntPtr(
- Load(MachineType::TaggedSigned(), base, IntPtrConstant(index)));
+ return SmiToIntPtr(CAST(LoadFullTagged(base, IntPtrConstant(index))));
}
}
@@ -625,6 +623,13 @@ TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}
+TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
+ int operand_index) {
+ TNode<IntPtrT> index =
+ ChangeInt32ToIntPtr(Signed(BytecodeOperandIdxInt32(operand_index)));
+ return IntPtrToTaggedIndex(index);
+}
+
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_EQ(OperandType::kIdx,
@@ -766,9 +771,15 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
+#ifdef V8_REVERSE_JSARGS
+ TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
+ context, function, arg_count, args...,
+ UndefinedConstant());
+#else
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count,
UndefinedConstant(), args...);
+#endif
} else {
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count, args...);
@@ -1476,7 +1487,8 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \
+ V8_TARGET_ARCH_PPC64
return true;
#else
#error "Unknown Architecture"
@@ -1523,9 +1535,14 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
// Iterate over parameters and write them into the array.
Label loop(this, &var_index), done_loop(this);
+#ifdef V8_REVERSE_JSARGS
+ TNode<IntPtrT> reg_base =
+ IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() + 1);
+#else
TNode<IntPtrT> reg_base = IntPtrAdd(
IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
formal_parameter_count_intptr);
+#endif
Goto(&loop);
BIND(&loop);
@@ -1534,7 +1551,11 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
&done_loop);
+#ifdef V8_REVERSE_JSARGS
+ TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
+#else
TNode<IntPtrT> reg_index = IntPtrSub(reg_base, index);
+#endif
TNode<Object> value = LoadRegister(reg_index);
StoreFixedArrayElement(array, index, value);
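The V8_REVERSE_JSARGS blocks in ExportParametersAndRegisterFile change only the index arithmetic: with arguments pushed in reverse, parameter i sits at a fixed ascending offset from the first-parameter register, while the original layout starts from the last parameter and walks backwards. A small host-side model of the two mappings; the frame constant below is invented for illustration and stands in for Register::FromParameterIndex(0, 1).ToOperand():

#include <cstdio>

int RegOperandReversed(int first_param, int index) {
  int reg_base = first_param + 1;                  // fixed base, ascending
  return reg_base + index;
}
int RegOperandOriginal(int first_param, int index, int param_count) {
  int reg_base = (first_param - 1) + param_count;  // start at the last param
  return reg_base - index;                         // walk back toward param 0
}

int main() {
  const int kFirstParam = -10;  // made-up operand bias
  const int kCount = 3;
  for (int i = 0; i < kCount; ++i) {
    std::printf("parameter %d -> operand %d (reversed) / %d (original)\n", i,
                RegOperandReversed(kFirstParam, i),
                RegOperandOriginal(kFirstParam, i, kCount));
  }
}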
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 006b247794..729e23c7a6 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -38,6 +38,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the smi index immediate for bytecode operand |operand_index|
// in the current bytecode.
TNode<Smi> BytecodeOperandIdxSmi(int operand_index);
+ // Returns the TaggedIndex immediate for bytecode operand |operand_index|
+ // in the current bytecode.
+ TNode<TaggedIndex> BytecodeOperandIdxTaggedIndex(int operand_index);
// Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
// in the current bytecode.
TNode<Uint32T> BytecodeOperandUImm(int operand_index);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 50ceaf462f..6b8b7135e0 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -168,12 +168,8 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
Dispatch();
});
- LazyNode<Smi> lazy_smi_slot = [=] {
- return SmiTag(Signed(BytecodeOperandIdx(slot_operand_index)));
- };
-
- LazyNode<UintPtrT> lazy_slot = [=] {
- return BytecodeOperandIdx(slot_operand_index);
+ LazyNode<TaggedIndex> lazy_slot = [=] {
+ return BytecodeOperandIdxTaggedIndex(slot_operand_index);
};
LazyNode<Context> lazy_context = [=] { return GetContext(); };
@@ -184,9 +180,8 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
return name;
};
- accessor_asm.LoadGlobalIC(maybe_feedback_vector, lazy_smi_slot, lazy_slot,
- lazy_context, lazy_name, typeof_mode,
- &exit_point);
+ accessor_asm.LoadGlobalIC(maybe_feedback_vector, lazy_slot, lazy_context,
+ lazy_name, typeof_mode, &exit_point);
}
};
@@ -222,14 +217,13 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
// Store the global via the StoreGlobalIC.
TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
Label no_feedback(this, Label::kDeferred), end(this);
GotoIf(IsUndefined(maybe_vector), &no_feedback);
- CallBuiltin(Builtins::kStoreGlobalIC, context, name, value, smi_slot,
+ CallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot,
maybe_vector);
Goto(&end);
@@ -514,13 +508,14 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
// constant pool entry <name_index>.
IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> feedback_slot = BytecodeOperandIdx(2);
// Load receiver.
TNode<Object> recv = LoadRegisterAtOperandIndex(0);
// Load the name and context lazily.
- LazyNode<Smi> lazy_smi_slot = [=] { return SmiTag(Signed(feedback_slot)); };
+ LazyNode<TaggedIndex> lazy_slot = [=] {
+ return BytecodeOperandIdxTaggedIndex(2);
+ };
LazyNode<Name> lazy_name = [=] {
return CAST(LoadConstantPoolEntryAtOperandIndex(1));
};
@@ -530,8 +525,8 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
TVARIABLE(Object, var_result);
ExitPoint exit_point(this, &done, &var_result);
- AccessorAssembler::LazyLoadICParameters params(
- lazy_context, recv, lazy_name, lazy_smi_slot, feedback_vector);
+ AccessorAssembler::LazyLoadICParameters params(lazy_context, recv, lazy_name,
+ lazy_slot, feedback_vector);
AccessorAssembler accessor_asm(state());
accessor_asm.LoadIC_BytecodeHandler(&params, &exit_point);
@@ -562,14 +557,13 @@ IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) {
IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
- smi_slot, feedback_vector);
+ var_result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name, slot,
+ feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -586,14 +580,13 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(2);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
var_result = CallStub(ic.descriptor(), code_target, context, object, name,
- value, smi_slot, maybe_vector);
+ value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -649,14 +642,13 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(2);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
var_result = CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
- value, smi_slot, maybe_vector);
+ value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -674,14 +666,13 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
TNode<Object> array = LoadRegisterAtOperandIndex(0);
TNode<Object> index = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(2);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
var_result = CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
- index, value, smi_slot, feedback_vector);
+ index, value, slot, feedback_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -705,13 +696,13 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
TNode<Smi> flags =
SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2)));
- TNode<Smi> vector_index = BytecodeOperandIdxSmi(3);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(3);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
- value, flags, feedback_vector, vector_index);
+ value, flags, feedback_vector, slot);
Dispatch();
}
@@ -1586,17 +1577,32 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
break;
case 2:
+#ifdef V8_REVERSE_JSARGS
+ CallJSAndDispatch(
+ function, context, Int32Constant(arg_count), receiver_mode,
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
+#else
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1));
+#endif
break;
case 3:
+#ifdef V8_REVERSE_JSARGS
+ CallJSAndDispatch(
+ function, context, Int32Constant(arg_count), receiver_mode,
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
+#else
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2));
+#endif
break;
default:
UNREACHABLE();
@@ -1874,14 +1880,13 @@ IGNITION_HANDLER(TestReferenceEqual, InterpreterAssembler) {
IGNITION_HANDLER(TestIn, InterpreterAssembler) {
TNode<Object> name = LoadRegisterAtOperandIndex(0);
TNode<Object> object = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtins::kKeyedHasIC, context, object, name,
- smi_slot, feedback_vector);
+ var_result = CallBuiltin(Builtins::kKeyedHasIC, context, object, name, slot,
+ feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -1902,7 +1907,8 @@ IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
// Record feedback for the {callable} in the {feedback_vector}.
CollectCallableFeedback(callable, context, CAST(maybe_feedback_vector),
- slot_id);
+ slot_id,
+ CallableFeedbackMode::kDontCollectFeedbackCell);
Goto(&feedback_done);
BIND(&feedback_done);
@@ -2361,12 +2367,15 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
// JumpLoop <imm> <loop_depth>
//
// Jump by the number of bytes represented by the immediate operand |imm|. Also
-// performs a loop nesting check and potentially triggers OSR in case the
-// current OSR level matches (or exceeds) the specified |loop_depth|.
+// performs a loop nesting check, a stack check, and potentially triggers OSR in
+// case the current OSR level matches (or exceeds) the specified |loop_depth|.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
TNode<Int32T> loop_depth = BytecodeOperandImm(1);
TNode<Int8T> osr_level = LoadOsrNestingLevel();
+ TNode<Context> context = GetContext();
+
+ PerformStackCheck(context);
// Check if OSR points at the given {loop_depth} are armed by comparing it to
// the current {osr_level} loaded from the header of the BytecodeArray.
@@ -2381,7 +2390,6 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
TNode<Code> target = HeapConstant(callable.code());
- TNode<Context> context = GetContext();
CallStub(callable.descriptor(), target, context);
JumpBackward(relative_jump);
}
@@ -2425,7 +2433,7 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
TNode<Object> pattern = LoadConstantPoolEntryAtOperandIndex(0);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<Smi> flags =
SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2)));
TNode<Context> context = GetContext();
@@ -2434,7 +2442,7 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
result = constructor_assembler.EmitCreateRegExpLiteral(
- feedback_vector, slot_id, pattern, flags, context);
+ feedback_vector, slot, pattern, flags, context);
SetAccumulator(result.value());
Dispatch();
}
@@ -2445,7 +2453,7 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<Context> context = GetContext();
TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2);
@@ -2461,7 +2469,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
{
ConstructorBuiltinsAssembler constructor_assembler(state());
TNode<JSArray> result = constructor_assembler.EmitCreateShallowArrayLiteral(
- CAST(feedback_vector), slot_id, context, &call_runtime,
+ CAST(feedback_vector), slot, context, &call_runtime,
TRACK_ALLOCATION_SITE);
SetAccumulator(result);
Dispatch();
@@ -2476,7 +2484,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
TNode<Object> constant_elements = LoadConstantPoolEntryAtOperandIndex(0);
TNode<Object> result =
CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
- SmiTag(Signed(slot_id)), constant_elements, flags);
+ slot, constant_elements, flags);
SetAccumulator(result);
Dispatch();
}
@@ -2487,7 +2495,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
// Creates an empty JSArray literal for literal index <literal_idx>.
IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> slot_id = BytecodeOperandIdx(0);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(0);
TNode<Context> context = GetContext();
Label no_feedback(this, Label::kDeferred), end(this);
@@ -2496,7 +2504,7 @@ IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
result = constructor_assembler.EmitCreateEmptyArrayLiteral(
- CAST(maybe_feedback_vector), slot_id, context);
+ CAST(maybe_feedback_vector), slot, context);
Goto(&end);
BIND(&no_feedback);
@@ -2534,7 +2542,7 @@ IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) {
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2);
Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
@@ -2552,7 +2560,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
TNode<HeapObject> result =
constructor_assembler.EmitCreateShallowObjectLiteral(
- CAST(feedback_vector), slot_id, &if_not_fast_clone);
+ CAST(feedback_vector), slot, &if_not_fast_clone);
SetAccumulator(result);
Dispatch();
}
@@ -2569,9 +2577,9 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
bytecode_flags);
TNode<Smi> flags = SmiTag(Signed(flags_raw));
- TNode<Object> result = CallRuntime(Runtime::kCreateObjectLiteral, context,
- feedback_vector, SmiTag(Signed(slot_id)),
- object_boilerplate_description, flags);
+ TNode<Object> result =
+ CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
+ slot, object_boilerplate_description, flags);
SetAccumulator(result);
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
@@ -2600,14 +2608,13 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
TNode<UintPtrT> raw_flags =
DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(bytecode_flags);
TNode<Smi> smi_flags = SmiTag(Signed(raw_flags));
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(2);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
var_result = CallBuiltin(Builtins::kCloneObjectIC, context, source, smi_flags,
- smi_slot, maybe_feedback_vector);
+ slot, maybe_feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -2836,15 +2843,6 @@ IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
Dispatch();
}
-// StackCheck
-//
-// Performs a stack guard check.
-IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
- TNode<Context> context = GetContext();
- PerformStackCheck(context);
- Dispatch();
-}
-
// SetPendingMessage
//
// Sets the pending message to the value in the accumulator, and returns the
@@ -2852,7 +2850,8 @@ IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) {
TNode<ExternalReference> pending_message = ExternalConstant(
ExternalReference::address_of_pending_message_obj(isolate()));
- TNode<HeapObject> previous_message = Load<HeapObject>(pending_message);
+ TNode<HeapObject> previous_message =
+ UncheckedCast<HeapObject>(LoadFullTagged(pending_message));
TNode<Object> new_message = GetAccumulator();
StoreFullTaggedNoWriteBarrier(pending_message, new_message);
SetAccumulator(previous_message);
@@ -3009,7 +3008,7 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
// map of the |receiver| if it has a usable enum cache or a fixed array
// with the keys to enumerate in the accumulator.
IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
- TNode<HeapObject> receiver = CAST(LoadRegisterAtOperandIndex(0));
+ TNode<JSReceiver> receiver = CAST(LoadRegisterAtOperandIndex(0));
TNode<Context> context = GetContext();
Label if_empty(this), if_runtime(this, Label::kDeferred);
@@ -3188,14 +3187,12 @@ IGNITION_HANDLER(GetIterator, InterpreterAssembler) {
TNode<Object> receiver = LoadRegisterAtOperandIndex(0);
TNode<Context> context = GetContext();
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<IntPtrT> load_feedback_slot = Signed(BytecodeOperandIdx(1));
- TNode<IntPtrT> call_feedback_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> load_slot_smi = SmiTag(load_feedback_slot);
- TNode<Smi> call_slot_smi = SmiTag(call_feedback_slot);
+ TNode<TaggedIndex> load_slot = BytecodeOperandIdxTaggedIndex(1);
+ TNode<TaggedIndex> call_slot = BytecodeOperandIdxTaggedIndex(2);
TNode<Object> iterator =
CallBuiltin(Builtins::kGetIteratorWithFeedback, context, receiver,
- load_slot_smi, call_slot_smi, feedback_vector);
+ load_slot, call_slot, feedback_vector);
SetAccumulator(iterator);
Dispatch();
}
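Most hunks in this file are one refactor repeated: the feedback-slot operand used to be decoded into a raw word plus a SmiTag'd copy at each call site, and is now decoded once into a TaggedIndex via BytecodeOperandIdxTaggedIndex and handed straight to the IC builtins. A toy model of that shape, with plain integers standing in for the CSA node types and tag layouts:

#include <cstdint>

// Before: each handler kept two representations of the same slot index.
struct OldSlotPair { intptr_t raw; intptr_t smi; };
OldSlotPair DecodeSlotOld(uint32_t operand) {
  intptr_t raw = static_cast<intptr_t>(operand);
  return {raw, raw << 1};  // stand-in for SmiTag(raw)
}

// After: the operand is decoded once into a single tagged-index-style value
// that every builtin call site accepts as-is.
intptr_t DecodeSlotNew(uint32_t operand) {
  return static_cast<intptr_t>(operand) << 1;  // stand-in for IntPtrToTaggedIndex
}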
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index a7afb8263e..54f4a3caa3 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -12,6 +12,7 @@
#include "src/ast/scopes.h"
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/init/bootstrapper.h"
#include "src/init/setup-isolate.h"
#include "src/interpreter/bytecode-generator.h"
@@ -40,12 +41,20 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
Status ExecuteJobImpl() final;
Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
Isolate* isolate) final;
+ Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
+ OffThreadIsolate* isolate) final;
private:
BytecodeGenerator* generator() { return &generator_; }
- void CheckAndPrintBytecodeMismatch(Isolate* isolate, Handle<Script> script,
+ template <typename LocalIsolate>
+ void CheckAndPrintBytecodeMismatch(LocalIsolate* isolate,
+ Handle<Script> script,
Handle<BytecodeArray> bytecode);
+ template <typename LocalIsolate>
+ Status DoFinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
+ LocalIsolate* isolate);
+
Zone zone_;
UnoptimizedCompilationInfo compilation_info_;
BytecodeGenerator generator_;
@@ -104,10 +113,6 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
kEntriesPerOperandScale;
}
-int Interpreter::InterruptBudget() {
- return FLAG_interrupt_budget;
-}
-
namespace {
void MaybePrintAst(ParseInfo* parse_info,
@@ -171,11 +176,13 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
}
#ifdef DEBUG
+template <typename LocalIsolate>
void InterpreterCompilationJob::CheckAndPrintBytecodeMismatch(
- Isolate* isolate, Handle<Script> script, Handle<BytecodeArray> bytecode) {
- int first_mismatch = generator()->CheckBytecodeMatches(bytecode);
+ LocalIsolate* isolate, Handle<Script> script,
+ Handle<BytecodeArray> bytecode) {
+ int first_mismatch = generator()->CheckBytecodeMatches(*bytecode);
if (first_mismatch >= 0) {
- parse_info()->ast_value_factory()->Internalize(isolate->factory());
+ parse_info()->ast_value_factory()->Internalize(isolate);
DeclarationScope::AllocateScopeInfos(parse_info(), isolate);
Handle<BytecodeArray> new_bytecode =
@@ -184,8 +191,7 @@ void InterpreterCompilationJob::CheckAndPrintBytecodeMismatch(
std::cerr << "Bytecode mismatch";
#ifdef OBJECT_PRINT
std::cerr << " found for function: ";
- Handle<String> name =
- parse_info()->function_name()->string().get<Factory>();
+ Handle<String> name = parse_info()->function_name()->string();
if (name->length() == 0) {
std::cerr << "anonymous";
} else {
@@ -214,7 +220,22 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
RuntimeCallCounterId::kCompileIgnitionFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileIgnitionFinalization");
+ return DoFinalizeJobImpl(shared_info, isolate);
+}
+
+InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
+ Handle<SharedFunctionInfo> shared_info, OffThreadIsolate* isolate) {
+ RuntimeCallTimerScope runtimeTimerScope(
+ parse_info()->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileBackgroundIgnitionFinalization);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileIgnitionFinalization");
+ return DoFinalizeJobImpl(shared_info, isolate);
+}
+template <typename LocalIsolate>
+InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl(
+ Handle<SharedFunctionInfo> shared_info, LocalIsolate* isolate) {
Handle<BytecodeArray> bytecodes = compilation_info_.bytecode_array();
if (bytecodes.is_null()) {
bytecodes = generator()->FinalizeBytecode(
@@ -265,7 +286,7 @@ Interpreter::NewSourcePositionCollectionJob(
auto job = std::make_unique<InterpreterCompilationJob>(parse_info, literal,
allocator, nullptr);
job->compilation_info()->SetBytecodeArray(existing_bytecode);
- return std::unique_ptr<UnoptimizedCompilationJob> { static_cast<UnoptimizedCompilationJob*>(job.release()) };
+ return job;
}
void Interpreter::ForEachBytecode(
@@ -361,9 +382,7 @@ Local<v8::Object> Interpreter::GetDispatchCountersObject() {
if (counter > 0) {
std::string to_name = Bytecodes::ToString(to_bytecode);
Local<v8::String> to_name_object =
- v8::String::NewFromUtf8(isolate, to_name.c_str(),
- NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8(isolate, to_name.c_str()).ToLocalChecked();
Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
CHECK(counters_row
->DefineOwnProperty(context, to_name_object, counter_object)
@@ -373,9 +392,7 @@ Local<v8::Object> Interpreter::GetDispatchCountersObject() {
std::string from_name = Bytecodes::ToString(from_bytecode);
Local<v8::String> from_name_object =
- v8::String::NewFromUtf8(isolate, from_name.c_str(),
- NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8(isolate, from_name.c_str()).ToLocalChecked();
CHECK(
counters_map->DefineOwnProperty(context, from_name_object, counters_row)
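The FinalizeJobImpl split above follows a small, reusable pattern: the compilation-job interface still demands one concrete override per isolate type, and both overrides forward to a single private function template so the finalization body is written once. A stand-alone sketch with invented names:

struct MainIsolateSketch {};
struct OffThreadIsolateSketch {};

class JobSketch {
 public:
  // Two concrete entry points, as a non-templated interface requires...
  int Finalize(MainIsolateSketch* isolate) { return DoFinalize(isolate); }
  int Finalize(OffThreadIsolateSketch* isolate) { return DoFinalize(isolate); }

 private:
  // ...both funneling into one templated implementation.
  template <typename LocalIsolate>
  int DoFinalize(LocalIsolate* isolate) {
    (void)isolate;  // shared finalization work would go here
    return 0;
  }
};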
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index d85b872639..3ef28fdfbf 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -38,9 +38,6 @@ class Interpreter {
explicit Interpreter(Isolate* isolate);
virtual ~Interpreter() = default;
- // Returns the interrupt budget which should be used for the profiler counter.
- V8_EXPORT_PRIVATE static int InterruptBudget();
-
// Creates a compilation job which will generate bytecode for |literal|.
// Additionally, if |eager_inner_literals| is not null, adds any eagerly
// compilable inner FunctionLiterals to this list.