summaryrefslogtreecommitdiff
path: root/deps/v8/src/interpreter
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2019-03-12 09:01:49 +0100
committerMichaël Zasso <targos@protonmail.com>2019-03-14 18:49:21 +0100
commit7b48713334469818661fe276cf571de9c7899f2d (patch)
tree4dbda49ac88db76ce09dc330a0cb587e68e139ba /deps/v8/src/interpreter
parent8549ac09b256666cf5275224ec58fab9939ff32e (diff)
downloadnode-new-7b48713334469818661fe276cf571de9c7899f2d.tar.gz
deps: update V8 to 7.3.492.25
PR-URL: https://github.com/nodejs/node/pull/25852 Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com> Reviewed-By: Matteo Collina <matteo.collina@gmail.com> Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/interpreter')
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc10
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h7
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc14
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h11
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.cc7
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc1613
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h144
-rw-r--r--deps/v8/src/interpreter/bytecode-register.cc12
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h2
-rw-r--r--deps/v8/src/interpreter/bytecodes.h13
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc12
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h21
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc78
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h9
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc418
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc56
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h45
-rw-r--r--deps/v8/src/interpreter/interpreter.cc102
-rw-r--r--deps/v8/src/interpreter/interpreter.h19
22 files changed, 1759 insertions, 841 deletions
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index 3ec2cc595b..e455cfd065 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -197,11 +197,11 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
-Object* BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
+Object BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
return bytecode_array()->constant_pool()->get(index);
}
-Object* BytecodeArrayAccessor::GetConstantForIndexOperand(
+Object BytecodeArrayAccessor::GetConstantForIndexOperand(
int operand_index) const {
return GetConstantAtIndex(GetIndexOperand(operand_index));
}
@@ -215,7 +215,7 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
}
return GetAbsoluteOffset(relative_offset);
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
- Smi* smi = Smi::cast(GetConstantForIndexOperand(0));
+ Smi smi = Smi::cast(GetConstantForIndexOperand(0));
return GetAbsoluteOffset(smi->value());
} else {
UNREACHABLE();
@@ -285,7 +285,7 @@ JumpTableTargetOffsets::iterator::iterator(
int case_value, int table_offset, int table_end,
const BytecodeArrayAccessor* accessor)
: accessor_(accessor),
- current_(Smi::kZero),
+ current_(Smi::zero()),
index_(case_value),
table_offset_(table_offset),
table_end_(table_end) {
@@ -317,7 +317,7 @@ bool JumpTableTargetOffsets::iterator::operator!=(
void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
if (table_offset_ >= table_end_) return;
- Object* current = accessor_->GetConstantAtIndex(table_offset_);
+ Object current = accessor_->GetConstantAtIndex(table_offset_);
while (!current->IsSmi()) {
DCHECK(current->IsTheHole());
++table_offset_;
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index e36eed8ade..db33b6f6ac 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -10,6 +10,7 @@
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects.h"
+#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -42,7 +43,7 @@ class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
void UpdateAndAdvanceToValid();
const BytecodeArrayAccessor* accessor_;
- Smi* current_;
+ Smi current_;
int index_;
int table_offset_;
int table_end_;
@@ -92,8 +93,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
uint32_t GetNativeContextIndexOperand(int operand_index) const;
- Object* GetConstantAtIndex(int offset) const;
- Object* GetConstantForIndexOperand(int operand_index) const;
+ Object GetConstantAtIndex(int offset) const;
+ Object GetConstantForIndexOperand(int operand_index) const;
// Returns the absolute offset of the branch target at the current bytecode.
// It is an error to call this method if the bytecode is not for a jump or
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index a7c95aae7b..2183068576 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -13,6 +13,7 @@
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
@@ -393,7 +394,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
}
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperationSmiLiteral(
- Token::Value op, Smi* literal, int feedback_slot) {
+ Token::Value op, Smi literal, int feedback_slot) {
switch (op) {
case Token::Value::ADD:
OutputAddSmi(literal->value(), feedback_slot);
@@ -571,8 +572,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadConstantPoolEntry(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
- v8::internal::Smi* smi) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Smi smi) {
int32_t raw_smi = smi->value();
if (raw_smi == 0) {
OutputLdaZero();
@@ -994,10 +994,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayFromIterable() {
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
- size_t constant_properties_entry, int literal_index, int flags,
- Register output) {
- OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags,
- output);
+ size_t constant_properties_entry, int literal_index, int flags) {
+ OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags);
return *this;
}
@@ -1344,6 +1342,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(
BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
Register context) {
+ // TODO(leszeks): Do we need to start a new basic block here? Could we simply
+ // get the current bytecode offset from the array writer instead?
BytecodeLabel try_begin;
Bind(&try_begin);
handler_table_builder()->SetTryRegionStart(handler_id, try_begin.offset());
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index bf5909d8e4..d362ffffa4 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -70,7 +70,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadConstantPoolEntry(size_t entry);
- BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
+ BytecodeArrayBuilder& LoadLiteral(Smi value);
BytecodeArrayBuilder& LoadLiteral(double value);
BytecodeArrayBuilder& LoadLiteral(const AstRawString* raw_string);
BytecodeArrayBuilder& LoadLiteral(const Scope* scope);
@@ -246,8 +246,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& CreateEmptyArrayLiteral(int literal_index);
BytecodeArrayBuilder& CreateArrayFromIterable();
BytecodeArrayBuilder& CreateObjectLiteral(size_t constant_properties_entry,
- int literal_index, int flags,
- Register output);
+ int literal_index, int flags);
BytecodeArrayBuilder& CreateEmptyObjectLiteral();
BytecodeArrayBuilder& CloneObject(Register source, int flags,
int feedback_slot);
@@ -349,7 +348,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
int feedback_slot);
// Same as above, but lhs in the accumulator and rhs in |literal|.
BytecodeArrayBuilder& BinaryOperationSmiLiteral(Token::Value binop,
- Smi* literal,
+ Smi literal,
int feedback_slot);
// Unary and Count Operators (value stored in accumulator).
@@ -408,7 +407,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& JumpIfTrue(ToBooleanMode mode, BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(ToBooleanMode mode, BytecodeLabel* label);
- BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfJSReceiver(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNotNull(BytecodeLabel* label);
@@ -522,6 +520,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
}
bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
+ bool RemainderOfBlockIsDead() const {
+ return bytecode_array_writer_.RemainderOfBlockIsDead();
+ }
// Returns the raw operand value for the given register or register list.
uint32_t GetInputRegisterOperand(Register reg);
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 3dbd009879..a563ff4fc3 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -42,7 +42,7 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
DCHECK_EQ(0, unbound_jumps_);
int bytecode_size = static_cast<int>(bytecodes()->size());
- int frame_size = register_count * kPointerSize;
+ int frame_size = register_count * kSystemPointerSize;
Handle<FixedArray> constant_pool =
constant_array_builder()->ToFixedArray(isolate);
Handle<ByteArray> source_position_table =
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 9700d2c1cf..e6db2fce22 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -45,6 +45,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
int parameter_count,
Handle<ByteArray> handler_table);
+ bool RemainderOfBlockIsDead() const { return exit_seen_in_block_; }
+
private:
// Maximum sized packed bytecode is comprised of a prefix bytecode,
// plus the actual bytecode, plus the maximum number of operands times
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 57ff5cd850..59dcf54132 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -7,7 +7,6 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/builtins/builtins-constructor.h"
-#include "src/code-stubs.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -31,10 +30,10 @@ uint8_t CreateObjectLiteralFlags::Encode(int runtime_flags,
}
// static
-uint8_t CreateClosureFlags::Encode(bool pretenure, bool is_function_scope) {
+uint8_t CreateClosureFlags::Encode(bool pretenure, bool is_function_scope,
+ bool might_always_opt) {
uint8_t result = PretenuredBit::encode(pretenure);
- if (!FLAG_always_opt && !FLAG_prepare_always_opt &&
- pretenure == NOT_TENURED && is_function_scope) {
+ if (!might_always_opt && pretenure == NOT_TENURED && is_function_scope) {
result |= FastNewClosureBit::encode(true);
}
return result;
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index 0e0ae256ed..6f05770192 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -43,7 +43,8 @@ class CreateClosureFlags {
class PretenuredBit : public BitField8<bool, 0, 1> {};
class FastNewClosureBit : public BitField8<bool, PretenuredBit::kNext, 1> {};
- static uint8_t Encode(bool pretenure, bool is_function_scope);
+ static uint8_t Encode(bool pretenure, bool is_function_scope,
+ bool might_always_opt);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CreateClosureFlags);
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 48682439fb..00b1916c92 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -8,7 +8,6 @@
#include "src/ast/ast-source-ranges.h"
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
-#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-jump-table.h"
@@ -18,6 +17,7 @@
#include "src/objects-inl.h"
#include "src/objects/debug-objects.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/smi.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
#include "src/unoptimized-compilation-info.h"
@@ -97,7 +97,8 @@ class BytecodeGenerator::ContextScope {
class BytecodeGenerator::ControlScope {
public:
explicit ControlScope(BytecodeGenerator* generator)
- : generator_(generator), outer_(generator->execution_control()),
+ : generator_(generator),
+ outer_(generator->execution_control()),
context_(generator->execution_context()) {
generator_->set_execution_control(this);
}
@@ -158,6 +159,16 @@ class BytecodeGenerator::ControlScope {
// paths going through the finally-block to dispatch after leaving the block.
class BytecodeGenerator::ControlScope::DeferredCommands final {
public:
+ // Fixed value tokens for paths we know we need.
+ // Fallthrough is set to -1 to make it the fallthrough case of the jump table,
+ // where the remaining cases start at 0.
+ static const int kFallthroughToken = -1;
+ // TODO(leszeks): Rethrow being 0 makes it use up a valuable LdaZero, which
+ // means that other commands (such as break or return) have to use LdaSmi.
+ // This can very slightly bloat bytecode, so perhaps token values should all
+ // be shifted down by 1.
+ static const int kRethrowToken = 0;
+
DeferredCommands(BytecodeGenerator* generator, Register token_register,
Register result_register)
: generator_(generator),
@@ -165,8 +176,13 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
token_register_(token_register),
result_register_(result_register),
return_token_(-1),
- async_return_token_(-1),
- rethrow_token_(-1) {}
+ async_return_token_(-1) {
+ // There's always a rethrow path.
+ // TODO(leszeks): We could decouple deferred_ index and token to allow us
+ // to still push this lazily.
+ STATIC_ASSERT(kRethrowToken == 0);
+ deferred_.push_back({CMD_RETHROW, nullptr, kRethrowToken});
+ }
// One recorded control-flow command.
struct Entry {
@@ -211,7 +227,7 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
// Records the dispatch token to be used to identify the implicit fall-through
// path at the end of a try-block into the corresponding finally-block.
void RecordFallThroughPath() {
- builder()->LoadLiteral(Smi::FromInt(-1));
+ builder()->LoadLiteral(Smi::FromInt(kFallthroughToken));
builder()->StoreAccumulatorInRegister(token_register_);
// Since we're not saving the accumulator in the result register, shove a
// harmless value there instead so that it is still considered "killed" in
@@ -277,7 +293,7 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
case CMD_ASYNC_RETURN:
return GetAsyncReturnToken();
case CMD_RETHROW:
- return GetRethrowToken();
+ return kRethrowToken;
default:
// TODO(leszeks): We could also search for entries with the same
// command and statement.
@@ -299,13 +315,6 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
return async_return_token_;
}
- int GetRethrowToken() {
- if (rethrow_token_ == -1) {
- rethrow_token_ = GetNewTokenForCommand(CMD_RETHROW, nullptr);
- }
- return rethrow_token_;
- }
-
int GetNewTokenForCommand(Command command, Statement* statement) {
int token = static_cast<int>(deferred_.size());
deferred_.push_back({command, statement, token});
@@ -320,7 +329,6 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
// Tokens for commands that don't need a statement.
int return_token_;
int async_return_token_;
- int rethrow_token_;
};
// Scoped class for dealing with control flow reaching the function level.
@@ -552,6 +560,8 @@ class BytecodeGenerator::RegisterAllocationScope final {
outer_next_register_index_);
}
+ BytecodeGenerator* generator() const { return generator_; }
+
private:
BytecodeGenerator* generator_;
int outer_next_register_index_;
@@ -559,21 +569,47 @@ class BytecodeGenerator::RegisterAllocationScope final {
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
};
+class BytecodeGenerator::AccumulatorPreservingScope final {
+ public:
+ explicit AccumulatorPreservingScope(BytecodeGenerator* generator,
+ AccumulatorPreservingMode mode)
+ : generator_(generator) {
+ if (mode == AccumulatorPreservingMode::kPreserve) {
+ saved_accumulator_register_ =
+ generator_->register_allocator()->NewRegister();
+ generator_->builder()->StoreAccumulatorInRegister(
+ saved_accumulator_register_);
+ }
+ }
+
+ ~AccumulatorPreservingScope() {
+ if (saved_accumulator_register_.is_valid()) {
+ generator_->builder()->LoadAccumulatorWithRegister(
+ saved_accumulator_register_);
+ }
+ }
+
+ private:
+ BytecodeGenerator* generator_;
+ Register saved_accumulator_register_;
+
+ DISALLOW_COPY_AND_ASSIGN(AccumulatorPreservingScope);
+};
+
// Scoped base class for determining how the result of an expression will be
// used.
class BytecodeGenerator::ExpressionResultScope {
public:
ExpressionResultScope(BytecodeGenerator* generator, Expression::Context kind)
- : generator_(generator),
- outer_(generator->execution_result()),
+ : outer_(generator->execution_result()),
allocator_(generator),
kind_(kind),
type_hint_(TypeHint::kAny) {
- generator_->set_execution_result(this);
+ generator->set_execution_result(this);
}
- virtual ~ExpressionResultScope() {
- generator_->set_execution_result(outer_);
+ ~ExpressionResultScope() {
+ allocator_.generator()->set_execution_result(outer_);
}
bool IsEffect() const { return kind_ == Expression::kEffect; }
@@ -599,7 +635,6 @@ class BytecodeGenerator::ExpressionResultScope {
TypeHint type_hint() const { return type_hint_; }
private:
- BytecodeGenerator* generator_;
ExpressionResultScope* outer_;
RegisterAllocationScope allocator_;
Expression::Context kind_;
@@ -639,9 +674,7 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
// Used when code special cases for TestResultScope and consumes any
// possible value by testing and jumping to a then/else label.
- void SetResultConsumedByTest() {
- result_consumed_by_test_ = true;
- }
+ void SetResultConsumedByTest() { result_consumed_by_test_ = true; }
bool result_consumed_by_test() { return result_consumed_by_test_; }
// Inverts the control flow of the operation, swapping the then and else
@@ -730,7 +763,7 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
data->set(array_index++, *declaration.name->string());
data->set(array_index++, Smi::FromInt(declaration.slot.ToInt()));
- Object* undefined_or_literal_slot;
+ Object undefined_or_literal_slot;
if (declaration.literal_slot.IsInvalid()) {
undefined_or_literal_slot = ReadOnlyRoots(isolate).undefined_value();
} else {
@@ -871,7 +904,7 @@ class BytecodeGenerator::IteratorRecord final {
static bool IsInEagerLiterals(
FunctionLiteral* literal,
- const ZoneVector<FunctionLiteral*>& eager_literals) {
+ const std::vector<FunctionLiteral*>& eager_literals) {
for (FunctionLiteral* eager_literal : eager_literals) {
if (literal == eager_literal) return true;
}
@@ -883,7 +916,7 @@ static bool IsInEagerLiterals(
BytecodeGenerator::BytecodeGenerator(
UnoptimizedCompilationInfo* info,
const AstStringConstants* ast_string_constants,
- ZoneVector<FunctionLiteral*>* eager_inner_literals)
+ std::vector<FunctionLiteral*>* eager_inner_literals)
: zone_(info->zone()),
builder_(zone(), info->num_parameters_including_this(),
info->scope()->num_stack_slots(), info->feedback_vector_spec(),
@@ -922,6 +955,12 @@ BytecodeGenerator::BytecodeGenerator(
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
Isolate* isolate, Handle<Script> script) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+#ifdef DEBUG
+ // Unoptimized compilation should be context-independent. Verify that we don't
+ // access the native context by nulling it out during finalization.
+ SaveContext save(isolate);
+ isolate->set_context(Context());
+#endif
AllocateDeferredConstants(isolate, script);
@@ -1075,7 +1114,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Create a generator object if necessary and initialize the
// {.generator_object} variable.
- if (info()->literal()->CanSuspend()) {
+ if (IsResumableFunction(info()->literal()->kind())) {
BuildGeneratorObjectVariableInitialization();
}
@@ -1104,9 +1143,9 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// The derived constructor case is handled in VisitCallSuper.
if (IsBaseConstructor(function_kind()) &&
- info()->literal()->requires_instance_fields_initializer()) {
- BuildInstanceFieldInitialization(Register::function_closure(),
- builder()->Receiver());
+ info()->literal()->requires_instance_members_initializer()) {
+ BuildInstanceMemberInitialization(Register::function_closure(),
+ builder()->Receiver());
}
// Visit statements in the function body.
@@ -1121,7 +1160,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
}
void BytecodeGenerator::AllocateTopLevelRegisters() {
- if (info()->literal()->CanSuspend()) {
+ if (IsResumableFunction(info()->literal()->kind())) {
// Either directly use generator_object_var or allocate a new register for
// the incoming generator object.
Variable* generator_object_var = closure_scope()->generator_object_var();
@@ -1181,7 +1220,7 @@ void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
}
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
- Variable* variable = decl->proxy()->var();
+ Variable* variable = decl->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
@@ -1210,7 +1249,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
}
break;
case VariableLocation::LOOKUP: {
- DCHECK_EQ(VariableMode::kVar, variable->mode());
+ DCHECK_EQ(VariableMode::kDynamic, variable->mode());
DCHECK(!variable->binding_needs_init());
Register name = register_allocator()->NewRegister();
@@ -1232,9 +1271,10 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
}
void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
- Variable* variable = decl->proxy()->var();
+ Variable* variable = decl->var();
DCHECK(variable->mode() == VariableMode::kLet ||
- variable->mode() == VariableMode::kVar);
+ variable->mode() == VariableMode::kVar ||
+ variable->mode() == VariableMode::kDynamic);
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
FeedbackSlot slot =
@@ -1247,13 +1287,13 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
}
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
- VisitForAccumulatorValue(decl->fun());
+ VisitFunctionLiteral(decl->fun());
BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
break;
}
case VariableLocation::CONTEXT: {
DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
- VisitForAccumulatorValue(decl->fun());
+ VisitFunctionLiteral(decl->fun());
builder()->StoreContextSlot(execution_context()->reg(), variable->index(),
0);
break;
@@ -1263,7 +1303,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
builder()
->LoadLiteral(variable->raw_name())
.StoreAccumulatorInRegister(args[0]);
- VisitForAccumulatorValue(decl->fun());
+ VisitFunctionLiteral(decl->fun());
builder()->StoreAccumulatorInRegister(args[1]).CallRuntime(
Runtime::kDeclareEvalFunction, args);
break;
@@ -1291,8 +1331,7 @@ void BytecodeGenerator::VisitModuleNamespaceImports() {
->LoadLiteral(Smi::FromInt(entry->module_request))
.StoreAccumulatorInRegister(module_request)
.CallRuntime(Runtime::kGetModuleNamespace, module_request);
- Variable* var = closure_scope()->LookupLocal(entry->local_name);
- DCHECK_NOT_NULL(var);
+ Variable* var = closure_scope()->LookupInModule(entry->local_name);
BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
}
}
@@ -1326,13 +1365,14 @@ void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
globals_builder_ = new (zone()) GlobalDeclarationsBuilder(zone());
}
-void BytecodeGenerator::VisitStatements(ZonePtrList<Statement>* statements) {
+void BytecodeGenerator::VisitStatements(
+ const ZonePtrList<Statement>* statements) {
for (int i = 0; i < statements->length(); i++) {
// Allocate an outer register allocations scope for the statement.
RegisterAllocationScope allocation_scope(this);
Statement* stmt = statements->at(i);
Visit(stmt);
- if (stmt->IsJump()) break;
+ if (builder()->RemainderOfBlockIsDead()) break;
}
}
@@ -1341,8 +1381,7 @@ void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
VisitForEffect(stmt->expression());
}
-void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
-}
+void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {}
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
ConditionalControlFlowBuilder conditional_builder(
@@ -1463,6 +1502,107 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
}
+template <typename TryBodyFunc, typename CatchBodyFunc>
+void BytecodeGenerator::BuildTryCatch(
+ TryBodyFunc try_body_func, CatchBodyFunc catch_body_func,
+ HandlerTable::CatchPrediction catch_prediction,
+ TryCatchStatement* stmt_for_coverage) {
+ TryCatchBuilder try_control_builder(
+ builder(),
+ stmt_for_coverage == nullptr ? nullptr : block_coverage_builder_,
+ stmt_for_coverage, catch_prediction);
+
+ // Preserve the context in a dedicated register, so that it can be restored
+ // when the handler is entered by the stack-unwinding machinery.
+ // TODO(mstarzinger): Be smarter about register allocation.
+ Register context = register_allocator()->NewRegister();
+ builder()->MoveRegister(Register::current_context(), context);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting 'throw' control commands.
+ try_control_builder.BeginTry(context);
+ {
+ ControlScopeForTryCatch scope(this, &try_control_builder);
+ try_body_func();
+ }
+ try_control_builder.EndTry();
+
+ catch_body_func(context);
+
+ try_control_builder.EndCatch();
+}
+
+template <typename TryBodyFunc, typename FinallyBodyFunc>
+void BytecodeGenerator::BuildTryFinally(
+ TryBodyFunc try_body_func, FinallyBodyFunc finally_body_func,
+ HandlerTable::CatchPrediction catch_prediction,
+ TryFinallyStatement* stmt_for_coverage) {
+ // We can't know whether the finally block will override ("catch") an
+ // exception thrown in the try block, so we just adopt the outer prediction.
+ TryFinallyBuilder try_control_builder(
+ builder(),
+ stmt_for_coverage == nullptr ? nullptr : block_coverage_builder_,
+ stmt_for_coverage, catch_prediction);
+
+ // We keep a record of all paths that enter the finally-block to be able to
+ // dispatch to the correct continuation point after the statements in the
+ // finally-block have been evaluated.
+ //
+ // The try-finally construct can enter the finally-block in three ways:
+ // 1. By exiting the try-block normally, falling through at the end.
+ // 2. By exiting the try-block with a function-local control flow transfer
+ // (i.e. through break/continue/return statements).
+ // 3. By exiting the try-block with a thrown exception.
+ //
+ // The result register semantics depend on how the block was entered:
+ // - ReturnStatement: It represents the return value being returned.
+ // - ThrowStatement: It represents the exception being thrown.
+ // - BreakStatement/ContinueStatement: Undefined and not used.
+ // - Falling through into finally-block: Undefined and not used.
+ Register token = register_allocator()->NewRegister();
+ Register result = register_allocator()->NewRegister();
+ ControlScope::DeferredCommands commands(this, token, result);
+
+ // Preserve the context in a dedicated register, so that it can be restored
+ // when the handler is entered by the stack-unwinding machinery.
+ // TODO(mstarzinger): Be smarter about register allocation.
+ Register context = register_allocator()->NewRegister();
+ builder()->MoveRegister(Register::current_context(), context);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting all control commands.
+ try_control_builder.BeginTry(context);
+ {
+ ControlScopeForTryFinally scope(this, &try_control_builder, &commands);
+ try_body_func();
+ }
+ try_control_builder.EndTry();
+
+ // Record fall-through and exception cases.
+ commands.RecordFallThroughPath();
+ try_control_builder.LeaveTry();
+ try_control_builder.BeginHandler();
+ commands.RecordHandlerReThrowPath();
+
+ // Pending message object is saved on entry.
+ try_control_builder.BeginFinally();
+ Register message = context; // Reuse register.
+
+ // Clear message object as we enter the finally block.
+ builder()->LoadTheHole().SetPendingMessage().StoreAccumulatorInRegister(
+ message);
+
+ // Evaluate the finally-block.
+ finally_body_func(token);
+ try_control_builder.EndFinally();
+
+ // Pending message object is restored on exit.
+ builder()->LoadAccumulatorWithRegister(message).SetPendingMessage();
+
+ // Dynamic dispatch after the finally-block.
+ commands.ApplyDeferredCommands();
+}
+
void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop_builder) {
loop_builder->LoopBody();
@@ -1540,76 +1680,6 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
loop_builder.JumpToHeader(loop_depth_);
}
-void BytecodeGenerator::VisitForInAssignment(Expression* expr) {
- DCHECK(expr->IsValidReferenceExpression());
-
- // Evaluate assignment starting with the value to be stored in the
- // accumulator.
- Property* property = expr->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
- switch (assign_type) {
- case VARIABLE: {
- VariableProxy* proxy = expr->AsVariableProxy();
- BuildVariableAssignment(proxy->var(), Token::ASSIGN,
- proxy->hole_check_mode());
- break;
- }
- case NAMED_PROPERTY: {
- RegisterAllocationScope register_scope(this);
- Register value = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(value);
- Register object = VisitForRegisterValue(property->obj());
- const AstRawString* name =
- property->key()->AsLiteral()->AsRawPropertyName();
- builder()->LoadAccumulatorWithRegister(value);
- FeedbackSlot slot = GetCachedStoreICSlot(property->obj(), name);
- builder()->StoreNamedProperty(object, name, feedback_index(slot),
- language_mode());
- builder()->LoadAccumulatorWithRegister(value);
- break;
- }
- case KEYED_PROPERTY: {
- RegisterAllocationScope register_scope(this);
- Register value = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(value);
- Register object = VisitForRegisterValue(property->obj());
- Register key = VisitForRegisterValue(property->key());
- builder()->LoadAccumulatorWithRegister(value);
- FeedbackSlot slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
- builder()->StoreKeyedProperty(object, key, feedback_index(slot),
- language_mode());
- builder()->LoadAccumulatorWithRegister(value);
- break;
- }
- case NAMED_SUPER_PROPERTY: {
- RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(4);
- builder()->StoreAccumulatorInRegister(args[3]);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), args[0]);
- VisitForRegisterValue(super_property->home_object(), args[1]);
- builder()
- ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
- .StoreAccumulatorInRegister(args[2])
- .CallRuntime(StoreToSuperRuntimeId(), args);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(4);
- builder()->StoreAccumulatorInRegister(args[3]);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), args[0]);
- VisitForRegisterValue(super_property->home_object(), args[1]);
- VisitForRegisterValue(property->key(), args[2]);
- builder()->CallRuntime(StoreKeyedToSuperRuntimeId(), args);
- break;
- }
- }
-}
-
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
if (stmt->subject()->IsNullLiteral() ||
stmt->subject()->IsUndefinedLiteral()) {
@@ -1636,7 +1706,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Set up loop counter
Register index = register_allocator()->NewRegister();
- builder()->LoadLiteral(Smi::kZero);
+ builder()->LoadLiteral(Smi::zero());
builder()->StoreAccumulatorInRegister(index);
// The loop
@@ -1649,7 +1719,18 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->ForInNext(receiver, index, triple.Truncate(2),
feedback_index(slot));
loop_builder.ContinueIfUndefined();
- VisitForInAssignment(stmt->each());
+
+ // Assign accumulator value to the 'each' target.
+ {
+ EffectResultScope scope(this);
+ // Make sure to preserve the accumulator across the PrepareAssignmentLhs
+ // call.
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(
+ stmt->each(), AccumulatorPreservingMode::kPreserve);
+ builder()->SetExpressionPosition(stmt->each());
+ BuildAssignment(lhs_data, Token::ASSIGN, LookupHoistingMode::kNormal);
+ }
+
VisitIterationBody(stmt, &loop_builder);
builder()->ForInStep(index);
builder()->StoreAccumulatorInRegister(index);
@@ -1659,22 +1740,94 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->Bind(&subject_undefined_label);
}
+// Desugar a for-of statement into an application of the iteration protocol.
+//
+// for (EACH of SUBJECT) BODY
+//
+// becomes
+//
+// iterator = %GetIterator(SUBJECT)
+// try {
+//
+// loop {
+// // Make sure we are considered 'done' if .next(), .done or .value fail.
+// done = true
+// value = iterator.next()
+// if (value.done) break;
+// value = value.value
+// done = false
+//
+// EACH = value
+// BODY
+// }
+// done = true
+//
+// } catch(e) {
+// iteration_continuation = RETHROW
+// } finally {
+// %FinalizeIteration(iterator, done, iteration_continuation)
+// }
void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
- LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
+ EffectResultScope effect_scope(this);
- builder()->SetExpressionAsStatementPosition(stmt->assign_iterator());
- VisitForEffect(stmt->assign_iterator());
- VisitForEffect(stmt->assign_next());
+ builder()->SetExpressionAsStatementPosition(stmt->subject());
+ VisitForAccumulatorValue(stmt->subject());
- loop_builder.LoopHeader();
- builder()->SetExpressionAsStatementPosition(stmt->next_result());
- VisitForEffect(stmt->next_result());
- TypeHint type_hint = VisitForAccumulatorValue(stmt->result_done());
- loop_builder.BreakIfTrue(ToBooleanModeFromTypeHint(type_hint));
+ // Store the iterator in a dedicated register so that it can be closed on
+ // exit, and the 'done' value in a dedicated register so that it can be
+ // changed and accessed independently of the iteration result.
+ IteratorRecord iterator = BuildGetIteratorRecord(stmt->type());
+ Register done = register_allocator()->NewRegister();
+ builder()->LoadFalse();
+ builder()->StoreAccumulatorInRegister(done);
+
+ BuildTryFinally(
+ // Try block.
+ [&]() {
+ Register next_result = register_allocator()->NewRegister();
+
+ LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
+ loop_builder.LoopHeader();
+
+ builder()->LoadTrue().StoreAccumulatorInRegister(done);
+
+ // Call the iterator's .next() method. Break from the loop if the `done`
+ // property is truthy, otherwise load the value from the iterator result
+ // and append the argument.
+ builder()->SetExpressionAsStatementPosition(stmt->each());
+ BuildIteratorNext(iterator, next_result);
+ builder()->LoadNamedProperty(
+ next_result, ast_string_constants()->done_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()));
+ loop_builder.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
- VisitForEffect(stmt->assign_each());
- VisitIterationBody(stmt, &loop_builder);
- loop_builder.JumpToHeader(loop_depth_);
+ builder()
+ // value = value.value
+ ->LoadNamedProperty(
+ next_result, ast_string_constants()->value_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()));
+ // done = false, before the assignment to each happens, so that done is
+ // false if the assignment throws.
+ builder()
+ ->StoreAccumulatorInRegister(next_result)
+ .LoadFalse()
+ .StoreAccumulatorInRegister(done);
+
+ // Assign to the 'each' target.
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(stmt->each());
+ builder()->LoadAccumulatorWithRegister(next_result);
+ BuildAssignment(lhs_data, Token::ASSIGN, LookupHoistingMode::kNormal);
+
+ VisitIterationBody(stmt, &loop_builder);
+
+ loop_builder.JumpToHeader(loop_depth_);
+ },
+ // Finally block.
+ [&](Register iteration_continuation_token) {
+ // Finish the iteration in the finally block.
+ BuildFinalizeIteration(iterator, done, iteration_continuation_token);
+ },
+ HandlerTable::UNCAUGHT);
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
@@ -1684,111 +1837,45 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
HandlerTable::CatchPrediction outer_catch_prediction = catch_prediction();
set_catch_prediction(stmt->GetCatchPrediction(outer_catch_prediction));
- TryCatchBuilder try_control_builder(builder(), block_coverage_builder_, stmt,
- catch_prediction());
-
- // Preserve the context in a dedicated register, so that it can be restored
- // when the handler is entered by the stack-unwinding machinery.
- // TODO(mstarzinger): Be smarter about register allocation.
- Register context = register_allocator()->NewRegister();
- builder()->MoveRegister(Register::current_context(), context);
-
- // Evaluate the try-block inside a control scope. This simulates a handler
- // that is intercepting 'throw' control commands.
- try_control_builder.BeginTry(context);
- {
- ControlScopeForTryCatch scope(this, &try_control_builder);
- Visit(stmt->try_block());
- set_catch_prediction(outer_catch_prediction);
- }
- try_control_builder.EndTry();
-
- if (stmt->scope()) {
- // Create a catch scope that binds the exception.
- BuildNewLocalCatchContext(stmt->scope());
- builder()->StoreAccumulatorInRegister(context);
- }
+ BuildTryCatch(
+ // Try body.
+ [&]() {
+ Visit(stmt->try_block());
+ set_catch_prediction(outer_catch_prediction);
+ },
+ // Catch body.
+ [&](Register context) {
+ if (stmt->scope()) {
+ // Create a catch scope that binds the exception.
+ BuildNewLocalCatchContext(stmt->scope());
+ builder()->StoreAccumulatorInRegister(context);
+ }
- // If requested, clear message object as we enter the catch block.
- if (stmt->ShouldClearPendingException(outer_catch_prediction)) {
- builder()->LoadTheHole().SetPendingMessage();
- }
+ // If requested, clear message object as we enter the catch block.
+ if (stmt->ShouldClearPendingException(outer_catch_prediction)) {
+ builder()->LoadTheHole().SetPendingMessage();
+ }
- // Load the catch context into the accumulator.
- builder()->LoadAccumulatorWithRegister(context);
+ // Load the catch context into the accumulator.
+ builder()->LoadAccumulatorWithRegister(context);
- // Evaluate the catch-block.
- if (stmt->scope()) {
- VisitInScope(stmt->catch_block(), stmt->scope());
- } else {
- VisitBlock(stmt->catch_block());
- }
- try_control_builder.EndCatch();
+ // Evaluate the catch-block.
+ if (stmt->scope()) {
+ VisitInScope(stmt->catch_block(), stmt->scope());
+ } else {
+ VisitBlock(stmt->catch_block());
+ }
+ },
+ catch_prediction(), stmt);
}
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- // We can't know whether the finally block will override ("catch") an
- // exception thrown in the try block, so we just adopt the outer prediction.
- TryFinallyBuilder try_control_builder(builder(), block_coverage_builder_,
- stmt, catch_prediction());
-
- // We keep a record of all paths that enter the finally-block to be able to
- // dispatch to the correct continuation point after the statements in the
- // finally-block have been evaluated.
- //
- // The try-finally construct can enter the finally-block in three ways:
- // 1. By exiting the try-block normally, falling through at the end.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (i.e. through break/continue/return statements).
- // 3. By exiting the try-block with a thrown exception.
- //
- // The result register semantics depend on how the block was entered:
- // - ReturnStatement: It represents the return value being returned.
- // - ThrowStatement: It represents the exception being thrown.
- // - BreakStatement/ContinueStatement: Undefined and not used.
- // - Falling through into finally-block: Undefined and not used.
- Register token = register_allocator()->NewRegister();
- Register result = register_allocator()->NewRegister();
- ControlScope::DeferredCommands commands(this, token, result);
-
- // Preserve the context in a dedicated register, so that it can be restored
- // when the handler is entered by the stack-unwinding machinery.
- // TODO(mstarzinger): Be smarter about register allocation.
- Register context = register_allocator()->NewRegister();
- builder()->MoveRegister(Register::current_context(), context);
-
- // Evaluate the try-block inside a control scope. This simulates a handler
- // that is intercepting all control commands.
- try_control_builder.BeginTry(context);
- {
- ControlScopeForTryFinally scope(this, &try_control_builder, &commands);
- Visit(stmt->try_block());
- }
- try_control_builder.EndTry();
-
- // Record fall-through and exception cases.
- commands.RecordFallThroughPath();
- try_control_builder.LeaveTry();
- try_control_builder.BeginHandler();
- commands.RecordHandlerReThrowPath();
-
- // Pending message object is saved on entry.
- try_control_builder.BeginFinally();
- Register message = context; // Reuse register.
-
- // Clear message object as we enter the finally block.
- builder()->LoadTheHole().SetPendingMessage().StoreAccumulatorInRegister(
- message);
-
- // Evaluate the finally-block.
- Visit(stmt->finally_block());
- try_control_builder.EndFinally();
-
- // Pending message object is restored on exit.
- builder()->LoadAccumulatorWithRegister(message).SetPendingMessage();
-
- // Dynamic dispatch after the finally-block.
- commands.ApplyDeferredCommands();
+ BuildTryFinally(
+ // Try block.
+ [&]() { Visit(stmt->try_block()); },
+ // Finally block.
+ [&](Register body_continuation_token) { Visit(stmt->finally_block()); },
+ catch_prediction(), stmt);
}
void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
@@ -1799,7 +1886,8 @@ void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
DCHECK(expr->scope()->outer_scope() == current_scope());
uint8_t flags = CreateClosureFlags::Encode(
- expr->pretenure(), closure_scope()->is_function_scope());
+ expr->pretenure(), closure_scope()->is_function_scope(),
+ info()->might_always_opt());
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
FeedbackSlot slot = GetCachedCreateClosureSlot(expr);
builder()->CreateClosure(entry, feedback_index(slot), flags);
@@ -1819,14 +1907,11 @@ bool BytecodeGenerator::ShouldOptimizeAsOneShot() const {
if (loop_depth_ > 0) return false;
- // A non-top-level iife is likely to be executed multiple times and so
- // shouldn`t be optimized as one-shot.
- bool is_toplevel_iife = info()->literal()->is_iife() &&
- current_scope()->outer_scope()->is_script_scope();
- return info()->literal()->is_toplevel() || is_toplevel_iife;
+ return info()->literal()->is_toplevel() ||
+ info()->literal()->is_oneshot_iife();
}
-void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
+void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
size_t class_boilerplate_entry =
builder()->AllocateDeferredConstantPoolEntry();
class_literals_.push_back(std::make_pair(expr, class_boilerplate_entry));
@@ -1859,7 +1944,6 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
for (int i = 0; i < expr->properties()->length(); i++) {
ClassLiteral::Property* property = expr->properties()->at(i);
if (property->is_computed_name()) {
- DCHECK_NE(property->kind(), ClassLiteral::Property::PRIVATE_FIELD);
Register key = register_allocator()->GrowRegisterList(&args);
builder()->SetExpressionAsStatementPosition(property->key());
@@ -1881,7 +1965,8 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
.Bind(&done);
}
- if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) {
+ if (property->kind() == ClassLiteral::Property::FIELD) {
+ DCHECK(!property->is_private());
// Initialize field's name variable with the computed name.
DCHECK_NOT_NULL(property->computed_name_var());
builder()->LoadAccumulatorWithRegister(key);
@@ -1890,16 +1975,22 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
}
}
- if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) {
+ if (property->kind() == ClassLiteral::Property::FIELD) {
+ if (property->is_private()) {
+ RegisterAllocationScope private_name_register_scope(this);
+ Register private_name = register_allocator()->NewRegister();
+ VisitForRegisterValue(property->key(), private_name);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
+ .StoreAccumulatorInRegister(private_name)
+ .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
+ DCHECK_NOT_NULL(property->private_name_var());
+ BuildVariableAssignment(property->private_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ }
// We don't compute field's value here, but instead do it in the
// initializer function.
continue;
- } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) {
- builder()->CallRuntime(Runtime::kCreatePrivateFieldSymbol);
- DCHECK_NOT_NULL(property->private_field_name_var());
- BuildVariableAssignment(property->private_field_name_var(), Token::INIT,
- HoleCheckMode::kElided);
- continue;
}
Register value = register_allocator()->GrowRegisterList(&args);
@@ -1920,12 +2011,12 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
HoleCheckMode::kElided);
}
- if (expr->instance_fields_initializer_function() != nullptr) {
+ if (expr->instance_members_initializer_function() != nullptr) {
Register initializer =
- VisitForRegisterValue(expr->instance_fields_initializer_function());
+ VisitForRegisterValue(expr->instance_members_initializer_function());
if (FunctionLiteral::NeedsHomeObject(
- expr->instance_fields_initializer_function())) {
+ expr->instance_members_initializer_function())) {
FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
builder()->LoadAccumulatorWithRegister(prototype).StoreHomeObjectProperty(
initializer, feedback_index(slot), language_mode());
@@ -1939,6 +2030,24 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
}
if (expr->static_fields_initializer() != nullptr) {
+ // TODO(gsathya): This can be optimized away to be a part of the
+ // class boilerplate in the future. The name argument can be
+ // passed to the DefineClass runtime function and have it set
+ // there.
+ if (name.is_valid()) {
+ Register key = register_allocator()->NewRegister();
+ builder()
+ ->LoadLiteral(ast_string_constants()->name_string())
+ .StoreAccumulatorInRegister(key);
+
+ DataPropertyInLiteralFlags data_property_flags =
+ DataPropertyInLiteralFlag::kNoFlags;
+ FeedbackSlot slot =
+ feedback_spec()->AddStoreDataPropertyInLiteralICSlot();
+ builder()->LoadAccumulatorWithRegister(name).StoreDataPropertyInLiteral(
+ class_constructor, key, data_property_flags, feedback_index(slot));
+ }
+
RegisterList args = register_allocator()->NewRegisterList(1);
Register initializer =
VisitForRegisterValue(expr->static_fields_initializer());
@@ -1960,19 +2069,23 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
}
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
+ VisitClassLiteral(expr, Register::invalid_value());
+}
+
+void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr, Register name) {
CurrentScope current_scope(this, expr->scope());
DCHECK_NOT_NULL(expr->scope());
if (expr->scope()->NeedsContext()) {
BuildNewLocalBlockContext(expr->scope());
ContextScope scope(this, expr->scope());
- BuildClassLiteral(expr);
+ BuildClassLiteral(expr, name);
} else {
- BuildClassLiteral(expr);
+ BuildClassLiteral(expr, name);
}
}
-void BytecodeGenerator::VisitInitializeClassFieldsStatement(
- InitializeClassFieldsStatement* stmt) {
+void BytecodeGenerator::VisitInitializeClassMembersStatement(
+ InitializeClassMembersStatement* stmt) {
RegisterList args = register_allocator()->NewRegisterList(3);
Register constructor = args[0], key = args[1], value = args[2];
builder()->MoveRegister(builder()->Receiver(), constructor);
@@ -1981,17 +2094,19 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement(
ClassLiteral::Property* property = stmt->fields()->at(i);
if (property->is_computed_name()) {
- DCHECK_EQ(property->kind(), ClassLiteral::Property::PUBLIC_FIELD);
+ DCHECK_EQ(property->kind(), ClassLiteral::Property::FIELD);
+ DCHECK(!property->is_private());
Variable* var = property->computed_name_var();
DCHECK_NOT_NULL(var);
// The computed name is already evaluated and stored in a
// variable at class definition time.
BuildVariableLoad(var, HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(key);
- } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) {
- Variable* private_field_name_var = property->private_field_name_var();
- DCHECK_NOT_NULL(private_field_name_var);
- BuildVariableLoad(private_field_name_var, HoleCheckMode::kElided);
+ } else if (property->kind() == ClassLiteral::Property::FIELD &&
+ property->is_private()) {
+ Variable* private_name_var = property->private_name_var();
+ DCHECK_NOT_NULL(private_name_var);
+ BuildVariableLoad(private_name_var, HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(key);
} else {
BuildLoadPropertyKey(property, key);
@@ -2002,15 +2117,16 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement(
VisitSetHomeObject(value, constructor, property);
Runtime::FunctionId function_id =
- property->kind() == ClassLiteral::Property::PUBLIC_FIELD
+ property->kind() == ClassLiteral::Property::FIELD &&
+ !property->is_private()
? Runtime::kCreateDataProperty
: Runtime::kAddPrivateField;
builder()->CallRuntime(function_id, args);
}
}
-void BytecodeGenerator::BuildInstanceFieldInitialization(Register constructor,
- Register instance) {
+void BytecodeGenerator::BuildInstanceMemberInitialization(Register constructor,
+ Register instance) {
RegisterList args = register_allocator()->NewRegisterList(1);
Register initializer = register_allocator()->NewRegister();
@@ -2127,7 +2243,9 @@ void BytecodeGenerator::BuildCreateObjectLiteral(Register literal,
// optimize once the CreateShallowObjectLiteral stub is in sync with the TF
// optimizations.
int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
- builder()->CreateObjectLiteral(entry, literal_index, flags, literal);
+ builder()
+ ->CreateObjectLiteral(entry, literal_index, flags)
+ .StoreAccumulatorInRegister(literal);
}
}
@@ -2317,7 +2435,20 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Register key = register_allocator()->NewRegister();
BuildLoadPropertyKey(property, key);
builder()->SetExpressionPosition(property->value());
- Register value = VisitForRegisterValue(property->value());
+ Register value;
+
+ // Static class fields require the name property to be set on
+ // the class, meaning we can't wait until the
+ // StoreDataPropertyInLiteral call later to set the name.
+ if (property->value()->IsClassLiteral() &&
+ property->value()->AsClassLiteral()->static_fields_initializer() !=
+ nullptr) {
+ value = register_allocator()->NewRegister();
+ VisitClassLiteral(property->value()->AsClassLiteral(), key);
+ builder()->StoreAccumulatorInRegister(value);
+ } else {
+ value = VisitForRegisterValue(property->value());
+ }
VisitSetHomeObject(value, literal, property);
DataPropertyInLiteralFlags data_property_flags =
@@ -2369,16 +2500,25 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->LoadAccumulatorWithRegister(literal);
}
-void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread, Register array,
- Register index,
- FeedbackSlot index_slot,
- FeedbackSlot element_slot) {
- RegisterAllocationScope register_scope(this);
- Register value = register_allocator()->NewRegister();
-
- builder()->SetExpressionAsStatementPosition(spread->expression());
- IteratorRecord iterator =
- BuildGetIteratorRecord(spread->expression(), IteratorType::kNormal);
+// Fill an array with values from an iterator, starting at a given index. It is
+// guaranteed that the loop will only terminate if the iterator is exhausted, or
+// if one of iterator.next(), value.done, or value.value fail.
+//
+// In pseudocode:
+//
+// loop {
+// value = iterator.next()
+// if (value.done) break;
+// value = value.value
+// array[index++] = value
+// }
+void BytecodeGenerator::BuildFillArrayWithIterator(
+ IteratorRecord iterator, Register array, Register index, Register value,
+ FeedbackSlot next_value_slot, FeedbackSlot next_done_slot,
+ FeedbackSlot index_slot, FeedbackSlot element_slot) {
+ DCHECK(array.is_valid());
+ DCHECK(index.is_valid());
+ DCHECK(value.is_valid());
LoopBuilder loop_builder(builder(), nullptr, nullptr);
loop_builder.LoopHeader();
@@ -2396,8 +2536,7 @@ void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread, Register array,
builder()
// value = value.value
->LoadNamedProperty(value, ast_string_constants()->value_string(),
- feedback_index(feedback_spec()->AddLoadICSlot()))
- .StoreAccumulatorInRegister(value)
+ feedback_index(next_value_slot))
// array[index] = value
.StoreInArrayLiteral(array, index, feedback_index(element_slot))
// index++
@@ -2409,7 +2548,7 @@ void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread, Register array,
}
void BytecodeGenerator::BuildCreateArrayLiteral(
- ZonePtrList<Expression>* elements, ArrayLiteral* expr) {
+ const ZonePtrList<Expression>* elements, ArrayLiteral* expr) {
RegisterAllocationScope register_scope(this);
Register index = register_allocator()->NewRegister();
Register array = register_allocator()->NewRegister();
@@ -2519,9 +2658,20 @@ void BytecodeGenerator::BuildCreateArrayLiteral(
for (; current != end; ++current) {
Expression* subexpr = *current;
if (subexpr->IsSpread()) {
+ RegisterAllocationScope scope(this);
+ builder()->SetExpressionAsStatementPosition(
+ subexpr->AsSpread()->expression());
+ VisitForAccumulatorValue(subexpr->AsSpread()->expression());
+ IteratorRecord iterator = BuildGetIteratorRecord(IteratorType::kNormal);
+
+ Register value = register_allocator()->NewRegister();
+ FeedbackSlot next_value_load_slot = feedback_spec()->AddLoadICSlot();
+ FeedbackSlot next_done_load_slot = feedback_spec()->AddLoadICSlot();
FeedbackSlot real_index_slot = index_slot.Get();
- BuildArrayLiteralSpread(subexpr->AsSpread(), array, index,
- real_index_slot, element_slot.Get());
+ FeedbackSlot real_element_slot = element_slot.Get();
+ BuildFillArrayWithIterator(iterator, array, index, value,
+ next_value_load_slot, next_done_load_slot,
+ real_index_slot, real_element_slot);
} else if (!subexpr->IsTheHoleLiteral()) {
// literal[index++] = subexpr
VisitForAccumulatorValue(subexpr);
@@ -2712,18 +2862,13 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
.CallRuntime(Runtime::kInlineAsyncGeneratorResolve, args);
} else {
DCHECK(IsAsyncFunction(info()->literal()->kind()));
- RegisterList args = register_allocator()->NewRegisterList(2);
- Register promise = args[0];
- Register return_value = args[1];
- builder()->StoreAccumulatorInRegister(return_value);
-
- Variable* var_promise = closure_scope()->promise_var();
- DCHECK_NOT_NULL(var_promise);
- BuildVariableLoad(var_promise, HoleCheckMode::kElided);
+ RegisterList args = register_allocator()->NewRegisterList(3);
builder()
- ->StoreAccumulatorInRegister(promise)
- .CallRuntime(Runtime::kInlineResolvePromise, args)
- .LoadAccumulatorWithRegister(promise);
+ ->MoveRegister(generator_object(), args[0]) // generator
+ .StoreAccumulatorInRegister(args[1]) // value
+ .LoadBoolean(info()->literal()->CanSuspend())
+ .StoreAccumulatorInRegister(args[2]) // can_suspend
+ .CallRuntime(Runtime::kInlineAsyncFunctionResolve, args);
}
BuildReturn(source_position);
@@ -2863,18 +3008,18 @@ void BytecodeGenerator::BuildVariableAssignment(
}
}
-void BytecodeGenerator::BuildLoadNamedProperty(Property* property,
+void BytecodeGenerator::BuildLoadNamedProperty(const Expression* object_expr,
Register object,
const AstRawString* name) {
if (ShouldOptimizeAsOneShot()) {
builder()->LoadNamedPropertyNoFeedback(object, name);
} else {
- FeedbackSlot slot = GetCachedLoadICSlot(property->obj(), name);
+ FeedbackSlot slot = GetCachedLoadICSlot(object_expr, name);
builder()->LoadNamedProperty(object, name, feedback_index(slot));
}
}
-void BytecodeGenerator::BuildStoreNamedProperty(Property* property,
+void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr,
Register object,
const AstRawString* name) {
Register value;
@@ -2886,7 +3031,7 @@ void BytecodeGenerator::BuildStoreNamedProperty(Property* property,
if (ShouldOptimizeAsOneShot()) {
builder()->StoreNamedPropertyNoFeedback(object, name, language_mode());
} else {
- FeedbackSlot slot = GetCachedStoreICSlot(property->obj(), name);
+ FeedbackSlot slot = GetCachedStoreICSlot(object_expr, name);
builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
}
@@ -2896,35 +3041,69 @@ void BytecodeGenerator::BuildStoreNamedProperty(Property* property,
}
}
-void BytecodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression() ||
- (expr->op() == Token::INIT && expr->target()->IsVariableProxy() &&
- expr->target()->AsVariableProxy()->is_this()));
- Register object, key;
- RegisterList super_property_args;
- const AstRawString* name;
-
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::NonProperty(Expression* expr) {
+ return AssignmentLhsData(NON_PROPERTY, expr, RegisterList(), Register(),
+ Register(), nullptr, nullptr);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::NamedProperty(Expression* object_expr,
+ Register object,
+ const AstRawString* name) {
+ return AssignmentLhsData(NAMED_PROPERTY, nullptr, RegisterList(), object,
+ Register(), object_expr, name);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::KeyedProperty(Register object,
+ Register key) {
+ return AssignmentLhsData(KEYED_PROPERTY, nullptr, RegisterList(), object, key,
+ nullptr, nullptr);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::NamedSuperProperty(
+ RegisterList super_property_args) {
+ return AssignmentLhsData(NAMED_SUPER_PROPERTY, nullptr, super_property_args,
+ Register(), Register(), nullptr, nullptr);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::KeyedSuperProperty(
+ RegisterList super_property_args) {
+ return AssignmentLhsData(KEYED_SUPER_PROPERTY, nullptr, super_property_args,
+ Register(), Register(), nullptr, nullptr);
+}
+
+BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
+ Expression* lhs, AccumulatorPreservingMode accumulator_preserving_mode) {
// Left-hand side can only be a property, a global or a variable slot.
- Property* property = expr->target()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
+ Property* property = lhs->AsProperty();
+ AssignType assign_type = Property::GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
- case VARIABLE:
- // Nothing to do to evaluate variable assignment LHS.
- break;
+ case NON_PROPERTY:
+ return AssignmentLhsData::NonProperty(lhs);
case NAMED_PROPERTY: {
- object = VisitForRegisterValue(property->obj());
- name = property->key()->AsLiteral()->AsRawPropertyName();
- break;
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ Register object = VisitForRegisterValue(property->obj());
+ const AstRawString* name =
+ property->key()->AsLiteral()->AsRawPropertyName();
+ return AssignmentLhsData::NamedProperty(property->obj(), object, name);
}
case KEYED_PROPERTY: {
- object = VisitForRegisterValue(property->obj());
- key = VisitForRegisterValue(property->key());
- break;
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ Register object = VisitForRegisterValue(property->obj());
+ Register key = VisitForRegisterValue(property->key());
+ return AssignmentLhsData::KeyedProperty(object, key);
}
case NAMED_SUPER_PROPERTY: {
- super_property_args = register_allocator()->NewRegisterList(4);
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ RegisterList super_property_args =
+ register_allocator()->NewRegisterList(4);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
@@ -2933,81 +3112,514 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
builder()
->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
.StoreAccumulatorInRegister(super_property_args[2]);
- break;
+ return AssignmentLhsData::NamedSuperProperty(super_property_args);
}
case KEYED_SUPER_PROPERTY: {
- super_property_args = register_allocator()->NewRegisterList(4);
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ RegisterList super_property_args =
+ register_allocator()->NewRegisterList(4);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
VisitForRegisterValue(super_property->home_object(),
super_property_args[1]);
VisitForRegisterValue(property->key(), super_property_args[2]);
- break;
+ return AssignmentLhsData::KeyedSuperProperty(super_property_args);
}
}
+ UNREACHABLE();
+}
- // Evaluate the value and potentially handle compound assignments by loading
- // the left-hand side value and performing a binary operation.
- if (expr->IsCompoundAssignment()) {
- switch (assign_type) {
- case VARIABLE: {
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- BuildVariableLoad(proxy->var(), proxy->hole_check_mode());
- break;
- }
- case NAMED_PROPERTY: {
- BuildLoadNamedProperty(property, object, name);
- break;
- }
- case KEYED_PROPERTY: {
- // Key is already in accumulator at this point due to evaluating the
- // LHS above.
- FeedbackSlot slot = feedback_spec()->AddKeyedLoadICSlot();
- builder()->LoadKeyedProperty(object, feedback_index(slot));
- break;
- }
- case NAMED_SUPER_PROPERTY: {
- builder()->CallRuntime(Runtime::kLoadFromSuper,
- super_property_args.Truncate(3));
- break;
+// Build the iteration finalizer called in the finally block of an iteration
+// protocol execution. This closes the iterator if needed, and suppresses any
+// exception it throws if necessary.
+//
+// In pseudo-code, this builds:
+//
+// if (!done) {
+// let method = iterator.return
+// if (method !== null && method !== undefined) {
+// if (typeof(method) !== "function") throw TypeError
+// try {
+// let return_val = method.call(iterator)
+// if (!%IsObject(return_val)) throw TypeError
+// } catch (e) {
+// if (iteration_continuation != RETHROW)
+// rethrow e
+// }
+// }
+// }
+//
+// For async iterators, iterator.close() becomes await iterator.close().
+void BytecodeGenerator::BuildFinalizeIteration(
+ IteratorRecord iterator, Register done,
+ Register iteration_continuation_token) {
+ RegisterAllocationScope register_scope(this);
+ BytecodeLabels iterator_is_done(zone());
+
+ // if (!done) {
+ builder()->LoadAccumulatorWithRegister(done).JumpIfTrue(
+ ToBooleanMode::kConvertToBoolean, iterator_is_done.New());
+
+ // method = iterator.return
+ // if (method !== null && method !== undefined) {
+ Register method = register_allocator()->NewRegister();
+ builder()
+ ->LoadNamedProperty(iterator.object(),
+ ast_string_constants()->return_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()))
+ .StoreAccumulatorInRegister(method)
+ .JumpIfUndefined(iterator_is_done.New())
+ .JumpIfNull(iterator_is_done.New());
+
+ // if (typeof(method) !== "function") throw TypeError
+ BytecodeLabel if_callable;
+ builder()
+ ->CompareTypeOf(TestTypeOfFlags::LiteralFlag::kFunction)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &if_callable);
+ {
+ // throw %NewTypeError(kReturnMethodNotCallable)
+ RegisterAllocationScope register_scope(this);
+ RegisterList new_type_error_args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(Smi::FromEnum(MessageTemplate::kReturnMethodNotCallable))
+ .StoreAccumulatorInRegister(new_type_error_args[0])
+ .LoadLiteral(ast_string_constants()->empty_string())
+ .StoreAccumulatorInRegister(new_type_error_args[1])
+ .CallRuntime(Runtime::kNewTypeError, new_type_error_args)
+ .Throw();
+ }
+ builder()->Bind(&if_callable);
+
+ {
+ RegisterAllocationScope register_scope(this);
+ BuildTryCatch(
+ // try {
+ // let return_val = method.call(iterator)
+ // if (!%IsObject(return_val)) throw TypeError
+ // }
+ [&]() {
+ RegisterList args(iterator.object());
+ builder()->CallProperty(
+ method, args, feedback_index(feedback_spec()->AddCallICSlot()));
+ if (iterator.type() == IteratorType::kAsync) {
+ BuildAwait();
+ }
+ builder()->JumpIfJSReceiver(iterator_is_done.New());
+ {
+ // Throw this exception inside the try block so that it is
+ // suppressed by the iteration continuation if necessary.
+ RegisterAllocationScope register_scope(this);
+ Register return_result = register_allocator()->NewRegister();
+ builder()
+ ->StoreAccumulatorInRegister(return_result)
+ .CallRuntime(Runtime::kThrowIteratorResultNotAnObject,
+ return_result);
+ }
+ },
+
+ // catch (e) {
+ // if (iteration_continuation != RETHROW)
+ // rethrow e
+ // }
+ [&](Register context) {
+ // Reuse context register to store the exception.
+ Register close_exception = context;
+ builder()->StoreAccumulatorInRegister(close_exception);
+
+ BytecodeLabel suppress_close_exception;
+ builder()
+ ->LoadLiteral(
+ Smi::FromInt(ControlScope::DeferredCommands::kRethrowToken))
+ .CompareReference(iteration_continuation_token)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean,
+ &suppress_close_exception)
+ .LoadAccumulatorWithRegister(close_exception)
+ .ReThrow()
+ .Bind(&suppress_close_exception);
+ },
+ HandlerTable::UNCAUGHT);
+ }
+
+ iterator_is_done.Bind(builder());
+}
+
+// Get the default value of a destructuring target. Will mutate the
+// destructuring target expression if there is a default value.
+//
+// For
+// a = b
+// in
+// let {a = b} = c
+// returns b and mutates the input into a.
+Expression* BytecodeGenerator::GetDestructuringDefaultValue(
+ Expression** target) {
+ Expression* default_value = nullptr;
+ if ((*target)->IsAssignment()) {
+ Assignment* default_init = (*target)->AsAssignment();
+ DCHECK_EQ(default_init->op(), Token::ASSIGN);
+ default_value = default_init->value();
+ *target = default_init->target();
+ DCHECK((*target)->IsValidReferenceExpression() || (*target)->IsPattern());
+ }
+ return default_value;
+}
+
+// Convert a destructuring assignment to an array literal into a sequence of
+// iterator accesses into the value being assigned (in the accumulator).
+//
+// [a().x, ...b] = accumulator
+//
+// becomes
+//
+// iterator = %GetIterator(accumulator)
+// try {
+//
+// // Individual assignments read off the value from iterator.next() This gets
+// // repeated per destructuring element.
+// if (!done) {
+// // Make sure we are considered 'done' if .next(), .done or .value fail.
+// done = true
+// var next_result = iterator.next()
+// var tmp_done = next_result.done
+// if (!tmp_done) {
+// value = next_result.value
+// done = false
+// }
+// }
+// if (done)
+// value = undefined
+// a().x = value
+//
+// // A spread receives the remaining items in the iterator.
+// var array = []
+// var index = 0
+// %FillArrayWithIterator(iterator, array, index, done)
+// done = true
+// b = array
+//
+// } catch(e) {
+// iteration_continuation = RETHROW
+// } finally {
+// %FinalizeIteration(iterator, done, iteration_continuation)
+// }
+void BytecodeGenerator::BuildDestructuringArrayAssignment(
+ ArrayLiteral* pattern, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode) {
+ RegisterAllocationScope scope(this);
+
+ Register value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+
+ // Store the iterator in a dedicated register so that it can be closed on
+ // exit, and the 'done' value in a dedicated register so that it can be
+ // changed and accessed independently of the iteration result.
+ IteratorRecord iterator = BuildGetIteratorRecord(IteratorType::kNormal);
+ Register done = register_allocator()->NewRegister();
+ builder()->LoadFalse();
+ builder()->StoreAccumulatorInRegister(done);
+
+ BuildTryFinally(
+ // Try block.
+ [&]() {
+ Register next_result = register_allocator()->NewRegister();
+ FeedbackSlot next_value_load_slot = feedback_spec()->AddLoadICSlot();
+ FeedbackSlot next_done_load_slot = feedback_spec()->AddLoadICSlot();
+
+ Spread* spread = nullptr;
+ for (Expression* target : *pattern->values()) {
+ if (target->IsSpread()) {
+ spread = target->AsSpread();
+ break;
+ }
+
+ Expression* default_value = GetDestructuringDefaultValue(&target);
+ if (!target->IsPattern()) {
+ builder()->SetExpressionAsStatementPosition(target);
+ }
+
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(target);
+
+ // if (!done) {
+ // // Make sure we are considered done if .next(), .done or .value
+ // // fail.
+ // done = true
+ // var next_result = iterator.next()
+ // var tmp_done = next_result.done
+ // if (!tmp_done) {
+ // value = next_result.value
+ // done = false
+ // }
+ // }
+ // if (done)
+ // value = undefined
+ BytecodeLabels is_done(zone());
+
+ builder()->LoadAccumulatorWithRegister(done);
+ builder()->JumpIfTrue(ToBooleanMode::kConvertToBoolean,
+ is_done.New());
+
+ builder()->LoadTrue().StoreAccumulatorInRegister(done);
+ BuildIteratorNext(iterator, next_result);
+ builder()
+ ->LoadNamedProperty(next_result,
+ ast_string_constants()->done_string(),
+ feedback_index(next_done_load_slot))
+ .JumpIfTrue(ToBooleanMode::kConvertToBoolean, is_done.New())
+ .LoadNamedProperty(next_result,
+ ast_string_constants()->value_string(),
+ feedback_index(next_value_load_slot))
+ .StoreAccumulatorInRegister(next_result)
+ .LoadFalse()
+ .StoreAccumulatorInRegister(done)
+ .LoadAccumulatorWithRegister(next_result);
+
+ // Only do the assignment if this is not a hole (i.e. 'elided').
+ if (!target->IsTheHoleLiteral()) {
+ // [<pattern> = <init>] = <value>
+ // becomes (roughly)
+ // temp = <value>.next();
+ // <pattern> = temp === undefined ? <init> : temp;
+ BytecodeLabel do_assignment;
+ if (default_value) {
+ builder()->JumpIfNotUndefined(&do_assignment);
+ // Since done == true => temp == undefined, jump directly to using
+ // the default value for that case.
+ is_done.Bind(builder());
+ VisitForAccumulatorValue(default_value);
+ } else {
+ builder()->Jump(&do_assignment);
+ is_done.Bind(builder());
+ builder()->LoadUndefined();
+ }
+ builder()->Bind(&do_assignment);
+
+ BuildAssignment(lhs_data, op, lookup_hoisting_mode);
+ } else {
+ DCHECK_EQ(lhs_data.assign_type(), NON_PROPERTY);
+ is_done.Bind(builder());
+ }
+ }
+
+ if (spread) {
+ RegisterAllocationScope scope(this);
+
+ // A spread is turned into a loop over the remainer of the iterator.
+ Expression* target = spread->expression();
+
+ if (!target->IsPattern()) {
+ builder()->SetExpressionAsStatementPosition(spread);
+ }
+
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(target);
+
+ // var array = [];
+ Register array = register_allocator()->NewRegister();
+ builder()->CreateEmptyArrayLiteral(
+ feedback_index(feedback_spec()->AddLiteralSlot()));
+ builder()->StoreAccumulatorInRegister(array);
+
+ // var index = 0;
+ Register index = register_allocator()->NewRegister();
+ builder()->LoadLiteral(Smi::zero());
+ builder()->StoreAccumulatorInRegister(index);
+
+ // Set done to true, since it's guaranteed to be true by the time the
+ // array fill completes.
+ builder()->LoadTrue().StoreAccumulatorInRegister(done);
+
+ // Fill the array with the iterator.
+ FeedbackSlot element_slot =
+ feedback_spec()->AddStoreInArrayLiteralICSlot();
+ FeedbackSlot index_slot = feedback_spec()->AddBinaryOpICSlot();
+ BuildFillArrayWithIterator(iterator, array, index, next_result,
+ next_value_load_slot, next_done_load_slot,
+ index_slot, element_slot);
+
+ // Assign the array to the LHS.
+ builder()->LoadAccumulatorWithRegister(array);
+ BuildAssignment(lhs_data, op, lookup_hoisting_mode);
+ }
+ },
+ // Finally block.
+ [&](Register iteration_continuation_token) {
+ // Finish the iteration in the finally block.
+ BuildFinalizeIteration(iterator, done, iteration_continuation_token);
+ },
+ HandlerTable::UNCAUGHT);
+
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
+}
+
+// Convert a destructuring assignment to an object literal into a sequence of
+// property accesses into the value being assigned (in the accumulator).
+//
+// { y, [x++]: a(), ...b.c } = value
+//
+// becomes
+//
+// var rest_runtime_callargs = new Array(3);
+// rest_runtime_callargs[0] = value;
+//
+// rest_runtime_callargs[1] = value;
+// y = value.y;
+//
+// var temp1 = %ToName(x++);
+// rest_runtime_callargs[2] = temp1;
+// a() = value[temp1];
+//
+// b.c = %CopyDataPropertiesWithExcludedProperties.call(rest_runtime_callargs);
+void BytecodeGenerator::BuildDestructuringObjectAssignment(
+ ObjectLiteral* pattern, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode) {
+ RegisterAllocationScope scope(this);
+
+ // if (value === null || value === undefined)
+ // throw new TypeError(kNonCoercible);
+ //
+ // TODO(leszeks): Eliminate check if value is known to be non-null (e.g.
+ // an object literal).
+ BytecodeLabel is_null_or_undefined, not_null_or_undefined;
+ builder()
+ ->JumpIfNull(&is_null_or_undefined)
+ .JumpIfNotUndefined(&not_null_or_undefined);
+
+ {
+ builder()->Bind(&is_null_or_undefined);
+ builder()->SetExpressionPosition(pattern);
+ builder()->CallRuntime(Runtime::kThrowPatternAssignmentNonCoercible);
+ }
+
+ // Store the assignment value in a register.
+ Register value;
+ RegisterList rest_runtime_callargs;
+ if (pattern->has_rest_property()) {
+ rest_runtime_callargs =
+ register_allocator()->NewRegisterList(pattern->properties()->length());
+ value = rest_runtime_callargs[0];
+ } else {
+ value = register_allocator()->NewRegister();
+ }
+ builder()->Bind(&not_null_or_undefined).StoreAccumulatorInRegister(value);
+
+ int i = 0;
+ for (ObjectLiteralProperty* pattern_property : *pattern->properties()) {
+ RegisterAllocationScope scope(this);
+
+ // The key of the pattern becomes the key into the RHS value, and the value
+ // of the pattern becomes the target of the assignment.
+ //
+ // e.g. { a: b } = o becomes b = o.a
+ Expression* pattern_key = pattern_property->key();
+ Expression* target = pattern_property->value();
+ Expression* default_value = GetDestructuringDefaultValue(&target);
+
+ if (!target->IsPattern()) {
+ builder()->SetExpressionAsStatementPosition(target);
+ }
+
+ // Calculate this property's key into the assignment RHS value, additionally
+ // storing the key for rest_runtime_callargs if needed.
+ //
+ // The RHS is accessed using the key either by LoadNamedProperty (if
+ // value_name is valid) or by LoadKeyedProperty (otherwise).
+ const AstRawString* value_name = nullptr;
+ Register value_key;
+
+ if (pattern_property->kind() != ObjectLiteralProperty::Kind::SPREAD) {
+ if (pattern_key->IsPropertyName()) {
+ value_name = pattern_key->AsLiteral()->AsRawPropertyName();
}
- case KEYED_SUPER_PROPERTY: {
- builder()->CallRuntime(Runtime::kLoadKeyedFromSuper,
- super_property_args.Truncate(3));
- break;
+ if (pattern->has_rest_property() || !value_name) {
+ if (pattern->has_rest_property()) {
+ value_key = rest_runtime_callargs[i + 1];
+ } else {
+ value_key = register_allocator()->NewRegister();
+ }
+ if (pattern_property->is_computed_name()) {
+ // { [a()]: b().x } = c
+ // becomes
+ // var tmp = a()
+ // b().x = c[tmp]
+ DCHECK(!pattern_key->IsPropertyName() ||
+ !pattern_key->IsNumberLiteral());
+ VisitForAccumulatorValue(pattern_key);
+ builder()->ToName(value_key);
+ } else {
+ // We only need the key for non-computed properties when it is numeric
+ // or is being saved for the rest_runtime_callargs.
+ DCHECK(
+ pattern_key->IsNumberLiteral() ||
+ (pattern->has_rest_property() && pattern_key->IsPropertyName()));
+ VisitForRegisterValue(pattern_key, value_key);
+ }
}
}
- BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation();
- FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
- if (expr->value()->IsSmiLiteral()) {
- builder()->BinaryOperationSmiLiteral(
- binop->op(), expr->value()->AsLiteral()->AsSmiLiteral(),
- feedback_index(slot));
+
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(target);
+
+ // Get the value from the RHS.
+ if (pattern_property->kind() == ObjectLiteralProperty::Kind::SPREAD) {
+ DCHECK_EQ(i, pattern->properties()->length() - 1);
+ DCHECK(!value_key.is_valid());
+ DCHECK_NULL(value_name);
+ builder()->CallRuntime(Runtime::kCopyDataPropertiesWithExcludedProperties,
+ rest_runtime_callargs);
+ } else if (value_name) {
+ builder()->LoadNamedProperty(
+ value, value_name, feedback_index(feedback_spec()->AddLoadICSlot()));
} else {
- Register old_value = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(old_value);
- VisitForAccumulatorValue(expr->value());
- builder()->BinaryOperation(binop->op(), old_value, feedback_index(slot));
+ DCHECK(value_key.is_valid());
+ builder()->LoadAccumulatorWithRegister(value_key).LoadKeyedProperty(
+ value, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
}
- } else {
- VisitForAccumulatorValue(expr->value());
+
+ // {<pattern> = <init>} = <value>
+ // becomes
+ // temp = <value>;
+ // <pattern> = temp === undefined ? <init> : temp;
+ if (default_value) {
+ BytecodeLabel value_not_undefined;
+ builder()->JumpIfNotUndefined(&value_not_undefined);
+ VisitForAccumulatorValue(default_value);
+ builder()->Bind(&value_not_undefined);
+ }
+
+ BuildAssignment(lhs_data, op, lookup_hoisting_mode);
+
+ i++;
}
- // Store the value.
- builder()->SetExpressionPosition(expr);
- switch (assign_type) {
- case VARIABLE: {
- // TODO(oth): The BuildVariableAssignment() call is hard to reason about.
- // Is the value in the accumulator safe? Yes, but scary.
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- BuildVariableAssignment(proxy->var(), expr->op(),
- proxy->hole_check_mode(),
- expr->lookup_hoisting_mode());
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
+}
+
+void BytecodeGenerator::BuildAssignment(
+ const AssignmentLhsData& lhs_data, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode) {
+ // Assign the value to the LHS.
+ switch (lhs_data.assign_type()) {
+ case NON_PROPERTY: {
+ if (ObjectLiteral* pattern = lhs_data.expr()->AsObjectLiteral()) {
+ // Split object literals into destructuring.
+ BuildDestructuringObjectAssignment(pattern, op, lookup_hoisting_mode);
+ } else if (ArrayLiteral* pattern = lhs_data.expr()->AsArrayLiteral()) {
+ // Split object literals into destructuring.
+ BuildDestructuringArrayAssignment(pattern, op, lookup_hoisting_mode);
+ } else {
+ DCHECK(lhs_data.expr()->IsVariableProxy());
+ VariableProxy* proxy = lhs_data.expr()->AsVariableProxy();
+ BuildVariableAssignment(proxy->var(), op, proxy->hole_check_mode(),
+ lookup_hoisting_mode);
+ }
break;
}
case NAMED_PROPERTY: {
- BuildStoreNamedProperty(property, object, name);
+ BuildStoreNamedProperty(lhs_data.object_expr(), lhs_data.object(),
+ lhs_data.name());
break;
}
case KEYED_PROPERTY: {
@@ -3017,8 +3629,8 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
}
- builder()->StoreKeyedProperty(object, key, feedback_index(slot),
- language_mode());
+ builder()->StoreKeyedProperty(lhs_data.object(), lhs_data.key(),
+ feedback_index(slot), language_mode());
if (!execution_result()->IsEffect()) {
builder()->LoadAccumulatorWithRegister(value);
}
@@ -3026,34 +3638,91 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
case NAMED_SUPER_PROPERTY: {
builder()
- ->StoreAccumulatorInRegister(super_property_args[3])
- .CallRuntime(StoreToSuperRuntimeId(), super_property_args);
+ ->StoreAccumulatorInRegister(lhs_data.super_property_args()[3])
+ .CallRuntime(StoreToSuperRuntimeId(), lhs_data.super_property_args());
break;
}
case KEYED_SUPER_PROPERTY: {
builder()
- ->StoreAccumulatorInRegister(super_property_args[3])
- .CallRuntime(StoreKeyedToSuperRuntimeId(), super_property_args);
+ ->StoreAccumulatorInRegister(lhs_data.super_property_args()[3])
+ .CallRuntime(StoreKeyedToSuperRuntimeId(),
+ lhs_data.super_property_args());
break;
}
}
}
+void BytecodeGenerator::VisitAssignment(Assignment* expr) {
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(expr->target());
+
+ VisitForAccumulatorValue(expr->value());
+
+ builder()->SetExpressionPosition(expr);
+ BuildAssignment(lhs_data, expr->op(), expr->lookup_hoisting_mode());
+}
+
void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
- VisitAssignment(expr);
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(expr->target());
+
+ // Evaluate the value and potentially handle compound assignments by loading
+ // the left-hand side value and performing a binary operation.
+ switch (lhs_data.assign_type()) {
+ case NON_PROPERTY: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ BuildVariableLoad(proxy->var(), proxy->hole_check_mode());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ BuildLoadNamedProperty(lhs_data.object_expr(), lhs_data.object(),
+ lhs_data.name());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ FeedbackSlot slot = feedback_spec()->AddKeyedLoadICSlot();
+ builder()
+ ->LoadAccumulatorWithRegister(lhs_data.key())
+ .LoadKeyedProperty(lhs_data.object(), feedback_index(slot));
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ builder()->CallRuntime(Runtime::kLoadFromSuper,
+ lhs_data.super_property_args().Truncate(3));
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ builder()->CallRuntime(Runtime::kLoadKeyedFromSuper,
+ lhs_data.super_property_args().Truncate(3));
+ break;
+ }
+ }
+ BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation();
+ FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
+ if (expr->value()->IsSmiLiteral()) {
+ builder()->BinaryOperationSmiLiteral(
+ binop->op(), expr->value()->AsLiteral()->AsSmiLiteral(),
+ feedback_index(slot));
+ } else {
+ Register old_value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(old_value);
+ VisitForAccumulatorValue(expr->value());
+ builder()->BinaryOperation(binop->op(), old_value, feedback_index(slot));
+ }
+
+ builder()->SetExpressionPosition(expr);
+ BuildAssignment(lhs_data, expr->op(), expr->lookup_hoisting_mode());
}
// Suspends the generator to resume at the next suspend_id, with output stored
// in the accumulator. When the generator is resumed, the sent value is loaded
// in the accumulator.
-void BytecodeGenerator::BuildSuspendPoint(Expression* suspend_expr) {
+void BytecodeGenerator::BuildSuspendPoint(int position) {
const int suspend_id = suspend_count_++;
RegisterList registers = register_allocator()->AllLiveRegisters();
// Save context, registers, and state. This bytecode then returns the value
// in the accumulator.
- builder()->SetExpressionPosition(suspend_expr);
+ builder()->SetExpressionPosition(position);
builder()->SuspendGenerator(generator_object(), registers, suspend_id);
// Upon resume, we continue here.
@@ -3090,12 +3759,12 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
builder()
->StoreAccumulatorInRegister(args[0]) // value
.LoadFalse()
- .StoreAccumulatorInRegister(args[1]) // done
+ .StoreAccumulatorInRegister(args[1]) // done
.CallRuntime(Runtime::kInlineCreateIterResultObject, args);
}
}
- BuildSuspendPoint(expr);
+ BuildSuspendPoint(expr->position());
// At this point, the generator has been resumed, with the received value in
// the accumulator.
@@ -3218,8 +3887,8 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
{
RegisterAllocationScope register_scope(this);
RegisterList iterator_and_input = register_allocator()->NewRegisterList(2);
+ VisitForAccumulatorValue(expr->expression());
IteratorRecord iterator = BuildGetIteratorRecord(
- expr->expression(),
register_allocator()->NewRegister() /* next method */,
iterator_and_input[0], iterator_type);
@@ -3306,7 +3975,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
if (iterator_type == IteratorType::kAsync) {
// Await the result of the method invocation.
- BuildAwait(expr);
+ BuildAwait(expr->position());
}
// Check that output is an object.
@@ -3346,7 +4015,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
.CallRuntime(Runtime::kInlineAsyncGeneratorYield, args);
}
- BuildSuspendPoint(expr);
+ BuildSuspendPoint(expr->position());
builder()->StoreAccumulatorInRegister(input);
builder()
->CallRuntime(Runtime::kInlineGeneratorGetResumeMode,
@@ -3382,7 +4051,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
builder()->LoadAccumulatorWithRegister(output_value);
}
-void BytecodeGenerator::BuildAwait(Expression* await_expr) {
+void BytecodeGenerator::BuildAwait(int position) {
// Rather than HandlerTable::UNCAUGHT, async functions use
// HandlerTable::ASYNC_AWAIT to communicate that top-level exceptions are
// transformed into promise rejections. This is necessary to prevent emitting
@@ -3395,38 +4064,24 @@ void BytecodeGenerator::BuildAwait(Expression* await_expr) {
// Await(operand) and suspend.
RegisterAllocationScope register_scope(this);
- int await_builtin_context_index;
- RegisterList args;
+ Runtime::FunctionId await_intrinsic_id;
if (IsAsyncGeneratorFunction(function_kind())) {
- await_builtin_context_index =
- catch_prediction() == HandlerTable::ASYNC_AWAIT
- ? Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT
- : Context::ASYNC_GENERATOR_AWAIT_CAUGHT;
- args = register_allocator()->NewRegisterList(2);
- builder()
- ->MoveRegister(generator_object(), args[0])
- .StoreAccumulatorInRegister(args[1]);
+ await_intrinsic_id = catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Runtime::kInlineAsyncGeneratorAwaitUncaught
+ : Runtime::kInlineAsyncGeneratorAwaitCaught;
} else {
- await_builtin_context_index =
- catch_prediction() == HandlerTable::ASYNC_AWAIT
- ? Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX
- : Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX;
- args = register_allocator()->NewRegisterList(3);
- builder()
- ->MoveRegister(generator_object(), args[0])
- .StoreAccumulatorInRegister(args[1]);
-
- // AsyncFunction Await builtins require a 3rd parameter to hold the outer
- // promise.
- Variable* var_promise = closure_scope()->promise_var();
- BuildVariableLoadForAccumulatorValue(var_promise, HoleCheckMode::kElided);
- builder()->StoreAccumulatorInRegister(args[2]);
+ await_intrinsic_id = catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Runtime::kInlineAsyncFunctionAwaitUncaught
+ : Runtime::kInlineAsyncFunctionAwaitCaught;
}
-
- builder()->CallJSRuntime(await_builtin_context_index, args);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(generator_object(), args[0])
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(await_intrinsic_id, args);
}
- BuildSuspendPoint(await_expr);
+ BuildSuspendPoint(position);
Register input = register_allocator()->NewRegister();
Register resume_mode = register_allocator()->NewRegister();
@@ -3454,7 +4109,7 @@ void BytecodeGenerator::BuildAwait(Expression* await_expr) {
void BytecodeGenerator::VisitAwait(Await* expr) {
builder()->SetExpressionPosition(expr);
VisitForAccumulatorValue(expr->expression());
- BuildAwait(expr);
+ BuildAwait(expr->position());
BuildIncrementBlockCoverageCounterIfEnabled(expr,
SourceRangeKind::kContinuation);
}
@@ -3467,15 +4122,15 @@ void BytecodeGenerator::VisitThrow(Throw* expr) {
}
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
- LhsKind property_kind = Property::GetAssignType(property);
+ AssignType property_kind = Property::GetAssignType(property);
switch (property_kind) {
- case VARIABLE:
+ case NON_PROPERTY:
UNREACHABLE();
case NAMED_PROPERTY: {
builder()->SetExpressionPosition(property);
const AstRawString* name =
property->key()->AsLiteral()->AsRawPropertyName();
- BuildLoadNamedProperty(property, obj, name);
+ BuildLoadNamedProperty(property->obj(), obj, name);
break;
}
case KEYED_PROPERTY: {
@@ -3541,7 +4196,7 @@ void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
}
void BytecodeGenerator::VisitProperty(Property* expr) {
- LhsKind property_kind = Property::GetAssignType(expr);
+ AssignType property_kind = Property::GetAssignType(expr);
if (property_kind != NAMED_SUPER_PROPERTY &&
property_kind != KEYED_SUPER_PROPERTY) {
Register obj = VisitForRegisterValue(expr->obj());
@@ -3556,7 +4211,7 @@ void BytecodeGenerator::VisitResolvedProperty(ResolvedProperty* expr) {
UNREACHABLE();
}
-void BytecodeGenerator::VisitArguments(ZonePtrList<Expression>* args,
+void BytecodeGenerator::VisitArguments(const ZonePtrList<Expression>* args,
RegisterList* arg_regs) {
// Visit arguments.
for (int i = 0; i < static_cast<int>(args->length()); i++) {
@@ -3733,7 +4388,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
void BytecodeGenerator::VisitCallSuper(Call* expr) {
RegisterAllocationScope register_scope(this);
SuperCallReference* super = expr->expression()->AsSuperCallReference();
- ZonePtrList<Expression>* args = expr->arguments();
+ const ZonePtrList<Expression>* args = expr->arguments();
int first_spread_index = 0;
for (; first_spread_index < args->length(); first_spread_index++) {
@@ -3810,11 +4465,11 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// TODO(gsathya): In the future, we could tag nested arrow functions
// or eval with the correct bit so that we do the load conditionally
// if required.
- if (info()->literal()->requires_instance_fields_initializer() ||
+ if (info()->literal()->requires_instance_members_initializer() ||
!IsDerivedConstructor(info()->literal()->kind())) {
Register instance = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(instance);
- BuildInstanceFieldInitialization(this_function, instance);
+ BuildInstanceMemberInitialization(this_function, instance);
builder()->LoadAccumulatorWithRegister(instance);
}
}
@@ -3918,56 +4573,51 @@ void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
}
-void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
- if (expr->expression()->IsProperty()) {
+void BytecodeGenerator::VisitDelete(UnaryOperation* unary) {
+ Expression* expr = unary->expression();
+ if (expr->IsProperty()) {
// Delete of an object property is allowed both in sloppy
// and strict modes.
- Property* property = expr->expression()->AsProperty();
+ Property* property = expr->AsProperty();
Register object = VisitForRegisterValue(property->obj());
VisitForAccumulatorValue(property->key());
builder()->Delete(object, language_mode());
- } else if (expr->expression()->IsVariableProxy()) {
+ } else if (expr->IsVariableProxy() && !expr->AsVariableProxy()->is_this() &&
+ !expr->AsVariableProxy()->is_new_target()) {
// Delete of an unqualified identifier is allowed in sloppy mode but is
- // not allowed in strict mode. Deleting 'this' and 'new.target' is allowed
- // in both modes.
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- DCHECK(is_sloppy(language_mode()) || proxy->is_this() ||
- proxy->is_new_target());
- if (proxy->is_this() || proxy->is_new_target()) {
- builder()->LoadTrue();
- } else {
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::CONTEXT: {
- // Deleting local var/let/const, context variables, and arguments
- // does not have any effect.
- builder()->LoadFalse();
- break;
- }
- case VariableLocation::UNALLOCATED:
- // TODO(adamk): Falling through to the runtime results in correct
- // behavior, but does unnecessary context-walking (since scope
- // analysis has already proven that the variable doesn't exist in
- // any non-global scope). Consider adding a DeleteGlobal bytecode
- // that knows how to deal with ScriptContexts as well as global
- // object properties.
- case VariableLocation::LOOKUP: {
- Register name_reg = register_allocator()->NewRegister();
- builder()
- ->LoadLiteral(variable->raw_name())
- .StoreAccumulatorInRegister(name_reg)
- .CallRuntime(Runtime::kDeleteLookupSlot, name_reg);
- break;
- }
- default:
- UNREACHABLE();
+ // not allowed in strict mode.
+ DCHECK(is_sloppy(language_mode()));
+ Variable* variable = expr->AsVariableProxy()->var();
+ switch (variable->location()) {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ // Deleting local var/let/const, context variables, and arguments
+ // does not have any effect.
+ builder()->LoadFalse();
+ break;
}
+ case VariableLocation::UNALLOCATED:
+ // TODO(adamk): Falling through to the runtime results in correct
+ // behavior, but does unnecessary context-walking (since scope
+ // analysis has already proven that the variable doesn't exist in
+ // any non-global scope). Consider adding a DeleteGlobal bytecode
+ // that knows how to deal with ScriptContexts as well as global
+ // object properties.
+ case VariableLocation::LOOKUP: {
+ Register name_reg = register_allocator()->NewRegister();
+ builder()
+ ->LoadLiteral(variable->raw_name())
+ .StoreAccumulatorInRegister(name_reg)
+ .CallRuntime(Runtime::kDeleteLookupSlot, name_reg);
+ break;
+ }
+ default:
+ UNREACHABLE();
}
} else {
- // Delete of an unresolvable reference returns true.
- VisitForEffect(expr->expression());
+ // Delete of an unresolvable reference, new.target, and this returns true.
+ VisitForEffect(expr);
builder()->LoadTrue();
}
}
@@ -3977,7 +4627,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->expression()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
+ AssignType assign_type = Property::GetAssignType(property);
bool is_postfix = expr->is_postfix() && !execution_result()->IsEffect();
@@ -3986,7 +4636,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
RegisterList super_property_args;
const AstRawString* name;
switch (assign_type) {
- case VARIABLE: {
+ case NON_PROPERTY: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
BuildVariableLoadForAccumulatorValue(proxy->var(),
proxy->hole_check_mode());
@@ -4054,7 +4704,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value.
builder()->SetExpressionPosition(expr);
switch (assign_type) {
- case VARIABLE: {
+ case NON_PROPERTY: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
BuildVariableAssignment(proxy->var(), expr->op(),
proxy->hole_check_mode());
@@ -4208,7 +4858,7 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
Expression* subexpr;
- Smi* literal;
+ Smi literal;
if (expr->IsSmiLiteralOperation(&subexpr, &literal)) {
TypeHint type_hint = VisitForAccumulatorValue(subexpr);
builder()->SetExpressionPosition(expr);
@@ -4277,14 +4927,11 @@ void BytecodeGenerator::VisitImportCallExpression(ImportCallExpression* expr) {
.CallRuntime(Runtime::kDynamicImportCall, args);
}
-void BytecodeGenerator::BuildGetIterator(Expression* iterable,
- IteratorType hint) {
+void BytecodeGenerator::BuildGetIterator(IteratorType hint) {
RegisterList args = register_allocator()->NewRegisterList(1);
Register method = register_allocator()->NewRegister();
Register obj = args[0];
- VisitForAccumulatorValue(iterable);
-
if (hint == IteratorType::kAsync) {
// Set method to GetMethod(obj, @@asyncIterator)
builder()->StoreAccumulatorInRegister(obj).LoadAsyncIteratorProperty(
@@ -4346,9 +4993,9 @@ void BytecodeGenerator::BuildGetIterator(Expression* iterable,
// Returns an IteratorRecord which is valid for the lifetime of the current
// register_allocation_scope.
BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
- Expression* iterable, Register next, Register object, IteratorType hint) {
+ Register next, Register object, IteratorType hint) {
DCHECK(next.is_valid() && object.is_valid());
- BuildGetIterator(iterable, hint);
+ BuildGetIterator(hint);
builder()
->StoreAccumulatorInRegister(object)
@@ -4359,10 +5006,10 @@ BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
}
BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
- Expression* iterable, IteratorType hint) {
+ IteratorType hint) {
Register next = register_allocator()->NewRegister();
Register object = register_allocator()->NewRegister();
- return BuildGetIteratorRecord(iterable, next, object, hint);
+ return BuildGetIteratorRecord(next, object, hint);
}
void BytecodeGenerator::BuildIteratorNext(const IteratorRecord& iterator,
@@ -4371,7 +5018,9 @@ void BytecodeGenerator::BuildIteratorNext(const IteratorRecord& iterator,
builder()->CallProperty(iterator.next(), RegisterList(iterator.object()),
feedback_index(feedback_spec()->AddCallICSlot()));
- // TODO(caitp): support async IteratorNext here.
+ if (iterator.type() == IteratorType::kAsync) {
+ BuildAwait();
+ }
BytecodeLabel is_object;
builder()
@@ -4413,7 +5062,7 @@ void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
if (iterator.type() == IteratorType::kAsync) {
DCHECK_NOT_NULL(expr);
- BuildAwait(expr);
+ BuildAwait(expr->position());
}
builder()->JumpIfJSReceiver(done.New());
@@ -4428,11 +5077,6 @@ void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
done.Bind(builder());
}
-void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
- builder()->SetExpressionPosition(expr);
- BuildGetIterator(expr->iterable(), expr->hint());
-}
-
void BytecodeGenerator::VisitGetTemplateObject(GetTemplateObject* expr) {
builder()->SetExpressionPosition(expr);
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
@@ -4730,10 +5374,6 @@ void BytecodeGenerator::VisitNaryLogicalAndExpression(NaryOperation* expr) {
}
}
-void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
- Visit(expr->expression());
-}
-
void BytecodeGenerator::BuildNewLocalActivationContext() {
ValueResultScope value_execution_result(this);
Scope* scope = closure_scope();
@@ -4888,7 +5528,7 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
// to pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructible, so don't
// assign anything to the new.target variable.
- if (info()->literal()->CanSuspend()) return;
+ if (IsResumableFunction(info()->literal()->kind())) return;
if (variable->location() == VariableLocation::LOCAL) {
// The new.target register was already assigned by entry trampoline.
@@ -4908,10 +5548,15 @@ void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() {
Variable* generator_object_var = closure_scope()->generator_object_var();
RegisterAllocationScope register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(2);
+ Runtime::FunctionId function_id =
+ (IsAsyncFunction(info()->literal()->kind()) &&
+ !IsAsyncGeneratorFunction(info()->literal()->kind()))
+ ? Runtime::kInlineAsyncFunctionEnter
+ : Runtime::kInlineCreateJSGeneratorObject;
builder()
->MoveRegister(Register::function_closure(), args[0])
.MoveRegister(builder()->Receiver(), args[1])
- .CallRuntime(Runtime::kInlineCreateJSGeneratorObject, args)
+ .CallRuntime(function_id, args)
.StoreAccumulatorInRegister(generator_object());
if (generator_object_var->location() == VariableLocation::LOCAL) {
@@ -4934,7 +5579,9 @@ void BytecodeGenerator::BuildPushUndefinedIntoRegisterList(
void BytecodeGenerator::BuildLoadPropertyKey(LiteralProperty* property,
Register out_reg) {
if (property->key()->IsStringLiteral()) {
- VisitForRegisterValue(property->key(), out_reg);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawString())
+ .StoreAccumulatorInRegister(out_reg);
} else {
VisitForAccumulatorValue(property->key());
builder()->ToName(out_reg);
@@ -5107,7 +5754,7 @@ LanguageMode BytecodeGenerator::language_mode() const {
}
Register BytecodeGenerator::generator_object() const {
- DCHECK(info()->literal()->CanSuspend());
+ DCHECK(IsResumableFunction(info()->literal()->kind()));
return incoming_new_target_or_generator_;
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 3150245b0b..a5c573f7ff 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -32,7 +32,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
explicit BytecodeGenerator(
UnoptimizedCompilationInfo* info,
const AstStringConstants* ast_string_constants,
- ZoneVector<FunctionLiteral*>* eager_inner_literals);
+ std::vector<FunctionLiteral*>* eager_inner_literals);
void GenerateBytecode(uintptr_t stack_limit);
Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate,
@@ -44,7 +44,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Visiting function for declarations list and statements are overridden.
void VisitDeclarations(Declaration::List* declarations);
- void VisitStatements(ZonePtrList<Statement>* statments);
+ void VisitStatements(const ZonePtrList<Statement>* statments);
private:
class ContextScope;
@@ -62,6 +62,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class IteratorRecord;
class NaryCodeCoverageSlots;
class RegisterAllocationScope;
+ class AccumulatorPreservingScope;
class TestResultScope;
class ValueResultScope;
@@ -69,6 +70,80 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
enum class TestFallthrough { kThen, kElse, kNone };
enum class TypeHint { kAny, kBoolean, kString };
+ enum class AccumulatorPreservingMode { kNone, kPreserve };
+
+ // An assignment has to evaluate its LHS before its RHS, but has to assign to
+ // the LHS after both evaluations are done. This class stores the data
+ // computed in the LHS evaluation that has to live across the RHS evaluation,
+ // and is used in the actual LHS assignment.
+ class AssignmentLhsData {
+ public:
+ static AssignmentLhsData NonProperty(Expression* expr);
+ static AssignmentLhsData NamedProperty(Expression* object_expr,
+ Register object,
+ const AstRawString* name);
+ static AssignmentLhsData KeyedProperty(Register object, Register key);
+ static AssignmentLhsData NamedSuperProperty(
+ RegisterList super_property_args);
+ static AssignmentLhsData KeyedSuperProperty(
+ RegisterList super_property_args);
+
+ AssignType assign_type() const { return assign_type_; }
+ Expression* expr() const {
+ DCHECK_EQ(assign_type_, NON_PROPERTY);
+ return expr_;
+ }
+ Expression* object_expr() const {
+ DCHECK_EQ(assign_type_, NAMED_PROPERTY);
+ return object_expr_;
+ }
+ Register object() const {
+ DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY);
+ return object_;
+ }
+ Register key() const {
+ DCHECK_EQ(assign_type_, KEYED_PROPERTY);
+ return key_;
+ }
+ const AstRawString* name() const {
+ DCHECK_EQ(assign_type_, NAMED_PROPERTY);
+ return name_;
+ }
+ RegisterList super_property_args() const {
+ DCHECK(assign_type_ == NAMED_SUPER_PROPERTY ||
+ assign_type_ == KEYED_SUPER_PROPERTY);
+ return super_property_args_;
+ }
+
+ private:
+ AssignmentLhsData(AssignType assign_type, Expression* expr,
+ RegisterList super_property_args, Register object,
+ Register key, Expression* object_expr,
+ const AstRawString* name)
+ : assign_type_(assign_type),
+ expr_(expr),
+ super_property_args_(super_property_args),
+ object_(object),
+ key_(key),
+ object_expr_(object_expr),
+ name_(name) {}
+
+ AssignType assign_type_;
+
+ // Different assignment types use different fields:
+ //
+ // NON_PROPERTY: expr
+ // NAMED_PROPERTY: object_expr, object, name
+ // KEYED_PROPERTY: object, key
+ // NAMED_SUPER_PROPERTY: super_property_args
+ // KEYED_SUPER_PROPERTY: super_property_args
+ Expression* expr_;
+ RegisterList super_property_args_;
+ Register object_;
+ Register key_;
+ Expression* object_expr_;
+ const AstRawString* name_;
+ };
void GenerateBytecodeBody();
void AllocateDeferredConstants(Isolate* isolate, Handle<Script> script);
@@ -101,7 +176,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Visit the arguments expressions in |args| and store them in |args_regs|,
// growing |args_regs| for each argument visited.
- void VisitArguments(ZonePtrList<Expression>* args, RegisterList* arg_regs);
+ void VisitArguments(const ZonePtrList<Expression>* args,
+ RegisterList* arg_regs);
// Visit a keyed super property load. The optional
// |opt_receiver_out| register will have the receiver stored to it
@@ -121,9 +197,23 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitPropertyLoadForRegister(Register obj, Property* expr,
Register destination);
- void BuildLoadNamedProperty(Property* property, Register object,
+ AssignmentLhsData PrepareAssignmentLhs(
+ Expression* lhs, AccumulatorPreservingMode accumulator_preserving_mode =
+ AccumulatorPreservingMode::kNone);
+ void BuildAssignment(const AssignmentLhsData& data, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode);
+
+ Expression* GetDestructuringDefaultValue(Expression** target);
+ void BuildDestructuringArrayAssignment(
+ ArrayLiteral* pattern, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode);
+ void BuildDestructuringObjectAssignment(
+ ObjectLiteral* pattern, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode);
+
+ void BuildLoadNamedProperty(const Expression* object_expr, Register object,
const AstRawString* name);
- void BuildStoreNamedProperty(Property* property, Register object,
+ void BuildStoreNamedProperty(const Expression* object_expr, Register object,
const AstRawString* name);
void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
@@ -155,23 +245,25 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildNewLocalWithContext(Scope* scope);
void BuildGeneratorPrologue();
- void BuildSuspendPoint(Expression* suspend_expr);
+ void BuildSuspendPoint(int position);
+ void BuildAwait(int position = kNoSourcePosition);
void BuildAwait(Expression* await_expr);
- void BuildGetIterator(Expression* iterable, IteratorType hint);
+ void BuildFinalizeIteration(IteratorRecord iterator, Register done,
+ Register iteration_continuation_token);
+
+ void BuildGetIterator(IteratorType hint);
// Create an IteratorRecord with pre-allocated registers holding the next
// method and iterator object.
- IteratorRecord BuildGetIteratorRecord(Expression* iterable,
- Register iterator_next,
+ IteratorRecord BuildGetIteratorRecord(Register iterator_next,
Register iterator_object,
IteratorType hint);
// Create an IteratorRecord allocating new registers to hold the next method
// and iterator object.
- IteratorRecord BuildGetIteratorRecord(Expression* iterable,
- IteratorType hint);
+ IteratorRecord BuildGetIteratorRecord(IteratorType hint);
void BuildIteratorNext(const IteratorRecord& iterator, Register next_result);
void BuildIteratorClose(const IteratorRecord& iterator,
Expression* expr = nullptr);
@@ -180,24 +272,28 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
BytecodeLabel* if_called,
BytecodeLabels* if_notcalled);
- void BuildArrayLiteralSpread(Spread* spread, Register array, Register index,
- FeedbackSlot index_slot,
- FeedbackSlot element_slot);
+ void BuildFillArrayWithIterator(IteratorRecord iterator, Register array,
+ Register index, Register value,
+ FeedbackSlot next_value_slot,
+ FeedbackSlot next_done_slot,
+ FeedbackSlot index_slot,
+ FeedbackSlot element_slot);
// Create Array literals. |expr| can be nullptr, but if provided,
// a boilerplate will be used to create an initial array for elements
// before the first spread.
- void BuildCreateArrayLiteral(ZonePtrList<Expression>* elements,
+ void BuildCreateArrayLiteral(const ZonePtrList<Expression>* elements,
ArrayLiteral* expr);
void BuildCreateObjectLiteral(Register literal, uint8_t flags, size_t entry);
void AllocateTopLevelRegisters();
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
- void BuildClassLiteral(ClassLiteral* expr);
+ void BuildClassLiteral(ClassLiteral* expr, Register name);
+ void VisitClassLiteral(ClassLiteral* expr, Register name);
void VisitNewTargetVariable(Variable* variable);
void VisitThisFunctionVariable(Variable* variable);
- void BuildInstanceFieldInitialization(Register constructor,
- Register instance);
+ void BuildInstanceMemberInitialization(Register constructor,
+ Register instance);
void BuildGeneratorObjectVariableInitialization();
void VisitBlockDeclarationsAndStatements(Block* stmt);
void VisitSetHomeObject(Register value, Register home_object,
@@ -248,6 +344,16 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildTest(ToBooleanMode mode, BytecodeLabels* then_labels,
BytecodeLabels* else_labels, TestFallthrough fallthrough);
+ template <typename TryBodyFunc, typename CatchBodyFunc>
+ void BuildTryCatch(TryBodyFunc try_body_func, CatchBodyFunc catch_body_func,
+ HandlerTable::CatchPrediction catch_prediction,
+ TryCatchStatement* stmt_for_coverage = nullptr);
+ template <typename TryBodyFunc, typename FinallyBodyFunc>
+ void BuildTryFinally(TryBodyFunc try_body_func,
+ FinallyBodyFunc finally_body_func,
+ HandlerTable::CatchPrediction catch_prediction,
+ TryFinallyStatement* stmt_for_coverage = nullptr);
+
// Visitors for obtaining expression result in the accumulator, in a
// register, or just getting the effect. Some visitors return a TypeHint which
// specifies the type of the result of the visited expression.
@@ -351,7 +457,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Scope* current_scope_;
// External vector of literals to be eagerly compiled.
- ZoneVector<FunctionLiteral*>* eager_inner_literals_;
+ std::vector<FunctionLiteral*>* eager_inner_literals_;
FeedbackSlotCache* feedback_slot_cache_;
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index d79bf9a9d0..56f6297016 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -11,27 +11,27 @@ namespace interpreter {
static const int kLastParamRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kLastParamFromFp) /
- kPointerSize;
+ kSystemPointerSize;
static const int kFunctionClosureRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
StandardFrameConstants::kFunctionOffset) /
- kPointerSize;
+ kSystemPointerSize;
static const int kCurrentContextRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
StandardFrameConstants::kContextOffset) /
- kPointerSize;
+ kSystemPointerSize;
static const int kBytecodeArrayRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kBytecodeArrayFromFp) /
- kPointerSize;
+ kSystemPointerSize;
static const int kBytecodeOffsetRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kBytecodeOffsetFromFp) /
- kPointerSize;
+ kSystemPointerSize;
static const int kCallerPCOffsetRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kCallerPCOffsetFromFp) /
- kPointerSize;
+ kSystemPointerSize;
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index b5420f7e72..ae8bbe4275 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -91,7 +91,7 @@ class V8_EXPORT_PRIVATE Register final {
static const int kInvalidIndex = kMaxInt;
static const int kRegisterFileStartOffset =
- InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize;
+ InterpreterFrameConstants::kRegisterFileFromFp / kSystemPointerSize;
int index_;
};
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 39f61eb9bd..7efcd1ae62 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -255,8 +255,8 @@ namespace interpreter {
OperandType::kIdx, OperandType::kFlag8) \
V(CreateArrayFromIterable, AccumulatorUse::kReadWrite) \
V(CreateEmptyArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(CreateObjectLiteral, AccumulatorUse::kNone, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kFlag8, OperandType::kRegOut) \
+ V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kFlag8) \
V(CreateEmptyObjectLiteral, AccumulatorUse::kWrite) \
V(CloneObject, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kFlag8, OperandType::kIdx) \
@@ -687,6 +687,15 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode == Bytecode::kInvokeIntrinsic;
}
+ // Returns true if the bytecode is a one-shot bytecode. One-shot bytecodes
+ // don't collect feedback and are intended for code that runs only once and
+ // shouldn't be optimized.
+ static constexpr bool IsOneShotBytecode(Bytecode bytecode) {
+ return bytecode == Bytecode::kCallNoFeedback ||
+ bytecode == Bytecode::kLdaNamedPropertyNoFeedback ||
+ bytecode == Bytecode::kStaNamedPropertyNoFeedback;
+ }
+
// Returns true if the bytecode is a scaling prefix bytecode.
static constexpr bool IsPrefixScalingBytecode(Bytecode bytecode) {
return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide ||
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 47bb955374..d77960f7a1 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -66,12 +66,12 @@ const ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
#if DEBUG
void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
Isolate* isolate) const {
- std::set<Smi*> smis;
+ std::set<Smi> smis;
std::set<double> heap_numbers;
std::set<const AstRawString*> strings;
std::set<const char*> bigints;
std::set<const Scope*> scopes;
- std::set<Object*> deferred_objects;
+ std::set<Object, Object::Comparer> deferred_objects;
for (const Entry& entry : constants_) {
bool duplicate = false;
switch (entry.tag_) {
@@ -207,7 +207,7 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
return fixed_array;
}
-size_t ConstantArrayBuilder::Insert(Smi* smi) {
+size_t ConstantArrayBuilder::Insert(Smi smi) {
auto entry = smi_map_.find(smi);
if (entry == smi_map_.end()) {
return AllocateReservedEntry(smi);
@@ -312,7 +312,7 @@ void ConstantArrayBuilder::SetDeferredAt(size_t index, Handle<Object> object) {
return slice->At(index).SetDeferred(object);
}
-void ConstantArrayBuilder::SetJumpTableSmi(size_t index, Smi* smi) {
+void ConstantArrayBuilder::SetJumpTableSmi(size_t index, Smi smi) {
ConstantArraySlice* slice = IndexToSlice(index);
// Allow others to reuse these Smis, but insert using emplace to avoid
// overwriting existing values in the Smi map (which may have a smaller
@@ -332,14 +332,14 @@ OperandSize ConstantArrayBuilder::CreateReservedEntry() {
}
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateReservedEntry(
- Smi* value) {
+ Smi value) {
index_t index = static_cast<index_t>(AllocateIndex(Entry(value)));
smi_map_[value] = index;
return index;
}
size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
- Smi* value) {
+ Smi value) {
DiscardReservedEntry(operand_size);
size_t index;
auto entry = smi_map_.find(value);
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index f06983abfa..d736996d5e 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -9,6 +9,7 @@
#include "src/globals.h"
#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
+#include "src/objects/smi.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -48,7 +49,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
static const size_t k32BitCapacity =
kMaxUInt32 - k16BitCapacity - k8BitCapacity + 1;
- ConstantArrayBuilder(Zone* zone);
+ explicit ConstantArrayBuilder(Zone* zone);
// Generate a fixed array of constant handles based on inserted objects.
Handle<FixedArray> ToFixedArray(Isolate* isolate);
@@ -63,7 +64,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
// Insert an object into the constants array if it is not already present.
// Returns the array index associated with the object.
- size_t Insert(Smi* smi);
+ size_t Insert(Smi smi);
size_t Insert(double number);
size_t Insert(const AstRawString* raw_string);
size_t Insert(AstBigInt bigint);
@@ -87,7 +88,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
// Sets the jump table entry at |index| to |smi|. Note that |index| is the
// constant pool index, not the switch case value.
- void SetJumpTableSmi(size_t index, Smi* smi);
+ void SetJumpTableSmi(size_t index, Smi smi);
// Creates a reserved entry in the constant pool and returns
// the size of the operand that'll be required to hold the entry
@@ -96,7 +97,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
// Commit reserved entry and returns the constant pool index for the
// SMI value.
- size_t CommitReservedEntry(OperandSize operand_size, Smi* value);
+ size_t CommitReservedEntry(OperandSize operand_size, Smi value);
// Discards constant pool reservation.
void DiscardReservedEntry(OperandSize operand_size);
@@ -111,7 +112,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
enum class Tag : uint8_t;
public:
- explicit Entry(Smi* smi) : smi_(smi), tag_(Tag::kSmi) {}
+ explicit Entry(Smi smi) : smi_(smi), tag_(Tag::kSmi) {}
explicit Entry(double heap_number)
: heap_number_(heap_number), tag_(Tag::kHeapNumber) {}
explicit Entry(const AstRawString* raw_string)
@@ -143,7 +144,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
handle_ = handle;
}
- void SetJumpTableSmi(Smi* smi) {
+ void SetJumpTableSmi(Smi smi) {
DCHECK_EQ(tag_, Tag::kUninitializedJumpTableSmi);
tag_ = Tag::kJumpTableSmi;
smi_ = smi;
@@ -156,7 +157,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
union {
Handle<Object> handle_;
- Smi* smi_;
+ Smi smi_;
double heap_number_;
const AstRawString* raw_string_;
AstBigInt bigint_;
@@ -186,7 +187,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
index_t AllocateIndex(Entry constant_entry);
index_t AllocateIndexArray(Entry constant_entry, size_t size);
- index_t AllocateReservedEntry(Smi* value);
+ index_t AllocateReservedEntry(Smi value);
struct ConstantArraySlice final : public ZoneObject {
ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity,
@@ -227,8 +228,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
base::KeyEqualityMatcher<intptr_t>,
ZoneAllocationPolicy>
constants_map_;
- ZoneMap<Smi*, index_t> smi_map_;
- ZoneVector<std::pair<Smi*, index_t>> smi_pairs_;
+ ZoneMap<Smi, index_t> smi_map_;
+ ZoneVector<std::pair<Smi, index_t>> smi_pairs_;
ZoneMap<double, index_t> heap_number_map_;
#define SINGLETON_ENTRY_FIELD(NAME, LOWER_NAME) int LOWER_NAME##_;
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index cc8dfb1a30..dadfaa8783 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -13,7 +13,6 @@
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/machine-type.h"
-#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/zone/zone.h"
@@ -233,7 +232,7 @@ Node* InterpreterAssembler::RegisterLocation(Register reg) {
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return TimesPointerSize(index);
+ return TimesSystemPointerSize(index);
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
@@ -243,12 +242,12 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
Node* InterpreterAssembler::LoadRegister(Register reg) {
return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
- IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
+ IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2));
}
Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
- return LoadAndUntagSmi(GetInterpretedFramePointer(), reg.ToOperand()
- << kPointerSizeLog2);
+ return LoadAndUntagSmi(GetInterpretedFramePointer(),
+ reg.ToOperand() << kSystemPointerSizeLog2);
}
Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
@@ -299,7 +298,7 @@ Node* InterpreterAssembler::RegisterLocationInRegisterList(
void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
StoreNoWriteBarrier(
MachineRepresentation::kTagged, GetInterpretedFramePointer(),
- IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
+ IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2), value);
}
void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
@@ -309,7 +308,7 @@ void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
}
void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
- int offset = reg.ToOperand() << kPointerSizeLog2;
+ int offset = reg.ToOperand() << kSystemPointerSizeLog2;
StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
}
@@ -674,6 +673,11 @@ TNode<FeedbackVector> InterpreterAssembler::LoadFeedbackVector() {
return CodeStubAssembler::LoadFeedbackVector(function);
}
+Node* InterpreterAssembler::LoadFeedbackVectorUnchecked() {
+ TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
+ return CodeStubAssembler::LoadFeedbackVectorUnchecked(function);
+}
+
void InterpreterAssembler::CallPrologue() {
if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
// Bytecodes that make a call along the critical path save the bytecode
@@ -706,7 +710,7 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
Node* slot_id) {
Comment("increment call count");
TNode<Smi> call_count =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize));
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize));
// The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
// count are used as flags. To increment the call count by 1 we hence
// have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
@@ -714,7 +718,7 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
// Count is Smi, so we don't need a write barrier.
StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
- SKIP_WRITE_BARRIER, kPointerSize);
+ SKIP_WRITE_BARRIER, kTaggedSize);
}
void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
@@ -803,7 +807,7 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
Comment("transition to megamorphic");
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kmegamorphic_symbol));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
@@ -818,13 +822,22 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
}
void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
- Node* feedback_vector,
+ Node* maybe_feedback_vector,
Node* slot_id) {
+ Label feedback_done(this);
+ // If feedback_vector is not valid, then nothing to do.
+ GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
+
+ CSA_SLOW_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));
+
// Increment the call count.
- IncrementCallCount(feedback_vector, slot_id);
+ IncrementCallCount(maybe_feedback_vector, slot_id);
// Collect the callable {target} feedback.
- CollectCallableFeedback(target, context, feedback_vector, slot_id);
+ CollectCallableFeedback(target, context, maybe_feedback_vector, slot_id);
+ Goto(&feedback_done);
+
+ BIND(&feedback_done);
}
void InterpreterAssembler::CallJSAndDispatch(
@@ -898,10 +911,10 @@ template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
void InterpreterAssembler::CallJSWithSpreadAndDispatch(
Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
- Node* feedback_vector) {
+ Node* maybe_feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
- CollectCallFeedback(function, context, feedback_vector, slot_id);
+ CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
Comment("call using CallWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), ConvertReceiverMode::kAny,
@@ -926,6 +939,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
VARIABLE(var_site, MachineRepresentation::kTagged);
Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
construct(this), construct_array(this, &var_site);
+ GotoIf(IsUndefined(feedback_vector), &construct);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -1054,7 +1068,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
Comment("transition to megamorphic");
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kmegamorphic_symbol));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
@@ -1106,6 +1120,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// constructor _and_ spread the last argument at the same time.
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
Label extra_checks(this, Label::kDeferred), construct(this);
+ GotoIf(IsUndefined(feedback_vector), &construct);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -1195,7 +1210,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
Comment("transition to megamorphic");
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kmegamorphic_symbol));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
@@ -1235,8 +1250,9 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
Load(MachineType::Pointer(), function,
IntPtrConstant(offsetof(Runtime::Function, entry)));
- return CallStubR(callable.descriptor(), result_size, code_target, context,
- args.reg_count(), args.base_reg_location(), function_entry);
+ return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(),
+ result_size, code_target, context, args.reg_count(),
+ args.base_reg_location(), function_entry);
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
@@ -1403,7 +1419,7 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
- TimesPointerSize(target_bytecode));
+ TimesSystemPointerSize(target_bytecode));
return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
target_bytecode);
@@ -1460,7 +1476,7 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
Node* target_index = IntPtrAdd(base_index, next_bytecode);
Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
- TimesPointerSize(target_index));
+ TimesSystemPointerSize(target_index));
DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
next_bytecode);
@@ -1547,8 +1563,8 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
Node* source_bytecode_table_index = IntPtrConstant(
static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
- Node* counter_offset =
- TimesPointerSize(IntPtrAdd(source_bytecode_table_index, target_bytecode));
+ Node* counter_offset = TimesSystemPointerSize(
+ IntPtrAdd(source_bytecode_table_index, target_bytecode));
Node* old_counter =
Load(MachineType::IntPtr(), counters_table, counter_offset);
@@ -1771,24 +1787,14 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
// Record the type feedback collected for {object}.
Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
+
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(var_result.value());
Dispatch();
}
-void InterpreterAssembler::DeserializeLazyAndDispatch() {
- Node* context = GetContext();
- Node* bytecode_offset = BytecodeOffset();
- Node* bytecode = LoadBytecode(bytecode_offset);
-
- Node* target_handler =
- CallRuntime(Runtime::kInterpreterDeserializeLazy, context,
- SmiTag(bytecode), SmiConstant(operand_scale()));
- DispatchToBytecodeHandler(target_handler, bytecode_offset, bytecode);
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 036e920837..20922bc8b4 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -147,6 +147,10 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Load the FeedbackVector for the current function.
compiler::TNode<FeedbackVector> LoadFeedbackVector();
+ // Load the FeedbackVector for the current function. The returned node
+ // could be undefined.
+ compiler::Node* LoadFeedbackVectorUnchecked();
+
// Increment the call count for a CALL_IC or construct call.
// The call count is located at feedback_vector[slot_id + 1].
void IncrementCallCount(compiler::Node* feedback_vector,
@@ -162,7 +166,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// |feedback_vector| at |slot_id|, and the call counts in
// the |feedback_vector| at |slot_id+1|.
void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
- compiler::Node* feedback_vector,
+ compiler::Node* maybe_feedback_vector,
compiler::Node* slot_id);
// Call JSFunction or Callable |function| with |args| arguments, possibly
@@ -270,9 +274,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void ToNumberOrNumeric(Object::Conversion mode);
- // Lazily deserializes the current bytecode's handler and tail-calls into it.
- void DeserializeLazyAndDispatch();
-
private:
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index d2dab6d8d8..0ac2146731 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -15,13 +15,17 @@
#include "src/debug/debug.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/binary-op-assembler.h"
+#include "src/ic/ic.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics-generator.h"
#include "src/objects-inl.h"
+#include "src/objects/cell.h"
#include "src/objects/js-generator.h"
#include "src/objects/module.h"
+#include "src/objects/oddball.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
@@ -156,7 +160,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
void LdaGlobal(int slot_operand_index, int name_operand_index,
TypeofMode typeof_mode) {
- TNode<FeedbackVector> feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* feedback_slot = BytecodeOperandIdx(slot_operand_index);
AccessorAssembler accessor_asm(state());
@@ -172,9 +176,20 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
return CAST(name);
};
- accessor_asm.LoadGlobalIC(feedback_vector, feedback_slot, lazy_context,
- lazy_name, typeof_mode, &exit_point,
- CodeStubAssembler::INTPTR_PARAMETERS);
+ Label miss(this, Label::kDeferred);
+ ParameterMode slot_mode = CodeStubAssembler::INTPTR_PARAMETERS;
+ GotoIf(IsUndefined(maybe_feedback_vector), &miss);
+ accessor_asm.LoadGlobalIC(CAST(maybe_feedback_vector), feedback_slot,
+ lazy_context, lazy_name, typeof_mode, &exit_point,
+ slot_mode);
+
+ BIND(&miss);
+ {
+ exit_point.ReturnCallRuntime(
+ Runtime::kLoadGlobalIC_Miss, lazy_context(), lazy_name(),
+ ParameterToTagged(feedback_slot, slot_mode), maybe_feedback_vector,
+ SmiConstant(typeof_mode));
+ }
}
};
@@ -212,9 +227,23 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_vector = LoadFeedbackVectorUnchecked();
+
+ Label no_feedback(this, Label::kDeferred), end(this);
+ GotoIf(IsUndefined(maybe_vector), &no_feedback);
+
CallBuiltin(Builtins::kStoreGlobalIC, context, name, value, smi_slot,
- feedback_vector);
+ maybe_vector);
+ Goto(&end);
+
+ Bind(&no_feedback);
+ TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
+ Node* language_mode = GetLanguageMode(closure, context);
+ CallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, context, value, name,
+ language_mode);
+ Goto(&end);
+
+ Bind(&end);
Dispatch();
}
@@ -490,7 +519,7 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* feedback_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(feedback_slot);
@@ -539,11 +568,26 @@ IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
Node* name = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
- smi_slot, feedback_vector);
- SetAccumulator(result);
+
+ Label no_feedback(this, Label::kDeferred), end(this);
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ GotoIf(IsUndefined(feedback_vector), &no_feedback);
+ var_result.Bind(CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
+ smi_slot, feedback_vector));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ {
+ Comment("KeyedLoadIC_no_feedback");
+ var_result.Bind(CallRuntime(Runtime::kKeyedLoadIC_Miss, context, object,
+ name, smi_slot, feedback_vector));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -554,23 +598,39 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- void StaNamedProperty(Callable ic) {
+ void StaNamedProperty(Callable ic, NamedPropertyType property_type) {
Node* code_target = HeapConstant(ic.code());
Node* object = LoadRegisterAtOperandIndex(0);
Node* name = LoadConstantPoolEntryAtOperandIndex(1);
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallStub(ic.descriptor(), code_target, context, object, name,
- value, smi_slot, feedback_vector);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label no_feedback(this, Label::kDeferred), end(this);
+ GotoIf(IsUndefined(maybe_vector), &no_feedback);
+ var_result.Bind(CallStub(ic.descriptor(), code_target, context, object,
+ name, value, smi_slot, maybe_vector));
+ Goto(&end);
+
+ Bind(&no_feedback);
+ TNode<JSFunction> closure =
+ CAST(LoadRegister(Register::function_closure()));
+ Node* language_mode = GetLanguageMode(closure, context);
+ var_result.Bind(CallRuntime(Runtime::kStoreICNoFeedback_Miss, context,
+ value, object, name, language_mode,
+ SmiConstant(property_type)));
+ Goto(&end);
+
+ Bind(&end);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ SetAccumulator(var_result.value());
Dispatch();
}
};
@@ -582,7 +642,7 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
// accumulator.
IGNITION_HANDLER(StaNamedProperty, InterpreterStoreNamedPropertyAssembler) {
Callable ic = Builtins::CallableFor(isolate(), Builtins::kStoreIC);
- StaNamedProperty(ic);
+ StaNamedProperty(ic, NamedPropertyType::kNotOwn);
}
// StaNamedOwnProperty <object> <name_index> <slot>
@@ -592,7 +652,7 @@ IGNITION_HANDLER(StaNamedProperty, InterpreterStoreNamedPropertyAssembler) {
// accumulator.
IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate());
- StaNamedProperty(ic);
+ StaNamedProperty(ic, NamedPropertyType::kOwn);
}
// StaNamedPropertyNoFeedback <object> <name_index>
@@ -623,16 +683,31 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
- value, smi_slot, feedback_vector);
+
+ Label no_feedback(this, Label::kDeferred), end(this);
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ GotoIf(IsUndefined(maybe_vector), &no_feedback);
+
+ var_result.Bind(CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
+ value, smi_slot, maybe_vector));
+ Goto(&end);
+
+ Bind(&no_feedback);
+ TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
+ Node* language_mode = GetLanguageMode(closure, context);
+ var_result.Bind(CallRuntime(Runtime::kKeyedStoreICNoFeedback_Miss, context,
+ value, object, name, language_mode));
+ Goto(&end);
+
+ Bind(&end);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -646,16 +721,29 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
- index, value, smi_slot, feedback_vector);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label no_feedback(this, Label::kDeferred), end(this);
+ GotoIf(IsUndefined(feedback_vector), &no_feedback);
+
+ var_result.Bind(CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
+ index, value, smi_slot, feedback_vector));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ var_result.Bind(CallRuntime(Runtime::kStoreInArrayLiteralIC_Miss, context,
+ value, smi_slot, feedback_vector, array, index));
+ Goto(&end);
+
+ BIND(&end);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -674,7 +762,7 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
Node* vector_index = SmiTag(BytecodeOperandIdx(3));
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
@@ -812,11 +900,11 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
Node* rhs = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
BinaryOpAssembler binop_asm(state());
Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- feedback_vector, false);
+ maybe_feedback_vector, false);
SetAccumulator(result);
Dispatch();
}
@@ -826,11 +914,11 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
Node* rhs = BytecodeOperandImmSmi(0);
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
BinaryOpAssembler binop_asm(state());
Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- feedback_vector, true);
+ maybe_feedback_vector, true);
SetAccumulator(result);
Dispatch();
}
@@ -933,7 +1021,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
Node* right = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
TVARIABLE(Smi, var_left_feedback);
TVARIABLE(Smi, var_right_feedback);
@@ -959,7 +1047,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
BinaryOperationFeedback::kNumber);
TNode<Smi> input_feedback =
SmiOr(var_left_feedback.value(), var_right_feedback.value());
- UpdateFeedback(SmiOr(result_type, input_feedback), feedback_vector,
+ UpdateFeedback(SmiOr(result_type, input_feedback), maybe_feedback_vector,
slot_index);
SetAccumulator(result);
Dispatch();
@@ -974,7 +1062,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
CallRuntime(Runtime::kBigIntBinaryOp, context, var_left_bigint.value(),
var_right_bigint.value(), SmiConstant(bitwise_op)));
UpdateFeedback(SmiOr(var_left_feedback.value(), var_right_feedback.value()),
- feedback_vector, slot_index);
+ maybe_feedback_vector, slot_index);
Dispatch();
}
@@ -982,7 +1070,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
Node* left = GetAccumulator();
Node* right = BytecodeOperandImmSmi(0);
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
TVARIABLE(Smi, var_left_feedback);
@@ -1000,12 +1088,13 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
UpdateFeedback(SmiOr(result_type, var_left_feedback.value()),
- feedback_vector, slot_index);
+ maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
BIND(&if_bigint_mix);
- UpdateFeedback(var_left_feedback.value(), feedback_vector, slot_index);
+ UpdateFeedback(var_left_feedback.value(), maybe_feedback_vector,
+ slot_index);
ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
}
};
@@ -1088,7 +1177,7 @@ IGNITION_HANDLER(BitwiseAndSmi, InterpreterBitwiseBinaryOpAssembler) {
IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
Node* operand = GetAccumulator();
Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
VARIABLE(var_word32, MachineRepresentation::kWord32);
@@ -1105,15 +1194,15 @@ IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
TNode<Smi> result_type = SelectSmiConstant(
TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_feedback.value()), feedback_vector,
- slot_index);
+ UpdateFeedback(SmiOr(result_type, var_feedback.value()),
+ maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
// BigInt case.
BIND(&if_bigint);
- UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt), feedback_vector,
- slot_index);
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt),
+ maybe_feedback_vector, slot_index);
SetAccumulator(CallRuntime(Runtime::kBigIntUnaryOp, context,
var_bigint.value(),
SmiConstant(Operation::kBitwiseNot)));
@@ -1166,7 +1255,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
void UnaryOpWithFeedback() {
VARIABLE(var_value, MachineRepresentation::kTagged, GetAccumulator());
Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
VARIABLE(var_result, MachineRepresentation::kTagged);
VARIABLE(var_float_value, MachineRepresentation::kFloat64);
@@ -1245,7 +1334,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
}
BIND(&end);
- UpdateFeedback(var_feedback.value(), feedback_vector, slot_index);
+ UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -1522,11 +1611,11 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
Node* function = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
// Collect the {function} feedback.
- CollectCallFeedback(function, context, feedback_vector, slot_id);
+ CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
// Call the function and dispatch to the next handler.
CallJSAndDispatch(function, context, args, receiver_mode);
@@ -1555,11 +1644,11 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
Node* function = LoadRegisterAtOperandIndex(0);
Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
// Collect the {function} feedback.
- CollectCallFeedback(function, context, feedback_vector, slot_id);
+ CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
switch (kRecieverAndArgOperandCount) {
case 0:
@@ -1710,12 +1799,12 @@ IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
Node* callable = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
// Call into Runtime function CallWithSpread which does everything.
CallJSWithSpreadAndDispatch(callable, context, args, slot_id,
- feedback_vector);
+ maybe_feedback_vector);
}
// ConstructWithSpread <first_arg> <arg_count>
@@ -1729,7 +1818,7 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
Node* constructor = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
Node* result = ConstructWithSpread(constructor, context, new_target, args,
slot_id, feedback_vector);
@@ -1748,7 +1837,7 @@ IGNITION_HANDLER(Construct, InterpreterAssembler) {
Node* constructor = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
Node* result = Construct(constructor, context, new_target, args, slot_id,
feedback_vector);
@@ -1788,8 +1877,9 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
}
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
+ slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -1872,12 +1962,17 @@ IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
Node* object = LoadRegisterAtOperandIndex(0);
Node* callable = GetAccumulator();
Node* slot_id = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
+ Label feedback_done(this);
+ GotoIf(IsUndefined(feedback_vector), &feedback_done);
+
// Record feedback for the {callable} in the {feedback_vector}.
CollectCallableFeedback(callable, context, feedback_vector, slot_id);
+ Goto(&feedback_done);
+ BIND(&feedback_done);
// Perform the actual instanceof operation.
SetAccumulator(InstanceOf(object, callable, context));
Dispatch();
@@ -2362,14 +2457,29 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// <flags> and the pattern in <pattern_idx>.
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
Node* pattern = LoadConstantPoolEntryAtOperandIndex(0);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot_id = BytecodeOperandIdx(1);
Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
Node* context = GetContext();
+
+ VARIABLE(result, MachineRepresentation::kTagged);
+ Label no_feedback(this, Label::kDeferred), end(this);
+ GotoIf(IsUndefined(feedback_vector), &no_feedback);
+
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateRegExpLiteral(
- feedback_vector, slot_id, pattern, flags, context);
- SetAccumulator(result);
+ result.Bind(constructor_assembler.EmitCreateRegExpLiteral(
+ feedback_vector, slot_id, pattern, flags, context));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ {
+ result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context,
+ feedback_vector, SmiTag(slot_id), pattern, flags));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ SetAccumulator(result.value());
Dispatch();
}
@@ -2378,12 +2488,15 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
// Creates an array literal for literal index <literal_idx> with
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot_id = BytecodeOperandIdx(1);
Node* context = GetContext();
Node* bytecode_flags = BytecodeOperandFlag(2);
Label fast_shallow_clone(this), call_runtime(this, Label::kDeferred);
+ // No feedback, so handle it as a slow case.
+ GotoIf(IsUndefined(feedback_vector), &call_runtime);
+
Branch(IsSetWord32<CreateArrayLiteralFlags::FastCloneSupportedBit>(
bytecode_flags),
&fast_shallow_clone, &call_runtime);
@@ -2416,13 +2529,31 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
//
// Creates an empty JSArray literal for literal index <literal_idx>.
IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot_id = BytecodeOperandIdx(0);
Node* context = GetContext();
+
+ Label no_feedback(this, Label::kDeferred), end(this);
+ VARIABLE(result, MachineRepresentation::kTagged);
+ GotoIf(IsUndefined(feedback_vector), &no_feedback);
+
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateEmptyArrayLiteral(
- feedback_vector, slot_id, context);
- SetAccumulator(result);
+ result.Bind(constructor_assembler.EmitCreateEmptyArrayLiteral(
+ feedback_vector, slot_id, context));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ {
+ TNode<Map> array_map = LoadJSArrayElementsMap(GetInitialFastElementsKind(),
+ LoadNativeContext(context));
+ result.Bind(AllocateJSArray(GetInitialFastElementsKind(), array_map,
+ SmiConstant(0), SmiConstant(0), nullptr,
+ ParameterMode::SMI_PARAMETERS));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ SetAccumulator(result.value());
Dispatch();
}
@@ -2443,12 +2574,15 @@ IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) {
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot_id = BytecodeOperandIdx(1);
Node* bytecode_flags = BytecodeOperandFlag(2);
- // Check if we can do a fast clone or have to call the runtime.
Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
+ // No feedback, so handle it as a slow case.
+ GotoIf(IsUndefined(feedback_vector), &if_not_fast_clone);
+
+ // Check if we can do a fast clone or have to call the runtime.
Branch(IsSetWord32<CreateObjectLiteralFlags::FastCloneSupportedBit>(
bytecode_flags),
&if_fast_clone, &if_not_fast_clone);
@@ -2459,7 +2593,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitCreateShallowObjectLiteral(
feedback_vector, slot_id, &if_not_fast_clone);
- StoreRegisterAtOperandIndex(result, 3);
+ SetAccumulator(result);
Dispatch();
}
@@ -2477,7 +2611,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
Node* result =
CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
SmiTag(slot_id), object_boilerplate_description, flags);
- StoreRegisterAtOperandIndex(result, 3);
+ SetAccumulator(result);
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
}
@@ -2506,11 +2640,23 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
Node* smi_flags = SmiTag(raw_flags);
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallBuiltin(Builtins::kCloneObjectIC, context, source,
- smi_flags, smi_slot, feedback_vector);
- SetAccumulator(result);
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label no_feedback(this), end(this);
+ GotoIf(IsUndefined(maybe_feedback_vector), &no_feedback);
+ var_result.Bind(CallBuiltin(Builtins::kCloneObjectIC, context, source,
+ smi_flags, smi_slot, maybe_feedback_vector));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ var_result.Bind(CallRuntime(Runtime::kCloneObjectIC_Miss, context, source,
+ smi_flags, smi_slot, maybe_feedback_vector));
+ Goto(&end);
+
+ BIND(&end);
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -2520,12 +2666,15 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
// accumulator, creating and caching the site object on-demand as per the
// specification.
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot = BytecodeOperandIdx(1);
+
+ Label call_runtime(this, Label::kDeferred);
+ GotoIf(IsUndefined(feedback_vector), &call_runtime);
+
TNode<Object> cached_value =
CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
- Label call_runtime(this, Label::kDeferred);
GotoIf(WordEqual(cached_value, SmiConstant(0)), &call_runtime);
SetAccumulator(cached_value);
@@ -2537,7 +2686,13 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
Node* context = GetContext();
Node* result =
CallRuntime(Runtime::kCreateTemplateObject, context, description);
+
+ Label end(this);
+ GotoIf(IsUndefined(feedback_vector), &end);
StoreFeedbackVectorSlot(feedback_vector, slot, result);
+ Goto(&end);
+
+ Bind(&end);
SetAccumulator(result);
Dispatch();
}
@@ -2552,18 +2707,30 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
Node* flags = BytecodeOperandFlag(2);
Node* context = GetContext();
Node* slot = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- TNode<Object> feedback_cell =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
+ Label if_undefined(this), load_feedback_done(this);
+ Variable feedback_cell(this, MachineRepresentation::kTagged);
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
+
+ GotoIf(IsUndefined(feedback_vector), &if_undefined);
+ feedback_cell.Bind(LoadFeedbackVectorSlot(feedback_vector, slot));
+ Goto(&load_feedback_done);
+
+ BIND(&if_undefined);
+ {
+ feedback_cell.Bind(LoadRoot(RootIndex::kNoFeedbackCell));
+ Goto(&load_feedback_done);
+ }
+
+ BIND(&load_feedback_done);
Label if_fast(this), if_slow(this, Label::kDeferred);
Branch(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags), &if_fast,
&if_slow);
BIND(&if_fast);
{
- Node* result =
- CallBuiltin(Builtins::kFastNewClosure, context, shared, feedback_cell);
+ Node* result = CallBuiltin(Builtins::kFastNewClosure, context, shared,
+ feedback_cell.value());
SetAccumulator(result);
Dispatch();
}
@@ -2576,8 +2743,8 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
BIND(&if_newspace);
{
- Node* result =
- CallRuntime(Runtime::kNewClosure, context, shared, feedback_cell);
+ Node* result = CallRuntime(Runtime::kNewClosure, context, shared,
+ feedback_cell.value());
SetAccumulator(result);
Dispatch();
}
@@ -2585,7 +2752,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
BIND(&if_oldspace);
{
Node* result = CallRuntime(Runtime::kNewClosure_Tenured, context, shared,
- feedback_cell);
+ feedback_cell.value());
SetAccumulator(result);
Dispatch();
}
@@ -2754,6 +2921,7 @@ IGNITION_HANDLER(Throw, InterpreterAssembler) {
CallRuntime(Runtime::kThrow, context, exception);
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
// ReThrow
@@ -2765,6 +2933,7 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
CallRuntime(Runtime::kReThrow, context, exception);
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
// Abort <abort_reason>
@@ -2801,6 +2970,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
}
@@ -2819,6 +2989,7 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
CallRuntime(Runtime::kThrowSuperNotCalled, GetContext());
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
}
@@ -2838,6 +3009,7 @@ IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
CallRuntime(Runtime::kThrowSuperAlreadyCalledError, GetContext());
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
}
@@ -2922,7 +3094,7 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
Node* enumerator = GetAccumulator();
Node* vector_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
// The {enumerator} is either a Map or a FixedArray.
CSA_ASSERT(this, TaggedIsNotSmi(enumerator));
@@ -2948,7 +3120,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
Node* feedback = SelectSmiConstant(
IntPtrLessThanOrEqual(enum_length, enum_indices_length),
ForInFeedback::kEnumCacheKeysAndIndices, ForInFeedback::kEnumCacheKeys);
- UpdateFeedback(feedback, feedback_vector, vector_index);
+ UpdateFeedback(feedback, maybe_feedback_vector, vector_index);
// Construct the cache info triple.
Node* cache_type = enumerator;
@@ -2964,7 +3136,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
CSA_ASSERT(this, IsFixedArray(enumerator));
// Record the fact that we hit the for-in slow-path.
- UpdateFeedback(SmiConstant(ForInFeedback::kAny), feedback_vector,
+ UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
vector_index);
// Construct the cache info triple.
@@ -2986,7 +3158,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
Node* cache_array;
std::tie(cache_type, cache_array) = LoadRegisterPairAtOperandIndex(2);
Node* vector_index = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
// Load the next key from the enumeration array.
Node* key = LoadFixedArrayElement(CAST(cache_array), index, 0,
@@ -3005,7 +3177,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
BIND(&if_slow);
{
// Record the fact that we hit the for-in slow-path.
- UpdateFeedback(SmiConstant(ForInFeedback::kAny), feedback_vector,
+ UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
vector_index);
// Need to filter the {key} for the {receiver}.
@@ -3071,6 +3243,7 @@ IGNITION_HANDLER(ExtraWide, InterpreterAssembler) {
// An invalid bytecode aborting execution if dispatched.
IGNITION_HANDLER(Illegal, InterpreterAssembler) {
Abort(AbortReason::kInvalidBytecode);
+ Unreachable();
}
// SuspendGenerator <generator> <first input register> <register count>
@@ -3194,7 +3367,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
FLAG_untrusted_code_mitigations
? PoisoningMitigationLevel::kPoisonCriticalOnly
: PoisoningMitigationLevel::kDontPoison,
- 0, builtin_index);
+ builtin_index);
switch (bytecode) {
#define CALL_GENERATOR(Name, ...) \
@@ -3220,75 +3393,6 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
return code;
}
-namespace {
-
-// DeserializeLazy
-//
-// Deserialize the bytecode handler, store it in the dispatch table, and
-// finally jump there (preserving existing args).
-// We manually create a custom assembler instead of using the helper macros
-// above since no corresponding bytecode exists.
-class DeserializeLazyAssembler : public InterpreterAssembler {
- public:
- static const Bytecode kFakeBytecode = Bytecode::kIllegal;
-
- explicit DeserializeLazyAssembler(compiler::CodeAssemblerState* state,
- OperandScale operand_scale)
- : InterpreterAssembler(state, kFakeBytecode, operand_scale) {}
-
- static void Generate(compiler::CodeAssemblerState* state,
- OperandScale operand_scale) {
- DeserializeLazyAssembler assembler(state, operand_scale);
- state->SetInitialDebugInformation("DeserializeLazy", __FILE__, __LINE__);
- assembler.GenerateImpl();
- }
-
- private:
- void GenerateImpl() { DeserializeLazyAndDispatch(); }
-
- DISALLOW_COPY_AND_ASSIGN(DeserializeLazyAssembler);
-};
-
-} // namespace
-
-Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
- OperandScale operand_scale,
- int builtin_index,
- const AssemblerOptions& options) {
- Zone zone(isolate->allocator(), ZONE_NAME);
-
- std::string debug_name = std::string("DeserializeLazy");
- if (operand_scale > OperandScale::kSingle) {
- Bytecode prefix_bytecode =
- Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
- debug_name = debug_name.append(Bytecodes::ToString(prefix_bytecode));
- }
-
- compiler::CodeAssemblerState state(
- isolate, &zone, InterpreterDispatchDescriptor{}, Code::BYTECODE_HANDLER,
- debug_name.c_str(),
- FLAG_untrusted_code_mitigations
- ? PoisoningMitigationLevel::kPoisonCriticalOnly
- : PoisoningMitigationLevel::kDontPoison,
- 0, builtin_index);
-
- DeserializeLazyAssembler::Generate(&state, operand_scale);
- Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state, options);
- PROFILE(isolate,
- CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
- AbstractCode::cast(*code), debug_name.c_str()));
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_trace_ignition_codegen) {
- StdoutStream os;
- code->Disassemble(debug_name.c_str(), os);
- os << std::flush;
- }
-#endif // ENABLE_DISASSEMBLER
-
- return code;
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 3e261bea9f..cbb41a7af0 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -183,7 +183,8 @@ Node* IntrinsicsGenerator::IntrinsicAsStubCall(
stub_args[index++] = __ LoadRegisterFromRegisterList(args, i);
}
stub_args[index++] = context;
- return __ CallStubN(callable.descriptor(), 1, input_count, stub_args);
+ return __ CallStubN(StubCallMode::kCallCodeObject, callable.descriptor(), 1,
+ input_count, stub_args);
}
Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(
@@ -206,20 +207,6 @@ Node* IntrinsicsGenerator::HasProperty(
args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
-Node* IntrinsicsGenerator::RejectPromise(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- return IntrinsicAsStubCall(
- args, context,
- Builtins::CallableFor(isolate(), Builtins::kRejectPromise));
-}
-
-Node* IntrinsicsGenerator::ResolvePromise(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- return IntrinsicAsStubCall(
- args, context,
- Builtins::CallableFor(isolate(), Builtins::kResolvePromise));
-}
-
Node* IntrinsicsGenerator::ToString(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
@@ -352,6 +339,45 @@ Node* IntrinsicsGenerator::GetImportMetaObject(
return return_value.value();
}
+Node* IntrinsicsGenerator::AsyncFunctionAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncFunctionAwaitCaught);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncFunctionAwaitUncaught);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionEnter(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionEnter);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionReject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionReject);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionResolve(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionResolve);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncGeneratorAwaitCaught);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncGeneratorAwaitUncaught);
+}
+
Node* IntrinsicsGenerator::AsyncGeneratorReject(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 608b0afcac..c89f0c01c7 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -13,26 +13,31 @@ namespace interpreter {
// List of supported intrisics, with upper case name, lower case name and
// expected number of arguments (-1 denoting argument count is variable).
-#define INTRINSICS_LIST(V) \
- V(AsyncGeneratorReject, async_generator_reject, 2) \
- V(AsyncGeneratorResolve, async_generator_resolve, 3) \
- V(AsyncGeneratorYield, async_generator_yield, 3) \
- V(CreateJSGeneratorObject, create_js_generator_object, 2) \
- V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
- V(GeneratorClose, generator_close, 1) \
- V(GetImportMetaObject, get_import_meta_object, 0) \
- V(Call, call, -1) \
- V(CreateIterResultObject, create_iter_result_object, 2) \
- V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
- V(HasProperty, has_property, 2) \
- V(IsArray, is_array, 1) \
- V(IsJSReceiver, is_js_receiver, 1) \
- V(IsSmi, is_smi, 1) \
- V(IsTypedArray, is_typed_array, 1) \
- V(RejectPromise, reject_promise, 3) \
- V(ResolvePromise, resolve_promise, 2) \
- V(ToString, to_string, 1) \
- V(ToLength, to_length, 1) \
+#define INTRINSICS_LIST(V) \
+ V(AsyncFunctionAwaitCaught, async_function_await_caught, 2) \
+ V(AsyncFunctionAwaitUncaught, async_function_await_uncaught, 2) \
+ V(AsyncFunctionEnter, async_function_enter, 2) \
+ V(AsyncFunctionReject, async_function_reject, 3) \
+ V(AsyncFunctionResolve, async_function_resolve, 3) \
+ V(AsyncGeneratorAwaitCaught, async_generator_await_caught, 2) \
+ V(AsyncGeneratorAwaitUncaught, async_generator_await_uncaught, 2) \
+ V(AsyncGeneratorReject, async_generator_reject, 2) \
+ V(AsyncGeneratorResolve, async_generator_resolve, 3) \
+ V(AsyncGeneratorYield, async_generator_yield, 3) \
+ V(CreateJSGeneratorObject, create_js_generator_object, 2) \
+ V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
+ V(GeneratorClose, generator_close, 1) \
+ V(GetImportMetaObject, get_import_meta_object, 0) \
+ V(Call, call, -1) \
+ V(CreateIterResultObject, create_iter_result_object, 2) \
+ V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
+ V(HasProperty, has_property, 2) \
+ V(IsArray, is_array, 1) \
+ V(IsJSReceiver, is_js_receiver, 1) \
+ V(IsSmi, is_smi, 1) \
+ V(IsTypedArray, is_typed_array, 1) \
+ V(ToString, to_string, 1) \
+ V(ToLength, to_length, 1) \
V(ToObject, to_object, 1)
class IntrinsicsHelper {
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index ca53fa674c..4298003ce2 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -14,9 +14,10 @@
#include "src/counters-inl.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
-#include "src/log.h"
#include "src/objects-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/slots.h"
+#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/setup-isolate.h"
#include "src/snapshot/snapshot.h"
@@ -29,9 +30,10 @@ namespace interpreter {
class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
public:
- InterpreterCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator,
- ZoneVector<FunctionLiteral*>* eager_inner_literals);
+ InterpreterCompilationJob(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator,
+ std::vector<FunctionLiteral*>* eager_inner_literals);
protected:
Status ExecuteJobImpl() final;
@@ -48,7 +50,9 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
-Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
+Interpreter::Interpreter(Isolate* isolate)
+ : isolate_(isolate),
+ interpreter_entry_trampoline_instruction_start_(kNullAddress) {
memset(dispatch_table_, 0, sizeof(dispatch_table_));
if (FLAG_trace_ignition_dispatches) {
@@ -73,31 +77,15 @@ int BuiltinIndexFromBytecode(Bytecode bytecode, OperandScale operand_scale) {
} // namespace
-Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
- Bytecode bytecode, OperandScale operand_scale) {
+Code Interpreter::GetBytecodeHandler(Bytecode bytecode,
+ OperandScale operand_scale) {
int builtin_index = BuiltinIndexFromBytecode(bytecode, operand_scale);
Builtins* builtins = isolate_->builtins();
- Code* code = builtins->builtin(builtin_index);
-
- // Already deserialized? Then just return the handler.
- if (!Builtins::IsLazyDeserializer(code)) return code;
-
- DCHECK(FLAG_lazy_deserialization);
- DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
- code = Snapshot::DeserializeBuiltin(isolate_, builtin_index);
-
- DCHECK(code->IsCode());
- DCHECK_EQ(code->kind(), Code::BYTECODE_HANDLER);
- DCHECK(!Builtins::IsLazyDeserializer(code));
-
- SetBytecodeHandler(bytecode, operand_scale, code);
-
- return code;
+ return builtins->builtin(builtin_index);
}
void Interpreter::SetBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale,
- Code* handler) {
+ OperandScale operand_scale, Code handler) {
DCHECK(handler->kind() == Code::BYTECODE_HANDLER);
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
dispatch_table_[index] = handler->InstructionStart();
@@ -113,19 +101,36 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
}
void Interpreter::IterateDispatchTable(RootVisitor* v) {
+ if (FLAG_embedded_builtins && !isolate_->serializer_enabled() &&
+ isolate_->embedded_blob() != nullptr) {
+// If builtins are embedded (and we're not generating a snapshot), then
+// every bytecode handler will be off-heap, so there's no point iterating
+// over them.
+#ifdef DEBUG
+ for (int i = 0; i < kDispatchTableSize; i++) {
+ Address code_entry = dispatch_table_[i];
+ CHECK(code_entry == kNullAddress ||
+ InstructionStream::PcIsOffHeap(isolate_, code_entry));
+ }
+#endif // ENABLE_SLOW_DCHECKS
+ return;
+ }
+
for (int i = 0; i < kDispatchTableSize; i++) {
Address code_entry = dispatch_table_[i];
-
- // If the handler is embedded, it is immovable.
+ // Skip over off-heap bytecode handlers since they will never move.
if (InstructionStream::PcIsOffHeap(isolate_, code_entry)) continue;
- Object* code = code_entry == kNullAddress
- ? nullptr
- : Code::GetCodeFromTargetAddress(code_entry);
- Object* old_code = code;
- v->VisitRootPointer(Root::kDispatchTable, nullptr, &code);
+ // TODO(jkummerow): Would it hurt to simply do:
+ // if (code_entry == kNullAddress) continue;
+ Code code;
+ if (code_entry != kNullAddress) {
+ code = Code::GetCodeFromTargetAddress(code_entry);
+ }
+ Code old_code = code;
+ v->VisitRootPointer(Root::kDispatchTable, nullptr, FullObjectSlot(&code));
if (code != old_code) {
- dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
+ dispatch_table_[i] = code->entry();
}
}
}
@@ -168,7 +173,7 @@ bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
InterpreterCompilationJob::InterpreterCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- ZoneVector<FunctionLiteral*>* eager_inner_literals)
+ std::vector<FunctionLiteral*>* eager_inner_literals)
: UnoptimizedCompilationJob(parse_info->stack_limit(), parse_info,
&compilation_info_),
zone_(allocator, ZONE_NAME),
@@ -228,7 +233,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
UnoptimizedCompilationJob* Interpreter::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- ZoneVector<FunctionLiteral*>* eager_inner_literals) {
+ std::vector<FunctionLiteral*>* eager_inner_literals) {
return new InterpreterCompilationJob(parse_info, literal, allocator,
eager_inner_literals);
}
@@ -248,13 +253,23 @@ void Interpreter::ForEachBytecode(
}
}
-void Interpreter::InitializeDispatchTable() {
+void Interpreter::Initialize() {
Builtins* builtins = isolate_->builtins();
- Code* illegal = builtins->builtin(Builtins::kIllegalHandler);
+
+ // Set the interpreter entry trampoline entry point now that builtins are
+ // initialized.
+ Handle<Code> code = BUILTIN_CODE(isolate_, InterpreterEntryTrampoline);
+ DCHECK(builtins->is_initialized());
+ DCHECK(code->is_off_heap_trampoline() ||
+ isolate_->heap()->IsImmovable(*code));
+ interpreter_entry_trampoline_instruction_start_ = code->InstructionStart();
+
+ // Initialize the dispatch table.
+ Code illegal = builtins->builtin(Builtins::kIllegalHandler);
int builtin_id = Builtins::kFirstBytecodeHandler;
ForEachBytecode([=, &builtin_id](Bytecode bytecode,
OperandScale operand_scale) {
- Code* handler = illegal;
+ Code handler = illegal;
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
#ifdef DEBUG
std::string builtin_name(Builtins::name(builtin_id));
@@ -268,22 +283,13 @@ void Interpreter::InitializeDispatchTable() {
});
DCHECK(builtin_id == Builtins::builtin_count);
DCHECK(IsDispatchTableInitialized());
-
-#if defined(V8_USE_SNAPSHOT) && !defined(V8_USE_SNAPSHOT_WITH_UNWINDING_INFO)
- if (!isolate_->serializer_enabled() && FLAG_perf_prof_unwinding_info) {
- StdoutStream{}
- << "Warning: The --perf-prof-unwinding-info flag can be passed at "
- "mksnapshot time to get better results."
- << std::endl;
- }
-#endif
}
bool Interpreter::IsDispatchTableInitialized() const {
return dispatch_table_[0] != kNullAddress;
}
-const char* Interpreter::LookupNameOfBytecodeHandler(const Code* code) {
+const char* Interpreter::LookupNameOfBytecodeHandler(const Code code) {
#ifdef ENABLE_DISASSEMBLER
#define RETURN_NAME(Name, ...) \
if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 5023b0ef00..c0dece5aae 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -19,7 +19,6 @@ namespace v8 {
namespace internal {
class Isolate;
-class BuiltinDeserializerAllocator;
class Callable;
class UnoptimizedCompilationJob;
class FunctionLiteral;
@@ -47,28 +46,27 @@ class Interpreter {
static UnoptimizedCompilationJob* NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- ZoneVector<FunctionLiteral*>* eager_inner_literals);
+ std::vector<FunctionLiteral*>* eager_inner_literals);
// If the bytecode handler for |bytecode| and |operand_scale| has not yet
// been loaded, deserialize it. Then return the handler.
- Code* GetAndMaybeDeserializeBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale);
+ Code GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
// Set the bytecode handler for |bytecode| and |operand_scale|.
void SetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale,
- Code* handler);
+ Code handler);
// GC support.
void IterateDispatchTable(RootVisitor* v);
// Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
- const char* LookupNameOfBytecodeHandler(const Code* code);
+ const char* LookupNameOfBytecodeHandler(const Code code);
V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
void ForEachBytecode(const std::function<void(Bytecode, OperandScale)>& f);
- void InitializeDispatchTable();
+ void Initialize();
bool IsDispatchTableInitialized() const;
@@ -80,10 +78,14 @@ class Interpreter {
return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get());
}
+ Address address_of_interpreter_entry_trampoline_instruction_start() const {
+ return reinterpret_cast<Address>(
+ &interpreter_entry_trampoline_instruction_start_);
+ }
+
private:
friend class SetupInterpreter;
friend class v8::internal::SetupIsolateDelegate;
- friend class v8::internal::BuiltinDeserializerAllocator;
uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
@@ -98,6 +100,7 @@ class Interpreter {
Isolate* isolate_;
Address dispatch_table_[kDispatchTableSize];
std::unique_ptr<uintptr_t[]> bytecode_dispatch_counters_table_;
+ Address interpreter_entry_trampoline_instruction_start_;
DISALLOW_COPY_AND_ASSIGN(Interpreter);
};