author    Myles Borins <mylesborins@google.com>  2017-08-01 11:36:44 -0500
committer Myles Borins <mylesborins@google.com>  2017-08-01 15:23:15 -0500
commit    0a66b223e149a841669bfad5598e4254589730cb
tree      5ec050f7f78aafbf5b1e0e50d639fb843141e162 /deps/v8/src/interpreter
parent    1782b3836ba58ef0da6b687f2bb970c0bd8199ad
deps: update V8 to 6.0.286.52
PR-URL: https://github.com/nodejs/node/pull/14004
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/interpreter')
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.cc | 99
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.h | 59
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc | 125
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h | 41
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.cc | 55
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.h | 26
-rw-r--r--  deps/v8/src/interpreter/bytecode-flags.cc | 15
-rw-r--r--  deps/v8/src/interpreter/bytecode-flags.h | 10
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc | 444
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.h | 9
-rw-r--r--  deps/v8/src/interpreter/bytecode-jump-table.h | 88
-rw-r--r--  deps/v8/src/interpreter/bytecode-node.cc (renamed from deps/v8/src/interpreter/bytecode-pipeline.cc) | 12
-rw-r--r--  deps/v8/src/interpreter/bytecode-node.h (renamed from deps/v8/src/interpreter/bytecode-pipeline.h) | 166
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-optimizer.cc | 25
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-optimizer.h | 12
-rw-r--r--  deps/v8/src/interpreter/bytecode-source-info.cc | 24
-rw-r--r--  deps/v8/src/interpreter/bytecode-source-info.h | 98
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h | 15
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.cc | 39
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.h | 29
-rw-r--r--  deps/v8/src/interpreter/control-flow-builders.cc | 36
-rw-r--r--  deps/v8/src/interpreter/control-flow-builders.h | 16
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc | 118
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc | 685
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics-generator.cc | 109
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics.h | 51
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc | 10
-rw-r--r--  deps/v8/src/interpreter/interpreter.h | 3
28 files changed, 1358 insertions(+), 1061 deletions(-)
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index cc6777588a..c3a0b3cb9e 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -168,11 +168,14 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
+Handle<Object> BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
+ return FixedArray::get(bytecode_array()->constant_pool(), index,
+ bytecode_array()->GetIsolate());
+}
+
Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
int operand_index) const {
- return FixedArray::get(bytecode_array()->constant_pool(),
- GetIndexOperand(operand_index),
- bytecode_array()->GetIsolate());
+ return GetConstantAtIndex(GetIndexOperand(operand_index));
}
int BytecodeArrayAccessor::GetJumpTargetOffset() const {
@@ -182,16 +185,31 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
if (bytecode == Bytecode::kJumpLoop) {
relative_offset = -relative_offset;
}
- return current_offset() + relative_offset + current_prefix_offset();
+ return GetAbsoluteOffset(relative_offset);
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
- return current_offset() + smi->value() + current_prefix_offset();
+ return GetAbsoluteOffset(smi->value());
} else {
UNREACHABLE();
return kMinInt;
}
}
+JumpTableTargetOffsets BytecodeArrayAccessor::GetJumpTableTargetOffsets()
+ const {
+ DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
+
+ uint32_t table_start = GetIndexOperand(0);
+ uint32_t table_size = GetUnsignedImmediateOperand(1);
+ int32_t case_value_base = GetImmediateOperand(2);
+
+ return JumpTableTargetOffsets(this, table_start, table_size, case_value_base);
+}
+
+int BytecodeArrayAccessor::GetAbsoluteOffset(int relative_offset) const {
+ return current_offset() + relative_offset + current_prefix_offset();
+}
+
bool BytecodeArrayAccessor::OffsetWithinBytecode(int offset) const {
return current_offset() <= offset &&
offset < current_offset() + current_bytecode_size();
@@ -203,6 +221,77 @@ std::ostream& BytecodeArrayAccessor::PrintTo(std::ostream& os) const {
bytecode_array()->parameter_count());
}
+JumpTableTargetOffsets::JumpTableTargetOffsets(
+ const BytecodeArrayAccessor* accessor, int table_start, int table_size,
+ int case_value_base)
+ : accessor_(accessor),
+ table_start_(table_start),
+ table_size_(table_size),
+ case_value_base_(case_value_base) {}
+
+JumpTableTargetOffsets::iterator JumpTableTargetOffsets::begin() const {
+ return iterator(case_value_base_, table_start_, table_start_ + table_size_,
+ accessor_);
+}
+JumpTableTargetOffsets::iterator JumpTableTargetOffsets::end() const {
+ return iterator(case_value_base_ + table_size_, table_start_ + table_size_,
+ table_start_ + table_size_, accessor_);
+}
+int JumpTableTargetOffsets::size() const {
+ int ret = 0;
+ // TODO(leszeks): Is there a more efficient way of doing this than iterating?
+ for (const auto& entry : *this) {
+ USE(entry);
+ ret++;
+ }
+ return ret;
+}
+
+JumpTableTargetOffsets::iterator::iterator(
+ int case_value, int table_offset, int table_end,
+ const BytecodeArrayAccessor* accessor)
+ : accessor_(accessor),
+ index_(case_value),
+ table_offset_(table_offset),
+ table_end_(table_end) {
+ UpdateAndAdvanceToValid();
+}
+
+JumpTableTargetOffset JumpTableTargetOffsets::iterator::operator*() {
+ DCHECK_LT(table_offset_, table_end_);
+ DCHECK(current_->IsSmi());
+ return {index_, accessor_->GetAbsoluteOffset(Smi::cast(*current_)->value())};
+}
+
+JumpTableTargetOffsets::iterator& JumpTableTargetOffsets::iterator::
+operator++() {
+ DCHECK_LT(table_offset_, table_end_);
+ ++table_offset_;
+ ++index_;
+ UpdateAndAdvanceToValid();
+ return *this;
+}
+
+bool JumpTableTargetOffsets::iterator::operator!=(
+ const JumpTableTargetOffsets::iterator& other) {
+ DCHECK_EQ(accessor_, other.accessor_);
+ DCHECK_EQ(table_end_, other.table_end_);
+ DCHECK_EQ(index_ - other.index_, table_offset_ - other.table_offset_);
+ return index_ != other.index_;
+}
+
+void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
+ if (table_offset_ >= table_end_) return;
+
+ current_ = accessor_->GetConstantAtIndex(table_offset_);
+ Isolate* isolate = accessor_->bytecode_array()->GetIsolate();
+ while (current_->IsTheHole(isolate)) {
+ ++table_offset_;
+ ++index_;
+ current_ = accessor_->GetConstantAtIndex(table_offset_);
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index e5a24f3e7f..e465a5c881 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -16,6 +16,50 @@ namespace v8 {
namespace internal {
namespace interpreter {
+class BytecodeArrayAccessor;
+
+struct V8_EXPORT_PRIVATE JumpTableTargetOffset {
+ int case_value;
+ int target_offset;
+};
+
+class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
+ public:
+ // Minimal iterator implementation for use in ranged-for.
+ class V8_EXPORT_PRIVATE iterator final {
+ public:
+ iterator(int case_value, int table_offset, int table_end,
+ const BytecodeArrayAccessor* accessor);
+
+ JumpTableTargetOffset operator*();
+ iterator& operator++();
+ bool operator!=(const iterator& other);
+
+ private:
+ void UpdateAndAdvanceToValid();
+
+ const BytecodeArrayAccessor* accessor_;
+ Handle<Object> current_;
+ int index_;
+ int table_offset_;
+ int table_end_;
+ };
+
+ JumpTableTargetOffsets(const BytecodeArrayAccessor* accessor, int table_start,
+ int table_size, int case_value_base);
+
+ iterator begin() const;
+ iterator end() const;
+
+ int size() const;
+
+ private:
+ const BytecodeArrayAccessor* accessor_;
+ int table_start_;
+ int table_size_;
+ int case_value_base_;
+};
+
class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
public:
BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
@@ -41,12 +85,21 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
int GetRegisterOperandRange(int operand_index) const;
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
+ Handle<Object> GetConstantAtIndex(int offset) const;
Handle<Object> GetConstantForIndexOperand(int operand_index) const;
- // Returns the absolute offset of the branch target at the current
- // bytecode. It is an error to call this method if the bytecode is
- // not for a jump or conditional jump.
+ // Returns the absolute offset of the branch target at the current bytecode.
+ // It is an error to call this method if the bytecode is not for a jump or
+ // conditional jump.
int GetJumpTargetOffset() const;
+ // Returns an iterator over the absolute offsets of the targets of the current
+ // switch bytecode's jump table. It is an error to call this method if the
+ // bytecode is not a switch.
+ JumpTableTargetOffsets GetJumpTableTargetOffsets() const;
+
+ // Returns the absolute offset of the bytecode at the given relative offset
+ // from the current bytecode.
+ int GetAbsoluteOffset(int relative_offset) const;
bool OffsetWithinBytecode(int offset) const;
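
The accessor additions above are designed for ranged-for consumption. A
minimal sketch of a caller (the PrintSwitchTargets helper and its
std::ostream parameter are hypothetical; only the accessor and iterator APIs
come from this patch):

void PrintSwitchTargets(const interpreter::BytecodeArrayAccessor& accessor,
                        std::ostream& os) {
  // Only valid while positioned on a SwitchOnSmiNoFeedback bytecode.
  DCHECK_EQ(accessor.current_bytecode(),
            interpreter::Bytecode::kSwitchOnSmiNoFeedback);
  // Each entry pairs a case value with the absolute offset of its handler;
  // constant-pool holes (cases that were never bound) are skipped by the
  // iterator's UpdateAndAdvanceToValid.
  for (interpreter::JumpTableTargetOffset entry :
       accessor.GetJumpTableTargetOffsets()) {
    os << "case " << entry.case_value << " -> @" << entry.target_offset
       << "\n";
  }
}
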
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 5634e1a6fd..80c59e4c47 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -6,8 +6,11 @@
#include "src/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-node.h"
#include "src/interpreter/bytecode-register-optimizer.h"
+#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
@@ -35,8 +38,8 @@ class RegisterTransferWriter final
};
BytecodeArrayBuilder::BytecodeArrayBuilder(
- Isolate* isolate, Zone* zone, int parameter_count, int context_count,
- int locals_count, FunctionLiteral* literal,
+ Isolate* isolate, Zone* zone, int parameter_count, int locals_count,
+ FunctionLiteral* literal,
SourcePositionTableBuilder::RecordingMode source_position_mode)
: zone_(zone),
literal_(literal),
@@ -46,14 +49,11 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
return_seen_in_block_(false),
parameter_count_(parameter_count),
local_register_count_(locals_count),
- context_register_count_(context_count),
register_allocator_(fixed_register_count()),
bytecode_array_writer_(zone, &constant_array_builder_,
source_position_mode),
- pipeline_(&bytecode_array_writer_),
register_optimizer_(nullptr) {
DCHECK_GE(parameter_count_, 0);
- DCHECK_GE(context_register_count_, 0);
DCHECK_GE(local_register_count_, 0);
if (FLAG_ignition_reo) {
@@ -65,16 +65,6 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
return_position_ = literal ? literal->return_position() : kNoSourcePosition;
}
-Register BytecodeArrayBuilder::first_context_register() const {
- DCHECK_GT(context_register_count_, 0);
- return Register(local_register_count_);
-}
-
-Register BytecodeArrayBuilder::last_context_register() const {
- DCHECK_GT(context_register_count_, 0);
- return Register(local_register_count_ + context_register_count_ - 1);
-}
-
Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
DCHECK_GE(parameter_index, 0);
// The parameter indices are shifted by 1 (receiver is the
@@ -106,8 +96,8 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
Handle<FixedArray> handler_table =
handler_table_builder()->ToHandlerTable(isolate);
- return pipeline_->ToBytecodeArray(isolate, register_count, parameter_count(),
- handler_table);
+ return bytecode_array_writer_.ToBytecodeArray(
+ isolate, register_count, parameter_count(), handler_table);
}
BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
@@ -134,7 +124,7 @@ void BytecodeArrayBuilder::SetDeferredSourceInfo(
if (deferred_source_info_.is_valid()) {
// Emit any previous deferred source info now as a nop.
BytecodeNode node = BytecodeNode::Nop(deferred_source_info_);
- pipeline()->Write(&node);
+ bytecode_array_writer_.Write(&node);
}
deferred_source_info_ = source_info;
}
@@ -146,19 +136,25 @@ void BytecodeArrayBuilder::AttachOrEmitDeferredSourceInfo(BytecodeNode* node) {
node->set_source_info(deferred_source_info_);
} else {
BytecodeNode node = BytecodeNode::Nop(deferred_source_info_);
- pipeline()->Write(&node);
+ bytecode_array_writer_.Write(&node);
}
deferred_source_info_.set_invalid();
}
void BytecodeArrayBuilder::Write(BytecodeNode* node) {
AttachOrEmitDeferredSourceInfo(node);
- pipeline()->Write(node);
+ bytecode_array_writer_.Write(node);
}
void BytecodeArrayBuilder::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
AttachOrEmitDeferredSourceInfo(node);
- pipeline()->WriteJump(node, label);
+ bytecode_array_writer_.WriteJump(node, label);
+}
+
+void BytecodeArrayBuilder::WriteSwitch(BytecodeNode* node,
+ BytecodeJumpTable* jump_table) {
+ AttachOrEmitDeferredSourceInfo(node);
+ bytecode_array_writer_.WriteSwitch(node, jump_table);
}
void BytecodeArrayBuilder::OutputLdarRaw(Register reg) {
@@ -294,8 +290,9 @@ class BytecodeNodeBuilder {
public:
template <typename... Operands>
INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
- BytecodeSourceInfo source_info,
Operands... operands)) {
+ static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands,
+ "too many operands for bytecode");
builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
// The "OperandHelper<operand_types>::Convert(builder, operands)..." will
// expand both the OperandType... and Operands... parameter packs e.g. for:
@@ -305,37 +302,45 @@ class BytecodeNodeBuilder {
// OperandHelper<OperandType::kReg>::Convert(builder, reg),
// OperandHelper<OperandType::kImm>::Convert(builder, immediate),
return BytecodeNode::Create<bytecode, accumulator_use, operand_types...>(
- source_info,
+ builder->CurrentSourcePosition(bytecode),
OperandHelper<operand_types>::Convert(builder, operands)...);
}
};
-#define DEFINE_BYTECODE_OUTPUT(name, ...) \
- template <typename... Operands> \
- void BytecodeArrayBuilder::Output##name(Operands... operands) { \
- static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands, \
- "too many operands for bytecode"); \
- BytecodeNode node( \
- BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
- Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
- operands...)); \
- Write(&node); \
- } \
- \
- template <typename... Operands> \
- void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
- Operands... operands) { \
- DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
- BytecodeNode node( \
- BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
- Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
- operands...)); \
- WriteJump(&node, label); \
- LeaveBasicBlock(); \
+#define DEFINE_BYTECODE_OUTPUT(name, ...) \
+ template <typename... Operands> \
+ BytecodeNode BytecodeArrayBuilder::Create##name##Node( \
+ Operands... operands) { \
+ return BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make( \
+ this, operands...); \
+ } \
+ \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(Operands... operands) { \
+ BytecodeNode node(Create##name##Node(operands...)); \
+ Write(&node); \
+ } \
+ \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
+ Operands... operands) { \
+ DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
+ BytecodeNode node(Create##name##Node(operands...)); \
+ WriteJump(&node, label); \
+ LeaveBasicBlock(); \
}
BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
#undef DEFINE_BYTECODE_OUTPUT
+void BytecodeArrayBuilder::OutputSwitchOnSmiNoFeedback(
+ BytecodeJumpTable* jump_table) {
+ BytecodeNode node(CreateSwitchOnSmiNoFeedbackNode(
+ jump_table->constant_pool_index(), jump_table->size(),
+ jump_table->case_value_base()));
+ WriteSwitch(&node, jump_table);
+ LeaveBasicBlock();
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
Register reg,
int feedback_slot) {
@@ -995,14 +1000,24 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
// Flush the register optimizer when binding a label to ensure all
// expected registers are valid when jumping to this label.
if (register_optimizer_) register_optimizer_->Flush();
- pipeline_->BindLabel(label);
+ bytecode_array_writer_.BindLabel(label);
LeaveBasicBlock();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
BytecodeLabel* label) {
- pipeline_->BindLabel(target, label);
+ bytecode_array_writer_.BindLabel(target, label);
+ LeaveBasicBlock();
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeJumpTable* jump_table,
+ int case_value) {
+ // Flush the register optimizer when binding a jump table entry to ensure
+ // all expected registers are valid when jumping to this location.
+ if (register_optimizer_) register_optimizer_->Flush();
+ bytecode_array_writer_.BindJumpTableEntry(jump_table, case_value);
LeaveBasicBlock();
return *this;
}
@@ -1121,6 +1136,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnSmiNoFeedback(
+ BytecodeJumpTable* jump_table) {
+ OutputSwitchOnSmiNoFeedback(jump_table);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
if (position != kNoSourcePosition) {
// We need to attach a non-breakable source position to a stack
@@ -1386,6 +1407,16 @@ size_t BytecodeArrayBuilder::GetConstantPoolEntry(const Scope* scope) {
SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_GETTER)
#undef ENTRY_GETTER
+BytecodeJumpTable* BytecodeArrayBuilder::AllocateJumpTable(
+ int size, int case_value_base) {
+ DCHECK_GT(size, 0);
+
+ size_t constant_pool_index = constant_array_builder()->InsertJumpTable(size);
+
+ return new (zone())
+ BytecodeJumpTable(constant_pool_index, size, case_value_base, zone());
+}
+
size_t BytecodeArrayBuilder::AllocateDeferredConstantPoolEntry() {
return constant_array_builder()->InsertDeferred();
}
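
Read together, the new builder surface composes as in this sketch (the
EmitThreeWaySwitch wrapper is hypothetical; AllocateJumpTable,
SwitchOnSmiNoFeedback, Bind and Jump are the entry points added or touched in
this file):

void EmitThreeWaySwitch(interpreter::BytecodeArrayBuilder* builder) {
  // Reserve three consecutive constant-pool slots; case values start at 0.
  interpreter::BytecodeJumpTable* table = builder->AllocateJumpTable(3, 0);
  interpreter::BytecodeLabel done;
  // Dispatch on the Smi in the accumulator; execution falls through to the
  // next bytecode when no table entry matches.
  builder->SwitchOnSmiNoFeedback(table);
  builder->Jump(&done);
  for (int case_value = 0; case_value < 3; case_value++) {
    // Bind patches the table's constant-pool slot with the jump delta and
    // flushes the register optimizer, as for label binds.
    builder->Bind(table, case_value);
    // ... emit this case's body here ...
    builder->Jump(&done);
  }
  builder->Bind(&done);
}
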
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index bc6d5a39d4..fa336cde13 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -12,6 +12,7 @@
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/handler-table-builder.h"
@@ -26,16 +27,16 @@ namespace interpreter {
class BytecodeLabel;
class BytecodeNode;
-class BytecodePipelineStage;
class BytecodeRegisterOptimizer;
+class BytecodeJumpTable;
class Register;
class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
: public NON_EXPORTED_BASE(ZoneObject) {
public:
BytecodeArrayBuilder(
- Isolate* isolate, Zone* zone, int parameter_count, int context_count,
- int locals_count, FunctionLiteral* literal = nullptr,
+ Isolate* isolate, Zone* zone, int parameter_count, int locals_count,
+ FunctionLiteral* literal = nullptr,
SourcePositionTableBuilder::RecordingMode source_position_mode =
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
@@ -53,17 +54,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
return local_register_count_;
}
- // Get number of contexts required for bytecode array.
- int context_count() const {
- DCHECK_GE(context_register_count_, 0);
- return context_register_count_;
- }
-
- Register first_context_register() const;
- Register last_context_register() const;
-
// Returns the number of fixed (non-temporary) registers.
- int fixed_register_count() const { return context_count() + locals_count(); }
+ int fixed_register_count() const { return locals_count(); }
// Returns the number of fixed and temporary registers.
int total_register_count() const {
@@ -359,6 +351,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
BytecodeArrayBuilder& Bind(const BytecodeLabel& target, BytecodeLabel* label);
+ BytecodeArrayBuilder& Bind(BytecodeJumpTable* jump_table, int case_value);
BytecodeArrayBuilder& Jump(BytecodeLabel* label);
BytecodeArrayBuilder& JumpLoop(BytecodeLabel* label, int loop_depth);
@@ -376,6 +369,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& JumpIfNotNil(BytecodeLabel* label, Token::Value op,
NilValue nil);
+ BytecodeArrayBuilder& SwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table);
+
BytecodeArrayBuilder& StackCheck(int position);
// Sets the pending message to the value in the accumulator, and returns the
@@ -413,6 +408,10 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// entry, so that it can be referenced by above exception handling support.
int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
+ // Allocates a new jump table of given |size| and |case_value_base| in the
+ // constant pool.
+ BytecodeJumpTable* AllocateJumpTable(int size, int case_value_base);
+
// Gets a constant pool entry.
size_t GetConstantPoolEntry(const AstRawString* raw_string);
size_t GetConstantPoolEntry(const AstValue* heap_number);
@@ -483,14 +482,18 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Returns the current source position for the given |bytecode|.
INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
-#define DECLARE_BYTECODE_OUTPUT(Name, ...) \
- template <typename... Operands> \
- INLINE(void Output##Name(Operands... operands)); \
- template <typename... Operands> \
+#define DECLARE_BYTECODE_OUTPUT(Name, ...) \
+ template <typename... Operands> \
+ INLINE(BytecodeNode Create##Name##Node(Operands... operands)); \
+ template <typename... Operands> \
+ INLINE(void Output##Name(Operands... operands)); \
+ template <typename... Operands> \
INLINE(void Output##Name(BytecodeLabel* label, Operands... operands));
BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
#undef DECLARE_BYTECODE_OUTPUT
+ INLINE(void OutputSwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table));
+
bool RegisterIsValid(Register reg) const;
bool RegisterListIsValid(RegisterList reg_list) const;
@@ -507,6 +510,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Write bytecode to bytecode array.
void Write(BytecodeNode* node);
void WriteJump(BytecodeNode* node, BytecodeLabel* label);
+ void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* label);
// Not implemented as the illegal bytecode is used internally to indicate a
// bytecode field is not valid or an error has occurred.
@@ -521,7 +525,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayWriter* bytecode_array_writer() {
return &bytecode_array_writer_;
}
- BytecodePipelineStage* pipeline() { return pipeline_; }
ConstantArrayBuilder* constant_array_builder() {
return &constant_array_builder_;
}
@@ -540,11 +543,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
bool return_seen_in_block_;
int parameter_count_;
int local_register_count_;
- int context_register_count_;
int return_position_;
BytecodeRegisterAllocator register_allocator_;
BytecodeArrayWriter bytecode_array_writer_;
- BytecodePipelineStage* pipeline_;
BytecodeRegisterOptimizer* register_optimizer_;
BytecodeSourceInfo latest_source_info_;
BytecodeSourceInfo deferred_source_info_;
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 22eabc6159..d3cc0204d4 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -5,8 +5,11 @@
#include "src/interpreter/bytecode-array-writer.h"
#include "src/api.h"
+#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-node.h"
#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/log.h"
#include "src/objects-inl.h"
@@ -33,10 +36,6 @@ BytecodeArrayWriter::BytecodeArrayWriter(
bytecodes_.reserve(512); // Derived via experimentation.
}
-// override
-BytecodeArrayWriter::~BytecodeArrayWriter() {}
-
-// override
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) {
@@ -57,7 +56,6 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
return bytecode_array;
}
-// override
void BytecodeArrayWriter::Write(BytecodeNode* node) {
DCHECK(!Bytecodes::IsJump(node->bytecode()));
@@ -69,7 +67,6 @@ void BytecodeArrayWriter::Write(BytecodeNode* node) {
EmitBytecode(node);
}
-// override
void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
DCHECK(Bytecodes::IsJump(node->bytecode()));
@@ -83,7 +80,20 @@ void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
EmitJump(node, label);
}
-// override
+void BytecodeArrayWriter::WriteSwitch(BytecodeNode* node,
+ BytecodeJumpTable* jump_table) {
+ DCHECK(Bytecodes::IsSwitch(node->bytecode()));
+
+ // TODO(rmcilroy): For jump tables we could also mark the table as dead,
+ // thereby avoiding emitting dead code when we bind the entries.
+ if (exit_seen_in_block_) return; // Don't emit dead code.
+ UpdateExitSeenInBlock(node->bytecode());
+ MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());
+
+ UpdateSourcePositionTable(node);
+ EmitSwitch(node, jump_table);
+}
+
void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
size_t current_offset = bytecodes()->size();
if (label->is_forward_target()) {
@@ -96,7 +106,6 @@ void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
exit_seen_in_block_ = false; // Starting a new basic block.
}
-// override
void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
BytecodeLabel* label) {
DCHECK(!label->is_bound());
@@ -112,6 +121,22 @@ void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
// changed here.
}
+void BytecodeArrayWriter::BindJumpTableEntry(BytecodeJumpTable* jump_table,
+ int case_value) {
+ DCHECK(!jump_table->is_bound(case_value));
+
+ size_t current_offset = bytecodes()->size();
+ size_t relative_jump = current_offset - jump_table->switch_bytecode_offset();
+
+ constant_array_builder()->SetJumpTableSmi(
+ jump_table->ConstantPoolEntryFor(case_value),
+ Smi::FromInt(static_cast<int>(relative_jump)));
+ jump_table->mark_bound(case_value);
+
+ InvalidateLastBytecode();
+ exit_seen_in_block_ = false; // Starting a new basic block.
+}
+
void BytecodeArrayWriter::UpdateSourcePositionTable(
const BytecodeNode* const node) {
int bytecode_offset = static_cast<int>(bytecodes()->size());
@@ -393,6 +418,20 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
EmitBytecode(node);
}
+void BytecodeArrayWriter::EmitSwitch(BytecodeNode* node,
+ BytecodeJumpTable* jump_table) {
+ DCHECK(Bytecodes::IsSwitch(node->bytecode()));
+
+ size_t current_offset = bytecodes()->size();
+ if (node->operand_scale() > OperandScale::kSingle) {
+ // Adjust for scaling byte prefix.
+ current_offset += 1;
+ }
+ jump_table->set_switch_bytecode_offset(current_offset);
+
+ EmitBytecode(node);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
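
The offset bookkeeping in EmitSwitch and BindJumpTableEntry is symmetric with
the accessor's GetAbsoluteOffset; a worked example with illustrative numbers,
assuming no operand-scaling prefix:

// EmitSwitch records the switch bytecode's own offset, say 10, in the table.
// BindJumpTableEntry later runs when bytecodes()->size() == 42 and stores
//   relative_jump = 42 - 10 = 32
// as a Smi in the table's constant-pool slot. At iteration time the accessor,
// positioned on the switch at offset 10, recovers the handler's position via
//   GetAbsoluteOffset(32) = current_offset() + 32 + current_prefix_offset()
//                         = 10 + 32 + 0 = 42.
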
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 6e9fc02ad8..b2dfae1ddd 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -7,7 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/globals.h"
-#include "src/interpreter/bytecode-pipeline.h"
+#include "src/interpreter/bytecodes.h"
#include "src/source-position-table.h"
namespace v8 {
@@ -18,26 +18,27 @@ class SourcePositionTableBuilder;
namespace interpreter {
class BytecodeLabel;
+class BytecodeNode;
+class BytecodeJumpTable;
class ConstantArrayBuilder;
// Class for emitting bytecode as the final stage of the bytecode
// generation pipeline.
-class V8_EXPORT_PRIVATE BytecodeArrayWriter final
- : public NON_EXPORTED_BASE(BytecodePipelineStage) {
+class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
public:
BytecodeArrayWriter(
Zone* zone, ConstantArrayBuilder* constant_array_builder,
SourcePositionTableBuilder::RecordingMode source_position_mode);
- virtual ~BytecodeArrayWriter();
- // BytecodePipelineStage interface.
- void Write(BytecodeNode* node) override;
- void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
- void BindLabel(BytecodeLabel* label) override;
- void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
- Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) override;
+ void Write(BytecodeNode* node);
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label);
+ void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table);
+ void BindLabel(BytecodeLabel* label);
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label);
+ void BindJumpTableEntry(BytecodeJumpTable* jump_table, int case_value);
+ Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int register_count,
+ int parameter_count,
+ Handle<FixedArray> handler_table);
private:
// Maximum sized packed bytecode is comprised of a prefix bytecode,
@@ -63,6 +64,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final
void EmitBytecode(const BytecodeNode* const node);
void EmitJump(BytecodeNode* node, BytecodeLabel* label);
+ void EmitSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table);
void UpdateSourcePositionTable(const BytecodeNode* const node);
void UpdateExitSeenInBlock(Bytecode bytecode);
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 25f5260de2..4d50bf69c3 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -23,19 +23,10 @@ uint8_t CreateArrayLiteralFlags::Encode(bool use_fast_shallow_clone,
}
// static
-uint8_t CreateObjectLiteralFlags::Encode(bool fast_clone_supported,
- int properties_count,
- int runtime_flags) {
+uint8_t CreateObjectLiteralFlags::Encode(int runtime_flags,
+ bool fast_clone_supported) {
uint8_t result = FlagsBits::encode(runtime_flags);
- if (fast_clone_supported) {
- STATIC_ASSERT(
- ConstructorBuiltins::kMaximumClonedShallowObjectProperties <=
- 1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
- DCHECK_LE(properties_count,
- ConstructorBuiltins::kMaximumClonedShallowObjectProperties);
- result |= CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
- properties_count);
- }
+ result |= FastCloneSupportedBit::encode(fast_clone_supported);
return result;
}
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index 0256bc249b..76e5f868c5 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -18,7 +18,7 @@ namespace interpreter {
class CreateArrayLiteralFlags {
public:
- class FlagsBits : public BitField8<int, 0, 3> {};
+ class FlagsBits : public BitField8<int, 0, 4> {};
class FastShallowCloneBit : public BitField8<bool, FlagsBits::kNext, 1> {};
static uint8_t Encode(bool use_fast_shallow_clone, int runtime_flags);
@@ -29,12 +29,10 @@ class CreateArrayLiteralFlags {
class CreateObjectLiteralFlags {
public:
- class FlagsBits : public BitField8<int, 0, 3> {};
- class FastClonePropertiesCountBits
- : public BitField8<int, FlagsBits::kNext, 3> {};
+ class FlagsBits : public BitField8<int, 0, 4> {};
+ class FastCloneSupportedBit : public BitField8<bool, FlagsBits::kNext, 1> {};
- static uint8_t Encode(bool fast_clone_supported, int properties_count,
- int runtime_flags);
+ static uint8_t Encode(int runtime_flags, bool fast_clone_supported);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CreateObjectLiteralFlags);
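
With FlagsBits widened to four bits, the encoded byte decomposes into the
runtime flags plus a single fast-clone bit. A self-contained sketch of the
layout (the example function and values are illustrative; the bit positions
follow from the BitField8 declarations above):

uint8_t EncodeObjectLiteralFlagsExample() {
  int runtime_flags = 0x5;            // Any value fitting FlagsBits' 4 bits.
  bool fast_clone_supported = true;
  uint8_t result = static_cast<uint8_t>(runtime_flags);  // Bits 0-3.
  if (fast_clone_supported) result |= 1 << 4;  // FastCloneSupportedBit, bit 4.
  return result;                      // 0x15, matching Encode(0x5, true).
}
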
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 87f2e1f0a6..7ca2c37607 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -11,6 +11,7 @@
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/interpreter/bytecode-flags.h"
+#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
@@ -40,8 +41,8 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
depth_ = outer_->depth_ + 1;
// Push the outer context into a new context register.
- Register outer_context_reg(builder()->first_context_register().index() +
- outer_->depth_);
+ Register outer_context_reg =
+ generator_->register_allocator()->NewRegister();
outer_->set_register(outer_context_reg);
generator_->builder()->PushContext(outer_context_reg);
}
@@ -145,7 +146,10 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
: generator_(generator),
deferred_(generator->zone()),
token_register_(token_register),
- result_register_(result_register) {}
+ result_register_(result_register),
+ return_token_(-1),
+ async_return_token_(-1),
+ rethrow_token_(-1) {}
// One recorded control-flow command.
struct Entry {
@@ -158,8 +162,12 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
// generates a new dispatch token that identifies one particular path. This
// expects the result to be in the accumulator.
void RecordCommand(Command command, Statement* statement) {
- int token = static_cast<int>(deferred_.size());
- deferred_.push_back({command, statement, token});
+ int token = GetTokenForCommand(command, statement);
+
+ DCHECK_LT(token, deferred_.size());
+ DCHECK_EQ(deferred_[token].command, command);
+ DCHECK_EQ(deferred_[token].statement, statement);
+ DCHECK_EQ(deferred_[token].token, token);
builder()->StoreAccumulatorInRegister(result_register_);
builder()->LoadLiteral(Smi::FromInt(token));
@@ -184,32 +192,98 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
// Applies all recorded control-flow commands after the finally-block again.
// This generates a dynamic dispatch on the token from the entry point.
void ApplyDeferredCommands() {
- // The fall-through path is covered by the default case, hence +1 here.
- SwitchBuilder dispatch(builder(), static_cast<int>(deferred_.size() + 1));
- for (size_t i = 0; i < deferred_.size(); ++i) {
- Entry& entry = deferred_[i];
- builder()->LoadLiteral(Smi::FromInt(entry.token));
- builder()->CompareOperation(Token::EQ_STRICT, token_register_);
- dispatch.Case(ToBooleanMode::kAlreadyBoolean, static_cast<int>(i));
- }
- dispatch.DefaultAt(static_cast<int>(deferred_.size()));
- for (size_t i = 0; i < deferred_.size(); ++i) {
- Entry& entry = deferred_[i];
- dispatch.SetCaseTarget(static_cast<int>(i));
+ if (deferred_.size() == 0) return;
+
+ BytecodeLabel fall_through;
+
+ if (deferred_.size() == 1) {
+ // For a single entry, just jump to the fallthrough if we don't match the
+ // entry token.
+ const Entry& entry = deferred_[0];
+
+ builder()
+ ->LoadLiteral(Smi::FromInt(entry.token))
+ .CompareOperation(Token::EQ_STRICT, token_register_)
+ .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &fall_through);
+
builder()->LoadAccumulatorWithRegister(result_register_);
execution_control()->PerformCommand(entry.command, entry.statement);
+ } else {
+ // For multiple entries, build a jump table and switch on the token,
+ // jumping to the fallthrough if none of them match.
+
+ BytecodeJumpTable* jump_table =
+ builder()->AllocateJumpTable(static_cast<int>(deferred_.size()), 0);
+ builder()
+ ->LoadAccumulatorWithRegister(token_register_)
+ .SwitchOnSmiNoFeedback(jump_table)
+ .Jump(&fall_through);
+ for (const Entry& entry : deferred_) {
+ builder()
+ ->Bind(jump_table, entry.token)
+ .LoadAccumulatorWithRegister(result_register_);
+ execution_control()->PerformCommand(entry.command, entry.statement);
+ }
}
- dispatch.SetCaseTarget(static_cast<int>(deferred_.size()));
+
+ builder()->Bind(&fall_through);
}
BytecodeArrayBuilder* builder() { return generator_->builder(); }
ControlScope* execution_control() { return generator_->execution_control(); }
private:
+ int GetTokenForCommand(Command command, Statement* statement) {
+ switch (command) {
+ case CMD_RETURN:
+ return GetReturnToken();
+ case CMD_ASYNC_RETURN:
+ return GetAsyncReturnToken();
+ case CMD_RETHROW:
+ return GetRethrowToken();
+ default:
+ // TODO(leszeks): We could also search for entries with the same
+ // command and statement.
+ return GetNewTokenForCommand(command, statement);
+ }
+ }
+
+ int GetReturnToken() {
+ if (return_token_ == -1) {
+ return_token_ = GetNewTokenForCommand(CMD_RETURN, nullptr);
+ }
+ return return_token_;
+ }
+
+ int GetAsyncReturnToken() {
+ if (async_return_token_ == -1) {
+ async_return_token_ = GetNewTokenForCommand(CMD_ASYNC_RETURN, nullptr);
+ }
+ return async_return_token_;
+ }
+
+ int GetRethrowToken() {
+ if (rethrow_token_ == -1) {
+ rethrow_token_ = GetNewTokenForCommand(CMD_RETHROW, nullptr);
+ }
+ return rethrow_token_;
+ }
+
+ int GetNewTokenForCommand(Command command, Statement* statement) {
+ int token = static_cast<int>(deferred_.size());
+ deferred_.push_back({command, statement, token});
+ return token;
+ }
+
BytecodeGenerator* generator_;
ZoneVector<Entry> deferred_;
Register token_register_;
Register result_register_;
+
+ // Tokens for commands that don't need a statement.
+ int return_token_;
+ int async_return_token_;
+ int rethrow_token_;
};
// Scoped class for dealing with control flow reaching the function level.
@@ -626,7 +700,6 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
: zone_(info->zone()),
builder_(new (zone()) BytecodeArrayBuilder(
info->isolate(), info->zone(), info->num_parameters_including_this(),
- info->scope()->MaxNestedContextChainLength(),
info->scope()->num_stack_slots(), info->literal(),
info->SourcePositionRecordingMode())),
info_(info),
@@ -642,7 +715,7 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
- generator_resume_points_(info->literal()->suspend_count(), info->zone()),
+ generator_jump_table_(nullptr),
generator_state_(),
loop_depth_(0) {
DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
@@ -722,9 +795,8 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
RegisterAllocationScope register_scope(this);
- if (IsResumableFunction(info()->literal()->kind())) {
- generator_state_ = register_allocator()->NewRegister();
- VisitGeneratorPrologue();
+ if (info()->literal()->CanSuspend()) {
+ BuildGeneratorPrologue();
}
if (closure_scope()->NeedsContext()) {
@@ -737,14 +809,6 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
GenerateBytecodeBody();
}
- // In generator functions, we may not have visited every yield in the AST
- // since we skip some obviously dead code. Hence the generated bytecode may
- // contain jumps to unbound labels (resume points that will never be used).
- // We bind these now.
- for (auto& label : generator_resume_points_) {
- if (!label.is_bound()) builder()->Bind(&label);
- }
-
// Emit an implicit return instruction in case control flow can fall off the
// end of the function without an explicit return being present on all paths.
if (builder()->RequiresImplicitReturn()) {
@@ -768,6 +832,12 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Build assignment to {new.target} variable if it is used.
VisitNewTargetVariable(closure_scope()->new_target_var());
+ // Create a generator object if necessary and initialize the
+ // {.generator_object} variable.
+ if (info()->literal()->CanSuspend()) {
+ BuildGeneratorObjectVariableInitialization();
+ }
+
// Emit tracing call if requested to do so.
if (FLAG_trace) builder()->CallRuntime(Runtime::kTraceEnter);
@@ -794,20 +864,6 @@ void BytecodeGenerator::GenerateBytecodeBody() {
VisitStatements(info()->literal()->body());
}
-void BytecodeGenerator::BuildIndexedJump(Register index, size_t start_index,
- size_t size,
- ZoneVector<BytecodeLabel>& targets) {
- // TODO(neis): Optimize this by using a proper jump table.
- DCHECK_LE(start_index + size, targets.size());
- for (size_t i = start_index; i < start_index + size; i++) {
- builder()
- ->LoadLiteral(Smi::FromInt(static_cast<int>(i)))
- .CompareOperation(Token::Value::EQ_STRICT, index)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &(targets[i]));
- }
- BuildAbort(BailoutReason::kInvalidJumpTableIndex);
-}
-
void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
LoopBuilder* loop_builder) {
// Recall that stmt->yield_count() is always zero inside ordinary
@@ -815,36 +871,39 @@ void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
if (stmt->suspend_count() == 0) {
loop_builder->LoopHeader();
} else {
- // Collect all labels for generator resume points within the loop (if any)
- // so that they can be bound to the loop header below. Also create fresh
- // labels for these resume points, to be used inside the loop.
- ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
- size_t first_yield = stmt->first_suspend_id();
- DCHECK_LE(first_yield + stmt->suspend_count(),
- generator_resume_points_.size());
- for (size_t id = first_yield; id < first_yield + stmt->suspend_count();
- id++) {
- auto& label = generator_resume_points_[id];
- resume_points_in_loop.push_back(label);
- generator_resume_points_[id] = BytecodeLabel();
- }
-
- loop_builder->LoopHeader(&resume_points_in_loop);
-
- // If we are not resuming, fall through to loop body.
- // If we are resuming, perform state dispatch.
+ loop_builder->LoopHeaderInGenerator(
+ &generator_jump_table_, static_cast<int>(stmt->first_suspend_id()),
+ static_cast<int>(stmt->suspend_count()));
+
+ // Perform state dispatch on the generator state, assuming this is a resume.
+ builder()
+ ->LoadAccumulatorWithRegister(generator_state_)
+ .SwitchOnSmiNoFeedback(generator_jump_table_);
+
+ // We fall through when the generator state is not in the jump table. If we
+ // are not resuming, we want to fall through to the loop body.
+ // TODO(leszeks): Only generate this test for debug builds; we can skip it
+ // entirely in release, assuming that the generator state is always valid.
BytecodeLabel not_resuming;
builder()
->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
.CompareOperation(Token::Value::EQ_STRICT, generator_state_)
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &not_resuming);
- BuildIndexedJump(generator_state_, first_yield, stmt->suspend_count(),
- generator_resume_points_);
+
+ // Otherwise this is an error.
+ BuildAbort(BailoutReason::kInvalidJumpTableIndex);
+
builder()->Bind(&not_resuming);
}
}
-void BytecodeGenerator::VisitGeneratorPrologue() {
+void BytecodeGenerator::BuildGeneratorPrologue() {
+ DCHECK_GT(info()->literal()->suspend_count(), 0);
+
+ generator_state_ = register_allocator()->NewRegister();
+ generator_jump_table_ =
+ builder()->AllocateJumpTable(info()->literal()->suspend_count(), 0);
+
// The generator resume trampoline abuses the new.target register both to
// indicate that this is a resume call and to pass in the generator object.
// In ordinary calls, new.target is always undefined because generator
@@ -855,24 +914,27 @@ void BytecodeGenerator::VisitGeneratorPrologue() {
->LoadAccumulatorWithRegister(generator_object)
.JumpIfUndefined(&regular_call);
- // This is a resume call. Restore the current context and the registers, then
- // perform state dispatch.
- Register dummy = register_allocator()->NewRegister();
+ // This is a resume call. Restore the current context and the registers,
+ // then perform state dispatch.
+ Register generator_context = register_allocator()->NewRegister();
builder()
->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object)
- .PushContext(dummy)
+ .PushContext(generator_context)
.ResumeGenerator(generator_object)
- .StoreAccumulatorInRegister(generator_state_);
- BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
- generator_resume_points_);
+ .StoreAccumulatorInRegister(generator_state_)
+ .SwitchOnSmiNoFeedback(generator_jump_table_);
+ // We fall through when the generator state is not in the jump table.
+ // TODO(leszeks): Only generate this for debug builds.
+ BuildAbort(BailoutReason::kInvalidJumpTableIndex);
+ // This is a regular call.
builder()
->Bind(&regular_call)
.LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
.StoreAccumulatorInRegister(generator_state_);
- // This is a regular call. Fall through to the ordinary function prologue,
- // after which we will run into the generator object creation and other extra
- // code inserted by the parser.
+ // Now fall through to the ordinary function prologue, after which we will run
+ // into the generator object creation and other extra code inserted by the
+ // parser.
}
void BytecodeGenerator::VisitBlock(Block* stmt) {
@@ -1203,7 +1265,6 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
loop_backbranch.Bind(builder());
loop_builder.JumpToHeader(loop_depth_);
}
- loop_builder.EndLoop();
}
void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
@@ -1223,7 +1284,6 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
}
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader(loop_depth_);
- loop_builder.EndLoop();
}
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
@@ -1251,7 +1311,6 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
Visit(stmt->next());
}
loop_builder.JumpToHeader(loop_depth_);
- loop_builder.EndLoop();
}
void BytecodeGenerator::VisitForInAssignment(Expression* expr,
@@ -1328,7 +1387,6 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
return;
}
- LoopBuilder loop_builder(builder());
BytecodeLabel subject_null_label, subject_undefined_label;
// Prepare the state for executing ForIn.
@@ -1350,20 +1408,22 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->StoreAccumulatorInRegister(index);
// The loop
- VisitIterationHeader(stmt, &loop_builder);
- builder()->SetExpressionAsStatementPosition(stmt->each());
- builder()->ForInContinue(index, cache_length);
- loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
- FeedbackSlot slot = stmt->ForInFeedbackSlot();
- builder()->ForInNext(receiver, index, triple.Truncate(2),
- feedback_index(slot));
- loop_builder.ContinueIfUndefined();
- VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
- VisitIterationBody(stmt, &loop_builder);
- builder()->ForInStep(index);
- builder()->StoreAccumulatorInRegister(index);
- loop_builder.JumpToHeader(loop_depth_);
- loop_builder.EndLoop();
+ {
+ LoopBuilder loop_builder(builder());
+ VisitIterationHeader(stmt, &loop_builder);
+ builder()->SetExpressionAsStatementPosition(stmt->each());
+ builder()->ForInContinue(index, cache_length);
+ loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
+ builder()->ForInNext(receiver, index, triple.Truncate(2),
+ feedback_index(slot));
+ loop_builder.ContinueIfUndefined();
+ VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
+ VisitIterationBody(stmt, &loop_builder);
+ builder()->ForInStep(index);
+ builder()->StoreAccumulatorInRegister(index);
+ loop_builder.JumpToHeader(loop_depth_);
+ }
builder()->Bind(&subject_null_label);
builder()->Bind(&subject_undefined_label);
}
@@ -1383,7 +1443,6 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
VisitForEffect(stmt->assign_each());
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader(loop_depth_);
- loop_builder.EndLoop();
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
@@ -1497,7 +1556,8 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
function_literals_.push_back(std::make_pair(expr, entry));
}
-void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
+void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
+ VisitDeclarations(expr->scope()->declarations());
Register constructor = VisitForRegisterValue(expr->constructor());
{
RegisterAllocationScope register_scope(this);
@@ -1534,6 +1594,18 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
}
}
+void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
+ CurrentScope current_scope(this, expr->scope());
+ DCHECK_NOT_NULL(expr->scope());
+ if (expr->scope()->NeedsContext()) {
+ BuildNewLocalBlockContext(expr->scope());
+ ContextScope scope(this, expr->scope());
+ BuildClassLiteral(expr);
+ } else {
+ BuildClassLiteral(expr);
+ }
+}
+
void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
Register constructor,
Register prototype) {
@@ -1680,10 +1752,7 @@ void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Deep-copy the literal boilerplate.
uint8_t flags = CreateObjectLiteralFlags::Encode(
- expr->IsFastCloningSupported(),
- ConstructorBuiltins::FastCloneShallowObjectPropertiesCount(
- expr->properties_count()),
- expr->ComputeFlags());
+ expr->ComputeFlags(), expr->IsFastCloningSupported());
Register literal = register_allocator()->NewRegister();
size_t entry;
@@ -1695,6 +1764,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
entry = builder()->AllocateDeferredConstantPoolEntry();
object_literals_.push_back(std::make_pair(expr, entry));
}
+ // TODO(cbruni): Directly generate runtime call for literals we cannot
+ // optimize once the FastCloneShallowObject stub is in sync with the TF
+ // optimizations.
builder()->CreateObjectLiteral(entry, feedback_index(expr->literal_slot()),
flags, literal);
@@ -1756,6 +1828,8 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
case ObjectLiteral::Property::PROTOTYPE: {
+ // __proto__:null is handled by CreateObjectLiteral.
+ if (property->IsNullPrototype()) break;
DCHECK(property->emit_store());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
@@ -1805,7 +1879,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
RegisterAllocationScope inner_register_scope(this);
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ if (property->IsPrototype()) {
+ // __proto__:null is handled by CreateObjectLiteral.
+ if (property->IsNullPrototype()) continue;
DCHECK(property->emit_store());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
@@ -2104,8 +2180,6 @@ void BytecodeGenerator::BuildThrowReferenceError(const AstRawString* name) {
}
void BytecodeGenerator::BuildThrowIfHole(Variable* variable) {
- // TODO(interpreter): Can the parser reduce the number of checks
- // performed? Or should there be a ThrowIfHole bytecode.
BytecodeLabel no_reference_error;
builder()->JumpIfNotHole(&no_reference_error);
@@ -2380,12 +2454,13 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void BytecodeGenerator::VisitSuspend(Suspend* expr) {
+void BytecodeGenerator::BuildGeneratorSuspend(Suspend* expr,
+ Register generator) {
+ RegisterAllocationScope register_scope(this);
+
builder()->SetExpressionPosition(expr);
Register value = VisitForRegisterValue(expr->expression());
- Register generator = VisitForRegisterValue(expr->generator_object());
-
// Save context, registers, and state. Then return.
builder()
->LoadLiteral(Smi::FromInt(expr->suspend_id()))
@@ -2394,98 +2469,99 @@ void BytecodeGenerator::VisitSuspend(Suspend* expr) {
if (expr->IsNonInitialAsyncGeneratorYield()) {
// AsyncGenerator yields (with the exception of the initial yield) delegate
// to AsyncGeneratorResolve(), implemented via the runtime call below.
- RegisterList args = register_allocator()->NewRegisterList(2);
-
- int context_index = expr->is_yield_star()
- ? Context::ASYNC_GENERATOR_RAW_YIELD
- : Context::ASYNC_GENERATOR_YIELD;
+ RegisterList args = register_allocator()->NewRegisterList(3);
- // Async GeneratorYield:
+ // AsyncGeneratorYield:
// perform AsyncGeneratorResolve(<generator>, <value>, false).
builder()
->MoveRegister(generator, args[0])
.MoveRegister(value, args[1])
- .CallJSRuntime(context_index, args);
+ .LoadFalse()
+ .StoreAccumulatorInRegister(args[2])
+ .CallRuntime(Runtime::kInlineAsyncGeneratorResolve, args);
} else {
builder()->LoadAccumulatorWithRegister(value);
}
builder()->Return(); // Hard return (ignore any finally blocks).
+}
- builder()->Bind(&(generator_resume_points_[expr->suspend_id()]));
- // Upon resume, we continue here.
-
- {
- RegisterAllocationScope register_scope(this);
-
- // Update state to indicate that we have finished resuming. Loop headers
- // rely on this.
- builder()
- ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
- .StoreAccumulatorInRegister(generator_state_);
+void BytecodeGenerator::BuildGeneratorResume(Suspend* expr,
+ Register generator) {
+ RegisterAllocationScope register_scope(this);
- Register input = register_allocator()->NewRegister();
+ // Update state to indicate that we have finished resuming. Loop headers
+ // rely on this.
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+ .StoreAccumulatorInRegister(generator_state_);
- // When resuming an Async Generator from an Await expression, the sent
- // value is in the [[await_input_or_debug_pos]] slot. Otherwise, the sent
- // value is in the [[input_or_debug_pos]] slot.
- Runtime::FunctionId get_generator_input =
- expr->is_async_generator() && expr->is_await()
- ? Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos
- : Runtime::kInlineGeneratorGetInputOrDebugPos;
+ Register input = register_allocator()->NewRegister();
- builder()
- ->CallRuntime(get_generator_input, generator)
- .StoreAccumulatorInRegister(input);
+ // When resuming an Async Generator from an Await expression, the sent
+ // value is in the [[await_input_or_debug_pos]] slot. Otherwise, the sent
+ // value is in the [[input_or_debug_pos]] slot.
+ Runtime::FunctionId get_generator_input =
+ expr->is_async_generator() && expr->is_await()
+ ? Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos
+ : Runtime::kInlineGeneratorGetInputOrDebugPos;
- Register resume_mode = register_allocator()->NewRegister();
- builder()
- ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator)
- .StoreAccumulatorInRegister(resume_mode);
+ DCHECK(generator.is_valid());
+ builder()
+ ->CallRuntime(get_generator_input, generator)
+ .StoreAccumulatorInRegister(input);
- // Now dispatch on resume mode.
+ Register resume_mode = register_allocator()->NewRegister();
+ builder()
+ ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator)
+ .StoreAccumulatorInRegister(resume_mode);
- BytecodeLabel resume_with_next;
- BytecodeLabel resume_with_return;
- BytecodeLabel resume_with_throw;
+ // Now dispatch on resume mode.
- builder()
- ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
- .CompareOperation(Token::EQ_STRICT, resume_mode)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_next)
- .LoadLiteral(Smi::FromInt(JSGeneratorObject::kThrow))
- .CompareOperation(Token::EQ_STRICT, resume_mode)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_throw)
- .Jump(&resume_with_return);
-
- builder()->Bind(&resume_with_return);
- {
- if (expr->is_async_generator()) {
- // Async generator methods will produce the iter result object.
- builder()->LoadAccumulatorWithRegister(input);
- execution_control()->AsyncReturnAccumulator();
- } else {
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->MoveRegister(input, args[0])
- .LoadTrue()
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kInlineCreateIterResultObject, args);
- execution_control()->ReturnAccumulator();
- }
- }
+ BytecodeLabel resume_with_next;
+ BytecodeLabel resume_with_throw;
- builder()->Bind(&resume_with_throw);
- builder()->SetExpressionPosition(expr);
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
+ .CompareOperation(Token::EQ_STRICT, resume_mode)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_next)
+ .LoadLiteral(Smi::FromInt(JSGeneratorObject::kThrow))
+ .CompareOperation(Token::EQ_STRICT, resume_mode)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_throw);
+ // Fall through for resuming with return.
+
+ if (expr->is_async_generator()) {
+ // Async generator methods will produce the iter result object.
builder()->LoadAccumulatorWithRegister(input);
- if (expr->rethrow_on_exception()) {
- builder()->ReThrow();
- } else {
- builder()->Throw();
- }
+ execution_control()->AsyncReturnAccumulator();
+ } else {
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(input, args[0])
+ .LoadTrue()
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kInlineCreateIterResultObject, args);
+ execution_control()->ReturnAccumulator();
+ }
- builder()->Bind(&resume_with_next);
- builder()->LoadAccumulatorWithRegister(input);
+ builder()->Bind(&resume_with_throw);
+ builder()->SetExpressionPosition(expr);
+ builder()->LoadAccumulatorWithRegister(input);
+ if (expr->rethrow_on_exception()) {
+ builder()->ReThrow();
+ } else {
+ builder()->Throw();
}
+
+ builder()->Bind(&resume_with_next);
+ builder()->LoadAccumulatorWithRegister(input);
+}
+
+void BytecodeGenerator::VisitSuspend(Suspend* expr) {
+ Register generator = VisitForRegisterValue(expr->generator_object());
+ BuildGeneratorSuspend(expr, generator);
+ builder()->Bind(generator_jump_table_, static_cast<int>(expr->suspend_id()));
+ // Upon resume, we continue here.
+ BuildGeneratorResume(expr, generator);
}
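The compare-and-jump chain emitted for resume-mode dispatch above reduces to a three-way switch. A minimal standalone sketch of that control flow, using an invented ResumeMode enum in place of the JSGeneratorObject::kNext/kThrow/kReturn Smi constants (an illustration, not V8 code):

#include <stdexcept>

enum class ResumeMode { kNext, kThrow, kReturn };

struct ResumeResult { bool done; int value; };

// kNext falls through to continue executing the generator body, kThrow
// rethrows the sent input, and kReturn completes the generator with the
// input as its value, mirroring BuildGeneratorResume's fallthrough order.
ResumeResult Dispatch(ResumeMode mode, int input) {
  switch (mode) {
    case ResumeMode::kNext:
      return {false, input};  // continue executing the generator body
    case ResumeMode::kThrow:
      throw std::runtime_error("generator resumed with throw");
    case ResumeMode::kReturn:
      return {true, input};   // like an iter result object with done == true
  }
  return {true, input};
}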
void BytecodeGenerator::VisitThrow(Throw* expr) {
@@ -3511,6 +3587,20 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
builder()->Bind(&flush_state_label);
}
+void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() {
+ DCHECK(IsResumableFunction(info()->literal()->kind()));
+ DCHECK_NOT_NULL(closure_scope()->generator_object_var());
+
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(Register::function_closure(), args[0])
+ .MoveRegister(builder()->Receiver(), args[1])
+ .CallRuntime(Runtime::kInlineCreateJSGeneratorObject, args);
+ BuildVariableAssignment(closure_scope()->generator_object_var(), Token::INIT,
+ FeedbackSlot::Invalid(), HoleCheckMode::kElided);
+}
+
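A rough standalone analogue of the prologue step above, with invented types: the closure and receiver are captured once, combined into a generator object, and stored in a dedicated variable slot so every later suspend point can reference the same object.

struct GeneratorObject { void* closure; void* receiver; };

struct FrameModel {
  void* closure = nullptr;
  void* receiver = nullptr;
  GeneratorObject generator_object{};  // stands in for generator_object_var

  // Runs once at function entry, like the CallRuntime + variable assignment.
  void InitGeneratorObject() { generator_object = {closure, receiver}; }
};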
void BytecodeGenerator::VisitFunctionClosureForContext() {
ValueResultScope value_execution_result(this);
if (closure_scope()->is_script_scope()) {
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 8ad09f686a..6e277e3799 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -21,6 +21,7 @@ namespace interpreter {
class GlobalDeclarationsBuilder;
class LoopBuilder;
+class BytecodeJumpTable;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
@@ -133,7 +134,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildNewLocalCatchContext(Scope* scope);
void BuildNewLocalWithContext(Scope* scope);
- void VisitGeneratorPrologue();
+ void BuildGeneratorPrologue();
+ void BuildGeneratorSuspend(Suspend* expr, Register generator);
+ void BuildGeneratorResume(Suspend* expr, Register generator);
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
@@ -141,8 +144,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitClassLiteralProperties(ClassLiteral* expr, Register constructor,
Register prototype);
void BuildClassLiteralNameProperty(ClassLiteral* expr, Register constructor);
+ void BuildClassLiteral(ClassLiteral* expr);
void VisitThisFunctionVariable(Variable* variable);
void VisitNewTargetVariable(Variable* variable);
+ void BuildGeneratorObjectVariableInitialization();
void VisitBlockDeclarationsAndStatements(Block* stmt);
void VisitFunctionClosureForContext();
void VisitSetHomeObject(Register value, Register home_object,
@@ -237,7 +242,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ContextScope* execution_context_;
ExpressionResultScope* execution_result_;
- ZoneVector<BytecodeLabel> generator_resume_points_;
+ BytecodeJumpTable* generator_jump_table_;
Register generator_state_;
int loop_depth_;
};
diff --git a/deps/v8/src/interpreter/bytecode-jump-table.h b/deps/v8/src/interpreter/bytecode-jump-table.h
new file mode 100644
index 0000000000..b0a36cadbb
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-jump-table.h
@@ -0,0 +1,88 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_JUMP_TABLE_H_
+#define V8_INTERPRETER_BYTECODE_JUMP_TABLE_H_
+
+#include "src/bit-vector.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ConstantArrayBuilder;
+
+// A jump table for a set of targets in a bytecode array. When an entry in the
+// table is bound, it represents a known position in the bytecode array. If no
+// entries match, the switch falls through.
+class V8_EXPORT_PRIVATE BytecodeJumpTable final : public ZoneObject {
+ public:
+ // Constructs a new BytecodeJumpTable starting at |constant_pool_index|, with
+ // the given |size|, where the case values of the table start at
+ // |case_value_base|.
+ BytecodeJumpTable(size_t constant_pool_index, int size, int case_value_base,
+ Zone* zone)
+ :
+#ifdef DEBUG
+ bound_(size, zone),
+#endif
+ constant_pool_index_(constant_pool_index),
+ switch_bytecode_offset_(kInvalidOffset),
+ size_(size),
+ case_value_base_(case_value_base) {
+ }
+
+ size_t constant_pool_index() const { return constant_pool_index_; }
+ size_t switch_bytecode_offset() const { return switch_bytecode_offset_; }
+ int case_value_base() const { return case_value_base_; }
+ int size() const { return size_; }
+#ifdef DEBUG
+ bool is_bound(int case_value) const {
+ DCHECK_GE(case_value, case_value_base_);
+ DCHECK_LT(case_value, case_value_base_ + size());
+ return bound_.Contains(case_value - case_value_base_);
+ }
+#endif
+
+ size_t ConstantPoolEntryFor(int case_value) {
+ DCHECK_GE(case_value, case_value_base_);
+ return constant_pool_index_ + case_value - case_value_base_;
+ }
+
+ private:
+ static const size_t kInvalidIndex = static_cast<size_t>(-1);
+ static const size_t kInvalidOffset = static_cast<size_t>(-1);
+
+ void mark_bound(int case_value) {
+#ifdef DEBUG
+ DCHECK_GE(case_value, case_value_base_);
+ DCHECK_LT(case_value, case_value_base_ + size());
+ bound_.Add(case_value - case_value_base_);
+#endif
+ }
+
+ void set_switch_bytecode_offset(size_t offset) {
+ DCHECK_EQ(switch_bytecode_offset_, kInvalidOffset);
+ switch_bytecode_offset_ = offset;
+ }
+
+#ifdef DEBUG
+ // This bit vector is only used for DCHECKS, so only store the field in debug
+ // builds.
+ BitVector bound_;
+#endif
+ size_t constant_pool_index_;
+ size_t switch_bytecode_offset_;
+ int size_;
+ int case_value_base_;
+
+ friend class BytecodeArrayWriter;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_JUMP_TABLE_H_
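A self-contained model of the jump-table contract declared above, with illustrative names rather than V8's: entries occupy contiguous constant-pool slots starting at case_value_base, binding a case records a bytecode offset, and an unbound or out-of-range case falls through.

#include <cstddef>
#include <optional>
#include <vector>

class JumpTableModel {
 public:
  JumpTableModel(int case_value_base, int size)
      : case_value_base_(case_value_base), offsets_(size) {}

  // Binding a case records the bytecode offset for that case value.
  void Bind(int case_value, size_t bytecode_offset) {
    offsets_.at(case_value - case_value_base_) = bytecode_offset;
  }

  // A switch jumps to the bound offset, or returns nullopt to fall through.
  std::optional<size_t> Target(int case_value) const {
    int i = case_value - case_value_base_;
    if (i < 0 || i >= static_cast<int>(offsets_.size())) return std::nullopt;
    return offsets_[i];
  }

 private:
  int case_value_base_;
  std::vector<std::optional<size_t>> offsets_;
};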
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.cc b/deps/v8/src/interpreter/bytecode-node.cc
index 06accd75dc..2bcea0a16a 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.cc
+++ b/deps/v8/src/interpreter/bytecode-node.cc
@@ -1,8 +1,8 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/interpreter/bytecode-pipeline.h"
+#include "src/interpreter/bytecode-node.h"
#include <iomanip>
#include "src/source-position-table.h"
@@ -47,14 +47,6 @@ bool BytecodeNode::operator==(const BytecodeNode& other) const {
return true;
}
-std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info) {
- if (info.is_valid()) {
- char description = info.is_statement() ? 'S' : 'E';
- os << info.source_position() << ' ' << description << '>';
- }
- return os;
-}
-
std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
node.Print(os);
return os;
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.h b/deps/v8/src/interpreter/bytecode-node.h
index 23cad237dd..98e1577f45 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.h
+++ b/deps/v8/src/interpreter/bytecode-node.h
@@ -1,138 +1,22 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INTERPRETER_BYTECODE_PIPELINE_H_
-#define V8_INTERPRETER_BYTECODE_PIPELINE_H_
+#ifndef V8_INTERPRETER_BYTECODE_NODE_H_
+#define V8_INTERPRETER_BYTECODE_NODE_H_
+
+#include <algorithm>
-#include "src/base/compiler-specific.h"
#include "src/globals.h"
-#include "src/interpreter/bytecode-register-allocator.h"
-#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/bytecodes.h"
-#include "src/objects.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeLabel;
-class BytecodeNode;
-class BytecodeSourceInfo;
-
-// Interface for bytecode pipeline stages.
-class BytecodePipelineStage {
- public:
- virtual ~BytecodePipelineStage() {}
-
- // Write bytecode node |node| into pipeline. The node is only valid
- // for the duration of the call. Callee's should clone it if
- // deferring Write() to the next stage.
- virtual void Write(BytecodeNode* node) = 0;
-
- // Write jump bytecode node |node| which jumps to |label| into pipeline.
- // The node and label are only valid for the duration of the call. This call
- // implicitly ends the current basic block so should always write to the next
- // stage.
- virtual void WriteJump(BytecodeNode* node, BytecodeLabel* label) = 0;
-
- // Binds |label| to the current bytecode location. This call implicitly
- // ends the current basic block and so any deferred bytecodes should be
- // written to the next stage.
- virtual void BindLabel(BytecodeLabel* label) = 0;
-
- // Binds |label| to the location of |target|. This call implicitly
- // ends the current basic block and so any deferred bytecodes should be
- // written to the next stage.
- virtual void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) = 0;
-
- // Flush the pipeline and generate a bytecode array.
- virtual Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) = 0;
-};
-
-// Source code position information.
-class BytecodeSourceInfo final {
- public:
- static const int kUninitializedPosition = -1;
-
- BytecodeSourceInfo()
- : position_type_(PositionType::kNone),
- source_position_(kUninitializedPosition) {}
-
- BytecodeSourceInfo(int source_position, bool is_statement)
- : position_type_(is_statement ? PositionType::kStatement
- : PositionType::kExpression),
- source_position_(source_position) {
- DCHECK_GE(source_position, 0);
- }
-
- // Makes instance into a statement position.
- void MakeStatementPosition(int source_position) {
- // Statement positions can be replaced by other statement
- // positions. For example , "for (x = 0; x < 3; ++x) 7;" has a
- // statement position associated with 7 but no bytecode associated
- // with it. Then Next is emitted after the body and has
- // statement position and overrides the existing one.
- position_type_ = PositionType::kStatement;
- source_position_ = source_position;
- }
-
- // Makes instance into an expression position. Instance should not
- // be a statement position otherwise it could be lost and impair the
- // debugging experience.
- void MakeExpressionPosition(int source_position) {
- DCHECK(!is_statement());
- position_type_ = PositionType::kExpression;
- source_position_ = source_position;
- }
-
- // Forces an instance into an expression position.
- void ForceExpressionPosition(int source_position) {
- position_type_ = PositionType::kExpression;
- source_position_ = source_position;
- }
-
- int source_position() const {
- DCHECK(is_valid());
- return source_position_;
- }
-
- bool is_statement() const {
- return position_type_ == PositionType::kStatement;
- }
- bool is_expression() const {
- return position_type_ == PositionType::kExpression;
- }
-
- bool is_valid() const { return position_type_ != PositionType::kNone; }
- void set_invalid() {
- position_type_ = PositionType::kNone;
- source_position_ = kUninitializedPosition;
- }
-
- bool operator==(const BytecodeSourceInfo& other) const {
- return position_type_ == other.position_type_ &&
- source_position_ == other.source_position_;
- }
-
- bool operator!=(const BytecodeSourceInfo& other) const {
- return position_type_ != other.position_type_ ||
- source_position_ != other.source_position_;
- }
-
- private:
- enum class PositionType : uint8_t { kNone, kExpression, kStatement };
-
- PositionType position_type_;
- int source_position_;
-};
-
// A container for a generated bytecode, it's operands, and source information.
-// These must be allocated by a BytecodeNodeAllocator instance.
-class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
+class V8_EXPORT_PRIVATE BytecodeNode final {
public:
INLINE(BytecodeNode(Bytecode bytecode,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
@@ -215,39 +99,9 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
BYTECODE_LIST(DEFINE_BYTECODE_NODE_CREATOR)
#undef DEFINE_BYTECODE_NODE_CREATOR
- // Replace the bytecode of this node with |bytecode| and keep the operands.
- void replace_bytecode(Bytecode bytecode) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
- Bytecodes::NumberOfOperands(bytecode));
- bytecode_ = bytecode;
- }
-
- void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
-
// Print to stream |os|.
void Print(std::ostream& os) const;
- // Transform to a node representing |new_bytecode| which has one
- // operand more than the current bytecode.
- void Transform(Bytecode new_bytecode, uint32_t extra_operand) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(new_bytecode),
- Bytecodes::NumberOfOperands(bytecode()) + 1);
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 1 ||
- Bytecodes::GetOperandType(new_bytecode, 0) ==
- Bytecodes::GetOperandType(bytecode(), 0));
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 2 ||
- Bytecodes::GetOperandType(new_bytecode, 1) ==
- Bytecodes::GetOperandType(bytecode(), 1));
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 3 ||
- Bytecodes::GetOperandType(new_bytecode, 2) ==
- Bytecodes::GetOperandType(bytecode(), 2));
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 4);
-
- bytecode_ = new_bytecode;
- operand_count_++;
- SetOperand(operand_count() - 1, extra_operand);
- }
-
Bytecode bytecode() const { return bytecode_; }
uint32_t operand(int i) const {
@@ -256,6 +110,8 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
}
const uint32_t* operands() const { return operands_; }
+ void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
+
int operand_count() const { return operand_count_; }
OperandScale operand_scale() const { return operand_scale_; }
@@ -410,12 +266,10 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
- const BytecodeSourceInfo& info);
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const BytecodeNode& node);
} // namespace interpreter
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETER_BYTECODE_PIPELINE_H_
+#endif // V8_INTERPRETER_BYTECODE_NODE_H_
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index 583d99c227..859f0e1828 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -207,14 +207,10 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
// Calculate offset so register index values can be mapped into
// a vector of register metadata.
- if (parameter_count != 0) {
- register_info_table_offset_ =
- -Register::FromParameterIndex(0, parameter_count).index();
- } else {
- // TODO(oth): This path shouldn't be necessary in bytecode generated
- // from Javascript, but a set of tests do not include the JS receiver.
- register_info_table_offset_ = -accumulator_.index();
- }
+ // There is at least one parameter, which is the JS receiver.
+ DCHECK(parameter_count != 0);
+ register_info_table_offset_ =
+ -Register::FromParameterIndex(0, parameter_count).index();
// Initialize register map for parameters, locals, and the
// accumulator.
@@ -322,6 +318,15 @@ void BytecodeRegisterOptimizer::AddToEquivalenceSet(
void BytecodeRegisterOptimizer::RegisterTransfer(RegisterInfo* input_info,
RegisterInfo* output_info) {
+ bool output_is_observable =
+ RegisterIsObservable(output_info->register_value());
+ bool in_same_equivalence_set =
+ output_info->IsInSameEquivalenceSet(input_info);
+ if (in_same_equivalence_set &&
+ (!output_is_observable || output_info->materialized())) {
+ return; // Nothing more to do.
+ }
+
// Materialize an alternate in the equivalence set that
// |output_info| is leaving.
if (output_info->materialized()) {
@@ -329,12 +334,10 @@ void BytecodeRegisterOptimizer::RegisterTransfer(RegisterInfo* input_info,
}
// Add |output_info| to new equivalence set.
- if (!output_info->IsInSameEquivalenceSet(input_info)) {
+ if (!in_same_equivalence_set) {
AddToEquivalenceSet(input_info, output_info);
}
- bool output_is_observable =
- RegisterIsObservable(output_info->register_value());
if (output_is_observable) {
// Force store to be emitted when register is observable.
output_info->set_materialized(false);
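The early exit added above distills to a simple predicate. A sketch with hypothetical flags standing in for V8's RegisterInfo state: a register transfer is a no-op when the destination is already in the source's equivalence set and there is no pending store to make visible.

struct RegInfoModel {
  int equivalence_id;   // registers with equal ids hold the same value
  bool observable;      // e.g. parameters and locals visible to the debugger
  bool materialized;    // the value has actually been written to the register
};

bool TransferIsNoOp(const RegInfoModel& in, const RegInfoModel& out) {
  bool same_set = in.equivalence_id == out.equivalence_id;
  return same_set && (!out.observable || out.materialized);
}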
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 0e379a2599..494abb6c96 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -64,14 +64,18 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
// Prepares for |bytecode|.
template <Bytecode bytecode, AccumulatorUse accumulator_use>
INLINE(void PrepareForBytecode()) {
- if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
- bytecode == Bytecode::kSuspendGenerator) {
+ if (Bytecodes::IsJump(bytecode) || Bytecodes::IsSwitch(bytecode) ||
+ bytecode == Bytecode::kDebugger ||
+ bytecode == Bytecode::kSuspendGenerator ||
+ bytecode == Bytecode::kResumeGenerator) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target
- // aren't
- // known.
+ // aren't known),
+ // - a switch bytecode (as the register equivalents at the switch targets
+ // aren't known),
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
+ // - a generator resume (as this involves restoring all registers).
Flush();
}
diff --git a/deps/v8/src/interpreter/bytecode-source-info.cc b/deps/v8/src/interpreter/bytecode-source-info.cc
new file mode 100644
index 0000000000..ed05b3e2e7
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-source-info.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-source-info.h"
+
+#include <iomanip>
+#include "src/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info) {
+ if (info.is_valid()) {
+ char description = info.is_statement() ? 'S' : 'E';
+ os << info.source_position() << ' ' << description << '>';
+ }
+ return os;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-source-info.h b/deps/v8/src/interpreter/bytecode-source-info.h
new file mode 100644
index 0000000000..790a6b2aa2
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-source-info.h
@@ -0,0 +1,98 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_SOURCE_INFO_H_
+#define V8_INTERPRETER_BYTECODE_SOURCE_INFO_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// Source code position information.
+class BytecodeSourceInfo final {
+ public:
+ static const int kUninitializedPosition = -1;
+
+ BytecodeSourceInfo()
+ : position_type_(PositionType::kNone),
+ source_position_(kUninitializedPosition) {}
+
+ BytecodeSourceInfo(int source_position, bool is_statement)
+ : position_type_(is_statement ? PositionType::kStatement
+ : PositionType::kExpression),
+ source_position_(source_position) {
+ DCHECK_GE(source_position, 0);
+ }
+
+ // Makes instance into a statement position.
+ void MakeStatementPosition(int source_position) {
+ // Statement positions can be replaced by other statement
+ // positions. For example, "for (x = 0; x < 3; ++x) 7;" has a
+ // statement position associated with 7 but no bytecode associated
+ // with it. Then Next is emitted after the body and has a
+ // statement position which overrides the existing one.
+ position_type_ = PositionType::kStatement;
+ source_position_ = source_position;
+ }
+
+ // Makes instance into an expression position. Instance should not
+ // be a statement position otherwise it could be lost and impair the
+ // debugging experience.
+ void MakeExpressionPosition(int source_position) {
+ DCHECK(!is_statement());
+ position_type_ = PositionType::kExpression;
+ source_position_ = source_position;
+ }
+
+ // Forces an instance into an expression position.
+ void ForceExpressionPosition(int source_position) {
+ position_type_ = PositionType::kExpression;
+ source_position_ = source_position;
+ }
+
+ int source_position() const {
+ DCHECK(is_valid());
+ return source_position_;
+ }
+
+ bool is_statement() const {
+ return position_type_ == PositionType::kStatement;
+ }
+ bool is_expression() const {
+ return position_type_ == PositionType::kExpression;
+ }
+
+ bool is_valid() const { return position_type_ != PositionType::kNone; }
+ void set_invalid() {
+ position_type_ = PositionType::kNone;
+ source_position_ = kUninitializedPosition;
+ }
+
+ bool operator==(const BytecodeSourceInfo& other) const {
+ return position_type_ == other.position_type_ &&
+ source_position_ == other.source_position_;
+ }
+
+ bool operator!=(const BytecodeSourceInfo& other) const {
+ return position_type_ != other.position_type_ ||
+ source_position_ != other.source_position_;
+ }
+
+ private:
+ enum class PositionType : uint8_t { kNone, kExpression, kStatement };
+
+ PositionType position_type_;
+ int source_position_;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const BytecodeSourceInfo& info);
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_SOURCE_INFO_H_
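A usage sketch for the class above, assuming this header is included and that DCHECK is active only in debug builds: statement positions may replace one another, while an expression position must never silently overwrite a statement position.

#include "src/interpreter/bytecode-source-info.h"

void Example() {
  v8::internal::interpreter::BytecodeSourceInfo info;
  info.MakeStatementPosition(10);   // prints as "10 S>"
  info.MakeStatementPosition(20);   // allowed: 20 S> replaces 10 S>
  // info.MakeExpressionPosition(30) would DCHECK-fail here, since the
  // instance already holds a statement position; reset it first.
  info.set_invalid();
  info.MakeExpressionPosition(30);  // prints as "30 E>"
}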
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index baf9e88963..83417fe879 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -284,6 +284,10 @@ namespace interpreter {
V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kUImm) \
V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kUImm) \
\
+ /* Smi-table lookup for switch statements */ \
+ V(SwitchOnSmiNoFeedback, AccumulatorUse::kRead, OperandType::kIdx, \
+ OperandType::kUImm, OperandType::kImm) \
+ \
/* Complex flow control For..in */ \
V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg, \
OperandType::kRegOutTriple) \
@@ -611,13 +615,18 @@ class V8_EXPORT_PRIVATE Bytecodes final {
return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
}
+ // Returns true if the bytecode is a switch.
+ static constexpr bool IsSwitch(Bytecode bytecode) {
+ return bytecode == Bytecode::kSwitchOnSmiNoFeedback;
+ }
+
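An interpreter-level model of SwitchOnSmiNoFeedback, with invented names, inferred from the operand list above (kIdx: constant-pool index of the first table entry, kUImm: table size, kImm: case value of the first entry): the accumulator is compared against the case range and, on a hit, execution jumps to the offset stored in the pool; otherwise it falls through to the next bytecode.

#include <cstddef>
#include <vector>

size_t NextOffset(int accumulator, size_t fallthrough_offset,
                  const std::vector<size_t>& constant_pool,
                  size_t table_index, int table_size, int case_value_base) {
  int i = accumulator - case_value_base;
  if (i < 0 || i >= table_size) return fallthrough_offset;  // no match
  return constant_pool[table_index + static_cast<size_t>(i)];
}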
// Returns true if |bytecode| has no effects. These bytecodes only manipulate
// interpreter frame state and will never throw.
static constexpr bool IsWithoutExternalSideEffects(Bytecode bytecode) {
return (IsAccumulatorLoadWithoutEffects(bytecode) ||
IsRegisterLoadWithoutEffects(bytecode) ||
IsCompareWithoutEffects(bytecode) || bytecode == Bytecode::kNop ||
- IsJumpWithoutEffects(bytecode));
+ IsJumpWithoutEffects(bytecode) || IsSwitch(bytecode));
}
// Returns true if the bytecode is Ldar or Star.
@@ -640,7 +649,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
bytecode == Bytecode::kConstruct ||
bytecode == Bytecode::kCallWithSpread ||
bytecode == Bytecode::kConstructWithSpread ||
- bytecode == Bytecode::kInvokeIntrinsic ||
bytecode == Bytecode::kCallJSRuntime;
}
@@ -752,7 +760,8 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns the receiver mode of the given call bytecode.
static ConvertReceiverMode GetReceiverMode(Bytecode bytecode) {
- DCHECK(IsCallOrConstruct(bytecode));
+ DCHECK(IsCallOrConstruct(bytecode) ||
+ bytecode == Bytecode::kInvokeIntrinsic);
switch (bytecode) {
case Bytecode::kCallProperty:
case Bytecode::kCallProperty0:
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 74d887a61a..f7e68f876e 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -38,11 +38,13 @@ void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
}
size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
- ConstantArrayBuilder::Entry entry) {
- DCHECK_GT(available(), 0u);
+ ConstantArrayBuilder::Entry entry, size_t count) {
+ DCHECK_GE(available(), count);
size_t index = constants_.size();
DCHECK_LT(index, capacity());
- constants_.push_back(entry);
+ for (size_t i = 0; i < count; ++i) {
+ constants_.push_back(entry);
+ }
return index + start_index();
}
@@ -65,7 +67,12 @@ void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
Isolate* isolate) const {
std::set<Object*> elements;
for (const Entry& entry : constants_) {
+ // TODO(leszeks): Ignore jump tables because they have to be contiguous,
+ // so they can contain duplicates.
+ if (entry.IsJumpTableEntry()) continue;
+
Handle<Object> handle = entry.ToHandle(isolate);
+
if (elements.find(*handle) != elements.end()) {
std::ostringstream os;
os << "Duplicate constant found: " << Brief(*handle) << std::endl;
@@ -220,9 +227,14 @@ SINGLETON_CONSTANT_ENTRY_TYPES(INSERT_ENTRY)
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndex(
ConstantArrayBuilder::Entry entry) {
+ return AllocateIndexArray(entry, 1);
+}
+
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndexArray(
+ ConstantArrayBuilder::Entry entry, size_t count) {
for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
- if (idx_slice_[i]->available() > 0) {
- return static_cast<index_t>(idx_slice_[i]->Allocate(entry));
+ if (idx_slice_[i]->available() >= count) {
+ return static_cast<index_t>(idx_slice_[i]->Allocate(entry, count));
}
}
UNREACHABLE();
@@ -254,11 +266,24 @@ size_t ConstantArrayBuilder::InsertDeferred() {
return AllocateIndex(Entry::Deferred());
}
+size_t ConstantArrayBuilder::InsertJumpTable(size_t size) {
+ return AllocateIndexArray(Entry::UninitializedJumpTableSmi(), size);
+}
+
void ConstantArrayBuilder::SetDeferredAt(size_t index, Handle<Object> object) {
ConstantArraySlice* slice = IndexToSlice(index);
return slice->At(index).SetDeferred(object);
}
+void ConstantArrayBuilder::SetJumpTableSmi(size_t index, Smi* smi) {
+ ConstantArraySlice* slice = IndexToSlice(index);
+ // Allow others to reuse these Smis, but insert using emplace to avoid
+ // overwriting existing values in the Smi map (which may have a smaller
+ // operand size).
+ smi_map_.emplace(smi, static_cast<index_t>(index));
+ return slice->At(index).SetJumpTableSmi(smi);
+}
+
OperandSize ConstantArrayBuilder::CreateReservedEntry() {
for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
if (idx_slice_[i]->available() > 0) {
@@ -311,7 +336,11 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
case Tag::kHandle:
return handle_;
case Tag::kSmi:
+ case Tag::kJumpTableSmi:
return handle(smi_, isolate);
+ case Tag::kUninitializedJumpTableSmi:
+ // TODO(leszeks): There's probably a better value we could use here.
+ return isolate->factory()->the_hole_value();
case Tag::kRawString:
return raw_string_->string();
case Tag::kHeapNumber:
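A vector-backed model of the count-based Allocate introduced above: count copies of the (initially uninitialized) entry are pushed back-to-back and the index of the first one is returned, which is exactly what makes jump tables contiguous in the constant pool.

#include <cstddef>
#include <vector>

template <typename Entry>
size_t AllocateRun(std::vector<Entry>& constants, Entry entry, size_t count) {
  size_t index = constants.size();
  for (size_t i = 0; i < count; ++i) constants.push_back(entry);
  return index;  // caller addresses slots [index, index + count)
}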
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 86e7c0818b..a50aa3519c 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -70,9 +70,18 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
// SetDeferredAt().
size_t InsertDeferred();
+ // Inserts |size| consecutive empty entries and returns the array index
+ // associated with the first reservation. Each entry's Smi value can be
+ // inserted by calling SetJumpTableSmi().
+ size_t InsertJumpTable(size_t size);
+
// Sets the deferred value at |index| to |object|.
void SetDeferredAt(size_t index, Handle<Object> object);
+ // Sets the jump table entry at |index| to |smi|. Note that |index| is the
+ // constant pool index, not the switch case value.
+ void SetJumpTableSmi(size_t index, Smi* smi);
+
// Creates a reserved entry in the constant pool and returns
// the size of the operand that'll be required to hold the entry
// when committed.
@@ -107,14 +116,29 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
static Entry Deferred() { return Entry(Tag::kDeferred); }
+ static Entry UninitializedJumpTableSmi() {
+ return Entry(Tag::kUninitializedJumpTableSmi);
+ }
+
bool IsDeferred() const { return tag_ == Tag::kDeferred; }
+ bool IsJumpTableEntry() const {
+ return tag_ == Tag::kUninitializedJumpTableSmi ||
+ tag_ == Tag::kJumpTableSmi;
+ }
+
void SetDeferred(Handle<Object> handle) {
DCHECK(tag_ == Tag::kDeferred);
tag_ = Tag::kHandle;
handle_ = handle;
}
+ void SetJumpTableSmi(Smi* smi) {
+ DCHECK(tag_ == Tag::kUninitializedJumpTableSmi);
+ tag_ = Tag::kJumpTableSmi;
+ smi_ = smi;
+ }
+
Handle<Object> ToHandle(Isolate* isolate) const;
private:
@@ -135,6 +159,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
kRawString,
kHeapNumber,
kScope,
+ kUninitializedJumpTableSmi,
+ kJumpTableSmi,
#define ENTRY_TAG(NAME, ...) k##NAME,
SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_TAG)
#undef ENTRY_TAG
@@ -142,6 +168,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
};
index_t AllocateIndex(Entry constant_entry);
+ index_t AllocateIndexArray(Entry constant_entry, size_t size);
index_t AllocateReservedEntry(Smi* value);
struct ConstantArraySlice final : public ZoneObject {
@@ -149,7 +176,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
OperandSize operand_size);
void Reserve();
void Unreserve();
- size_t Allocate(Entry entry);
+ size_t Allocate(Entry entry, size_t count = 1);
Entry& At(size_t index);
const Entry& At(size_t index) const;
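A standalone model of the InsertJumpTable/SetJumpTableSmi pairing above, with int standing in for Smi: the contiguous block is reserved first, each slot is filled once as its case label gets bound, and the value-to-index map uses emplace so an existing (possibly smaller-operand) mapping is never overwritten.

#include <cstddef>
#include <map>
#include <vector>

struct JumpTablePoolModel {
  std::vector<int> pool;          // -1 marks an uninitialized slot
  std::map<int, size_t> smi_map;  // reuse map, mirroring smi_map_

  size_t InsertJumpTable(size_t size) {
    size_t index = pool.size();
    pool.insert(pool.end(), size, -1);
    return index;
  }

  void SetJumpTableSmi(size_t index, int smi) {
    smi_map.emplace(smi, index);  // keep any pre-existing mapping
    pool[index] = smi;
  }
};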
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 81041e6a3d..e4281667c2 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -47,21 +47,40 @@ void BlockBuilder::EndBlock() {
LoopBuilder::~LoopBuilder() {
DCHECK(continue_labels_.empty() || continue_labels_.is_bound());
- DCHECK(header_labels_.empty() || header_labels_.is_bound());
+ BindBreakTarget();
+ // Restore the parent jump table.
+ if (generator_jump_table_location_ != nullptr) {
+ *generator_jump_table_location_ = parent_generator_jump_table_;
+ }
}
-void LoopBuilder::LoopHeader(ZoneVector<BytecodeLabel>* additional_labels) {
+void LoopBuilder::LoopHeader() {
// Jumps from before the loop header into the loop violate ordering
// requirements of bytecode basic blocks. The only entry into a loop
// must be the loop header. Surely breaks are okay? Not if nested
// and misplaced between the headers.
DCHECK(break_labels_.empty() && continue_labels_.empty());
builder()->Bind(&loop_header_);
- if (additional_labels != nullptr) {
- for (auto& label : *additional_labels) {
- builder()->Bind(&label);
- }
+}
+
+void LoopBuilder::LoopHeaderInGenerator(
+ BytecodeJumpTable** generator_jump_table, int first_resume_id,
+ int resume_count) {
+ // Bind all the resume points that are inside the loop to be at the loop
+ // header.
+ for (int id = first_resume_id; id < first_resume_id + resume_count; ++id) {
+ builder()->Bind(*generator_jump_table, id);
}
+
+ // Create the loop header.
+ LoopHeader();
+
+ // Create a new jump table, used after the loop header, containing only
+ // these resume points.
+ generator_jump_table_location_ = generator_jump_table;
+ parent_generator_jump_table_ = *generator_jump_table;
+ *generator_jump_table =
+ builder()->AllocateJumpTable(resume_count, first_resume_id);
}
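A stripped-down model of the jump-table save/restore that LoopBuilder performs above (names invented): the loop builder redirects the generator's current table pointer to a fresh per-loop table on entry and puts the parent table back when the loop ends, RAII-style.

struct TableModel { int first_resume_id; int resume_count; };

class LoopScopeModel {
 public:
  LoopScopeModel(TableModel** location, TableModel* loop_table)
      : location_(location), parent_(*location) {
    *location_ = loop_table;  // resume points inside the loop use this table
  }
  ~LoopScopeModel() { *location_ = parent_; }  // restore on loop exit

 private:
  TableModel** location_;
  TableModel* parent_;
};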
void LoopBuilder::JumpToHeader(int loop_depth) {
@@ -74,11 +93,6 @@ void LoopBuilder::JumpToHeader(int loop_depth) {
builder()->JumpLoop(&loop_header_, level);
}
-void LoopBuilder::EndLoop() {
- BindBreakTarget();
- header_labels_.BindToLabel(builder(), loop_header_);
-}
-
void LoopBuilder::BindContinueTarget() { continue_labels_.Bind(builder()); }
SwitchBuilder::~SwitchBuilder() {
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 313c9aa536..8cff017e78 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -90,13 +90,15 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
explicit LoopBuilder(BytecodeArrayBuilder* builder)
: BreakableControlFlowBuilder(builder),
continue_labels_(builder->zone()),
- header_labels_(builder->zone()) {}
+ generator_jump_table_location_(nullptr),
+ parent_generator_jump_table_(nullptr) {}
~LoopBuilder();
- void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels = nullptr);
+ void LoopHeader();
+ void LoopHeaderInGenerator(BytecodeJumpTable** parent_generator_jump_table,
+ int first_resume_id, int resume_count);
void JumpToHeader(int loop_depth);
void BindContinueTarget();
- void EndLoop();
// This method is called when visiting continue statements in the AST.
// Inserts a jump to an unbound label that is patched when BindContinueTarget
@@ -111,7 +113,13 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
// Unbound labels that identify jumps for continue statements in the code and
// jumps from checking the loop condition to the header for do-while loops.
BytecodeLabels continue_labels_;
- BytecodeLabels header_labels_;
+
+ // While we're in the loop, we want to have a different jump table for
+ // generator switch statements. We restore it at the end of the loop.
+ // TODO(leszeks): Storing a pointer to the BytecodeGenerator's jump table
+ // field is ugly; figure out a better way to do this.
+ BytecodeJumpTable** generator_jump_table_location_;
+ BytecodeJumpTable* parent_generator_jump_table_;
};
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index b65c7c7501..070c89549b 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -49,9 +49,9 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
dispatch_table_.Bind(
Parameter(InterpreterDispatchDescriptor::kDispatchTable));
- if (FLAG_trace_ignition) {
- TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
- }
+#ifdef V8_TRACE_IGNITION
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+#endif
RegisterCallGenerationCallbacks([this] { CallPrologue(); },
[this] { CallEpilogue(); });
@@ -119,7 +119,7 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
// Loop until the depth is 0.
- Bind(&context_search);
+ BIND(&context_search);
{
cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
cur_context.Bind(
@@ -129,7 +129,7 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
&context_search);
}
- Bind(&context_found);
+ BIND(&context_found);
return cur_context.value();
}
@@ -147,7 +147,7 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
// Loop until the depth is 0.
Goto(&context_search);
- Bind(&context_search);
+ BIND(&context_search);
{
// TODO(leszeks): We only need to do this check if the context had a sloppy
// eval, we could pass in a context chain bitmask to figure out which
@@ -204,7 +204,7 @@ Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return WordShl(index, kPointerSizeLog2);
+ return TimesPointerSize(index);
}
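The two formulations swapped above compute the same byte offset, since multiplying by the pointer size is a left shift by its log2. A sketch with plain integers, assuming kPointerSizeLog2 is log2(sizeof(void*)):

#include <cstdint>

constexpr uintptr_t kPointerSizeLog2 = sizeof(void*) == 8 ? 3 : 2;

constexpr uintptr_t FrameOffsetShift(uintptr_t index) {
  return index << kPointerSizeLog2;  // WordShl(index, kPointerSizeLog2)
}
constexpr uintptr_t FrameOffsetMul(uintptr_t index) {
  return index * sizeof(void*);      // TimesPointerSize(index)
}
static_assert(FrameOffsetShift(5) == FrameOffsetMul(5), "same offset");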
Node* InterpreterAssembler::LoadRegister(Register reg) {
@@ -598,7 +598,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Node* is_smi = TaggedIsSmi(function);
Branch(is_smi, &extra_checks, &call_function);
- Bind(&call_function);
+ BIND(&call_function);
{
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -614,7 +614,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&end);
}
- Bind(&extra_checks);
+ BIND(&extra_checks);
{
Label check_initialized(this), mark_megamorphic(this),
create_allocation_site(this);
@@ -658,7 +658,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&mark_megamorphic);
}
- Bind(&check_initialized);
+ BIND(&check_initialized);
{
Comment("check if uninitialized");
// Check if it is uninitialized target first.
@@ -698,7 +698,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&call_function);
}
- Bind(&create_allocation_site);
+ BIND(&create_allocation_site);
{
CreateAllocationSiteInFeedbackVector(feedback_vector, SmiTag(slot_id));
@@ -708,7 +708,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&call_function);
}
- Bind(&mark_megamorphic);
+ BIND(&mark_megamorphic);
{
// Mark it as a megamorphic.
// MegamorphicSentinel is created as a part of Heap::InitialObjects
@@ -722,7 +722,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
}
}
- Bind(&call);
+ BIND(&call);
{
Comment("Increment call count and call using Call builtin");
// Increment the call count.
@@ -739,7 +739,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
return return_value.value();
}
@@ -748,7 +748,8 @@ Node* InterpreterAssembler::CallJS(Node* function, Node* context,
ConvertReceiverMode receiver_mode,
TailCallMode tail_call_mode) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
- DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
+ DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
+ bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, tail_call_mode,
@@ -804,7 +805,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
allocation_feedback.Bind(UndefinedConstant());
Branch(is_monomorphic, &call_construct_function, &extra_checks);
- Bind(&call_construct_function);
+ BIND(&call_construct_function);
{
Comment("call using ConstructFunction");
IncrementCallCount(feedback_vector, slot_id);
@@ -817,7 +818,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Goto(&end);
}
- Bind(&extra_checks);
+ BIND(&extra_checks);
{
Label check_allocation_site(this), check_initialized(this),
initialize(this), mark_megamorphic(this);
@@ -840,7 +841,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Node* is_smi = TaggedIsSmi(feedback_value);
Branch(is_smi, &initialize, &mark_megamorphic);
- Bind(&check_allocation_site);
+ BIND(&check_allocation_site);
{
Comment("check if it is an allocation site");
Node* is_allocation_site =
@@ -858,7 +859,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Goto(&call_construct_function);
}
- Bind(&check_initialized);
+ BIND(&check_initialized);
{
// Check if it is uninitialized.
Comment("check if uninitialized");
@@ -867,7 +868,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Branch(is_uninitialized, &initialize, &mark_megamorphic);
}
- Bind(&initialize);
+ BIND(&initialize);
{
Label create_allocation_site(this), create_weak_cell(this);
Comment("initialize the feedback element");
@@ -878,7 +879,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Node* is_array_function = WordEqual(context_slot, constructor);
Branch(is_array_function, &create_allocation_site, &create_weak_cell);
- Bind(&create_allocation_site);
+ BIND(&create_allocation_site);
{
Node* site = CreateAllocationSiteInFeedbackVector(feedback_vector,
SmiTag(slot_id));
@@ -886,7 +887,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Goto(&call_construct_function);
}
- Bind(&create_weak_cell);
+ BIND(&create_weak_cell);
{
CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id),
constructor);
@@ -894,7 +895,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
}
}
- Bind(&mark_megamorphic);
+ BIND(&mark_megamorphic);
{
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
@@ -908,7 +909,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
}
}
- Bind(&call_construct);
+ BIND(&call_construct);
{
Comment("call using Construct builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
@@ -920,7 +921,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
return return_value.value();
}
@@ -990,7 +991,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
Branch(condition, &ok, &interrupt_check);
// Perform interrupt and reset budget.
- Bind(&interrupt_check);
+ BIND(&interrupt_check);
{
CallRuntime(Runtime::kInterrupt, GetContext());
new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
@@ -998,7 +999,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
}
// Update budget.
- Bind(&ok);
+ BIND(&ok);
StoreNoWriteBarrier(MachineRepresentation::kWord32,
BytecodeArrayTaggedPointer(), budget_offset,
new_budget.value());
@@ -1011,9 +1012,9 @@ Node* InterpreterAssembler::Advance(int delta) {
}
Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
- if (FLAG_trace_ignition) {
- TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
- }
+#ifdef V8_TRACE_IGNITION
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
+#endif
Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
: IntPtrAdd(BytecodeOffset(), delta);
bytecode_offset_.Bind(next_offset);
@@ -1039,9 +1040,9 @@ void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
Label match(this), no_match(this);
Branch(condition, &match, &no_match);
- Bind(&match);
+ BIND(&match);
Jump(delta);
- Bind(&no_match);
+ BIND(&no_match);
Dispatch();
}
@@ -1070,13 +1071,13 @@ Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
Node* is_star = WordEqual(target_bytecode, star_bytecode);
Branch(is_star, &do_inline_star, &done);
- Bind(&do_inline_star);
+ BIND(&do_inline_star);
{
InlineStar();
var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
Goto(&done);
}
- Bind(&done);
+ BIND(&done);
return var_bytecode.value();
}
@@ -1087,9 +1088,9 @@ void InterpreterAssembler::InlineStar() {
bytecode_ = Bytecode::kStar;
accumulator_use_ = AccumulatorUse::kNone;
- if (FLAG_trace_ignition) {
- TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
- }
+#ifdef V8_TRACE_IGNITION
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+#endif
StoreRegister(GetAccumulator(), BytecodeOperandReg(0));
DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
@@ -1119,7 +1120,7 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
- WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
+ TimesPointerSize(target_bytecode));
return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}
@@ -1172,7 +1173,7 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
Node* target_index = IntPtrAdd(base_index, next_bytecode);
Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
- WordShl(target_index, kPointerSizeLog2));
+ TimesPointerSize(target_index));
DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}
@@ -1187,7 +1188,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
var_value.Bind(value);
var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
Goto(&loop);
- Bind(&loop);
+ BIND(&loop);
{
// Load the current {value}.
value = var_value.value();
@@ -1196,7 +1197,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Label if_valueissmi(this), if_valueisnotsmi(this);
Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
- Bind(&if_valueissmi);
+ BIND(&if_valueissmi);
{
// Convert the Smi {value}.
var_result.Bind(SmiToWord32(value));
@@ -1206,7 +1207,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Goto(&done_loop);
}
- Bind(&if_valueisnotsmi);
+ BIND(&if_valueisnotsmi);
{
// Check if {value} is a HeapNumber.
Label if_valueisheapnumber(this),
@@ -1215,7 +1216,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber,
&if_valueisnotheapnumber);
- Bind(&if_valueisheapnumber);
+ BIND(&if_valueisheapnumber);
{
// Truncate the floating point value.
var_result.Bind(TruncateHeapNumberValueToWord32(value));
@@ -1225,7 +1226,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Goto(&done_loop);
}
- Bind(&if_valueisnotheapnumber);
+ BIND(&if_valueisnotheapnumber);
{
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
@@ -1239,7 +1240,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Int32Constant(ODDBALL_TYPE));
Branch(is_oddball, &if_valueisoddball, &if_valueisnotoddball);
- Bind(&if_valueisoddball);
+ BIND(&if_valueisoddball);
{
// Convert Oddball to a Number and perform checks again.
var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
@@ -1248,7 +1249,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Goto(&loop);
}
- Bind(&if_valueisnotoddball);
+ BIND(&if_valueisnotoddball);
{
// Convert the {value} to a Number first.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
@@ -1259,7 +1260,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
}
}
}
- Bind(&done_loop);
+ BIND(&done_loop);
return var_result.value();
}
@@ -1314,11 +1315,11 @@ void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
Label ok(this), abort(this, Label::kDeferred);
Branch(WordEqual(lhs, rhs), &ok, &abort);
- Bind(&abort);
+ BIND(&abort);
Abort(bailout_reason);
Goto(&ok);
- Bind(&ok);
+ BIND(&ok);
}
void InterpreterAssembler::MaybeDropFrames(Node* context) {
@@ -1331,14 +1332,14 @@ void InterpreterAssembler::MaybeDropFrames(Node* context) {
Label ok(this), drop_frames(this);
Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
- Bind(&drop_frames);
+ BIND(&drop_frames);
// We don't expect this call to return since the frame dropper tears down
// the stack and jumps into the function on the target frame to restart it.
CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
Abort(kUnexpectedReturnFromFrameDropper);
Goto(&ok);
- Bind(&ok);
+ BIND(&ok);
}
void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
@@ -1353,8 +1354,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
Node* counter_offset =
- WordShl(IntPtrAdd(source_bytecode_table_index, target_bytecode),
- IntPtrConstant(kPointerSizeLog2));
+ TimesPointerSize(IntPtrAdd(source_bytecode_table_index, target_bytecode));
Node* old_counter =
Load(MachineType::IntPtr(), counters_table, counter_offset);
@@ -1364,7 +1364,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
Branch(counter_reached_max, &counter_saturated, &counter_ok);
- Bind(&counter_ok);
+ BIND(&counter_ok);
{
Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
@@ -1372,7 +1372,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
Goto(&counter_saturated);
}
- Bind(&counter_saturated);
+ BIND(&counter_saturated);
}
// static
@@ -1412,7 +1412,7 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
// BytecodeGraphBuilder::VisitResumeGenerator.
Label loop(this, &var_index), done_loop(this);
Goto(&loop);
- Bind(&loop);
+ BIND(&loop);
{
Node* index = var_index.value();
GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
@@ -1425,7 +1425,7 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
Goto(&loop);
}
- Bind(&done_loop);
+ BIND(&done_loop);
return array;
}
@@ -1445,7 +1445,7 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
// array contents to not keep them alive artificially.
Label loop(this, &var_index), done_loop(this);
Goto(&loop);
- Bind(&loop);
+ BIND(&loop);
{
Node* index = var_index.value();
GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
@@ -1460,7 +1460,7 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
Goto(&loop);
}
- Bind(&done_loop);
+ BIND(&done_loop);
return array;
}
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 2a8f3c8810..b02e024d65 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -179,7 +179,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
feedback_vector, feedback_slot, &exit_point, &try_handler, &miss,
CodeStubAssembler::INTPTR_PARAMETERS);
- Bind(&done);
+ BIND(&done);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -190,7 +190,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
Variable var_result(this, MachineRepresentation::kTagged);
ExitPoint exit_point(this, &done, &var_result);
- Bind(&try_handler);
+ BIND(&try_handler);
{
Node* context = GetContext();
Node* smi_slot = SmiTag(feedback_slot);
@@ -203,7 +203,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
&exit_point, &miss);
}
- Bind(&miss);
+ BIND(&miss);
{
Node* context = GetContext();
Node* smi_slot = SmiTag(feedback_slot);
@@ -215,7 +215,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
}
- Bind(&done);
+ BIND(&done);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -414,7 +414,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
}
// Slow path when we have to call out to the runtime.
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* name = LoadConstantPoolEntry(name_index);
Node* result = CallRuntime(function_id, context, name);
@@ -470,7 +470,7 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler {
}
// Slow path when we have to call out to the runtime
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* name_index = BytecodeOperandIdx(0);
Node* name = LoadConstantPoolEntry(name_index);
@@ -557,7 +557,7 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
AccessorAssembler accessor_asm(state());
accessor_asm.LoadIC_BytecodeHandler(&params, &exit_point);
- Bind(&done);
+ BIND(&done);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -735,7 +735,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
Branch(IntPtrGreaterThan(cell_index, IntPtrConstant(0)), &if_export,
&if_import);
- Bind(&if_export);
+ BIND(&if_export);
{
Node* regular_exports =
LoadObjectField(module, Module::kRegularExportsOffset);
@@ -746,7 +746,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
Goto(&end);
}
- Bind(&if_import);
+ BIND(&if_import);
{
Node* regular_imports =
LoadObjectField(module, Module::kRegularImportsOffset);
@@ -757,7 +757,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
Dispatch();
}
@@ -777,7 +777,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
Branch(IntPtrGreaterThan(cell_index, IntPtrConstant(0)), &if_export,
&if_import);
- Bind(&if_export);
+ BIND(&if_export);
{
Node* regular_exports =
LoadObjectField(module, Module::kRegularExportsOffset);
@@ -788,14 +788,14 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
Goto(&end);
}
- Bind(&if_import);
+ BIND(&if_import);
{
// Not supported (probably never).
Abort(kUnsupportedModuleOperation);
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
Dispatch();
}
@@ -899,7 +899,7 @@ IGNITION_HANDLER(AddSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi and, if so, take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
// Try fast Smi addition first.
Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(left),
@@ -909,7 +909,7 @@ IGNITION_HANDLER(AddSmi, InterpreterAssembler) {
// Check if the Smi addition overflowed.
Label if_notoverflow(this);
Branch(overflow, &slowpath, &if_notoverflow);
- Bind(&if_notoverflow);
+ BIND(&if_notoverflow);
{
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall),
feedback_vector, slot_index);
@@ -917,7 +917,7 @@ IGNITION_HANDLER(AddSmi, InterpreterAssembler) {
Goto(&end);
}
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -926,7 +926,7 @@ IGNITION_HANDLER(AddSmi, InterpreterAssembler) {
feedback_vector));
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
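A scalar model of the AddSmi fast path above: attempt the integer addition first and fall back to the slow path only when it overflows, mirroring IntPtrAddWithOverflow plus the Branch on its overflow projection. __builtin_add_overflow is the GCC/Clang intrinsic; other compilers would need an explicit range check.

#include <cstdint>

// Returns true and writes the sum on success; false means overflow,
// i.e. take the slow path (the generic Add stub with feedback).
bool TryFastSmiAdd(intptr_t left, intptr_t right, intptr_t* result) {
  return !__builtin_add_overflow(left, right, result);
}

// Usage: if (!TryFastSmiAdd(l, r, &sum)) { /* call the Add stub */ }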
@@ -948,7 +948,7 @@ IGNITION_HANDLER(SubSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi and, if so, take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
// Try fast Smi subtraction first.
Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(left),
@@ -958,7 +958,7 @@ IGNITION_HANDLER(SubSmi, InterpreterAssembler) {
// Check if the Smi subtraction overflowed.
Label if_notoverflow(this);
Branch(overflow, &slowpath, &if_notoverflow);
- Bind(&if_notoverflow);
+ BIND(&if_notoverflow);
{
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall),
feedback_vector, slot_index);
@@ -966,7 +966,7 @@ IGNITION_HANDLER(SubSmi, InterpreterAssembler) {
Goto(&end);
}
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -975,7 +975,7 @@ IGNITION_HANDLER(SubSmi, InterpreterAssembler) {
feedback_vector));
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -997,7 +997,7 @@ IGNITION_HANDLER(MulSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
  // Check if the {left} is a Smi to take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
    // Both {lhs} and {rhs} are Smis. The result is not necessarily a Smi,
// in case of overflow.
@@ -1008,7 +1008,7 @@ IGNITION_HANDLER(MulSmi, InterpreterAssembler) {
UpdateFeedback(feedback, feedback_vector, slot_index);
Goto(&end);
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -1018,7 +1018,7 @@ IGNITION_HANDLER(MulSmi, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -1040,14 +1040,14 @@ IGNITION_HANDLER(DivSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
  // Check if the {left} is a Smi to take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
var_result.Bind(TrySmiDiv(left, right, &slowpath));
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall),
feedback_vector, slot_index);
Goto(&end);
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -1057,7 +1057,7 @@ IGNITION_HANDLER(DivSmi, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -1079,7 +1079,7 @@ IGNITION_HANDLER(ModSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
  // Check if the {left} is a Smi to take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
    // Both {lhs} and {rhs} are Smis. The result is not necessarily a Smi.
var_result.Bind(SmiMod(left, right));
@@ -1089,7 +1089,7 @@ IGNITION_HANDLER(ModSmi, InterpreterAssembler) {
UpdateFeedback(feedback, feedback_vector, slot_index);
Goto(&end);
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -1099,7 +1099,7 @@ IGNITION_HANDLER(ModSmi, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -1172,7 +1172,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
AbortIfWordNotEqual(result_map, HeapNumberMapConstant(),
kExpectedHeapNumber);
Goto(&ok);
- Bind(&ok);
+ BIND(&ok);
}
Node* input_feedback =
@@ -1422,21 +1422,21 @@ IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
Node* object_map = LoadMap(object);
Branch(IsHeapNumberMap(object_map), &if_objectisnumber, &if_objectisother);
- Bind(&if_objectissmi);
+ BIND(&if_objectissmi);
{
var_result.Bind(object);
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
Goto(&if_done);
}
- Bind(&if_objectisnumber);
+ BIND(&if_objectisnumber);
{
var_result.Bind(object);
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
Goto(&if_done);
}
- Bind(&if_objectisother);
+ BIND(&if_objectisother);
{
// Convert the {object} to a Number.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
@@ -1445,7 +1445,7 @@ IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
Goto(&if_done);
}
- Bind(&if_done);
+ BIND(&if_done);
StoreRegister(var_result.value(), BytecodeOperandReg(0));
// Record the type feedback collected for {object}.
@@ -1495,14 +1495,14 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
value_var.Bind(value);
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNone));
Goto(&start);
- Bind(&start);
+ BIND(&start);
{
value = value_var.value();
Label if_issmi(this), if_isnotsmi(this);
Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
- Bind(&if_issmi);
+ BIND(&if_issmi);
{
// Try fast Smi addition first.
Node* one = SmiConstant(Smi::FromInt(1));
@@ -1514,35 +1514,35 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
Label if_overflow(this), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
- Bind(&if_notoverflow);
+ BIND(&if_notoverflow);
var_type_feedback.Bind(
SmiOr(var_type_feedback.value(),
SmiConstant(BinaryOperationFeedback::kSignedSmall)));
result_var.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
Goto(&end);
- Bind(&if_overflow);
+ BIND(&if_overflow);
{
var_finc_value.Bind(SmiToFloat64(value));
Goto(&do_finc);
}
}
- Bind(&if_isnotsmi);
+ BIND(&if_isnotsmi);
{
// Check if the value is a HeapNumber.
Label if_valueisnumber(this), if_valuenotnumber(this, Label::kDeferred);
Node* value_map = LoadMap(value);
Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber);
- Bind(&if_valueisnumber);
+ BIND(&if_valueisnumber);
{
// Load the HeapNumber value.
var_finc_value.Bind(LoadHeapNumberValue(value));
Goto(&do_finc);
}
- Bind(&if_valuenotnumber);
+ BIND(&if_valuenotnumber);
{
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
@@ -1556,7 +1556,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE));
Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
- Bind(&if_valueisoddball);
+ BIND(&if_valueisoddball);
{
// Convert Oddball to Number and check again.
value_var.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
@@ -1565,7 +1565,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
Goto(&start);
}
- Bind(&if_valuenotoddball);
+ BIND(&if_valuenotoddball);
{
// Convert to a Number first and try again.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
@@ -1577,7 +1577,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
}
}
- Bind(&do_finc);
+ BIND(&do_finc);
{
Node* finc_value = var_finc_value.value();
Node* one = Float64Constant(1.0);
@@ -1589,7 +1589,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
SetAccumulator(result_var.value());
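
[Note how each path ORs new bits into var_type_feedback rather than overwriting it: feedback only ever widens. The lattice idea in standalone C++ (bit values are illustrative, not V8's actual encoding):

    #include <cstdint>

    enum : uint32_t { kNone = 0, kSignedSmall = 1, kNumber = 2, kAny = ~0u };

    // Like SmiOr(var_type_feedback.value(), SmiConstant(...)): a monotone merge.
    void UpdateFeedback(uint32_t& slot, uint32_t observed) { slot |= observed; }
]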
@@ -1622,14 +1622,14 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNone));
value_var.Bind(value);
Goto(&start);
- Bind(&start);
+ BIND(&start);
{
value = value_var.value();
Label if_issmi(this), if_isnotsmi(this);
Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
- Bind(&if_issmi);
+ BIND(&if_issmi);
{
// Try fast Smi subtraction first.
Node* one = SmiConstant(Smi::FromInt(1));
@@ -1641,35 +1641,35 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
Label if_overflow(this), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
- Bind(&if_notoverflow);
+ BIND(&if_notoverflow);
var_type_feedback.Bind(
SmiOr(var_type_feedback.value(),
SmiConstant(BinaryOperationFeedback::kSignedSmall)));
result_var.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
Goto(&end);
- Bind(&if_overflow);
+ BIND(&if_overflow);
{
var_fdec_value.Bind(SmiToFloat64(value));
Goto(&do_fdec);
}
}
- Bind(&if_isnotsmi);
+ BIND(&if_isnotsmi);
{
// Check if the value is a HeapNumber.
Label if_valueisnumber(this), if_valuenotnumber(this, Label::kDeferred);
Node* value_map = LoadMap(value);
Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber);
- Bind(&if_valueisnumber);
+ BIND(&if_valueisnumber);
{
// Load the HeapNumber value.
var_fdec_value.Bind(LoadHeapNumberValue(value));
Goto(&do_fdec);
}
- Bind(&if_valuenotnumber);
+ BIND(&if_valuenotnumber);
{
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
@@ -1683,7 +1683,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE));
Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
- Bind(&if_valueisoddball);
+ BIND(&if_valueisoddball);
{
// Convert Oddball to Number and check again.
value_var.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
@@ -1692,7 +1692,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
Goto(&start);
}
- Bind(&if_valuenotoddball);
+ BIND(&if_valuenotoddball);
{
// Convert to a Number first and try again.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
@@ -1704,7 +1704,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
}
}
- Bind(&do_fdec);
+ BIND(&do_fdec);
{
Node* fdec_value = var_fdec_value.value();
Node* one = Float64Constant(1.0);
@@ -1716,7 +1716,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
SetAccumulator(result_var.value());
@@ -1735,17 +1735,17 @@ IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) {
Node* true_value = BooleanConstant(true);
Node* false_value = BooleanConstant(false);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
{
result.Bind(false_value);
Goto(&end);
}
- Bind(&if_false);
+ BIND(&if_false);
{
result.Bind(true_value);
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
SetAccumulator(result.value());
Dispatch();
}
@@ -1761,12 +1761,12 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
Node* true_value = BooleanConstant(true);
Node* false_value = BooleanConstant(false);
Branch(WordEqual(value, true_value), &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
{
result.Bind(false_value);
Goto(&end);
}
- Bind(&if_false);
+ BIND(&if_false);
{
if (FLAG_debug_code) {
AbortIfWordNotEqual(value, false_value,
@@ -1775,7 +1775,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
result.Bind(true_value);
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
SetAccumulator(result.value());
Dispatch();
}
@@ -1800,8 +1800,8 @@ IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) {
Node* object = LoadRegister(reg_index);
Node* key = GetAccumulator();
Node* context = GetContext();
- Node* result =
- CallRuntime(Runtime::kDeleteProperty_Strict, context, object, key);
+ Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
+ SmiConstant(STRICT));
SetAccumulator(result);
Dispatch();
}
@@ -1815,8 +1815,8 @@ IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
Node* object = LoadRegister(reg_index);
Node* key = GetAccumulator();
Node* context = GetContext();
- Node* result =
- CallRuntime(Runtime::kDeleteProperty_Sloppy, context, object, key);
+ Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
+ SmiConstant(SLOPPY));
SetAccumulator(result);
Dispatch();
}
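
[Both delete handlers now funnel into the single Builtins::kDeleteProperty, distinguished only by a language-mode argument, instead of two separate runtime functions. The shape of that refactor, sketched in standalone C++ (Property, LanguageMode and the object model are illustrative):

    #include <map>
    #include <stdexcept>
    #include <string>

    enum class LanguageMode { kSloppy, kStrict };
    struct Property { int value; bool configurable; };

    // One entry point parameterized by language mode, standing in for the
    // former Runtime::kDeleteProperty_Strict / _Sloppy pair.
    bool DeleteProperty(std::map<std::string, Property>& object,
                        const std::string& key, LanguageMode mode) {
      auto it = object.find(key);
      if (it == object.end()) return true;  // deleting a missing key succeeds
      if (!it->second.configurable) {
        if (mode == LanguageMode::kStrict)
          throw std::runtime_error("TypeError: property is non-configurable");
        return false;                       // sloppy mode just reports failure
      }
      object.erase(it);
      return true;
    }
]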
@@ -2124,313 +2124,41 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
Node* lhs = LoadRegister(reg_index);
Node* rhs = GetAccumulator();
Node* context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
-
- Variable var_result(this, MachineRepresentation::kTagged),
- var_fcmp_lhs(this, MachineRepresentation::kFloat64),
- var_fcmp_rhs(this, MachineRepresentation::kFloat64),
- non_number_value(this, MachineRepresentation::kTagged),
- maybe_smi_value(this, MachineRepresentation::kTagged);
- Label lhs_is_not_smi(this), do_fcmp(this), slow_path(this),
- fast_path_dispatch(this);
-
- GotoIf(TaggedIsNotSmi(lhs), &lhs_is_not_smi);
- {
- Label rhs_is_not_smi(this);
- GotoIf(TaggedIsNotSmi(rhs), &rhs_is_not_smi);
- {
- Comment("Do integer comparison");
- UpdateFeedback(SmiConstant(CompareOperationFeedback::kSignedSmall),
- feedback_vector, slot_index);
- Node* result;
- switch (compare_op) {
- case Token::LT:
- result = SelectBooleanConstant(SmiLessThan(lhs, rhs));
- break;
- case Token::LTE:
- result = SelectBooleanConstant(SmiLessThanOrEqual(lhs, rhs));
- break;
- case Token::GT:
- result = SelectBooleanConstant(SmiLessThan(rhs, lhs));
- break;
- case Token::GTE:
- result = SelectBooleanConstant(SmiLessThanOrEqual(rhs, lhs));
- break;
- case Token::EQ:
- case Token::EQ_STRICT:
- result = SelectBooleanConstant(WordEqual(lhs, rhs));
- break;
- default:
- UNREACHABLE();
- }
- var_result.Bind(result);
- Goto(&fast_path_dispatch);
- }
-
- Bind(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
- Label rhs_is_not_number(this);
- GotoIfNot(IsHeapNumberMap(rhs_map), &rhs_is_not_number);
-
- Comment("Convert lhs to float and load HeapNumber value from rhs");
- var_fcmp_lhs.Bind(SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
-
- Bind(&rhs_is_not_number);
- {
- non_number_value.Bind(rhs);
- maybe_smi_value.Bind(lhs);
- Goto(&slow_path);
- }
- }
- }
-
- Bind(&lhs_is_not_smi);
- {
- Label rhs_is_not_smi(this), lhs_is_not_number(this),
- rhs_is_not_number(this);
-
- Node* lhs_map = LoadMap(lhs);
- GotoIfNot(IsHeapNumberMap(lhs_map), &lhs_is_not_number);
-
- GotoIfNot(TaggedIsSmi(rhs), &rhs_is_not_smi);
- Comment("Convert rhs to double and load HeapNumber value from lhs");
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fcmp);
-
- Bind(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
- GotoIfNot(IsHeapNumberMap(rhs_map), &rhs_is_not_number);
-
- Comment("Load HeapNumber values from lhs and rhs");
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
- }
-
- Bind(&lhs_is_not_number);
- {
- non_number_value.Bind(lhs);
- maybe_smi_value.Bind(rhs);
- Goto(&slow_path);
- }
-
- Bind(&rhs_is_not_number);
- {
- non_number_value.Bind(rhs);
- maybe_smi_value.Bind(lhs);
- Goto(&slow_path);
- }
- }
-
- Bind(&do_fcmp);
- {
- Comment("Do floating point comparison");
- Node* lhs_float = var_fcmp_lhs.value();
- Node* rhs_float = var_fcmp_rhs.value();
- UpdateFeedback(SmiConstant(CompareOperationFeedback::kNumber),
- feedback_vector, slot_index);
-
- // Perform a fast floating point comparison.
- Node* result;
- switch (compare_op) {
- case Token::LT:
- result = SelectBooleanConstant(Float64LessThan(lhs_float, rhs_float));
- break;
- case Token::LTE:
- result = SelectBooleanConstant(
- Float64LessThanOrEqual(lhs_float, rhs_float));
- break;
- case Token::GT:
- result =
- SelectBooleanConstant(Float64GreaterThan(lhs_float, rhs_float));
- break;
- case Token::GTE:
- result = SelectBooleanConstant(
- Float64GreaterThanOrEqual(lhs_float, rhs_float));
- break;
- case Token::EQ:
- case Token::EQ_STRICT: {
- Label check_nan(this);
- var_result.Bind(BooleanConstant(false));
- Branch(Float64Equal(lhs_float, rhs_float), &check_nan,
- &fast_path_dispatch);
- Bind(&check_nan);
- result = SelectBooleanConstant(Float64Equal(lhs_float, lhs_float));
- } break;
- default:
- UNREACHABLE();
- }
- var_result.Bind(result);
- Goto(&fast_path_dispatch);
- }
-
- Bind(&fast_path_dispatch);
- {
- SetAccumulator(var_result.value());
- Dispatch();
+ Variable var_type_feedback(this, MachineRepresentation::kTagged);
+ Node* result;
+ switch (compare_op) {
+ case Token::EQ:
+ result = Equal(lhs, rhs, context, &var_type_feedback);
+ break;
+ case Token::EQ_STRICT:
+ result = StrictEqual(lhs, rhs, &var_type_feedback);
+ break;
+ case Token::LT:
+ result = RelationalComparison(CodeStubAssembler::kLessThan, lhs, rhs,
+ context, &var_type_feedback);
+ break;
+ case Token::GT:
+ result = RelationalComparison(CodeStubAssembler::kGreaterThan, lhs, rhs,
+ context, &var_type_feedback);
+ break;
+ case Token::LTE:
+ result = RelationalComparison(CodeStubAssembler::kLessThanOrEqual, lhs,
+ rhs, context, &var_type_feedback);
+ break;
+ case Token::GTE:
+ result = RelationalComparison(CodeStubAssembler::kGreaterThanOrEqual,
+ lhs, rhs, context, &var_type_feedback);
+ break;
+ default:
+ UNREACHABLE();
}
- // Marking a block with more than one predecessor causes register allocator
- // to fail (v8:5998). Add a dummy block as a workaround.
- Label slow_path_deferred(this, Label::kDeferred);
- Bind(&slow_path);
- Goto(&slow_path_deferred);
-
- Bind(&slow_path_deferred);
- {
- // When we reach here, one of the operands is not a Smi / HeapNumber and
- // the other operand could be of any type. The cases where both of them
- // are HeapNumbers / Smis are handled earlier.
- Comment("Collect feedback for non HeapNumber cases.");
- Label update_feedback_and_do_compare(this);
- Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
- var_type_feedback.Bind(SmiConstant(CompareOperationFeedback::kAny));
-
- if (Token::IsOrderedRelationalCompareOp(compare_op)) {
- Label check_for_oddball(this);
- // Check for NumberOrOddball feedback.
- Node* non_number_instance_type =
- LoadInstanceType(non_number_value.value());
- GotoIf(
- Word32Equal(non_number_instance_type, Int32Constant(ODDBALL_TYPE)),
- &check_for_oddball);
-
- // Check for string feedback.
- GotoIfNot(IsStringInstanceType(non_number_instance_type),
- &update_feedback_and_do_compare);
-
- GotoIf(TaggedIsSmi(maybe_smi_value.value()),
- &update_feedback_and_do_compare);
-
- Node* maybe_smi_instance_type =
- LoadInstanceType(maybe_smi_value.value());
- GotoIfNot(IsStringInstanceType(maybe_smi_instance_type),
- &update_feedback_and_do_compare);
-
- var_type_feedback.Bind(SmiConstant(CompareOperationFeedback::kString));
- Goto(&update_feedback_and_do_compare);
-
- Bind(&check_for_oddball);
- {
- Label compare_with_oddball_feedback(this);
- GotoIf(TaggedIsSmi(maybe_smi_value.value()),
- &compare_with_oddball_feedback);
-
- Node* maybe_smi_instance_type =
- LoadInstanceType(maybe_smi_value.value());
- GotoIf(Word32Equal(maybe_smi_instance_type,
- Int32Constant(HEAP_NUMBER_TYPE)),
- &compare_with_oddball_feedback);
-
- Branch(
- Word32Equal(maybe_smi_instance_type, Int32Constant(ODDBALL_TYPE)),
- &compare_with_oddball_feedback, &update_feedback_and_do_compare);
-
- Bind(&compare_with_oddball_feedback);
- {
- var_type_feedback.Bind(
- SmiConstant(CompareOperationFeedback::kNumberOrOddball));
- Goto(&update_feedback_and_do_compare);
- }
- }
- } else {
- Label not_string(this), both_are_strings(this);
-
- DCHECK(Token::IsEqualityOp(compare_op));
-
- // If one of them is a Smi and the other is not a number, record "Any"
- // feedback. Equality comparisons do not need feedback about oddballs.
- GotoIf(TaggedIsSmi(maybe_smi_value.value()),
- &update_feedback_and_do_compare);
-
- Node* maybe_smi_instance_type =
- LoadInstanceType(maybe_smi_value.value());
- Node* non_number_instance_type =
- LoadInstanceType(non_number_value.value());
- GotoIfNot(IsStringInstanceType(maybe_smi_instance_type), &not_string);
-
- // If one value is string and other isn't record "Any" feedback.
- Branch(IsStringInstanceType(non_number_instance_type),
- &both_are_strings, &update_feedback_and_do_compare);
-
- Bind(&both_are_strings);
- {
- Node* operand1_feedback = SelectSmiConstant(
- Word32Equal(Word32And(maybe_smi_instance_type,
- Int32Constant(kIsNotInternalizedMask)),
- Int32Constant(kInternalizedTag)),
- CompareOperationFeedback::kInternalizedString,
- CompareOperationFeedback::kString);
-
- Node* operand2_feedback = SelectSmiConstant(
- Word32Equal(Word32And(non_number_instance_type,
- Int32Constant(kIsNotInternalizedMask)),
- Int32Constant(kInternalizedTag)),
- CompareOperationFeedback::kInternalizedString,
- CompareOperationFeedback::kString);
-
- var_type_feedback.Bind(SmiOr(operand1_feedback, operand2_feedback));
- Goto(&update_feedback_and_do_compare);
- }
-
- Bind(&not_string);
- {
- // Check if both operands are of type JSReceiver.
- GotoIfNot(IsJSReceiverInstanceType(maybe_smi_instance_type),
- &update_feedback_and_do_compare);
-
- GotoIfNot(IsJSReceiverInstanceType(non_number_instance_type),
- &update_feedback_and_do_compare);
-
- var_type_feedback.Bind(
- SmiConstant(CompareOperationFeedback::kReceiver));
- Goto(&update_feedback_and_do_compare);
- }
- }
-
- Bind(&update_feedback_and_do_compare);
- {
- Comment("Do the full compare operation");
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
- Node* result;
- switch (compare_op) {
- case Token::EQ:
- result = Equal(lhs, rhs, context);
- break;
- case Token::EQ_STRICT:
- result = StrictEqual(lhs, rhs);
- break;
- case Token::LT:
- result = RelationalComparison(CodeStubAssembler::kLessThan, lhs,
- rhs, context);
- break;
- case Token::GT:
- result = RelationalComparison(CodeStubAssembler::kGreaterThan, lhs,
- rhs, context);
- break;
- case Token::LTE:
- result = RelationalComparison(CodeStubAssembler::kLessThanOrEqual,
- lhs, rhs, context);
- break;
- case Token::GTE:
- result = RelationalComparison(
- CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
- break;
- default:
- UNREACHABLE();
- }
- var_result.Bind(result);
- SetAccumulator(var_result.value());
- Dispatch();
- }
- }
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ SetAccumulator(result);
+ Dispatch();
}
};
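
[The net effect of this hunk: roughly 270 lines of hand-rolled Smi/HeapNumber dispatch and feedback classification are replaced by calls into the shared CodeStubAssembler comparison helpers, which compute the result and report feedback through var_type_feedback. The residual interpreter-side shape, sketched in standalone C++ (types and the feedback bit are illustrative):

    #include <cstdint>
    #include <cstdlib>

    enum class CompareOp { kEq, kStrictEq, kLt, kGt, kLte, kGte };

    // Stand-in for Equal/StrictEqual/RelationalComparison: computes the result
    // and reports observed operand kinds through *feedback as a side channel.
    bool Compare(CompareOp op, double lhs, double rhs, uint32_t* feedback) {
      *feedback |= 1u;  // e.g. a "number" feedback bit
      switch (op) {
        case CompareOp::kEq:
        case CompareOp::kStrictEq: return lhs == rhs;
        case CompareOp::kLt:       return lhs < rhs;
        case CompareOp::kGt:       return lhs > rhs;
        case CompareOp::kLte:      return lhs <= rhs;
        case CompareOp::kGte:      return lhs >= rhs;
      }
      std::abort();  // UNREACHABLE()
    }
]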
@@ -2543,7 +2271,7 @@ IGNITION_HANDLER(TestUndetectable, InterpreterAssembler) {
SetAccumulator(result);
Goto(&end);
- Bind(&end);
+ BIND(&end);
Dispatch();
}
@@ -2594,37 +2322,37 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Switch(literal_flag, &abort, cases, labels, arraysize(cases));
- Bind(&abort);
+ BIND(&abort);
{
Comment("Abort");
Abort(BailoutReason::kUnexpectedTestTypeofLiteralFlag);
Goto(&if_false);
}
- Bind(&if_number);
+ BIND(&if_number);
{
Comment("IfNumber");
GotoIfNumber(object, &if_true);
Goto(&if_false);
}
- Bind(&if_string);
+ BIND(&if_string);
{
Comment("IfString");
GotoIf(TaggedIsSmi(object), &if_false);
Branch(IsString(object), &if_true, &if_false);
}
- Bind(&if_symbol);
+ BIND(&if_symbol);
{
Comment("IfSymbol");
GotoIf(TaggedIsSmi(object), &if_false);
Branch(IsSymbol(object), &if_true, &if_false);
}
- Bind(&if_boolean);
+ BIND(&if_boolean);
{
Comment("IfBoolean");
GotoIf(WordEqual(object, BooleanConstant(true)), &if_true);
Branch(WordEqual(object, BooleanConstant(false)), &if_true, &if_false);
}
- Bind(&if_undefined);
+ BIND(&if_undefined);
{
Comment("IfUndefined");
GotoIf(TaggedIsSmi(object), &if_false);
@@ -2636,7 +2364,7 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Branch(Word32Equal(undetectable_bit, Int32Constant(0)), &if_false,
&if_true);
}
- Bind(&if_function);
+ BIND(&if_function);
{
Comment("IfFunction");
GotoIf(TaggedIsSmi(object), &if_false);
@@ -2649,7 +2377,7 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Int32Constant(1 << Map::kIsCallable)),
&if_true, &if_false);
}
- Bind(&if_object);
+ BIND(&if_object);
{
Comment("IfObject");
GotoIf(TaggedIsSmi(object), &if_false);
@@ -2667,29 +2395,29 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Branch(Word32Equal(callable_undetectable, Int32Constant(0)), &if_true,
&if_false);
}
- Bind(&if_other);
+ BIND(&if_other);
{
// Typeof doesn't return any other string value.
Goto(&if_false);
}
- Bind(&if_false);
+ BIND(&if_false);
{
SetAccumulator(BooleanConstant(false));
Goto(&end);
}
- Bind(&if_true);
+ BIND(&if_true);
{
SetAccumulator(BooleanConstant(true));
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
Dispatch();
}
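
[TestTypeOf works because the compiler already lowered the string literal in a `typeof x == "..."` comparison to a small integer flag; at run time the handler only classifies the operand. The same dispatch in standalone C++ (the flag enum and string classification are illustrative):

    #include <string>

    enum class TypeOfLiteral { kNumber, kString, kSymbol, kBoolean,
                               kUndefined, kFunction, kObject, kOther };

    bool TestTypeOf(TypeOfLiteral flag, const std::string& actual) {
      switch (flag) {
        case TypeOfLiteral::kNumber:    return actual == "number";
        case TypeOfLiteral::kString:    return actual == "string";
        case TypeOfLiteral::kSymbol:    return actual == "symbol";
        case TypeOfLiteral::kBoolean:   return actual == "boolean";
        case TypeOfLiteral::kUndefined: return actual == "undefined";
        case TypeOfLiteral::kFunction:  return actual == "function";
        case TypeOfLiteral::kObject:    return actual == "object";
        case TypeOfLiteral::kOther:     return false;  // typeof yields nothing else
      }
      return false;
    }
]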
// Jump <imm>
//
-// Jump by number of bytes represented by the immediate operand |imm|.
+// Jump by the number of bytes represented by the immediate operand |imm|.
IGNITION_HANDLER(Jump, InterpreterAssembler) {
Node* relative_jump = BytecodeOperandUImmWord(0);
Jump(relative_jump);
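
[Throughout these handlers, "jump by N bytes" is plain offset arithmetic on the interpreter's bytecode position; Dispatch() then decodes at the new offset. A hypothetical dispatch-loop sketch, not V8's actual frame layout:

    #include <cstddef>
    #include <cstdint>

    struct Frame {
      const uint8_t* bytecode;  // start of the bytecode array
      size_t pc;                // current offset into it
    };

    // Jump <imm>: move pc by the operand instead of past this instruction.
    void Jump(Frame& f, size_t relative_jump) { f.pc += relative_jump; }
]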
@@ -2697,7 +2425,8 @@ IGNITION_HANDLER(Jump, InterpreterAssembler) {
// JumpConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool.
IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
@@ -2706,7 +2435,7 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
// JumpIfTrue <imm>
//
-// Jump by number of bytes represented by an immediate operand if the
+// Jump by the number of bytes represented by an immediate operand if the
// accumulator contains true. This only works for boolean inputs, and
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
@@ -2720,9 +2449,9 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
// JumpIfTrueConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the accumulator contains true. This only works for boolean inputs, and
-// will misbehave if passed arbitrary input values.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the accumulator contains true. This only works for boolean inputs,
+// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
@@ -2735,7 +2464,7 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
// JumpIfFalse <imm>
//
-// Jump by number of bytes represented by an immediate operand if the
+// Jump by the number of bytes represented by an immediate operand if the
// accumulator contains false. This only works for boolean inputs, and
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
@@ -2749,9 +2478,9 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
// JumpIfFalseConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the accumulator contains false. This only works for boolean inputs, and
-// will misbehave if passed arbitrary input values.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the accumulator contains false. This only works for boolean inputs,
+// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
@@ -2764,71 +2493,71 @@ IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
// JumpIfToBooleanTrue <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* relative_jump = BytecodeOperandUImmWord(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
Jump(relative_jump);
- Bind(&if_false);
+ BIND(&if_false);
Dispatch();
}
// JumpIfToBooleanTrueConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is true when the object is cast
-// to boolean.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is true when the object is
+// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
Jump(relative_jump);
- Bind(&if_false);
+ BIND(&if_false);
Dispatch();
}
// JumpIfToBooleanFalse <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* relative_jump = BytecodeOperandUImmWord(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
Dispatch();
- Bind(&if_false);
+ BIND(&if_false);
Jump(relative_jump);
}
// JumpIfToBooleanFalseConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is false when the object is cast
-// to boolean.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is false when the object is
+// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
Dispatch();
- Bind(&if_false);
+ BIND(&if_false);
Jump(relative_jump);
}
// JumpIfNull <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2839,8 +2568,8 @@ IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
// JumpIfNullConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is the null constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* null_value = HeapConstant(isolate()->factory()->null_value());
@@ -2851,7 +2580,7 @@ IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
// JumpIfNotNull <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2862,8 +2591,8 @@ IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
// JumpIfNotNullConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is not the null constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* null_value = HeapConstant(isolate()->factory()->null_value());
@@ -2874,7 +2603,7 @@ IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
// JumpIfUndefined <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2885,8 +2614,8 @@ IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
// JumpIfUndefinedConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is the undefined constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value());
@@ -2897,7 +2626,7 @@ IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
// JumpIfNotUndefined <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the undefined constant.
IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2908,8 +2637,9 @@ IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
// JumpIfNotUndefinedConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is not the undefined constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is not the undefined
+// constant.
IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value());
@@ -2920,7 +2650,7 @@ IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
// JumpIfJSReceiver <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2929,19 +2659,19 @@ IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
Label if_object(this), if_notobject(this, Label::kDeferred), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
- Bind(&if_notsmi);
+ BIND(&if_notsmi);
Branch(IsJSReceiver(accumulator), &if_object, &if_notobject);
- Bind(&if_object);
+ BIND(&if_object);
Jump(relative_jump);
- Bind(&if_notobject);
+ BIND(&if_notobject);
Dispatch();
}
// JumpIfJSReceiverConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool if
-// the object referenced by the accumulator is a JSReceiver.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
@@ -2950,19 +2680,19 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
Label if_object(this), if_notobject(this), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
- Bind(&if_notsmi);
+ BIND(&if_notsmi);
Branch(IsJSReceiver(accumulator), &if_object, &if_notobject);
- Bind(&if_object);
+ BIND(&if_object);
Jump(relative_jump);
- Bind(&if_notobject);
+ BIND(&if_notobject);
Dispatch();
}
// JumpIfNotHole <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the hole.
IGNITION_HANDLER(JumpIfNotHole, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2973,8 +2703,8 @@ IGNITION_HANDLER(JumpIfNotHole, InterpreterAssembler) {
// JumpIfNotHoleConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is the hole constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is not the hole constant.
IGNITION_HANDLER(JumpIfNotHoleConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
@@ -2985,7 +2715,7 @@ IGNITION_HANDLER(JumpIfNotHoleConstant, InterpreterAssembler) {
// JumpLoop <imm> <loop_depth>
//
-// Jump by number of bytes represented by the immediate operand |imm|. Also
+// Jump by the number of bytes represented by the immediate operand |imm|. Also
// performs a loop nesting check and potentially triggers OSR in case the
// current OSR level matches (or exceeds) the specified |loop_depth|.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
@@ -2999,10 +2729,10 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
Node* condition = Int32GreaterThanOrEqual(loop_depth, osr_level);
Branch(condition, &ok, &osr_armed);
- Bind(&ok);
+ BIND(&ok);
JumpBackward(relative_jump);
- Bind(&osr_armed);
+ BIND(&osr_armed);
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
Node* target = HeapConstant(callable.code());
@@ -3012,6 +2742,37 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
}
}
+// SwitchOnSmiNoFeedback <table_start> <table_length> <case_value_base>
+//
+// Jump by the number of bytes defined by a Smi in a table in the constant pool,
+// where the table starts at |table_start| and has |table_length| entries.
+// The table is indexed by the accumulator value minus |case_value_base|. If
+// the resulting case_value falls outside the range [0, |table_length|),
+// execution falls through to the next bytecode.
+IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
+ Node* acc = GetAccumulator();
+ Node* table_start = BytecodeOperandIdx(0);
+ Node* table_length = BytecodeOperandUImmWord(1);
+ Node* case_value_base = BytecodeOperandImmIntPtr(2);
+
+ Label fall_through(this);
+
+ // The accumulator must be a Smi.
+ // TODO(leszeks): Add a bytecode with type feedback that allows other
+ // accumulator values.
+ CSA_ASSERT(this, TaggedIsSmi(acc));
+
+ Node* case_value = IntPtrSub(SmiUntag(acc), case_value_base);
+ GotoIf(IntPtrLessThan(case_value, IntPtrConstant(0)), &fall_through);
+ GotoIf(IntPtrGreaterThanOrEqual(case_value, table_length), &fall_through);
+ Node* entry = IntPtrAdd(table_start, case_value);
+ Node* relative_jump = LoadAndUntagConstantPoolEntry(entry);
+ Jump(relative_jump);
+
+ BIND(&fall_through);
+ Dispatch();
+}
+
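
[In other words, the new bytecode is a bounds-checked jump table. The same control flow in standalone C++, with the constant-pool table reduced to a vector of offsets (illustrative only):

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Returns the relative jump for `acc`, or nullopt to fall through.
    std::optional<intptr_t> SwitchOnSmi(intptr_t acc, intptr_t case_value_base,
                                        const std::vector<intptr_t>& table) {
      intptr_t case_value = acc - case_value_base;
      if (case_value < 0 ||
          static_cast<size_t>(case_value) >= table.size()) {
        return std::nullopt;  // out of range: dispatch to the next bytecode
      }
      return table[static_cast<size_t>(case_value)];  // entry from constant pool
    }
]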
// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
@@ -3045,7 +2806,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(bytecode_flags),
&fast_shallow_clone, &call_runtime);
- Bind(&fast_shallow_clone);
+ BIND(&fast_shallow_clone);
{
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitFastCloneShallowArray(
@@ -3054,7 +2815,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
Dispatch();
}
- Bind(&call_runtime);
+ BIND(&call_runtime);
{
Node* flags_raw = DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
bytecode_flags);
@@ -3079,23 +2840,21 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
// Check if we can do a fast clone or have to call the runtime.
Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
- Node* fast_clone_properties_count = DecodeWordFromWord32<
- CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
- Branch(WordNotEqual(fast_clone_properties_count, IntPtrConstant(0)),
+ Branch(IsSetWord32<CreateObjectLiteralFlags::FastCloneSupportedBit>(
+ bytecode_flags),
&if_fast_clone, &if_not_fast_clone);
- Bind(&if_fast_clone);
+ BIND(&if_fast_clone);
{
// If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitFastCloneShallowObject(
- &if_not_fast_clone, closure, literal_index,
- fast_clone_properties_count);
+ &if_not_fast_clone, closure, literal_index);
StoreRegister(result, BytecodeOperandReg(3));
Dispatch();
}
- Bind(&if_not_fast_clone);
+ BIND(&if_not_fast_clone);
{
// If we can't do a fast clone, call into the runtime.
Node* index = BytecodeOperandIdx(0);
@@ -3135,7 +2894,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
shared, feedback_vector, vector_index, context));
Dispatch();
- Bind(&call_runtime);
+ BIND(&call_runtime);
{
Node* tenured_raw =
DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
@@ -3249,7 +3008,7 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
Node* compare = Word32And(compiler_hints, duplicate_parameters_bit);
Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
- Bind(&if_not_duplicate_parameters);
+ BIND(&if_not_duplicate_parameters);
{
ArgumentsBuiltinsAssembler constructor_assembler(state());
Node* result =
@@ -3258,7 +3017,7 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
Dispatch();
}
- Bind(&if_duplicate_parameters);
+ BIND(&if_duplicate_parameters);
{
Node* result =
CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
@@ -3301,10 +3060,10 @@ IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
Node* interrupt = StackCheckTriggeredInterrupt();
Branch(interrupt, &stack_check_interrupt, &ok);
- Bind(&ok);
+ BIND(&ok);
Dispatch();
- Bind(&stack_check_interrupt);
+ BIND(&stack_check_interrupt);
{
Node* context = GetContext();
CallRuntime(Runtime::kStackGuard, context);
@@ -3427,7 +3186,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
cache_length);
Dispatch();
- Bind(&call_runtime);
+ BIND(&call_runtime);
{
Node* result_triple =
CallRuntime(Runtime::kForInPrepare, context, receiver);
@@ -3438,7 +3197,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
cache_length);
Dispatch();
}
- Bind(&nothing_to_iterate);
+ BIND(&nothing_to_iterate);
{
// Receiver is null or undefined or descriptors are zero length.
Node* zero = SmiConstant(0);
@@ -3468,13 +3227,13 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
Label if_fast(this), if_slow(this, Label::kDeferred);
Node* receiver_map = LoadMap(receiver);
Branch(WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
- Bind(&if_fast);
+ BIND(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
SetAccumulator(key);
Dispatch();
}
- Bind(&if_slow);
+ BIND(&if_slow);
{
// Record the fact that we hit the for-in slow path.
Node* vector_index = BytecodeOperandIdx(3);
@@ -3505,17 +3264,17 @@ IGNITION_HANDLER(ForInContinue, InterpreterAssembler) {
// Check if {index} is at {cache_length} already.
Label if_true(this), if_false(this), end(this);
Branch(WordEqual(index, cache_length), &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
{
SetAccumulator(BooleanConstant(false));
Goto(&end);
}
- Bind(&if_false);
+ BIND(&if_false);
{
SetAccumulator(BooleanConstant(true));
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
Dispatch();
}
@@ -3574,7 +3333,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
STATIC_ASSERT(LastStepAction == StepIn);
Node* step_next = Int32Constant(StepNext);
Branch(Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
- Bind(&ok);
+ BIND(&ok);
Node* array =
LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
@@ -3598,7 +3357,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
static_cast<int>(SuspendFlags::kAsyncGeneratorAwait))),
&if_asyncgeneratorawait, &if_notasyncgeneratorawait);
- Bind(&if_notasyncgeneratorawait);
+ BIND(&if_notasyncgeneratorawait);
{
// For ordinary yields (and for AwaitExpressions in Async Functions, which
// are implemented as ordinary yields), it is safe to write over the
@@ -3608,7 +3367,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Goto(&merge);
}
- Bind(&if_asyncgeneratorawait);
+ BIND(&if_asyncgeneratorawait);
{
// An AwaitExpression in an Async Generator requires writing to the
// [await_input_or_debug_pos] field.
@@ -3619,10 +3378,10 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Goto(&merge);
}
- Bind(&merge);
+ BIND(&merge);
Dispatch();
- Bind(&if_stepping);
+ BIND(&if_stepping);
{
Node* context = GetContext();
CallRuntime(Runtime::kDebugRecordGenerator, context, generator);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index bdd079ab84..e8572ba1d4 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -89,7 +89,7 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
__ Switch(function_id, &abort, cases, labels, arraysize(cases));
#define HANDLE_CASE(name, lower_case, expected_arg_count) \
- __ Bind(&lower_case); \
+ __ BIND(&lower_case); \
if (FLAG_debug_code && expected_arg_count >= 0) { \
AbortIfArgCountMismatch(expected_arg_count, arg_count); \
} \
@@ -98,14 +98,14 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
INTRINSICS_LIST(HANDLE_CASE)
#undef HANDLE_CASE
- __ Bind(&abort);
+ __ BIND(&abort);
{
__ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
result.Bind(__ UndefinedConstant());
__ Goto(&end);
}
- __ Bind(&end);
+ __ BIND(&end);
return result.value();
}
@@ -133,19 +133,19 @@ Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) {
Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
__ Branch(condition, &return_true, &return_false);
- __ Bind(&return_true);
+ __ BIND(&return_true);
{
return_value.Bind(__ BooleanConstant(true));
__ Goto(&end);
}
- __ Bind(&return_false);
+ __ BIND(&return_false);
{
return_value.Bind(__ BooleanConstant(false));
__ Goto(&end);
}
- __ Bind(&end);
+ __ BIND(&end);
return return_value.value();
}
@@ -166,19 +166,19 @@ Node* IntrinsicsGenerator::IsJSReceiver(Node* input, Node* arg_count,
kInstanceTypeGreaterThanOrEqual);
__ Branch(condition, &return_true, &return_false);
- __ Bind(&return_true);
+ __ BIND(&return_true);
{
return_value.Bind(__ BooleanConstant(true));
__ Goto(&end);
}
- __ Bind(&return_false);
+ __ BIND(&return_false);
{
return_value.Bind(__ BooleanConstant(false));
__ Goto(&end);
}
- __ Bind(&end);
+ __ BIND(&end);
return return_value.value();
}
@@ -197,6 +197,36 @@ Node* IntrinsicsGenerator::IsTypedArray(Node* input, Node* arg_count,
return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
}
+Node* IntrinsicsGenerator::IsJSMap(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_MAP_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSMapIterator(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_MAP_ITERATOR_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSSet(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_SET_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSSetIterator(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_SET_ITERATOR_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSWeakMap(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_WEAK_MAP_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSWeakSet(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_WEAK_SET_TYPE);
+}
+
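
[Each of the six new predicates is a one-line wrapper over IsInstanceType, i.e. a single instance-type equality check. Reduced to standalone C++ (the type enum and object layout are illustrative):

    enum InstanceType { JS_MAP_TYPE, JS_MAP_ITERATOR_TYPE, JS_SET_TYPE,
                        JS_SET_ITERATOR_TYPE, JS_WEAK_MAP_TYPE, JS_WEAK_SET_TYPE };

    struct HeapObject { InstanceType instance_type; };

    // CompareInstanceType(arg, type, kInstanceTypeEqual), in miniature.
    bool IsInstanceType(const HeapObject& o, InstanceType expected) {
      return o.instance_type == expected;
    }

    bool IsJSMap(const HeapObject& o) { return IsInstanceType(o, JS_MAP_TYPE); }
    bool IsJSSet(const HeapObject& o) { return IsInstanceType(o, JS_SET_TYPE); }
]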
Node* IntrinsicsGenerator::IsSmi(Node* input, Node* arg_count, Node* context) {
// TODO(ishell): Use SelectBooleanConstant here.
InterpreterAssembler::Variable return_value(assembler_,
@@ -207,19 +237,19 @@ Node* IntrinsicsGenerator::IsSmi(Node* input, Node* arg_count, Node* context) {
Node* arg = __ LoadRegister(input);
__ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi);
- __ Bind(&if_smi);
+ __ BIND(&if_smi);
{
return_value.Bind(__ BooleanConstant(true));
__ Goto(&end);
}
- __ Bind(&if_not_smi);
+ __ BIND(&if_not_smi);
{
return_value.Bind(__ BooleanConstant(false));
__ Goto(&end);
}
- __ Bind(&end);
+ __ BIND(&end);
return return_value.value();
}
@@ -305,7 +335,7 @@ Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count,
__ GotoIfNot(comparison, &arg_count_positive);
__ Abort(kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&arg_count_positive);
- __ Bind(&arg_count_positive);
+ __ BIND(&arg_count_positive);
}
Node* result = __ CallJS(function, context, receiver_arg, target_args_count,
@@ -344,7 +374,7 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
return_value.Bind(iterator);
__ Goto(&done);
- __ Bind(&not_receiver);
+ __ BIND(&not_receiver);
{
return_value.Bind(
__ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context));
@@ -353,7 +383,7 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
__ Goto(&done);
}
- __ Bind(&done);
+ __ BIND(&done);
return return_value.value();
}
@@ -369,6 +399,53 @@ Node* IntrinsicsGenerator::AsyncGeneratorGetAwaitInputOrDebugPos(
return value;
}
+Node* IntrinsicsGenerator::CreateJSGeneratorObject(Node* input, Node* arg_count,
+ Node* context) {
+ return IntrinsicAsBuiltinCall(input, context,
+ Builtins::kCreateGeneratorObject);
+}
+
+Node* IntrinsicsGenerator::GeneratorGetContext(Node* args_reg, Node* arg_count,
+ Node* context) {
+ Node* generator = __ LoadRegister(args_reg);
+ Node* const value =
+ __ LoadObjectField(generator, JSGeneratorObject::kContextOffset);
+
+ return value;
+}
+
+Node* IntrinsicsGenerator::GeneratorGetInputOrDebugPos(Node* args_reg,
+ Node* arg_count,
+ Node* context) {
+ Node* generator = __ LoadRegister(args_reg);
+ Node* const value =
+ __ LoadObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset);
+
+ return value;
+}
+
+Node* IntrinsicsGenerator::GeneratorGetResumeMode(Node* args_reg,
+ Node* arg_count,
+ Node* context) {
+ Node* generator = __ LoadRegister(args_reg);
+ Node* const value =
+ __ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
+
+ return value;
+}
+
+Node* IntrinsicsGenerator::GeneratorClose(Node* args_reg, Node* arg_count,
+ Node* context) {
+ Node* generator = __ LoadRegister(args_reg);
+ Node* const value =
+ __ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
+ __ StoreObjectFieldNoWriteBarrier(
+ generator, JSGeneratorObject::kContinuationOffset,
+ __ SmiConstant(JSGeneratorObject::kGeneratorClosed));
+
+ return value;
+}
+
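
[GeneratorClose captures the resume mode before stamping the continuation field with the closed sentinel, so the caller still sees why the generator was last resumed. The field dance, sketched with an illustrative layout and sentinel value:

    struct GeneratorObject {
      int resume_mode;
      int continuation;                   // >= 0 while suspended
      static constexpr int kClosed = -1;  // sentinel, value illustrative
    };

    int GeneratorClose(GeneratorObject& gen) {
      int value = gen.resume_mode;                  // LoadObjectField(...)
      gen.continuation = GeneratorObject::kClosed;  // Store...NoWriteBarrier(...)
      return value;
    }
]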
Node* IntrinsicsGenerator::AsyncGeneratorReject(Node* input, Node* arg_count,
Node* context) {
return IntrinsicAsBuiltinCall(input, context,
@@ -387,7 +464,7 @@ void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected, Node* actual) {
__ GotoIf(comparison, &match);
__ Abort(kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&match);
- __ Bind(&match);
+ __ BIND(&match);
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 137bdbf9cb..3a69069532 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -13,26 +13,37 @@ namespace interpreter {
// List of supported intrinsics, with upper case name, lower case name and
// expected number of arguments (-1 denoting a variable argument count).
-#define INTRINSICS_LIST(V) \
- V(AsyncGeneratorGetAwaitInputOrDebugPos, \
- async_generator_get_await_input_or_debug_pos, 1) \
- V(AsyncGeneratorReject, async_generator_reject, 2) \
- V(AsyncGeneratorResolve, async_generator_resolve, 3) \
- V(Call, call, -1) \
- V(ClassOf, class_of, 1) \
- V(CreateIterResultObject, create_iter_result_object, 2) \
- V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
- V(HasProperty, has_property, 2) \
- V(IsArray, is_array, 1) \
- V(IsJSProxy, is_js_proxy, 1) \
- V(IsJSReceiver, is_js_receiver, 1) \
- V(IsSmi, is_smi, 1) \
- V(IsTypedArray, is_typed_array, 1) \
- V(SubString, sub_string, 3) \
- V(ToString, to_string, 1) \
- V(ToLength, to_length, 1) \
- V(ToInteger, to_integer, 1) \
- V(ToNumber, to_number, 1) \
+#define INTRINSICS_LIST(V) \
+ V(AsyncGeneratorGetAwaitInputOrDebugPos, \
+ async_generator_get_await_input_or_debug_pos, 1) \
+ V(AsyncGeneratorReject, async_generator_reject, 2) \
+ V(AsyncGeneratorResolve, async_generator_resolve, 3) \
+ V(CreateJSGeneratorObject, create_js_generator_object, 2) \
+ V(GeneratorGetContext, generator_get_context, 1) \
+ V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
+ V(GeneratorGetInputOrDebugPos, generator_get_input_or_debug_pos, 1) \
+ V(GeneratorClose, generator_close, 1) \
+ V(Call, call, -1) \
+ V(ClassOf, class_of, 1) \
+ V(CreateIterResultObject, create_iter_result_object, 2) \
+ V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
+ V(HasProperty, has_property, 2) \
+ V(IsArray, is_array, 1) \
+ V(IsJSMap, is_js_map, 1) \
+ V(IsJSMapIterator, is_js_map_iterator, 1) \
+ V(IsJSProxy, is_js_proxy, 1) \
+ V(IsJSReceiver, is_js_receiver, 1) \
+ V(IsJSSet, is_js_set, 1) \
+ V(IsJSSetIterator, is_js_set_iterator, 1) \
+ V(IsJSWeakMap, is_js_weak_map, 1) \
+ V(IsJSWeakSet, is_js_weak_set, 1) \
+ V(IsSmi, is_smi, 1) \
+ V(IsTypedArray, is_typed_array, 1) \
+ V(SubString, sub_string, 3) \
+ V(ToString, to_string, 1) \
+ V(ToLength, to_length, 1) \
+ V(ToInteger, to_integer, 1) \
+ V(ToNumber, to_number, 1) \
V(ToObject, to_object, 1)
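
[INTRINSICS_LIST is a standard X-macro: each consumer defines V to stamp out one row per intrinsic, which is how HANDLE_CASE earlier expands into the dispatch switch. A self-contained sketch of the pattern with a hypothetical two-entry list:

    #include <cstdio>

    #define MY_LIST(V)        \
      V(IsSmi, is_smi, 1)     \
      V(ToNumber, to_number, 1)

    // Expand the list once into an enum of ids...
    #define MAKE_ENUM(Name, lower, argc) k##Name,
    enum IntrinsicId { MY_LIST(MAKE_ENUM) kIdCount };
    #undef MAKE_ENUM

    // ...and again into switch cases, exactly as HANDLE_CASE does.
    void Invoke(IntrinsicId id) {
      switch (id) {
    #define HANDLE_CASE(Name, lower, argc)               \
      case k##Name:                                      \
        std::printf(#Name " expects %d arg(s)\n", argc); \
        break;
        MY_LIST(HANDLE_CASE)
    #undef HANDLE_CASE
        default: break;
      }
    }
]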
class IntrinsicsHelper {
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 42b2b18ad1..b793ae5310 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -16,6 +16,7 @@
#include "src/log.h"
#include "src/objects.h"
#include "src/setup-isolate.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -109,14 +110,14 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
return 0;
}
-void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
+void Interpreter::IterateDispatchTable(RootVisitor* v) {
for (int i = 0; i < kDispatchTableSize; i++) {
Address code_entry = dispatch_table_[i];
Object* code = code_entry == nullptr
? nullptr
: Code::GetCodeFromTargetAddress(code_entry);
Object* old_code = code;
- v->VisitPointer(&code);
+ v->VisitRootPointer(Root::kDispatchTable, &code);
if (code != old_code) {
dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
}
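
[The switch from ObjectVisitor to RootVisitor makes the dispatch table's code pointers explicit GC roots; because the table stores raw entry addresses, each visit round-trips address to object and back, rewriting the slot if the collector moved the Code. A reduced sketch (the visitor and Code types are illustrative):

    #include <cstdint>

    struct Code { uintptr_t entry; };

    struct RootVisitor {
      virtual void VisitRootPointer(Code** slot) = 0;  // GC may rewrite *slot
      virtual ~RootVisitor() = default;
    };

    void IterateDispatchTable(uintptr_t table[], int size,
                              Code* (*from_entry)(uintptr_t), RootVisitor* v) {
      for (int i = 0; i < size; i++) {
        Code* code = table[i] ? from_entry(table[i]) : nullptr;
        Code* old_code = code;
        v->VisitRootPointer(&code);
        if (code != old_code) table[i] = code->entry;  // object was moved
      }
    }
]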
@@ -158,8 +159,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
OFStream os(stdout);
std::unique_ptr<char[]> name = info()->GetDebugName();
os << "[generating bytecode for function: " << info()->GetDebugName().get()
- << "]" << std::endl
- << std::flush;
+ << "]" << std::endl;
}
return SUCCEEDED;
@@ -198,7 +198,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
if (print_bytecode_) {
OFStream os(stdout);
- bytecodes->Print(os);
+ bytecodes->Disassemble(os);
os << std::flush;
}
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 4dc6241c24..2df29bee41 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -24,6 +24,7 @@ class Callable;
class CompilationInfo;
class CompilationJob;
class SetupIsolateDelegate;
+class RootVisitor;
namespace interpreter {
@@ -44,7 +45,7 @@ class Interpreter {
Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
// GC support.
- void IterateDispatchTable(ObjectVisitor* v);
+ void IterateDispatchTable(RootVisitor* v);
// Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
const char* LookupNameOfBytecodeHandler(Code* code);