summaryrefslogtreecommitdiff
path: root/deps/v8/src/interpreter
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/interpreter')
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc661
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h71
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc10
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-dead-code-optimizer.h7
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc360
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h33
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.h20
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-optimizer.cc42
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-optimizer.h7
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-table.h1
-rw-r--r--deps/v8/src/interpreter/bytecode-pipeline.cc13
-rw-r--r--deps/v8/src/interpreter/bytecode-pipeline.h111
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h21
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.cc263
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h115
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h6
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc7
-rw-r--r--deps/v8/src/interpreter/bytecodes.h38
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h3
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc417
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h12
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.cc16
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h1
-rw-r--r--deps/v8/src/interpreter/interpreter.cc399
-rw-r--r--deps/v8/src/interpreter/interpreter.h24
-rw-r--r--deps/v8/src/interpreter/mkpeephole.cc27
29 files changed, 1304 insertions, 1392 deletions
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index dfa395095a..904a8e021d 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -31,7 +31,8 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
register_allocator_(fixed_register_count()),
bytecode_array_writer_(zone, &constant_array_builder_,
source_position_mode),
- pipeline_(&bytecode_array_writer_) {
+ pipeline_(&bytecode_array_writer_),
+ register_optimizer_(nullptr) {
DCHECK_GE(parameter_count_, 0);
DCHECK_GE(context_register_count_, 0);
DCHECK_GE(local_register_count_, 0);
@@ -45,14 +46,12 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
}
if (FLAG_ignition_reo) {
- pipeline_ = new (zone) BytecodeRegisterOptimizer(
+ register_optimizer_ = new (zone) BytecodeRegisterOptimizer(
zone, &register_allocator_, fixed_register_count(), parameter_count,
pipeline_);
}
- return_position_ =
- literal ? std::max(literal->start_position(), literal->end_position() - 1)
- : kNoSourcePosition;
+ return_position_ = literal ? literal->return_position() : kNoSourcePosition;
}
Register BytecodeArrayBuilder::first_context_register() const {
@@ -75,108 +74,222 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
DCHECK(!bytecode_generated_);
bytecode_generated_ = true;
+ int register_count = total_register_count();
+
+ if (register_optimizer_) {
+ register_optimizer_->Flush();
+ register_count = register_optimizer_->maxiumum_register_index() + 1;
+ }
+
Handle<FixedArray> handler_table =
handler_table_builder()->ToHandlerTable(isolate);
- return pipeline_->ToBytecodeArray(isolate, total_register_count(),
- parameter_count(), handler_table);
-}
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2,
- uint32_t operand3) {
- DCHECK(OperandsAreValid(bytecode, 4, operand0, operand1, operand2, operand3));
- BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
- &latest_source_info_);
- pipeline()->Write(&node);
+ return pipeline_->ToBytecodeArray(isolate, register_count, parameter_count(),
+ handler_table);
+}
+
+BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
+ Bytecode bytecode) {
+ BytecodeSourceInfo source_position;
+ if (latest_source_info_.is_valid()) {
+ // Statement positions need to be emitted immediately. Expression
+ // positions can be pushed back until a bytecode is found that can
+ // throw (if expression position filtering is turned on). We only
+ // invalidate the existing source position information if it is used.
+ if (latest_source_info_.is_statement() ||
+ !FLAG_ignition_filter_expression_positions ||
+ !Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
+ source_position = latest_source_info_;
+ latest_source_info_.set_invalid();
+ }
+ }
+ return source_position;
}
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2) {
- DCHECK(OperandsAreValid(bytecode, 3, operand0, operand1, operand2));
- BytecodeNode node(bytecode, operand0, operand1, operand2,
- &latest_source_info_);
- pipeline()->Write(&node);
-}
+namespace {
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1) {
- DCHECK(OperandsAreValid(bytecode, 2, operand0, operand1));
- BytecodeNode node(bytecode, operand0, operand1, &latest_source_info_);
- pipeline()->Write(&node);
-}
+template <OperandTypeInfo type_info>
+class UnsignedOperandHelper {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, size_t value)) {
+ DCHECK(IsValid(value));
+ return static_cast<uint32_t>(value);
+ }
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
- DCHECK(OperandsAreValid(bytecode, 1, operand0));
- BytecodeNode node(bytecode, operand0, &latest_source_info_);
- pipeline()->Write(&node);
-}
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, int value)) {
+ DCHECK_GE(value, 0);
+ return Convert(builder, static_cast<size_t>(value));
+ }
-void BytecodeArrayBuilder::Output(Bytecode bytecode) {
- DCHECK(OperandsAreValid(bytecode, 0));
- BytecodeNode node(bytecode, &latest_source_info_);
- pipeline()->Write(&node);
-}
+ private:
+ static bool IsValid(size_t value) {
+ switch (type_info) {
+ case OperandTypeInfo::kFixedUnsignedByte:
+ return value <= kMaxUInt8;
+ case OperandTypeInfo::kFixedUnsignedShort:
+ return value <= kMaxUInt16;
+ case OperandTypeInfo::kScalableUnsignedByte:
+ return value <= kMaxUInt32;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+};
+
+template <OperandType>
+class OperandHelper {};
+
+#define DEFINE_UNSIGNED_OPERAND_HELPER(Name, Type) \
+ template <> \
+ class OperandHelper<OperandType::k##Name> \
+ : public UnsignedOperandHelper<Type> {};
+UNSIGNED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
+#undef DEFINE_UNSIGNED_OPERAND_HELPER
+
+template <>
+class OperandHelper<OperandType::kImm> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, int value)) {
+ return static_cast<uint32_t>(value);
+ }
+};
-void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, BytecodeLabel* label) {
- BytecodeNode node(bytecode, 0, &latest_source_info_);
- pipeline_->WriteJump(&node, label);
- LeaveBasicBlock();
-}
+template <>
+class OperandHelper<OperandType::kReg> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg)) {
+ return builder->GetInputRegisterOperand(reg);
+ }
+};
+
+template <>
+class OperandHelper<OperandType::kRegList> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list)) {
+ return builder->GetInputRegisterListOperand(reg_list);
+ }
+};
+
+template <>
+class OperandHelper<OperandType::kRegPair> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list)) {
+ DCHECK_EQ(reg_list.register_count(), 2);
+ return builder->GetInputRegisterListOperand(reg_list);
+ }
+};
-void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, uint32_t operand0,
- BytecodeLabel* label) {
- BytecodeNode node(bytecode, 0, operand0, &latest_source_info_);
- pipeline_->WriteJump(&node, label);
- LeaveBasicBlock();
-}
+template <>
+class OperandHelper<OperandType::kRegOut> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg)) {
+ return builder->GetOutputRegisterOperand(reg);
+ }
+};
+
+template <>
+class OperandHelper<OperandType::kRegOutPair> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list)) {
+ DCHECK_EQ(2, reg_list.register_count());
+ return builder->GetOutputRegisterListOperand(reg_list);
+ }
+};
+
+template <>
+class OperandHelper<OperandType::kRegOutTriple> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list)) {
+ DCHECK_EQ(3, reg_list.register_count());
+ return builder->GetOutputRegisterListOperand(reg_list);
+ }
+};
+
+} // namespace
+
+template <OperandType... operand_types>
+class BytecodeNodeBuilder {
+ public:
+ template <typename... Operands>
+ INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
+ BytecodeSourceInfo source_info,
+ Bytecode bytecode, Operands... operands)) {
+ builder->PrepareToOutputBytecode(bytecode);
+ // The "OperandHelper<operand_types>::Convert(builder, operands)..." will
+ // expand both the OperandType... and Operands... parameter packs e.g. for:
+ // BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
+ // Register, int>(..., Register reg, int immediate)
+ // the code will expand into:
+ // OperandHelper<OperandType::kReg>::Convert(builder, reg),
+ // OperandHelper<OperandType::kImm>::Convert(builder, immediate),
+ return BytecodeNode(
+ bytecode, OperandHelper<operand_types>::Convert(builder, operands)...,
+ source_info);
+ }
+};
+
+#define DEFINE_BYTECODE_OUTPUT(name, accumulator_use, ...) \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(Operands... operands) { \
+ BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
+ this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
+ operands...)); \
+ pipeline()->Write(&node); \
+ } \
+ \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
+ Operands... operands) { \
+ DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
+ BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
+ this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
+ operands...)); \
+ pipeline()->WriteJump(&node, label); \
+ LeaveBasicBlock(); \
+ }
+BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
+#undef DEFINE_BYTECODE_OUTPUT
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
Register reg,
int feedback_slot) {
switch (op) {
case Token::Value::ADD:
- Output(Bytecode::kAdd, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputAdd(reg, feedback_slot);
break;
case Token::Value::SUB:
- Output(Bytecode::kSub, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputSub(reg, feedback_slot);
break;
case Token::Value::MUL:
- Output(Bytecode::kMul, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputMul(reg, feedback_slot);
break;
case Token::Value::DIV:
- Output(Bytecode::kDiv, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputDiv(reg, feedback_slot);
break;
case Token::Value::MOD:
- Output(Bytecode::kMod, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputMod(reg, feedback_slot);
break;
case Token::Value::BIT_OR:
- Output(Bytecode::kBitwiseOr, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputBitwiseOr(reg, feedback_slot);
break;
case Token::Value::BIT_XOR:
- Output(Bytecode::kBitwiseXor, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputBitwiseXor(reg, feedback_slot);
break;
case Token::Value::BIT_AND:
- Output(Bytecode::kBitwiseAnd, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputBitwiseAnd(reg, feedback_slot);
break;
case Token::Value::SHL:
- Output(Bytecode::kShiftLeft, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputShiftLeft(reg, feedback_slot);
break;
case Token::Value::SAR:
- Output(Bytecode::kShiftRight, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputShiftRight(reg, feedback_slot);
break;
case Token::Value::SHR:
- Output(Bytecode::kShiftRightLogical, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputShiftRightLogical(reg, feedback_slot);
break;
default:
UNREACHABLE();
@@ -187,21 +300,21 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
int feedback_slot) {
if (op == Token::Value::ADD) {
- Output(Bytecode::kInc, UnsignedOperand(feedback_slot));
+ OutputInc(feedback_slot);
} else {
DCHECK_EQ(op, Token::Value::SUB);
- Output(Bytecode::kDec, UnsignedOperand(feedback_slot));
+ OutputDec(feedback_slot);
}
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
- Output(Bytecode::kToBooleanLogicalNot);
+ OutputToBooleanLogicalNot();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
- Output(Bytecode::kTypeOf);
+ OutputTypeOf();
return *this;
}
@@ -209,38 +322,31 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
Token::Value op, Register reg, int feedback_slot) {
switch (op) {
case Token::Value::EQ:
- Output(Bytecode::kTestEqual, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestEqual(reg, feedback_slot);
break;
case Token::Value::NE:
- Output(Bytecode::kTestNotEqual, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestNotEqual(reg, feedback_slot);
break;
case Token::Value::EQ_STRICT:
- Output(Bytecode::kTestEqualStrict, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestEqualStrict(reg, feedback_slot);
break;
case Token::Value::LT:
- Output(Bytecode::kTestLessThan, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestLessThan(reg, feedback_slot);
break;
case Token::Value::GT:
- Output(Bytecode::kTestGreaterThan, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestGreaterThan(reg, feedback_slot);
break;
case Token::Value::LTE:
- Output(Bytecode::kTestLessThanOrEqual, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestLessThanOrEqual(reg, feedback_slot);
break;
case Token::Value::GTE:
- Output(Bytecode::kTestGreaterThanOrEqual, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestGreaterThanOrEqual(reg, feedback_slot);
break;
case Token::Value::INSTANCEOF:
- Output(Bytecode::kTestInstanceOf, RegisterOperand(reg));
+ OutputTestInstanceOf(reg);
break;
case Token::Value::IN:
- Output(Bytecode::kTestIn, RegisterOperand(reg));
+ OutputTestIn(reg);
break;
default:
UNREACHABLE();
@@ -250,7 +356,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadConstantPoolEntry(
size_t entry) {
- Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
+ OutputLdaConstant(entry);
return *this;
}
@@ -258,70 +364,82 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
v8::internal::Smi* smi) {
int32_t raw_smi = smi->value();
if (raw_smi == 0) {
- Output(Bytecode::kLdaZero);
+ OutputLdaZero();
} else {
- Output(Bytecode::kLdaSmi, SignedOperand(raw_smi));
+ OutputLdaSmi(raw_smi);
}
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
size_t entry = GetConstantPoolEntry(object);
- Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
+ OutputLdaConstant(entry);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
- Output(Bytecode::kLdaUndefined);
+ OutputLdaUndefined();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNull() {
- Output(Bytecode::kLdaNull);
+ OutputLdaNull();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTheHole() {
- Output(Bytecode::kLdaTheHole);
+ OutputLdaTheHole();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTrue() {
- Output(Bytecode::kLdaTrue);
+ OutputLdaTrue();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
- Output(Bytecode::kLdaFalse);
+ OutputLdaFalse();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
- Output(Bytecode::kLdar, RegisterOperand(reg));
+ if (register_optimizer_) {
+ register_optimizer_->DoLdar(reg, CurrentSourcePosition(Bytecode::kLdar));
+ } else {
+ OutputLdar(reg);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
- Output(Bytecode::kStar, RegisterOperand(reg));
+ if (register_optimizer_) {
+ register_optimizer_->DoStar(reg, CurrentSourcePosition(Bytecode::kStar));
+ } else {
+ OutputStar(reg);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
Register to) {
DCHECK(from != to);
- Output(Bytecode::kMov, RegisterOperand(from), RegisterOperand(to));
+ if (register_optimizer_) {
+ register_optimizer_->DoMov(from, to, CurrentSourcePosition(Bytecode::kMov));
+ } else {
+ OutputMov(from, to);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
TypeofMode typeof_mode) {
if (typeof_mode == INSIDE_TYPEOF) {
- Output(Bytecode::kLdaGlobalInsideTypeof, feedback_slot);
+ OutputLdaGlobalInsideTypeof(feedback_slot);
} else {
DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
- Output(Bytecode::kLdaGlobal, UnsignedOperand(feedback_slot));
+ OutputLdaGlobal(feedback_slot);
}
return *this;
}
@@ -330,12 +448,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (language_mode == SLOPPY) {
- Output(Bytecode::kStaGlobalSloppy, UnsignedOperand(name_index),
- UnsignedOperand(feedback_slot));
+ OutputStaGlobalSloppy(name_index, feedback_slot);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kStaGlobalStrict, UnsignedOperand(name_index),
- UnsignedOperand(feedback_slot));
+ OutputStaGlobalStrict(name_index, feedback_slot);
}
return *this;
}
@@ -343,16 +459,22 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
int slot_index,
int depth) {
- Output(Bytecode::kLdaContextSlot, RegisterOperand(context),
- UnsignedOperand(slot_index), UnsignedOperand(depth));
+ if (context.is_current_context() && depth == 0) {
+ OutputLdaCurrentContextSlot(slot_index);
+ } else {
+ OutputLdaContextSlot(context, slot_index, depth);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
int slot_index,
int depth) {
- Output(Bytecode::kStaContextSlot, RegisterOperand(context),
- UnsignedOperand(slot_index), UnsignedOperand(depth));
+ if (context.is_current_context() && depth == 0) {
+ OutputStaCurrentContextSlot(slot_index);
+ } else {
+ OutputStaContextSlot(context, slot_index, depth);
+ }
return *this;
}
@@ -360,10 +482,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
const Handle<String> name, TypeofMode typeof_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (typeof_mode == INSIDE_TYPEOF) {
- Output(Bytecode::kLdaLookupSlotInsideTypeof, UnsignedOperand(name_index));
+ OutputLdaLookupSlotInsideTypeof(name_index);
} else {
DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
- Output(Bytecode::kLdaLookupSlot, UnsignedOperand(name_index));
+ OutputLdaLookupSlot(name_index);
}
return *this;
}
@@ -371,24 +493,26 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupContextSlot(
const Handle<String> name, TypeofMode typeof_mode, int slot_index,
int depth) {
- Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
- ? Bytecode::kLdaLookupContextSlotInsideTypeof
- : Bytecode::kLdaLookupContextSlot;
size_t name_index = GetConstantPoolEntry(name);
- Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(slot_index),
- UnsignedOperand(depth));
+ if (typeof_mode == INSIDE_TYPEOF) {
+ OutputLdaLookupContextSlotInsideTypeof(name_index, slot_index, depth);
+ } else {
+ DCHECK(typeof_mode == NOT_INSIDE_TYPEOF);
+ OutputLdaLookupContextSlot(name_index, slot_index, depth);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
const Handle<String> name, TypeofMode typeof_mode, int feedback_slot,
int depth) {
- Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
- ? Bytecode::kLdaLookupGlobalSlotInsideTypeof
- : Bytecode::kLdaLookupGlobalSlot;
size_t name_index = GetConstantPoolEntry(name);
- Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot),
- UnsignedOperand(depth));
+ if (typeof_mode == INSIDE_TYPEOF) {
+ OutputLdaLookupGlobalSlotInsideTypeof(name_index, feedback_slot, depth);
+ } else {
+ DCHECK(typeof_mode == NOT_INSIDE_TYPEOF);
+ OutputLdaLookupGlobalSlot(name_index, feedback_slot, depth);
+ }
return *this;
}
@@ -396,10 +520,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
const Handle<String> name, LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (language_mode == SLOPPY) {
- Output(Bytecode::kStaLookupSlotSloppy, UnsignedOperand(name_index));
+ OutputStaLookupSlotSloppy(name_index);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kStaLookupSlotStrict, UnsignedOperand(name_index));
+ OutputStaLookupSlotStrict(name_index);
}
return *this;
}
@@ -407,15 +531,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
Register object, const Handle<Name> name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
- Output(Bytecode::kLdaNamedProperty, RegisterOperand(object),
- UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ OutputLdaNamedProperty(object, name_index, feedback_slot);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
Register object, int feedback_slot) {
- Output(Bytecode::kLdaKeyedProperty, RegisterOperand(object),
- UnsignedOperand(feedback_slot));
+ OutputLdaKeyedProperty(object, feedback_slot);
return *this;
}
@@ -424,12 +546,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (language_mode == SLOPPY) {
- Output(Bytecode::kStaNamedPropertySloppy, RegisterOperand(object),
- UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ OutputStaNamedPropertySloppy(object, name_index, feedback_slot);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kStaNamedPropertyStrict, RegisterOperand(object),
- UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ OutputStaNamedPropertyStrict(object, name_index, feedback_slot);
}
return *this;
}
@@ -438,27 +558,24 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
if (language_mode == SLOPPY) {
- Output(Bytecode::kStaKeyedPropertySloppy, RegisterOperand(object),
- RegisterOperand(key), UnsignedOperand(feedback_slot));
+ OutputStaKeyedPropertySloppy(object, key, feedback_slot);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kStaKeyedPropertyStrict, RegisterOperand(object),
- RegisterOperand(key), UnsignedOperand(feedback_slot));
+ OutputStaKeyedPropertyStrict(object, key, feedback_slot);
}
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(size_t entry,
int flags) {
- Output(Bytecode::kCreateClosure, UnsignedOperand(entry),
- UnsignedOperand(flags));
+ OutputCreateClosure(entry, flags);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateBlockContext(
Handle<ScopeInfo> scope_info) {
size_t entry = GetConstantPoolEntry(scope_info);
- Output(Bytecode::kCreateBlockContext, UnsignedOperand(entry));
+ OutputCreateBlockContext(entry);
return *this;
}
@@ -466,21 +583,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateCatchContext(
Register exception, Handle<String> name, Handle<ScopeInfo> scope_info) {
size_t name_index = GetConstantPoolEntry(name);
size_t scope_info_index = GetConstantPoolEntry(scope_info);
- Output(Bytecode::kCreateCatchContext, RegisterOperand(exception),
- UnsignedOperand(name_index), UnsignedOperand(scope_info_index));
+ OutputCreateCatchContext(exception, name_index, scope_info_index);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateFunctionContext(int slots) {
- Output(Bytecode::kCreateFunctionContext, UnsignedOperand(slots));
+ OutputCreateFunctionContext(slots);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(
Register object, Handle<ScopeInfo> scope_info) {
size_t scope_info_index = GetConstantPoolEntry(scope_info);
- Output(Bytecode::kCreateWithContext, RegisterOperand(object),
- UnsignedOperand(scope_info_index));
+ OutputCreateWithContext(object, scope_info_index);
return *this;
}
@@ -488,13 +603,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
CreateArgumentsType type) {
switch (type) {
case CreateArgumentsType::kMappedArguments:
- Output(Bytecode::kCreateMappedArguments);
+ OutputCreateMappedArguments();
break;
case CreateArgumentsType::kUnmappedArguments:
- Output(Bytecode::kCreateUnmappedArguments);
+ OutputCreateUnmappedArguments();
break;
case CreateArgumentsType::kRestParameter:
- Output(Bytecode::kCreateRestParameter);
+ OutputCreateRestParameter();
break;
default:
UNREACHABLE();
@@ -505,17 +620,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
Handle<String> pattern, int literal_index, int flags) {
size_t pattern_entry = GetConstantPoolEntry(pattern);
- Output(Bytecode::kCreateRegExpLiteral, UnsignedOperand(pattern_entry),
- UnsignedOperand(literal_index), UnsignedOperand(flags));
+ OutputCreateRegExpLiteral(pattern_entry, literal_index, flags);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
Handle<FixedArray> constant_elements, int literal_index, int flags) {
size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
- Output(Bytecode::kCreateArrayLiteral,
- UnsignedOperand(constant_elements_entry),
- UnsignedOperand(literal_index), UnsignedOperand(flags));
+ OutputCreateArrayLiteral(constant_elements_entry, literal_index, flags);
return *this;
}
@@ -523,42 +635,43 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
Handle<FixedArray> constant_properties, int literal_index, int flags,
Register output) {
size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
- Output(Bytecode::kCreateObjectLiteral,
- UnsignedOperand(constant_properties_entry),
- UnsignedOperand(literal_index), UnsignedOperand(flags),
- RegisterOperand(output));
+ OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags,
+ output);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
- Output(Bytecode::kPushContext, RegisterOperand(context));
+ OutputPushContext(context);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
- Output(Bytecode::kPopContext, RegisterOperand(context));
+ OutputPopContext(context);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToObject(
Register out) {
- Output(Bytecode::kToObject, RegisterOperand(out));
+ OutputToObject(out);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToName(
Register out) {
- Output(Bytecode::kToName, RegisterOperand(out));
+ OutputToName(out);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToNumber(
Register out) {
- Output(Bytecode::kToNumber, RegisterOperand(out));
+ OutputToNumber(out);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
+ // Flush the register optimizer when binding a label to ensure all
+ // expected registers are valid when jumping to this label.
+ if (register_optimizer_) register_optimizer_->Flush();
pipeline_->BindLabel(label);
LeaveBasicBlock();
return *this;
@@ -572,42 +685,42 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
- OutputJump(Bytecode::kJump, label);
+ OutputJump(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
// The peephole optimizer attempts to simplify JumpIfToBooleanTrue
// to JumpIfTrue.
- OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
+ OutputJumpIfToBooleanTrue(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
- OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
+ OutputJumpIfToBooleanFalse(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
- OutputJump(Bytecode::kJumpIfNull, label);
+ OutputJumpIfNull(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
BytecodeLabel* label) {
- OutputJump(Bytecode::kJumpIfUndefined, label);
+ OutputJumpIfUndefined(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
BytecodeLabel* label) {
- OutputJump(Bytecode::kJumpIfNotHole, label);
+ OutputJumpIfNotHole(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
int loop_depth) {
- OutputJump(Bytecode::kJumpLoop, UnsignedOperand(loop_depth), label);
+ OutputJumpLoop(label, 0, loop_depth);
return *this;
}
@@ -625,44 +738,42 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
// statement's position.
latest_source_info_.ForceExpressionPosition(position);
}
- Output(Bytecode::kStackCheck);
+ OutputStackCheck();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
- Output(Bytecode::kThrow);
+ OutputThrow();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
- Output(Bytecode::kReThrow);
+ OutputReThrow();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
SetReturnPosition();
- Output(Bytecode::kReturn);
+ OutputReturn();
return_seen_in_block_ = true;
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
- Output(Bytecode::kDebugger);
+ OutputDebugger();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
Register receiver, RegisterList cache_info_triple) {
DCHECK_EQ(3, cache_info_triple.register_count());
- Output(Bytecode::kForInPrepare, RegisterOperand(receiver),
- RegisterOperand(cache_info_triple.first_register()));
+ OutputForInPrepare(receiver, cache_info_triple);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInContinue(
Register index, Register cache_length) {
- Output(Bytecode::kForInContinue, RegisterOperand(index),
- RegisterOperand(cache_length));
+ OutputForInContinue(index, cache_length);
return *this;
}
@@ -670,27 +781,36 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
Register receiver, Register index, RegisterList cache_type_array_pair,
int feedback_slot) {
DCHECK_EQ(2, cache_type_array_pair.register_count());
- Output(Bytecode::kForInNext, RegisterOperand(receiver),
- RegisterOperand(index),
- RegisterOperand(cache_type_array_pair.first_register()),
- UnsignedOperand(feedback_slot));
+ OutputForInNext(receiver, index, cache_type_array_pair, feedback_slot);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
- Output(Bytecode::kForInStep, RegisterOperand(index));
+ OutputForInStep(index);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreModuleVariable(int cell_index,
+ int depth) {
+ OutputStaModuleVariable(cell_index, depth);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadModuleVariable(int cell_index,
+ int depth) {
+ OutputLdaModuleVariable(cell_index, depth);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
Register generator) {
- Output(Bytecode::kSuspendGenerator, RegisterOperand(generator));
+ OutputSuspendGenerator(generator);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
Register generator) {
- Output(Bytecode::kResumeGenerator, RegisterOperand(generator));
+ OutputResumeGenerator(generator);
return *this;
}
@@ -722,18 +842,18 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
RegisterList args,
int feedback_slot,
+ Call::CallType call_type,
TailCallMode tail_call_mode) {
if (tail_call_mode == TailCallMode::kDisallow) {
- Output(Bytecode::kCall, RegisterOperand(callable),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()),
- UnsignedOperand(feedback_slot));
+ if (call_type == Call::NAMED_PROPERTY_CALL ||
+ call_type == Call::KEYED_PROPERTY_CALL) {
+ OutputCallProperty(callable, args, args.register_count(), feedback_slot);
+ } else {
+ OutputCall(callable, args, args.register_count(), feedback_slot);
+ }
} else {
DCHECK(tail_call_mode == TailCallMode::kAllow);
- Output(Bytecode::kTailCall, RegisterOperand(callable),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()),
- UnsignedOperand(feedback_slot));
+ OutputTailCall(callable, args, args.register_count(), feedback_slot);
}
return *this;
}
@@ -741,10 +861,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
RegisterList args,
int feedback_slot_id) {
- Output(Bytecode::kNew, RegisterOperand(constructor),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()),
- UnsignedOperand(feedback_slot_id));
+ OutputNew(constructor, args, args.register_count(), feedback_slot_id);
return *this;
}
@@ -752,17 +869,15 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, RegisterList args) {
DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
- Bytecode bytecode;
- uint32_t id;
if (IntrinsicsHelper::IsSupported(function_id)) {
- bytecode = Bytecode::kInvokeIntrinsic;
- id = static_cast<uint32_t>(IntrinsicsHelper::FromRuntimeId(function_id));
+ IntrinsicsHelper::IntrinsicId intrinsic_id =
+ IntrinsicsHelper::FromRuntimeId(function_id);
+ OutputInvokeIntrinsic(static_cast<int>(intrinsic_id), args,
+ args.register_count());
} else {
- bytecode = Bytecode::kCallRuntime;
- id = static_cast<uint32_t>(function_id);
+ OutputCallRuntime(static_cast<int>(function_id), args,
+ args.register_count());
}
- Output(bytecode, id, RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()));
return *this;
}
@@ -782,10 +897,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
DCHECK_EQ(2, return_pair.register_count());
- Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()),
- RegisterOperand(return_pair.first_register()));
+ OutputCallRuntimeForPair(static_cast<uint16_t>(function_id), args,
+ args.register_count(), return_pair);
return *this;
}
@@ -797,19 +910,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
RegisterList args) {
- Output(Bytecode::kCallJSRuntime, UnsignedOperand(context_index),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()));
+ OutputCallJSRuntime(context_index, args, args.register_count());
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
if (language_mode == SLOPPY) {
- Output(Bytecode::kDeletePropertySloppy, RegisterOperand(object));
+ OutputDeletePropertySloppy(object);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kDeletePropertyStrict, RegisterOperand(object));
+ OutputDeletePropertyStrict(object);
}
return *this;
}
@@ -850,88 +961,50 @@ bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
}
}
-bool BytecodeArrayBuilder::OperandsAreValid(
- Bytecode bytecode, int operand_count, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3) const {
- if (Bytecodes::NumberOfOperands(bytecode) != operand_count) {
- return false;
- }
-
- uint32_t operands[] = {operand0, operand1, operand2, operand3};
- const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
- for (int i = 0; i < operand_count; ++i) {
- switch (operand_types[i]) {
- case OperandType::kNone:
+bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
+ if (reg_list.register_count() == 0) {
+ return reg_list.first_register() == Register(0);
+ } else {
+ int first_reg_index = reg_list.first_register().index();
+ for (int i = 0; i < reg_list.register_count(); i++) {
+ if (!RegisterIsValid(Register(first_reg_index + i))) {
return false;
- case OperandType::kFlag8:
- case OperandType::kIntrinsicId:
- if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
- OperandSize::kByte) {
- return false;
- }
- break;
- case OperandType::kRuntimeId:
- if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
- OperandSize::kShort) {
- return false;
- }
- break;
- case OperandType::kIdx:
- // TODO(leszeks): Possibly split this up into constant pool indices and
- // other indices, for checking.
- break;
- case OperandType::kUImm:
- case OperandType::kImm:
- break;
- case OperandType::kRegList: {
- CHECK_LT(i, operand_count - 1);
- CHECK(operand_types[i + 1] == OperandType::kRegCount);
- int reg_count = static_cast<int>(operands[i + 1]);
- if (reg_count == 0) {
- return Register::FromOperand(operands[i]) == Register(0);
- } else {
- Register start = Register::FromOperand(operands[i]);
- Register end(start.index() + reg_count - 1);
- if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
- return false;
- }
- }
- i++; // Skip past kRegCount operand.
- break;
- }
- case OperandType::kReg:
- case OperandType::kRegOut: {
- Register reg = Register::FromOperand(operands[i]);
- if (!RegisterIsValid(reg)) {
- return false;
- }
- break;
}
- case OperandType::kRegOutPair:
- case OperandType::kRegPair: {
- Register reg0 = Register::FromOperand(operands[i]);
- Register reg1 = Register(reg0.index() + 1);
- if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1)) {
- return false;
- }
- break;
- }
- case OperandType::kRegOutTriple: {
- Register reg0 = Register::FromOperand(operands[i]);
- Register reg1 = Register(reg0.index() + 1);
- Register reg2 = Register(reg0.index() + 2);
- if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1) ||
- !RegisterIsValid(reg2)) {
- return false;
- }
- break;
- }
- case OperandType::kRegCount:
- UNREACHABLE(); // Dealt with in kRegList above.
}
+ return true;
}
+}
+
+void BytecodeArrayBuilder::PrepareToOutputBytecode(Bytecode bytecode) {
+ if (register_optimizer_) register_optimizer_->PrepareForBytecode(bytecode);
+}
+
+uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
+ DCHECK(RegisterIsValid(reg));
+ if (register_optimizer_) reg = register_optimizer_->GetInputRegister(reg);
+ return static_cast<uint32_t>(reg.ToOperand());
+}
+
+uint32_t BytecodeArrayBuilder::GetOutputRegisterOperand(Register reg) {
+ DCHECK(RegisterIsValid(reg));
+ if (register_optimizer_) register_optimizer_->PrepareOutputRegister(reg);
+ return static_cast<uint32_t>(reg.ToOperand());
+}
+
+uint32_t BytecodeArrayBuilder::GetInputRegisterListOperand(
+ RegisterList reg_list) {
+ DCHECK(RegisterListIsValid(reg_list));
+ if (register_optimizer_)
+ reg_list = register_optimizer_->GetInputRegisterList(reg_list);
+ return static_cast<uint32_t>(reg_list.first_register().ToOperand());
+}
- return true;
+uint32_t BytecodeArrayBuilder::GetOutputRegisterListOperand(
+ RegisterList reg_list) {
+ DCHECK(RegisterListIsValid(reg_list));
+ if (register_optimizer_)
+ register_optimizer_->PrepareOutputRegisterList(reg_list);
+ return static_cast<uint32_t>(reg_list.first_register().ToOperand());
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index a9fa7a7bb5..cc5b5e782b 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -6,6 +6,8 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
#include "src/ast/ast.h"
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/bytecode-register.h"
@@ -24,9 +26,11 @@ namespace interpreter {
class BytecodeLabel;
class BytecodeNode;
class BytecodePipelineStage;
+class BytecodeRegisterOptimizer;
class Register;
-class BytecodeArrayBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
BytecodeArrayBuilder(
Isolate* isolate, Zone* zone, int parameter_count, int context_count,
@@ -95,6 +99,14 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index,
int depth);
+ // Load from a module variable into the accumulator. |depth| is the depth of
+ // the current context relative to the module context.
+ BytecodeArrayBuilder& LoadModuleVariable(int cell_index, int depth);
+
+ // Store from the accumulator into a module variable. |depth| is the depth of
+ // the current context relative to the module context.
+ BytecodeArrayBuilder& StoreModuleVariable(int cell_index, int depth);
+
// Register-accumulator transfers.
BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
@@ -183,10 +195,11 @@ class BytecodeArrayBuilder final : public ZoneObject {
// Call a JS function. The JSFunction or Callable to be called should be in
// |callable|. The arguments should be in |args|, with the receiver in
- // |args[0]|. Type feedback is recorded in the |feedback_slot| in the type
- // feedback vector.
+ // |args[0]|. The call type of the expression is in |call_type|. Type feedback
+ // is recorded in the |feedback_slot| in the type feedback vector.
BytecodeArrayBuilder& Call(
Register callable, RegisterList args, int feedback_slot,
+ Call::CallType call_type,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
// Call the new operator. The accumulator holds the |new_target|.
@@ -317,6 +330,12 @@ class BytecodeArrayBuilder final : public ZoneObject {
bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
+ // Returns the raw operand value for the given register or register list.
+ uint32_t GetInputRegisterOperand(Register reg);
+ uint32_t GetOutputRegisterOperand(Register reg);
+ uint32_t GetInputRegisterListOperand(RegisterList reg_list);
+ uint32_t GetOutputRegisterListOperand(RegisterList reg_list);
+
// Accessors
BytecodeRegisterAllocator* register_allocator() {
return &register_allocator_;
@@ -328,41 +347,22 @@ class BytecodeArrayBuilder final : public ZoneObject {
private:
friend class BytecodeRegisterAllocator;
+ template <OperandType... operand_types>
+ friend class BytecodeNodeBuilder;
- INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3));
- INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2));
- INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1));
- INLINE(void Output(Bytecode bytecode, uint32_t operand0));
- INLINE(void Output(Bytecode bytecode));
+ // Returns the current source position for the given |bytecode|.
+ INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
- INLINE(void OutputJump(Bytecode bytecode, BytecodeLabel* label));
- INLINE(void OutputJump(Bytecode bytecode, uint32_t operand0,
- BytecodeLabel* label));
+#define DECLARE_BYTECODE_OUTPUT(Name, ...) \
+ template <typename... Operands> \
+ INLINE(void Output##Name(Operands... operands)); \
+ template <typename... Operands> \
+ INLINE(void Output##Name(BytecodeLabel* label, Operands... operands));
+ BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
+#undef DECLARE_OPERAND_TYPE_INFO
bool RegisterIsValid(Register reg) const;
- bool OperandsAreValid(Bytecode bytecode, int operand_count,
- uint32_t operand0 = 0, uint32_t operand1 = 0,
- uint32_t operand2 = 0, uint32_t operand3 = 0) const;
-
- static uint32_t RegisterOperand(Register reg) {
- return static_cast<uint32_t>(reg.ToOperand());
- }
-
- static uint32_t SignedOperand(int value) {
- return static_cast<uint32_t>(value);
- }
-
- static uint32_t UnsignedOperand(int value) {
- DCHECK_GE(value, 0);
- return static_cast<uint32_t>(value);
- }
-
- static uint32_t UnsignedOperand(size_t value) {
- DCHECK_LE(value, kMaxUInt32);
- return static_cast<uint32_t>(value);
- }
+ bool RegisterListIsValid(RegisterList reg_list) const;
// Set position for return.
void SetReturnPosition();
@@ -375,6 +375,8 @@ class BytecodeArrayBuilder final : public ZoneObject {
// during bytecode generation.
BytecodeArrayBuilder& Illegal();
+ void PrepareToOutputBytecode(Bytecode bytecode);
+
void LeaveBasicBlock() { return_seen_in_block_ = false; }
BytecodeArrayWriter* bytecode_array_writer() {
@@ -403,6 +405,7 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeRegisterAllocator register_allocator_;
BytecodeArrayWriter bytecode_array_writer_;
BytecodePipelineStage* pipeline_;
+ BytecodeRegisterOptimizer* register_optimizer_;
BytecodeSourceInfo latest_source_info_;
static int const kNoFeedbackSlot = 0;
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 09226252cc..03279cbd43 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -5,6 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
+#include "src/globals.h"
#include "src/handles.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
@@ -15,7 +16,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeArrayIterator {
+class V8_EXPORT_PRIVATE BytecodeArrayIterator {
public:
explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index fb3876819e..28f997b534 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -94,9 +94,9 @@ void BytecodeArrayWriter::UpdateSourcePositionTable(
int bytecode_offset = static_cast<int>(bytecodes()->size());
const BytecodeSourceInfo& source_info = node->source_info();
if (source_info.is_valid()) {
- source_position_table_builder()->AddPosition(bytecode_offset,
- source_info.source_position(),
- source_info.is_statement());
+ source_position_table_builder()->AddPosition(
+ bytecode_offset, SourcePosition(source_info.source_position()),
+ source_info.is_statement());
}
}
@@ -211,8 +211,6 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
// and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kShort, Smi::FromInt(delta));
- DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
- OperandSize::kShort);
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
@@ -275,7 +273,7 @@ void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) {
void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
DCHECK(Bytecodes::IsJump(node->bytecode()));
- DCHECK_EQ(0, node->operand(0));
+ DCHECK_EQ(0u, node->operand(0));
size_t current_offset = bytecodes()->size();
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 712fcb9837..3810ca0847 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-pipeline.h"
#include "src/source-position-table.h"
@@ -20,7 +22,8 @@ class ConstantArrayBuilder;
// Class for emitting bytecode as the final stage of the bytecode
// generation pipeline.
-class BytecodeArrayWriter final : public BytecodePipelineStage {
+class V8_EXPORT_PRIVATE BytecodeArrayWriter final
+ : public NON_EXPORTED_BASE(BytecodePipelineStage) {
public:
BytecodeArrayWriter(
Zone* zone, ConstantArrayBuilder* constant_array_builder,
diff --git a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
index 188d610890..7350981c73 100644
--- a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
#define V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-pipeline.h"
namespace v8 {
@@ -13,8 +15,9 @@ namespace interpreter {
// An optimization stage for eliminating obviously dead code in bytecode
// generation.
-class BytecodeDeadCodeOptimizer final : public BytecodePipelineStage,
- public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeDeadCodeOptimizer final
+ : public NON_EXPORTED_BASE(BytecodePipelineStage),
+ public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit BytecodeDeadCodeOptimizer(BytecodePipelineStage* next_stage);
diff --git a/deps/v8/src/interpreter/bytecode-decoder.h b/deps/v8/src/interpreter/bytecode-decoder.h
index d1749efb7f..51d0e41ff7 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.h
+++ b/deps/v8/src/interpreter/bytecode-decoder.h
@@ -7,6 +7,7 @@
#include <iosfwd>
+#include "src/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
@@ -14,7 +15,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeDecoder final {
+class V8_EXPORT_PRIVATE BytecodeDecoder final {
public:
// Decodes a register operand in a byte array.
static Register DecodeRegisterOperand(const uint8_t* operand_start,
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index db5a596b85..99e76725d5 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -14,6 +14,7 @@
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
namespace v8 {
@@ -361,7 +362,7 @@ void BytecodeGenerator::ControlScope::PerformCommand(Command command,
return;
}
current = current->outer();
- if (current->context() != context) {
+ if (current->context() != context && context->ShouldPopContext()) {
// Pop context to the expected depth.
// TODO(rmcilroy): Only emit a single context pop.
generator()->builder()->PopContext(current->context()->reg());
@@ -571,7 +572,11 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
generator_state_(),
loop_depth_(0),
home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
- prototype_string_(info->isolate()->factory()->prototype_string()) {
+ empty_fixed_array_(info->isolate()->factory()->empty_fixed_array()) {
+ AstValueFactory* ast_value_factory = info->parse_info()->ast_value_factory();
+ const AstRawString* prototype_string = ast_value_factory->prototype_string();
+ ast_value_factory->Internalize(info->isolate());
+ prototype_string_ = prototype_string->string();
}
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
@@ -678,6 +683,9 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Visit declarations within the function scope.
VisitDeclarations(scope()->declarations());
+ // Emit initializing assignments for module namespace imports (if any).
+ VisitModuleNamespaceImports();
+
// Perform a stack-check before the body.
builder()->StackCheck(info()->literal()->start_position());
@@ -826,8 +834,9 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
case VariableLocation::MODULE:
if (variable->IsExport() && variable->binding_needs_init()) {
builder()->LoadTheHole();
- VisitVariableAssignment(variable, Token::INIT,
- FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT,
+ FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
}
// Nothing to do for imports.
break;
@@ -846,8 +855,9 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
VisitForAccumulatorValue(decl->fun());
- VisitVariableAssignment(variable, Token::INIT,
- FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT,
+ FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
break;
}
case VariableLocation::CONTEXT: {
@@ -871,19 +881,38 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK_EQ(variable->mode(), LET);
DCHECK(variable->IsExport());
VisitForAccumulatorValue(decl->fun());
- VisitVariableAssignment(variable, Token::INIT,
- FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT,
+ FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
break;
}
}
-void BytecodeGenerator::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
+void BytecodeGenerator::VisitModuleNamespaceImports() {
+ if (!scope()->is_module_scope()) return;
+
+ RegisterAllocationScope register_scope(this);
+ Register module_request = register_allocator()->NewRegister();
+
+ ModuleDescriptor* descriptor = scope()->AsModuleScope()->module();
+ for (auto entry : descriptor->namespace_imports()) {
+ builder()
+ ->LoadLiteral(Smi::FromInt(entry->module_request))
+ .StoreAccumulatorInRegister(module_request)
+ .CallRuntime(Runtime::kGetModuleNamespace, module_request);
+ Variable* var = scope()->LookupLocal(entry->local_name);
+ DCHECK_NOT_NULL(var);
+ BuildVariableAssignment(var, Token::INIT, FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
+ }
+}
+
+void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
RegisterAllocationScope register_scope(this);
DCHECK(globals_builder()->empty());
- for (int i = 0; i < declarations->length(); i++) {
+ for (Declaration* decl : *declarations) {
RegisterAllocationScope register_scope(this);
- Visit(declarations->at(i));
+ Visit(decl);
}
if (globals_builder()->empty()) return;
@@ -1126,8 +1155,9 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
LhsKind assign_type = Property::GetAssignType(property);
switch (assign_type) {
case VARIABLE: {
- Variable* variable = expr->AsVariableProxy()->var();
- VisitVariableAssignment(variable, Token::ASSIGN, slot);
+ VariableProxy* proxy = expr->AsVariableProxy();
+ BuildVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -1206,7 +1236,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Set up loop counter
Register index = register_allocator()->NewRegister();
- builder()->LoadLiteral(Smi::FromInt(0));
+ builder()->LoadLiteral(Smi::kZero);
builder()->StoreAccumulatorInRegister(index);
// The loop
@@ -1374,11 +1404,12 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
builder()->CallRuntime(Runtime::kToFastProperties, literal);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
- Variable* var = expr->class_variable_proxy()->var();
+ VariableProxy* proxy = expr->class_variable_proxy();
FeedbackVectorSlot slot = expr->NeedsProxySlot()
? expr->ProxySlot()
: FeedbackVectorSlot::Invalid();
- VisitVariableAssignment(var, Token::INIT, slot);
+ BuildVariableAssignment(proxy->var(), Token::INIT, slot,
+ HoleCheckMode::kElided);
}
}
@@ -1541,11 +1572,14 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
FastCloneShallowObjectStub::IsSupported(expr),
FastCloneShallowObjectStub::PropertiesCount(expr->properties_count()),
expr->ComputeFlags());
- // Allocate in the outer scope since this register is used to return the
- // expression's results to the caller.
+ // If constant properties is an empty fixed array, use our cached
+ // empty_fixed_array to ensure it's only added to the constant pool once.
+ Handle<FixedArray> constant_properties = expr->properties_count() == 0
+ ? empty_fixed_array()
+ : expr->constant_properties();
Register literal = register_allocator()->NewRegister();
- builder()->CreateObjectLiteral(expr->constant_properties(),
- expr->literal_index(), flags, literal);
+ builder()->CreateObjectLiteral(constant_properties, expr->literal_index(),
+ flags, literal);
// Store computed values into the literal.
int property_index = 0;
@@ -1752,17 +1786,13 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
builder()->SetExpressionPosition(proxy);
- VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
+ BuildVariableLoad(proxy->var(), proxy->VariableFeedbackSlot(),
+ proxy->hole_check_mode());
}
-void BytecodeGenerator::BuildHoleCheckForVariableLoad(Variable* variable) {
- if (variable->binding_needs_init()) {
- BuildThrowIfHole(variable->name());
- }
-}
-
-void BytecodeGenerator::VisitVariableLoad(Variable* variable,
+void BytecodeGenerator::BuildVariableLoad(Variable* variable,
FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode,
TypeofMode typeof_mode) {
switch (variable->location()) {
case VariableLocation::LOCAL: {
@@ -1771,7 +1801,9 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
// VisitForRegisterScope, in order to avoid register aliasing if
// subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
- BuildHoleCheckForVariableLoad(variable);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
+ }
break;
}
case VariableLocation::PARAMETER: {
@@ -1782,7 +1814,9 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
// VisitForRegisterScope, in order to avoid register aliasing if
// subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
- BuildHoleCheckForVariableLoad(variable);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
+ }
break;
}
case VariableLocation::UNALLOCATED: {
@@ -1801,7 +1835,9 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
}
builder()->LoadContextSlot(context_reg, variable->index(), depth);
- BuildHoleCheckForVariableLoad(variable);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
+ }
break;
}
case VariableLocation::LOOKUP: {
@@ -1812,7 +1848,9 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
execution_context()->ContextChainDepth(local_variable->scope());
builder()->LoadLookupContextSlot(variable->name(), typeof_mode,
local_variable->index(), depth);
- BuildHoleCheckForVariableLoad(variable);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
+ }
break;
}
case DYNAMIC_GLOBAL: {
@@ -1827,36 +1865,21 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
break;
}
case VariableLocation::MODULE: {
- ModuleDescriptor* descriptor = scope()->GetModuleScope()->module();
- if (variable->IsExport()) {
- auto it = descriptor->regular_exports().find(variable->raw_name());
- DCHECK(it != descriptor->regular_exports().end());
- Register export_name = register_allocator()->NewRegister();
- builder()
- ->LoadLiteral(it->second->export_name->string())
- .StoreAccumulatorInRegister(export_name)
- .CallRuntime(Runtime::kLoadModuleExport, export_name);
- } else {
- auto it = descriptor->regular_imports().find(variable->raw_name());
- DCHECK(it != descriptor->regular_imports().end());
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->LoadLiteral(it->second->import_name->string())
- .StoreAccumulatorInRegister(args[0])
- .LoadLiteral(Smi::FromInt(it->second->module_request))
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kLoadModuleImport, args);
+ int depth = execution_context()->ContextChainDepth(variable->scope());
+ builder()->LoadModuleVariable(variable->index(), depth);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
}
- BuildHoleCheckForVariableLoad(variable);
break;
}
}
}
-void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
- Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
+void BytecodeGenerator::BuildVariableLoadForAccumulatorValue(
+ Variable* variable, FeedbackVectorSlot slot, HoleCheckMode hole_check_mode,
+ TypeofMode typeof_mode) {
ValueResultScope accumulator_result(this);
- VisitVariableLoad(variable, slot, typeof_mode);
+ BuildVariableLoad(variable, slot, hole_check_mode, typeof_mode);
}
void BytecodeGenerator::BuildReturn() {
@@ -1911,29 +1934,26 @@ void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
Token::Value op) {
- if (op != Token::INIT) {
- // Perform an initialization check for let/const declared variables.
- // E.g. let x = (x = 20); is not allowed.
- BuildThrowIfHole(variable->name());
- } else {
- DCHECK(variable->is_this() && variable->mode() == CONST &&
- op == Token::INIT);
+ if (variable->is_this() && variable->mode() == CONST && op == Token::INIT) {
// Perform an initialization check for 'this'. 'this' variable is the
// only variable able to trigger bind operations outside the TDZ
// via 'super' calls.
BuildThrowIfNotHole(variable->name());
+ } else {
+ // Perform an initialization check for let/const declared variables.
+ // E.g. let x = (x = 20); is not allowed.
+ DCHECK(IsLexicalVariableMode(variable->mode()));
+ BuildThrowIfHole(variable->name());
}
}
-void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
+void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
VariableMode mode = variable->mode();
RegisterAllocationScope assignment_register_scope(this);
BytecodeLabel end_label;
- bool hole_check_required =
- variable->binding_needs_init() &&
- (op != Token::INIT || (mode == CONST && variable->is_this()));
switch (variable->location()) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
@@ -1944,7 +1964,7 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
destination = Register(variable->index());
}
- if (hole_check_required) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
// Load destination to check for hole.
Register value_temp = register_allocator()->NewRegister();
builder()
@@ -1979,7 +1999,7 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
context_reg = execution_context()->reg();
}
- if (hole_check_required) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
// Load destination to check for hole.
Register value_temp = register_allocator()->NewRegister();
builder()
@@ -2014,18 +2034,16 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
// assignments for them.
DCHECK(variable->IsExport());
- ModuleDescriptor* mod = scope()->GetModuleScope()->module();
- // There may be several export names for this local name, but it doesn't
- // matter which one we pick, as they all map to the same cell.
- auto it = mod->regular_exports().find(variable->raw_name());
- DCHECK(it != mod->regular_exports().end());
-
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->StoreAccumulatorInRegister(args[1])
- .LoadLiteral(it->second->export_name->string())
- .StoreAccumulatorInRegister(args[0])
- .CallRuntime(Runtime::kStoreModuleExport, args);
+ int depth = execution_context()->ContextChainDepth(variable->scope());
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ Register value_temp = register_allocator()->NewRegister();
+ builder()
+ ->StoreAccumulatorInRegister(value_temp)
+ .LoadModuleVariable(variable->index(), depth);
+ BuildHoleCheckForVariableAssignment(variable, op);
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ }
+ builder()->StoreModuleVariable(variable->index(), depth);
break;
}
}
@@ -2087,7 +2105,8 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->target()->AsVariableProxy();
- VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
+ BuildVariableLoad(proxy->var(), proxy->VariableFeedbackSlot(),
+ proxy->hole_check_mode());
builder()->StoreAccumulatorInRegister(old_value);
break;
}
@@ -2136,10 +2155,11 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
FeedbackVectorSlot slot = expr->AssignmentSlot();
switch (assign_type) {
case VARIABLE: {
- // TODO(oth): The VisitVariableAssignment() call is hard to reason about.
+ // TODO(oth): The BuildVariableAssignment() call is hard to reason about.
// Is the value in the accumulator safe? Yes, but scary.
- Variable* variable = expr->target()->AsVariableProxy()->var();
- VisitVariableAssignment(variable, expr->op(), slot);
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ BuildVariableAssignment(proxy->var(), expr->op(), slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY:
@@ -2273,10 +2293,12 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
}
}
-void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
- Property* expr) {
+void BytecodeGenerator::VisitPropertyLoadForRegister(Register obj,
+ Property* expr,
+ Register destination) {
ValueResultScope result_scope(this);
VisitPropertyLoad(obj, expr);
+ builder()->StoreAccumulatorInRegister(destination);
}
void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
@@ -2325,11 +2347,10 @@ void BytecodeGenerator::VisitProperty(Property* expr) {
}
void BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args,
- RegisterList arg_regs,
- size_t first_argument_register) {
+ RegisterList* arg_regs) {
// Visit arguments.
for (int i = 0; i < static_cast<int>(args->length()); i++) {
- VisitForRegisterValue(args->at(i), arg_regs[first_argument_register + i]);
+ VisitAndPushIntoRegisterList(args->at(i), arg_regs);
}
}
@@ -2342,11 +2363,11 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
Register callee = register_allocator()->NewRegister();
-
- // Add an argument register for the receiver.
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
- Register receiver = args[0];
+ // Grow the args list as we visit receiver / arguments to avoid allocating all
+ // the registers up-front. Otherwise these registers are unavailable during
+ // receiver / argument visiting and we can end up with memory leaks due to
+ // registers keeping objects alive.
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
@@ -2354,54 +2375,55 @@ void BytecodeGenerator::VisitCall(Call* expr) {
case Call::NAMED_PROPERTY_CALL:
case Call::KEYED_PROPERTY_CALL: {
Property* property = callee_expr->AsProperty();
- VisitForAccumulatorValue(property->obj());
- builder()->StoreAccumulatorInRegister(receiver);
- VisitPropertyLoadForAccumulator(receiver, property);
- builder()->StoreAccumulatorInRegister(callee);
+ VisitAndPushIntoRegisterList(property->obj(), &args);
+ VisitPropertyLoadForRegister(args[0], property, callee);
break;
}
case Call::GLOBAL_CALL: {
// Receiver is undefined for global calls.
- builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+ BuildPushUndefinedIntoRegisterList(&args);
// Load callee as a global variable.
VariableProxy* proxy = callee_expr->AsVariableProxy();
- VisitVariableLoadForAccumulatorValue(proxy->var(),
- proxy->VariableFeedbackSlot());
+ BuildVariableLoadForAccumulatorValue(proxy->var(),
+ proxy->VariableFeedbackSlot(),
+ proxy->hole_check_mode());
builder()->StoreAccumulatorInRegister(callee);
break;
}
- case Call::LOOKUP_SLOT_CALL:
- case Call::POSSIBLY_EVAL_CALL: {
- if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
+ case Call::WITH_CALL: {
+ Register receiver = register_allocator()->GrowRegisterList(&args);
+ DCHECK(callee_expr->AsVariableProxy()->var()->IsLookupSlot());
+ {
RegisterAllocationScope inner_register_scope(this);
Register name = register_allocator()->NewRegister();
// Call %LoadLookupSlotForCall to get the callee and receiver.
DCHECK(Register::AreContiguous(callee, receiver));
RegisterList result_pair(callee.index(), 2);
+ USE(receiver);
Variable* variable = callee_expr->AsVariableProxy()->var();
builder()
->LoadLiteral(variable->name())
.StoreAccumulatorInRegister(name)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name,
result_pair);
- break;
}
- // Fall through.
- DCHECK_EQ(call_type, Call::POSSIBLY_EVAL_CALL);
+ break;
}
case Call::OTHER_CALL: {
- builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+ BuildPushUndefinedIntoRegisterList(&args);
VisitForRegisterValue(callee_expr, callee);
break;
}
case Call::NAMED_SUPER_PROPERTY_CALL: {
+ Register receiver = register_allocator()->GrowRegisterList(&args);
Property* property = callee_expr->AsProperty();
VisitNamedSuperPropertyLoad(property, receiver);
builder()->StoreAccumulatorInRegister(callee);
break;
}
case Call::KEYED_SUPER_PROPERTY_CALL: {
+ Register receiver = register_allocator()->GrowRegisterList(&args);
Property* property = callee_expr->AsProperty();
VisitKeyedSuperPropertyLoad(property, receiver);
builder()->StoreAccumulatorInRegister(callee);
@@ -2414,12 +2436,12 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// Evaluate all arguments to the function call and store in sequential args
// registers.
- VisitArguments(expr->arguments(), args, 1);
+ VisitArguments(expr->arguments(), &args);
+ CHECK_EQ(expr->arguments()->length() + 1, args.register_count());
// Resolve callee for a potential direct eval call. This block will mutate the
// callee value.
- if (call_type == Call::POSSIBLY_EVAL_CALL &&
- expr->arguments()->length() > 0) {
+ if (expr->is_possibly_eval() && expr->arguments()->length() > 0) {
RegisterAllocationScope inner_register_scope(this);
// Set up arguments for ResolvePossiblyDirectEval by copying callee, source
// strings and function closure, and loading language and
@@ -2445,18 +2467,9 @@ void BytecodeGenerator::VisitCall(Call* expr) {
builder()->SetExpressionPosition(expr);
- int feedback_slot_index;
- if (expr->CallFeedbackICSlot().IsInvalid()) {
- DCHECK(call_type == Call::POSSIBLY_EVAL_CALL);
- // Valid type feedback slots can only be greater than kReservedIndexCount.
- // We use 0 to indicate an invalid slot id. Statically assert that 0 cannot
- // be a valid slot id.
- STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
- feedback_slot_index = 0;
- } else {
- feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
- }
- builder()->Call(callee, args, feedback_slot_index, expr->tail_call_mode());
+ int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ builder()->Call(callee, args, feedback_slot_index, call_type,
+ expr->tail_call_mode());
}
void BytecodeGenerator::VisitCallSuper(Call* expr) {
@@ -2470,9 +2483,8 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
Register constructor = this_function; // Re-use dead this_function register.
builder()->StoreAccumulatorInRegister(constructor);
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length());
- VisitArguments(expr->arguments(), args);
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
+ VisitArguments(expr->arguments(), &args);
// The new target is loaded into the accumulator from the
// {new.target} variable.
@@ -2480,20 +2492,20 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// Call construct.
builder()->SetExpressionPosition(expr);
- // Valid type feedback slots can only be greater than kReservedIndexCount.
- // Assert that 0 cannot be valid a valid slot id.
- STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
- // Type feedback is not necessary for super constructor calls. The type
- // information can be inferred in most cases. Slot id 0 indicates type
- // feedback is not required.
- builder()->New(constructor, args, 0);
+ // TODO(turbofan): For now we do gather feedback on super constructor
+ // calls, utilizing the existing machinery to inline the actual call
+ // target and the JSCreate for the implicit receiver allocation. This
+ // is not an ideal solution for super constructor calls, but it gets
+ // the job done for now. In the long run we might want to revisit this
+ // and come up with a better way.
+ int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ builder()->New(constructor, args, feedback_slot_index);
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
Register constructor = VisitForRegisterValue(expr->expression());
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length());
- VisitArguments(expr->arguments(), args);
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
+ VisitArguments(expr->arguments(), &args);
builder()->SetExpressionPosition(expr);
// The accumulator holds new target which is the same as the
@@ -2505,18 +2517,15 @@ void BytecodeGenerator::VisitCallNew(CallNew* expr) {
void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
// Allocate a register for the receiver and load it with undefined.
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
- Register receiver = args[0];
- builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
- VisitArguments(expr->arguments(), args, 1);
+ BuildPushUndefinedIntoRegisterList(&args);
+ VisitArguments(expr->arguments(), &args);
builder()->CallJSRuntime(expr->context_index(), args);
} else {
// Evaluate all arguments to the runtime call.
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length());
- VisitArguments(expr->arguments(), args);
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
+ VisitArguments(expr->arguments(), &args);
Runtime::FunctionId function_id = expr->function()->function_id;
builder()->CallRuntime(function_id, args);
}
@@ -2532,8 +2541,9 @@ void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
// Typeof does not throw a reference error on global variables, hence we
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- VisitVariableLoadForAccumulatorValue(
- proxy->var(), proxy->VariableFeedbackSlot(), INSIDE_TYPEOF);
+ BuildVariableLoadForAccumulatorValue(
+ proxy->var(), proxy->VariableFeedbackSlot(), proxy->hole_check_mode(),
+ INSIDE_TYPEOF);
} else {
VisitForAccumulatorValue(expr->expression());
}
@@ -2657,8 +2667,9 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- VisitVariableLoadForAccumulatorValue(proxy->var(),
- proxy->VariableFeedbackSlot());
+ BuildVariableLoadForAccumulatorValue(proxy->var(),
+ proxy->VariableFeedbackSlot(),
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2709,7 +2720,9 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
if (is_postfix) {
// Convert old value into a number before saving it.
old_value = register_allocator()->NewRegister();
- builder()->ConvertAccumulatorToNumber(old_value);
+ builder()
+ ->ConvertAccumulatorToNumber(old_value)
+ .LoadAccumulatorWithRegister(old_value);
}
// Perform +1/-1 operation.
@@ -2721,8 +2734,9 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
FeedbackVectorSlot feedback_slot = expr->CountSlot();
switch (assign_type) {
case VARIABLE: {
- Variable* variable = expr->expression()->AsVariableProxy()->var();
- VisitVariableAssignment(variable, expr->op(), feedback_slot);
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ BuildVariableAssignment(proxy->var(), expr->op(), feedback_slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2821,7 +2835,7 @@ void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
- if (left->ToBooleanIsTrue() || right->ToBooleanIsTrue()) {
+ if (left->ToBooleanIsTrue()) {
builder()->Jump(test_result->NewThenLabel());
} else if (left->ToBooleanIsFalse() && right->ToBooleanIsFalse()) {
builder()->Jump(test_result->NewElseLabel());
@@ -2856,7 +2870,7 @@ void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
- if (left->ToBooleanIsFalse() || right->ToBooleanIsFalse()) {
+ if (left->ToBooleanIsFalse()) {
builder()->Jump(test_result->NewElseLabel());
} else if (left->ToBooleanIsTrue() && right->ToBooleanIsTrue()) {
builder()->Jump(test_result->NewThenLabel());
@@ -3019,8 +3033,9 @@ void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
? CreateArgumentsType::kUnmappedArguments
: CreateArgumentsType::kMappedArguments;
builder()->CreateArguments(type);
- VisitVariableAssignment(variable, Token::ASSIGN,
- FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::ASSIGN,
+ FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
}
void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
@@ -3030,7 +3045,8 @@ void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
// variable.
builder()->CreateArguments(CreateArgumentsType::kRestParameter);
DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
- VisitVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
}
void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
@@ -3038,7 +3054,8 @@ void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
// Store the closure we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::function_closure());
- VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
}
void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
@@ -3046,7 +3063,8 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
// Store the new target we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::new_target());
- VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
// TODO(mstarzinger): The <new.target> register is not set by the deoptimizer
// and we need to make sure {BytecodeRegisterOptimizer} flushes its state
@@ -3120,6 +3138,28 @@ void BytecodeGenerator::VisitForRegisterValue(Expression* expr,
builder()->StoreAccumulatorInRegister(destination);
}
+// Visits the expression |expr| and pushes the result into a new register
+// added to the end of |reg_list|.
+void BytecodeGenerator::VisitAndPushIntoRegisterList(Expression* expr,
+ RegisterList* reg_list) {
+ {
+ ValueResultScope register_scope(this);
+ Visit(expr);
+ }
+ // Grow the register list after visiting the expression to avoid reserving
+ // the register across the expression evaluation, which could cause memory
+ // leaks for deep expressions due to dead objects being kept alive by pointers
+ // in registers.
+ Register destination = register_allocator()->GrowRegisterList(reg_list);
+ builder()->StoreAccumulatorInRegister(destination);
+}
+
+void BytecodeGenerator::BuildPushUndefinedIntoRegisterList(
+ RegisterList* reg_list) {
+ Register reg = register_allocator()->GrowRegisterList(reg_list);
+ builder()->LoadUndefined().StoreAccumulatorInRegister(reg);
+}
+
// Visits the expression |expr| for testing its boolean value and jumping to the
// |then| or |other| label depending on value and short-circuit semantics
void BytecodeGenerator::VisitForTest(Expression* expr,
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 03067de08d..bcab9975d0 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -32,7 +32,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
#undef DECLARE_VISIT
// Visiting function for declarations list and statements are overridden.
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitDeclarations(Declaration::List* declarations);
void VisitStatements(ZoneList<Statement*>* statments);
private:
@@ -72,10 +72,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Used by flow control routines to evaluate loop condition.
void VisitCondition(Expression* expr);
- // Visit the arguments expressions in |args| and store them in |args_regs|
- // starting at register |first_argument_register| in the list.
- void VisitArguments(ZoneList<Expression*>* args, RegisterList arg_regs,
- size_t first_argument_register = 0);
+ // Visit the arguments expressions in |args| and store them in |args_regs|,
+ // growing |args_regs| for each argument visited.
+ void VisitArguments(ZoneList<Expression*>* args, RegisterList* arg_regs);
// Visit a keyed super property load. The optional
// |opt_receiver_out| register will have the receiver stored to it
@@ -92,18 +91,19 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Register opt_receiver_out);
void VisitPropertyLoad(Register obj, Property* expr);
- void VisitPropertyLoadForAccumulator(Register obj, Property* expr);
+ void VisitPropertyLoadForRegister(Register obj, Property* expr,
+ Register destination);
- void VisitVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+ void BuildVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
- void VisitVariableLoadForAccumulatorValue(
+ void BuildVariableLoadForAccumulatorValue(
Variable* variable, FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
- MUST_USE_RESULT Register
- VisitVariableLoadForRegisterValue(Variable* variable, FeedbackVectorSlot slot,
- TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
- void VisitVariableAssignment(Variable* variable, Token::Value op,
- FeedbackVectorSlot slot);
+ void BuildVariableAssignment(Variable* variable, Token::Value op,
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode);
void BuildReturn();
void BuildReThrow();
@@ -111,7 +111,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildThrowIfHole(Handle<String> name);
void BuildThrowIfNotHole(Handle<String> name);
void BuildThrowReferenceError(Handle<String> name);
- void BuildHoleCheckForVariableLoad(Variable* variable);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
// Build jump to targets[value], where
@@ -143,6 +142,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ObjectLiteralProperty* property,
Register value_out);
void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
+ void VisitModuleNamespaceImports();
// Visit the header/body of a loop iteration.
void VisitIterationHeader(IterationStatement* stmt,
@@ -152,12 +152,15 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Visit a statement and switch scopes, the context is in the accumulator.
void VisitInScope(Statement* stmt, Scope* scope);
+ void BuildPushUndefinedIntoRegisterList(RegisterList* reg_list);
+
// Visitors for obtaining expression result in the accumulator, in a
// register, or just getting the effect.
void VisitForAccumulatorValue(Expression* expr);
void VisitForAccumulatorValueOrTheHole(Expression* expr);
MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
void VisitForRegisterValue(Expression* expr, Register destination);
+ void VisitAndPushIntoRegisterList(Expression* expr, RegisterList* reg_list);
void VisitForEffect(Expression* expr);
void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
BytecodeLabels* else_labels, TestFallthrough fallthrough);
@@ -194,6 +197,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Handle<Name> home_object_symbol() const { return home_object_symbol_; }
Handle<Name> prototype_string() const { return prototype_string_; }
+ Handle<FixedArray> empty_fixed_array() const { return empty_fixed_array_; }
Zone* zone_;
BytecodeArrayBuilder* builder_;
@@ -216,6 +220,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Handle<Name> home_object_symbol_;
Handle<Name> prototype_string_;
+ Handle<FixedArray> empty_fixed_array_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index b35c4866be..55485027d3 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -14,8 +14,8 @@ namespace interpreter {
#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
- V(RegList, OperandTypeInfo::kScalableSignedByte) \
V(Reg, OperandTypeInfo::kScalableSignedByte) \
+ V(RegList, OperandTypeInfo::kScalableSignedByte) \
V(RegPair, OperandTypeInfo::kScalableSignedByte)
#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
@@ -23,22 +23,25 @@ namespace interpreter {
V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
-#define SCALAR_OPERAND_TYPE_LIST(V) \
+#define UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
V(UImm, OperandTypeInfo::kScalableUnsignedByte) \
- V(Imm, OperandTypeInfo::kScalableSignedByte) \
V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
+#define SIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
+ V(Imm, OperandTypeInfo::kScalableSignedByte)
+
#define REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
INVALID_OPERAND_TYPE_LIST(V) \
- SCALAR_OPERAND_TYPE_LIST(V)
+ UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
+ SIGNED_SCALAR_OPERAND_TYPE_LIST(V)
// The list of operand types used by bytecodes.
#define OPERAND_TYPE_LIST(V) \
@@ -114,9 +117,12 @@ inline AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
return static_cast<AccumulatorUse>(result);
}
-std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
-std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const AccumulatorUse& use);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const OperandScale& operand_scale);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const OperandSize& operand_size);
std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
index c87d31c39f..40552943f7 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
@@ -13,7 +13,7 @@ namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
BytecodePipelineStage* next_stage)
- : next_stage_(next_stage), last_(Bytecode::kIllegal) {
+ : next_stage_(next_stage), last_(Bytecode::kIllegal, BytecodeSourceInfo()) {
InvalidateLast();
}
@@ -77,8 +77,7 @@ void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
// source position information. NOP without source information can
// always be elided.
DCHECK(node->bytecode() != Bytecode::kNop || node->source_info().is_valid());
-
- last_.Clone(node);
+ last_ = *node;
}
bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
@@ -117,24 +116,6 @@ bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
namespace {
-void TransformLdaStarToLdrLdar(Bytecode new_bytecode, BytecodeNode* const last,
- BytecodeNode* const current) {
- DCHECK_EQ(current->bytecode(), Bytecode::kStar);
-
- //
- // An example transformation here would be:
- //
- // LdaGlobal i0, i1 ____\ LdrGlobal i0, i1, R
- // Star R ====/ Ldar R
- //
- // which loads a global value into both a register and the
- // accumulator. However, in the second form the Ldar can often be
- // peephole optimized away unlike the Star in the first form.
- //
- last->Transform(new_bytecode, current->operand(0));
- current->set_bytecode(Bytecode::kLdar, current->operand(0));
-}
-
void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
BytecodeNode* const last,
BytecodeNode* const current) {
@@ -142,7 +123,7 @@ void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
current->operand(1));
if (last->source_info().is_valid()) {
- current->source_info_ptr()->Clone(last->source_info());
+ current->set_source_info(last->source_info());
}
}
@@ -153,7 +134,7 @@ void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
current->set_bytecode(new_bytecode, 0, current->operand(0),
current->operand(1));
if (last->source_info().is_valid()) {
- current->source_info_ptr()->Clone(last->source_info());
+ current->set_source_info(last->source_info());
}
}
@@ -223,7 +204,7 @@ void BytecodePeepholeOptimizer::ElideLastAction(
// |node| can not have a valid source position if the source
// position of last() is valid (per rules in
// CanElideLastBasedOnSourcePosition()).
- node->source_info_ptr()->Clone(last()->source_info());
+ node->set_source_info(last()->source_info());
}
SetLast(node);
} else {
@@ -240,17 +221,6 @@ void BytecodePeepholeOptimizer::ChangeBytecodeAction(
DefaultAction(node);
}
-void BytecodePeepholeOptimizer::TransformLdaStarToLdrLdarAction(
- BytecodeNode* const node, const PeepholeActionAndData* action_data) {
- DCHECK(LastIsValid());
- DCHECK(!Bytecodes::IsJump(node->bytecode()));
-
- if (!node->source_info().is_statement()) {
- TransformLdaStarToLdrLdar(action_data->bytecode, last(), node);
- }
- DefaultAction(node);
-}
-
void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction(
BytecodeNode* const node, const PeepholeActionAndData* action_data) {
DCHECK(LastIsValid());
@@ -314,7 +284,7 @@ void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
if (!CanElideLastBasedOnSourcePosition(node)) {
next_stage()->Write(last());
} else if (!node->source_info().is_valid()) {
- node->source_info_ptr()->Clone(last()->source_info());
+ node->set_source_info(last()->source_info());
}
InvalidateLast();
}
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.h b/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
index cedd742f87..7e7e02a370 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
#define V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-peephole-table.h"
#include "src/interpreter/bytecode-pipeline.h"
@@ -17,8 +19,9 @@ class BytecodePeepholeActionAndData;
// An optimization stage for performing peephole optimizations on
// generated bytecode. The optimizer may buffer one bytecode
// internally.
-class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
- public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodePeepholeOptimizer final
+ : public NON_EXPORTED_BASE(BytecodePipelineStage),
+ public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit BytecodePeepholeOptimizer(BytecodePipelineStage* next_stage);
diff --git a/deps/v8/src/interpreter/bytecode-peephole-table.h b/deps/v8/src/interpreter/bytecode-peephole-table.h
index e716aef496..1790f5a109 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-table.h
+++ b/deps/v8/src/interpreter/bytecode-peephole-table.h
@@ -19,7 +19,6 @@ namespace interpreter {
V(ElideCurrentIfOperand0MatchesAction) \
V(ElideLastAction) \
V(ChangeBytecodeAction) \
- V(TransformLdaStarToLdrLdarAction) \
V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction)
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.cc b/deps/v8/src/interpreter/bytecode-pipeline.cc
index 6e6a6b6fab..06accd75dc 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.cc
+++ b/deps/v8/src/interpreter/bytecode-pipeline.cc
@@ -11,19 +11,6 @@ namespace v8 {
namespace internal {
namespace interpreter {
-BytecodeNode::BytecodeNode(const BytecodeNode& other) {
- memcpy(this, &other, sizeof(other));
-}
-
-BytecodeNode& BytecodeNode::operator=(const BytecodeNode& other) {
- memcpy(this, &other, sizeof(other));
- return *this;
-}
-
-void BytecodeNode::Clone(const BytecodeNode* const other) {
- memcpy(this, other, sizeof(*other));
-}
-
void BytecodeNode::Print(std::ostream& os) const {
#ifdef DEBUG
std::ios saved_state(nullptr);
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.h b/deps/v8/src/interpreter/bytecode-pipeline.h
index 0b1a1f1bf3..d508defea0 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.h
+++ b/deps/v8/src/interpreter/bytecode-pipeline.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_PIPELINE_H_
#define V8_INTERPRETER_BYTECODE_PIPELINE_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
@@ -93,14 +95,6 @@ class BytecodeSourceInfo final {
source_position_ = source_position;
}
- // Clones a source position. The current instance is expected to be
- // invalid.
- void Clone(const BytecodeSourceInfo& other) {
- DCHECK(!is_valid());
- position_type_ = other.position_type_;
- source_position_ = other.source_position_;
- }
-
int source_position() const {
DCHECK(is_valid());
return source_position_;
@@ -138,81 +132,79 @@ class BytecodeSourceInfo final {
// A container for a generated bytecode, it's operands, and source information.
// These must be allocated by a BytecodeNodeAllocator instance.
-class BytecodeNode final : ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
public:
- INLINE(BytecodeNode(const Bytecode bytecode,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(0),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
- AttachSourceInfo(source_info);
}
- INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(1),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
- AttachSourceInfo(source_info);
}
- INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
- uint32_t operand1,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(2),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
- AttachSourceInfo(source_info);
}
- INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(3),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
- AttachSourceInfo(source_info);
}
- INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2, uint32_t operand3,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(4),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
SetOperand(3, operand3);
- AttachSourceInfo(source_info);
}
- BytecodeNode(const BytecodeNode& other);
- BytecodeNode& operator=(const BytecodeNode& other);
-
// Replace the bytecode of this node with |bytecode| and keep the operands.
void replace_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
Bytecodes::NumberOfOperands(bytecode));
bytecode_ = bytecode;
}
+
void set_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
bytecode_ = bytecode;
operand_count_ = 0;
operand_scale_ = OperandScale::kSingle;
}
+
void set_bytecode(Bytecode bytecode, uint32_t operand0) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
bytecode_ = bytecode;
@@ -220,6 +212,7 @@ class BytecodeNode final : ZoneObject {
operand_scale_ = OperandScale::kSingle;
SetOperand(0, operand0);
}
+
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
bytecode_ = bytecode;
@@ -228,6 +221,7 @@ class BytecodeNode final : ZoneObject {
SetOperand(0, operand0);
SetOperand(1, operand1);
}
+
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
@@ -239,9 +233,6 @@ class BytecodeNode final : ZoneObject {
SetOperand(2, operand2);
}
- // Clone |other|.
- void Clone(const BytecodeNode* const other);
-
// Print to stream |os|.
void Print(std::ostream& os) const;
@@ -266,18 +257,6 @@ class BytecodeNode final : ZoneObject {
SetOperand(operand_count() - 1, extra_operand);
}
- // Updates the operand at |operand_index| to |operand|.
- void UpdateOperand(int operand_index, uint32_t operand) {
- DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(bytecode()));
- operands_[operand_index] = operand;
- if ((Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index) &&
- Bytecodes::ScaleForSignedOperand(operand) != operand_scale_) ||
- (Bytecodes::OperandIsScalableUnsignedByte(bytecode(), operand_index) &&
- Bytecodes::ScaleForUnsignedOperand(operand) != operand_scale_)) {
- UpdateScale();
- }
- }
-
Bytecode bytecode() const { return bytecode_; }
uint32_t operand(int i) const {
@@ -290,27 +269,14 @@ class BytecodeNode final : ZoneObject {
OperandScale operand_scale() const { return operand_scale_; }
const BytecodeSourceInfo& source_info() const { return source_info_; }
- BytecodeSourceInfo* source_info_ptr() { return &source_info_; }
+ void set_source_info(BytecodeSourceInfo source_info) {
+ source_info_ = source_info;
+ }
bool operator==(const BytecodeNode& other) const;
bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private:
- INLINE(void AttachSourceInfo(BytecodeSourceInfo* source_info)) {
- if (source_info && source_info->is_valid()) {
- // Statement positions need to be emitted immediately. Expression
- // positions can be pushed back until a bytecode is found that can
- // throw (if expression position filtering is turned on). We only
- // invalidate the existing source position information if it is used.
- if (source_info->is_statement() ||
- !FLAG_ignition_filter_expression_positions ||
- !Bytecodes::IsWithoutExternalSideEffects(bytecode())) {
- source_info_.Clone(*source_info);
- source_info->set_invalid();
- }
- }
- }
-
INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
operand_scale_ =
@@ -327,13 +293,6 @@ class BytecodeNode final : ZoneObject {
UpdateScaleForOperand(operand_index, operand);
}
- void UpdateScale() {
- operand_scale_ = OperandScale::kSingle;
- for (int i = 0; i < operand_count(); i++) {
- UpdateScaleForOperand(i, operands_[i]);
- }
- }
-
Bytecode bytecode_;
uint32_t operands_[Bytecodes::kMaxOperands];
int operand_count_;
@@ -341,8 +300,10 @@ class BytecodeNode final : ZoneObject {
BytecodeSourceInfo source_info_;
};
-std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info);
-std::ostream& operator<<(std::ostream& os, const BytecodeNode& node);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const BytecodeSourceInfo& info);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const BytecodeNode& node);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index e9de4661d3..72e0133f43 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -52,6 +52,27 @@ class BytecodeRegisterAllocator final {
return reg_list;
}
+ // Returns a growable register list.
+ RegisterList NewGrowableRegisterList() {
+ RegisterList reg_list(next_register_index_, 0);
+ return reg_list;
+ }
+
+ // Appends a new register to |reg_list| increasing it's count by one and
+ // returning the register added.
+ //
+ // Note: no other new registers must be currently allocated since the register
+ // list was originally allocated.
+ Register GrowRegisterList(RegisterList* reg_list) {
+ Register reg(NewRegister());
+ reg_list->IncrementRegisterCount();
+ // If the following CHECK fails then a register was allocated (and not
+ // freed) between the creation of the RegisterList and this call to add a
+ // Register.
+ CHECK_EQ(reg.index(), reg_list->last_register().index());
+ return reg;
+ }
+
// Release all registers above |register_index|.
void ReleaseRegisters(int register_index) {
if (observer_) {
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index acbe0ba5a1..563956e5c6 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -8,7 +8,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId;
+const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId = kMaxUInt32;
// A class for tracking the state of a register. This class tracks
// which equivalence set a register is a member of and also whether a
@@ -230,81 +230,7 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
DCHECK(accumulator_info_->register_value() == accumulator_);
}
-// override
-Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) {
- FlushState();
- return next_stage_->ToBytecodeArray(isolate, max_register_index_ + 1,
- parameter_count, handler_table);
-}
-
-// override
-void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
- // Jumps are handled by WriteJump.
- DCHECK(!Bytecodes::IsJump(node->bytecode()));
- //
- // Transfers with observable registers as the destination will be
- // immediately materialized so the source position information will
- // be ordered correctly.
- //
- // Transfers without observable destination registers will initially
- // be emitted as Nop's with the source position. They may, or may
- // not, be materialized by the optimizer. However, the source
- // position is not lost and being attached to a Nop is fine as the
- // destination register is not observable in the debugger.
- //
- switch (node->bytecode()) {
- case Bytecode::kLdar: {
- DoLdar(node);
- return;
- }
- case Bytecode::kStar: {
- DoStar(node);
- return;
- }
- case Bytecode::kMov: {
- DoMov(node);
- return;
- }
- default:
- break;
- }
-
- if (node->bytecode() == Bytecode::kDebugger ||
- node->bytecode() == Bytecode::kSuspendGenerator) {
- // All state must be flushed before emitting
- // - a call to the debugger (as it can manipulate locals and parameters),
- // - a generator suspend (as this involves saving all registers).
- FlushState();
- }
-
- PrepareOperands(node);
- next_stage_->Write(node);
-}
-
-// override
-void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node,
- BytecodeLabel* label) {
- FlushState();
- next_stage_->WriteJump(node, label);
-}
-
-// override
-void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) {
- FlushState();
- next_stage_->BindLabel(label);
-}
-
-// override
-void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target,
- BytecodeLabel* label) {
- // There is no need to flush here, it will have been flushed when |target|
- // was bound.
- next_stage_->BindLabel(target, label);
-}
-
-void BytecodeRegisterOptimizer::FlushState() {
+void BytecodeRegisterOptimizer::Flush() {
if (!flush_required_) {
return;
}
@@ -332,7 +258,7 @@ void BytecodeRegisterOptimizer::FlushState() {
void BytecodeRegisterOptimizer::OutputRegisterTransfer(
RegisterInfo* input_info, RegisterInfo* output_info,
- BytecodeSourceInfo* source_info) {
+ BytecodeSourceInfo source_info) {
Register input = input_info->register_value();
Register output = output_info->register_value();
DCHECK_NE(input.index(), output.index());
@@ -404,7 +330,7 @@ void BytecodeRegisterOptimizer::AddToEquivalenceSet(
void BytecodeRegisterOptimizer::RegisterTransfer(
RegisterInfo* input_info, RegisterInfo* output_info,
- BytecodeSourceInfo* source_info) {
+ BytecodeSourceInfo source_info) {
// Materialize an alternate in the equivalence set that
// |output_info| is leaving.
if (output_info->materialized()) {
@@ -423,7 +349,7 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
output_info->set_materialized(false);
RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
OutputRegisterTransfer(materialized_info, output_info, source_info);
- } else if (source_info->is_valid()) {
+ } else if (source_info.is_valid()) {
// Emit a placeholder nop to maintain source position info.
EmitNopForSourceInfo(source_info);
}
@@ -437,60 +363,32 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
}
void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
- BytecodeSourceInfo* source_info) const {
- DCHECK(source_info->is_valid());
+ BytecodeSourceInfo source_info) const {
+ DCHECK(source_info.is_valid());
BytecodeNode nop(Bytecode::kNop, source_info);
next_stage_->Write(&nop);
}
-void BytecodeRegisterOptimizer::DoLdar(BytecodeNode* node) {
- Register input = GetRegisterInputOperand(
- 0, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* input_info = GetRegisterInfo(input);
- RegisterTransfer(input_info, accumulator_info_, node->source_info_ptr());
-}
-
-void BytecodeRegisterOptimizer::DoMov(BytecodeNode* node) {
- Register input = GetRegisterInputOperand(
- 0, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* input_info = GetRegisterInfo(input);
- Register output = GetRegisterOutputOperand(
- 1, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* output_info = GetRegisterInfo(output);
- RegisterTransfer(input_info, output_info, node->source_info_ptr());
-}
-
-void BytecodeRegisterOptimizer::DoStar(BytecodeNode* node) {
- Register output = GetRegisterOutputOperand(
- 0, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* output_info = GetRegisterInfo(output);
- RegisterTransfer(accumulator_info_, output_info, node->source_info_ptr());
-}
-
-void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
- RegisterInfo* reg_info) {
+void BytecodeRegisterOptimizer::PrepareOutputRegister(Register reg) {
+ RegisterInfo* reg_info = GetRegisterInfo(reg);
if (reg_info->materialized()) {
CreateMaterializedEquivalent(reg_info);
}
+ reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
max_register_index_ =
std::max(max_register_index_, reg_info->register_value().index());
- reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
}
-void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand(
- Register start, int count) {
- for (int i = 0; i < count; ++i) {
- Register reg(start.index() + i);
- RegisterInfo* reg_info = GetRegisterInfo(reg);
- PrepareRegisterOutputOperand(reg_info);
+void BytecodeRegisterOptimizer::PrepareOutputRegisterList(
+ RegisterList reg_list) {
+ int start_index = reg_list.first_register().index();
+ for (int i = 0; i < reg_list.register_count(); ++i) {
+ Register current(start_index + i);
+ PrepareOutputRegister(current);
}
}
-Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
- Register reg) {
- // For a temporary register, RegInfo state may need be created. For
- // locals and parameters, the RegInfo state is created in the
- // BytecodeRegisterOptimizer constructor.
+Register BytecodeRegisterOptimizer::GetInputRegister(Register reg) {
RegisterInfo* reg_info = GetRegisterInfo(reg);
if (reg_info->materialized()) {
return reg;
@@ -501,124 +399,49 @@ Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
}
}
-void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
- BytecodeNode* const node, Register reg, int operand_index) {
- Register equivalent = GetEquivalentRegisterForInputOperand(reg);
- node->UpdateOperand(operand_index,
- static_cast<uint32_t>(equivalent.ToOperand()));
-}
-
-void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
- int count) {
- for (int i = 0; i < count; ++i) {
- Register current(start.index() + i);
- RegisterInfo* input_info = GetRegisterInfo(current);
- Materialize(input_info);
+RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
+ RegisterList reg_list) {
+ if (reg_list.register_count() == 1) {
+ // If there is only a single register, treat it as a normal input register.
+ Register reg(GetInputRegister(reg_list.first_register()));
+ return RegisterList(reg.index(), 1);
+ } else {
+ int start_index = reg_list.first_register().index();
+ for (int i = 0; i < reg_list.register_count(); ++i) {
+ Register current(start_index + i);
+ RegisterInfo* input_info = GetRegisterInfo(current);
+ Materialize(input_info);
+ }
+ return reg_list;
}
}
-void BytecodeRegisterOptimizer::PrepareRegisterOperands(
- BytecodeNode* const node) {
- //
- // For each input operand, get a materialized equivalent if it is
- // just a single register, otherwise materialize register range.
- // Update operand_scale if necessary.
- //
- // For each output register about to be clobbered, materialize an
- // equivalent if it exists. Put each register in it's own equivalence set.
- //
- const uint32_t* operands = node->operands();
- int operand_count = node->operand_count();
- const OperandType* operand_types =
- Bytecodes::GetOperandTypes(node->bytecode());
- for (int i = 0; i < operand_count; ++i) {
- int count;
- if (operand_types[i] == OperandType::kRegList) {
- DCHECK_LT(i, operand_count - 1);
- DCHECK(operand_types[i + 1] == OperandType::kRegCount);
- count = static_cast<int>(operands[i + 1]);
- } else {
- count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
- }
-
- if (count == 0) {
- continue;
- }
-
- Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
- if (Bytecodes::IsRegisterInputOperandType(operand_types[i])) {
- if (count == 1) {
- PrepareRegisterInputOperand(node, reg, i);
- } else if (count > 1) {
- PrepareRegisterRangeInputOperand(reg, count);
- }
- } else if (Bytecodes::IsRegisterOutputOperandType(operand_types[i])) {
- PrepareRegisterRangeOutputOperand(reg, count);
- }
+void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) {
+ if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
+ bytecode == Bytecode::kSuspendGenerator) {
+ // All state must be flushed before emitting
+ // - a jump bytecode (as the register equivalents at the jump target aren't
+ // known.
+ // - a call to the debugger (as it can manipulate locals and parameters),
+ // - a generator suspend (as this involves saving all registers).
+ Flush();
}
-}
-void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) {
// Materialize the accumulator if it is read by the bytecode. The
// accumulator is special and no other register can be materialized
// in it's place.
- if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
+ if (Bytecodes::ReadsAccumulator(bytecode) &&
!accumulator_info_->materialized()) {
Materialize(accumulator_info_);
}
// Materialize an equivalent to the accumulator if it will be
// clobbered when the bytecode is dispatched.
- if (Bytecodes::WritesAccumulator(node->bytecode())) {
- PrepareRegisterOutputOperand(accumulator_info_);
+ if (Bytecodes::WritesAccumulator(bytecode)) {
+ PrepareOutputRegister(accumulator_);
}
}
-void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
- PrepareAccumulator(node);
- PrepareRegisterOperands(node);
-}
-
-// static
-Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
- int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
- DCHECK_LT(index, operand_count);
- DCHECK(Bytecodes::IsRegisterInputOperandType(
- Bytecodes::GetOperandType(bytecode, index)));
- return OperandToRegister(operands[index]);
-}
-
-// static
-Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
- int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
- DCHECK_LT(index, operand_count);
- DCHECK(Bytecodes::IsRegisterOutputOperandType(
- Bytecodes::GetOperandType(bytecode, index)));
- return OperandToRegister(operands[index]);
-}
-
-BytecodeRegisterOptimizer::RegisterInfo*
-BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
- size_t index = GetRegisterInfoTableIndex(reg);
- DCHECK_LT(index, register_info_table_.size());
- return register_info_table_[index];
-}
-
-BytecodeRegisterOptimizer::RegisterInfo*
-BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
- size_t index = GetRegisterInfoTableIndex(reg);
- return index < register_info_table_.size() ? register_info_table_[index]
- : NewRegisterInfo(reg);
-}
-
-BytecodeRegisterOptimizer::RegisterInfo*
-BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
- size_t index = GetRegisterInfoTableIndex(reg);
- DCHECK_GE(index, register_info_table_.size());
- GrowRegisterMap(reg);
- return register_info_table_[index];
-}
-
void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
DCHECK(RegisterIsTemporary(reg));
size_t index = GetRegisterInfoTableIndex(reg);
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index eda22e5f4d..e2a02cf594 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
#define V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-pipeline.h"
namespace v8 {
@@ -15,10 +17,9 @@ namespace interpreter {
// registers. The bytecode generator uses temporary registers
// liberally for correctness and convenience and this stage removes
// transfers that are not required and preserves correctness.
-class BytecodeRegisterOptimizer final
- : public BytecodePipelineStage,
- public BytecodeRegisterAllocator::Observer,
- public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
+ : public NON_EXPORTED_BASE(BytecodeRegisterAllocator::Observer),
+ public NON_EXPORTED_BASE(ZoneObject) {
public:
BytecodeRegisterOptimizer(Zone* zone,
BytecodeRegisterAllocator* register_allocator,
@@ -26,17 +27,44 @@ class BytecodeRegisterOptimizer final
BytecodePipelineStage* next_stage);
virtual ~BytecodeRegisterOptimizer() {}
- // BytecodePipelineStage interface.
- void Write(BytecodeNode* node) override;
- void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
- void BindLabel(BytecodeLabel* label) override;
- void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
- Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) override;
+ // Perform explicit register transfer operations.
+ void DoLdar(Register input, BytecodeSourceInfo source_info) {
+ RegisterInfo* input_info = GetRegisterInfo(input);
+ RegisterTransfer(input_info, accumulator_info_, source_info);
+ }
+ void DoStar(Register output, BytecodeSourceInfo source_info) {
+ RegisterInfo* output_info = GetRegisterInfo(output);
+ RegisterTransfer(accumulator_info_, output_info, source_info);
+ }
+ void DoMov(Register input, Register output, BytecodeSourceInfo source_info) {
+ RegisterInfo* input_info = GetRegisterInfo(input);
+ RegisterInfo* output_info = GetRegisterInfo(output);
+ RegisterTransfer(input_info, output_info, source_info);
+ }
+
+ // Materialize all live registers and flush equivalence sets.
+ void Flush();
+
+ // Prepares for |bytecode|.
+ void PrepareForBytecode(Bytecode bytecode);
+
+ // Prepares |reg| for being used as an output operand.
+ void PrepareOutputRegister(Register reg);
+
+ // Prepares registers in |reg_list| for being used as an output operand.
+ void PrepareOutputRegisterList(RegisterList reg_list);
+
+ // Returns an equivalent register to |reg| to be used as an input operand.
+ Register GetInputRegister(Register reg);
+
+ // Returns an equivalent register list to |reg_list| to be used as an input
+ // operand.
+ RegisterList GetInputRegisterList(RegisterList reg_list);
+
+ int maxiumum_register_index() const { return max_register_index_; }
private:
- static const uint32_t kInvalidEquivalenceId = kMaxUInt32;
+ static const uint32_t kInvalidEquivalenceId;
class RegisterInfo;
@@ -45,48 +73,20 @@ class BytecodeRegisterOptimizer final
void RegisterListAllocateEvent(RegisterList reg_list) override;
void RegisterListFreeEvent(RegisterList reg) override;
- // Helpers for BytecodePipelineStage interface.
- void FlushState();
-
// Update internal state for register transfer from |input| to
// |output| using |source_info| as source position information if
// any bytecodes are emitted due to transfer.
void RegisterTransfer(RegisterInfo* input, RegisterInfo* output,
- BytecodeSourceInfo* source_info);
+ BytecodeSourceInfo source_info);
// Emit a register transfer bytecode from |input| to |output|.
- void OutputRegisterTransfer(RegisterInfo* input, RegisterInfo* output,
- BytecodeSourceInfo* source_info = nullptr);
+ void OutputRegisterTransfer(
+ RegisterInfo* input, RegisterInfo* output,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo());
// Emits a Nop to preserve source position information in the
// bytecode pipeline.
- void EmitNopForSourceInfo(BytecodeSourceInfo* source_info) const;
-
- // Handlers for bytecode nodes for register to register transfers.
- void DoLdar(BytecodeNode* node);
- void DoMov(BytecodeNode* node);
- void DoStar(BytecodeNode* node);
-
- // Operand processing methods for bytecodes other than those
- // performing register to register transfers.
- void PrepareOperands(BytecodeNode* const node);
- void PrepareAccumulator(BytecodeNode* const node);
- void PrepareRegisterOperands(BytecodeNode* const node);
-
- void PrepareRegisterOutputOperand(RegisterInfo* reg_info);
- void PrepareRegisterRangeOutputOperand(Register start, int count);
- void PrepareRegisterInputOperand(BytecodeNode* const node, Register reg,
- int operand_index);
- void PrepareRegisterRangeInputOperand(Register start, int count);
-
- Register GetEquivalentRegisterForInputOperand(Register reg);
-
- static Register GetRegisterInputOperand(int index, Bytecode bytecode,
- const uint32_t* operands,
- int operand_count);
- static Register GetRegisterOutputOperand(int index, Bytecode bytecode,
- const uint32_t* operands,
- int operand_count);
+ void EmitNopForSourceInfo(BytecodeSourceInfo source_info) const;
void CreateMaterializedEquivalent(RegisterInfo* info);
RegisterInfo* GetMaterializedEquivalent(RegisterInfo* info);
@@ -96,9 +96,23 @@ class BytecodeRegisterOptimizer final
RegisterInfo* non_set_member);
// Methods for finding and creating metadata for each register.
- RegisterInfo* GetOrCreateRegisterInfo(Register reg);
- RegisterInfo* GetRegisterInfo(Register reg);
- RegisterInfo* NewRegisterInfo(Register reg);
+ RegisterInfo* GetRegisterInfo(Register reg) {
+ size_t index = GetRegisterInfoTableIndex(reg);
+ DCHECK_LT(index, register_info_table_.size());
+ return register_info_table_[index];
+ }
+ RegisterInfo* GetOrCreateRegisterInfo(Register reg) {
+ size_t index = GetRegisterInfoTableIndex(reg);
+ return index < register_info_table_.size() ? register_info_table_[index]
+ : NewRegisterInfo(reg);
+ }
+ RegisterInfo* NewRegisterInfo(Register reg) {
+ size_t index = GetRegisterInfoTableIndex(reg);
+ DCHECK_GE(index, register_info_table_.size());
+ GrowRegisterMap(reg);
+ return register_info_table_[index];
+ }
+
void GrowRegisterMap(Register reg);
bool RegisterIsTemporary(Register reg) const {
@@ -123,7 +137,8 @@ class BytecodeRegisterOptimizer final
uint32_t NextEquivalenceId() {
equivalence_id_++;
- CHECK_NE(equivalence_id_, kInvalidEquivalenceId);
+ // TODO(rmcilroy): use the same type for these and remove static_cast.
+ CHECK_NE(static_cast<size_t>(equivalence_id_), kInvalidEquivalenceId);
return equivalence_id_;
}
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index d698d4049d..554bc23a5b 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -8,6 +8,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/frames.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -15,7 +16,7 @@ namespace interpreter {
// An interpreter Register which is located in the function's Register file
// in its stack-frame. Register hold parameters, this, and expression values.
-class Register final {
+class V8_EXPORT_PRIVATE Register final {
public:
explicit Register(int index = kInvalidIndex) : index_(index) {}
@@ -104,6 +105,9 @@ class RegisterList {
RegisterList(int first_reg_index, int register_count)
: first_reg_index_(first_reg_index), register_count_(register_count) {}
+ // Increases the size of the register list by one.
+ void IncrementRegisterCount() { register_count_++; }
+
// Returns a new RegisterList which is a truncated version of this list, with
// |count| registers.
const RegisterList Truncate(int new_count) {
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index c58f4685a2..15c4e98a02 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -211,6 +211,12 @@ bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
case Bytecode::kLdaNull:
case Bytecode::kLdaTheHole:
case Bytecode::kLdaConstant:
+ case Bytecode::kLdaUndefined:
+ case Bytecode::kLdaGlobal:
+ case Bytecode::kLdaNamedProperty:
+ case Bytecode::kLdaKeyedProperty:
+ case Bytecode::kLdaContextSlot:
+ case Bytecode::kLdaCurrentContextSlot:
case Bytecode::kAdd:
case Bytecode::kSub:
case Bytecode::kMul:
@@ -220,6 +226,7 @@ bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
case Bytecode::kDec:
case Bytecode::kTypeOf:
case Bytecode::kCall:
+ case Bytecode::kCallProperty:
case Bytecode::kNew:
return true;
default:
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 6232966bbc..23d77f0c33 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -37,12 +37,8 @@ namespace interpreter {
V(LdaFalse, AccumulatorUse::kWrite) \
V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx) \
\
- /* Loading registers */ \
- V(LdrUndefined, AccumulatorUse::kNone, OperandType::kRegOut) \
- \
/* Globals */ \
V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(LdrGlobal, AccumulatorUse::kNone, OperandType::kIdx, OperandType::kRegOut) \
V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx, \
OperandType::kIdx) \
@@ -54,10 +50,10 @@ namespace interpreter {
V(PopContext, AccumulatorUse::kNone, OperandType::kReg) \
V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
- V(LdrContextSlot, AccumulatorUse::kNone, OperandType::kReg, \
- OperandType::kIdx, OperandType::kUImm, OperandType::kRegOut) \
+ V(LdaCurrentContextSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
+ V(StaCurrentContextSlot, AccumulatorUse::kRead, OperandType::kIdx) \
\
/* Load-Store lookup slots */ \
V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
@@ -83,12 +79,14 @@ namespace interpreter {
/* Property loads (LoadIC) operations */ \
V(LdaNamedProperty, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
- V(LdrNamedProperty, AccumulatorUse::kNone, OperandType::kReg, \
- OperandType::kIdx, OperandType::kIdx, OperandType::kRegOut) \
V(LdaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx) \
- V(LdrKeyedProperty, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kIdx, OperandType::kRegOut) \
+ \
+ /* Operations on module variables */ \
+ V(LdaModuleVariable, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kUImm) \
+ V(StaModuleVariable, AccumulatorUse::kRead, OperandType::kImm, \
+ OperandType::kUImm) \
\
/* Propery stores (StoreIC) operations */ \
V(StaNamedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg, \
@@ -145,6 +143,8 @@ namespace interpreter {
/* Call operations */ \
V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kRegList, \
OperandType::kRegCount, OperandType::kIdx) \
+ V(CallProperty, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
@@ -314,7 +314,7 @@ enum class Bytecode : uint8_t {
#define CONSTEXPR constexpr
#endif
-class Bytecodes final {
+class V8_EXPORT_PRIVATE Bytecodes final {
public:
// The maximum number of operands a bytecode may have.
static const int kMaxOperands = 4;
@@ -422,15 +422,16 @@ class Bytecodes final {
bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
bytecode == Bytecode::kLdaUndefined ||
bytecode == Bytecode::kLdaTheHole ||
- bytecode == Bytecode::kLdaConstant;
+ bytecode == Bytecode::kLdaConstant ||
+ bytecode == Bytecode::kLdaContextSlot ||
+ bytecode == Bytecode::kLdaCurrentContextSlot;
}
// Return true if |bytecode| is a register load without effects,
- // e.g. Mov, Star, LdrUndefined.
+ // e.g. Mov, Star.
static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
- bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar ||
- bytecode == Bytecode::kLdrUndefined;
+ bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar;
}
// Returns true if the bytecode is a conditional jump taking
@@ -525,8 +526,8 @@ class Bytecodes final {
// Returns true if the bytecode is a call or a constructor call.
static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) {
- return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
- bytecode == Bytecode::kNew;
+ return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
+ bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew;
}
// Returns true if the bytecode is a call to the runtime.
@@ -733,7 +734,8 @@ class Bytecodes final {
// See crbug.com/603131.
#undef CONSTEXPR
-std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const Bytecode& bytecode);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 78d36f5044..8e95913e57 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -5,6 +5,7 @@
#ifndef V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
#define V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+#include "src/globals.h"
#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
#include "src/zone/zone-containers.h"
@@ -20,7 +21,7 @@ namespace interpreter {
// interpreter. Each instance of this class is intended to be used to
// generate exactly one FixedArray of constants via the ToFixedArray
// method.
-class ConstantArrayBuilder final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
public:
// Capacity of the 8-bit operand slice.
static const size_t k8BitCapacity = 1u << kBitsPerByte;
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 5767ffa8a5..c8ce5539e9 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -97,17 +97,17 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
Label context_search(this, 2, context_search_loop_variables);
// Fast path if the depth is 0.
- BranchIfWord32Equal(depth, Int32Constant(0), &context_found, &context_search);
+ Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
// Loop until the depth is 0.
Bind(&context_search);
{
cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
cur_context.Bind(
- LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+ LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
- BranchIfWord32Equal(cur_depth.value(), Int32Constant(0), &context_found,
- &context_search);
+ Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
+ &context_search);
}
Bind(&context_found);
@@ -135,14 +135,14 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
// contexts actually need to be checked.
Node* extension_slot =
- LoadContextSlot(cur_context.value(), Context::EXTENSION_INDEX);
+ LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
// Jump to the target if the extension slot is not a hole.
GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
cur_context.Bind(
- LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+ LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
&context_search);
@@ -485,26 +485,6 @@ Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
}
}
-Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
- return Load(MachineType::AnyTagged(), context,
- IntPtrConstant(Context::SlotOffset(slot_index)));
-}
-
-Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
- Node* offset =
- IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
- IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
- return Load(MachineType::AnyTagged(), context, offset);
-}
-
-Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
- Node* value) {
- Node* offset =
- IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
- IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
- return Store(MachineRepresentation::kTagged, context, offset, value);
-}
-
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
Node* function = LoadRegister(Register::function_closure());
Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
@@ -566,28 +546,22 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
Variable return_value(this, MachineRepresentation::kTagged);
- Label handle_monomorphic(this), extra_checks(this), end(this), call(this),
- call_function(this), call_without_feedback(this);
-
- // Slot id of 0 is used to indicate no typefeedback is available. Call using
- // call builtin.
- STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
- Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
- GotoIf(is_feedback_unavailable, &call_without_feedback);
+ Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
+ end(this);
// The checks. First, does function match the recorded monomorphic target?
Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
- Node* feedback_value = LoadWeakCellValue(feedback_element);
+ Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
Node* is_monomorphic = WordEqual(function, feedback_value);
- BranchIf(is_monomorphic, &handle_monomorphic, &extra_checks);
+ GotoUnless(is_monomorphic, &extra_checks);
- Bind(&handle_monomorphic);
- {
- // The compare above could have been a SMI/SMI comparison. Guard against
- // this convincing us that we have a monomorphic JSFunction.
- Node* is_smi = WordIsSmi(function);
- GotoIf(is_smi, &extra_checks);
+ // The compare above could have been a SMI/SMI comparison. Guard against
+ // this convincing us that we have a monomorphic JSFunction.
+ Node* is_smi = TaggedIsSmi(function);
+ Branch(is_smi, &extra_checks, &call_function);
+ Bind(&call_function);
+ {
// Increment the call count.
IncrementCallCount(type_feedback_vector, slot_id);
@@ -603,56 +577,56 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Bind(&extra_checks);
{
- Label check_initialized(this, Label::kDeferred), mark_megamorphic(this),
- check_allocation_site(this),
- create_allocation_site(this, Label::kDeferred);
- // Check if it is a megamorphic target
+ Label check_initialized(this), mark_megamorphic(this),
+ create_allocation_site(this);
+
+ Comment("check if megamorphic");
+ // Check if it is a megamorphic target.
Node* is_megamorphic = WordEqual(
feedback_element,
HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- BranchIf(is_megamorphic, &call, &check_allocation_site);
+ GotoIf(is_megamorphic, &call);
- Bind(&check_allocation_site);
- {
- Node* is_allocation_site =
- WordEqual(LoadMap(feedback_element),
- LoadRoot(Heap::kAllocationSiteMapRootIndex));
- GotoUnless(is_allocation_site, &check_initialized);
+ Comment("check if it is an allocation site");
+ Node* is_allocation_site = WordEqual(
+ LoadMap(feedback_element), LoadRoot(Heap::kAllocationSiteMapRootIndex));
+ GotoUnless(is_allocation_site, &check_initialized);
- // If it is not the Array() function, mark megamorphic.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
- Node* is_array_function = WordEqual(context_slot, function);
- GotoUnless(is_array_function, &mark_megamorphic);
+ // If it is not the Array() function, mark megamorphic.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, function);
+ GotoUnless(is_array_function, &mark_megamorphic);
- // It is a monomorphic Array function. Increment the call count.
- IncrementCallCount(type_feedback_vector, slot_id);
-
- // Call ArrayConstructorStub.
- Callable callable_call =
- CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
- Node* code_target_call = HeapConstant(callable_call.code());
- Node* ret_value =
- CallStub(callable_call.descriptor(), code_target_call, context,
- arg_count, function, feedback_element, first_arg);
- return_value.Bind(ret_value);
- Goto(&end);
- }
+ // It is a monomorphic Array function. Increment the call count.
+ IncrementCallCount(type_feedback_vector, slot_id);
+
+ // Call ArrayConstructorStub.
+ Callable callable_call =
+ CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
+ Node* code_target_call = HeapConstant(callable_call.code());
+ Node* ret_value =
+ CallStub(callable_call.descriptor(), code_target_call, context,
+ arg_count, function, feedback_element, first_arg);
+ return_value.Bind(ret_value);
+ Goto(&end);
Bind(&check_initialized);
{
- Label possibly_monomorphic(this);
- // Check if it is uninitialized.
+ Comment("check if uninitialized");
+ // Check if it is uninitialized target first.
Node* is_uninitialized = WordEqual(
feedback_element,
HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
GotoUnless(is_uninitialized, &mark_megamorphic);
- Node* is_smi = WordIsSmi(function);
+ Comment("handle_unitinitialized");
+ // If it is not a JSFunction mark it as megamorphic.
+ Node* is_smi = TaggedIsSmi(function);
GotoIf(is_smi, &mark_megamorphic);
- // Check if function is an object of JSFunction type
+ // Check if function is an object of JSFunction type.
Node* instance_type = LoadInstanceType(function);
Node* is_js_function =
WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
@@ -665,7 +639,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* is_array_function = WordEqual(context_slot, function);
GotoIf(is_array_function, &create_allocation_site);
- // Check if the function belongs to the same native context
+ // Check if the function belongs to the same native context.
Node* native_context = LoadNativeContext(
LoadObjectField(function, JSFunction::kContextOffset));
Node* is_same_native_context =
@@ -704,22 +678,9 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
}
}
- Bind(&call_function);
- {
- // Increment the call count.
- IncrementCallCount(type_feedback_vector, slot_id);
-
- Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
- isolate(), tail_call_mode, CallableType::kJSFunction);
- Node* code_target_call = HeapConstant(callable_call.code());
- Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
- context, arg_count, first_arg, function);
- return_value.Bind(ret_value);
- Goto(&end);
- }
-
Bind(&call);
{
+ Comment("Increment call count and call using Call builtin");
// Increment the call count.
IncrementCallCount(type_feedback_vector, slot_id);
@@ -733,18 +694,6 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Goto(&end);
}
- Bind(&call_without_feedback);
- {
- // Call using call builtin.
- Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
- isolate(), tail_call_mode, CallableType::kAny);
- Node* code_target_call = HeapConstant(callable_call.code());
- Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
- context, arg_count, first_arg, function);
- return_value.Bind(ret_value);
- Goto(&end);
- }
-
Bind(&end);
return return_value.value();
}
@@ -763,10 +712,10 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Node* new_target, Node* first_arg,
Node* arg_count, Node* slot_id,
Node* type_feedback_vector) {
- Label call_construct(this), js_function(this), end(this);
Variable return_value(this, MachineRepresentation::kTagged);
Variable allocation_feedback(this, MachineRepresentation::kTagged);
- allocation_feedback.Bind(UndefinedConstant());
+ Label call_construct_function(this, &allocation_feedback),
+ extra_checks(this, Label::kDeferred), call_construct(this), end(this);
// Slot id of 0 is used to indicate no type feedback is available.
STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
@@ -774,139 +723,125 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
GotoIf(is_feedback_unavailable, &call_construct);
// Check that the constructor is not a smi.
- Node* is_smi = WordIsSmi(constructor);
+ Node* is_smi = TaggedIsSmi(constructor);
GotoIf(is_smi, &call_construct);
// Check that constructor is a JSFunction.
Node* instance_type = LoadInstanceType(constructor);
Node* is_js_function =
WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
- BranchIf(is_js_function, &js_function, &call_construct);
+ GotoUnless(is_js_function, &call_construct);
- Bind(&js_function);
+ // Check if it is a monomorphic constructor.
+ Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
+ Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
+ Node* is_monomorphic = WordEqual(constructor, feedback_value);
+ allocation_feedback.Bind(UndefinedConstant());
+ Branch(is_monomorphic, &call_construct_function, &extra_checks);
+
+ Bind(&call_construct_function);
+ {
+ Comment("call using callConstructFunction");
+ IncrementCallCount(type_feedback_vector, slot_id);
+ Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
+ isolate(), CallableType::kJSFunction);
+ return_value.Bind(CallStub(callable_function.descriptor(),
+ HeapConstant(callable_function.code()), context,
+ arg_count, new_target, constructor,
+ allocation_feedback.value(), first_arg));
+ Goto(&end);
+ }
+
+ Bind(&extra_checks);
{
- // Cache the called function in a feedback vector slot. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // TODO(mythria/v8:5210): Check if it is better to mark extra_checks as a
- // deferred block so that call_construct_function will be scheduled.
- Label extra_checks(this), call_construct_function(this);
-
- Node* feedback_element =
- LoadFixedArrayElement(type_feedback_vector, slot_id);
- Node* feedback_value = LoadWeakCellValue(feedback_element);
- Node* is_monomorphic = WordEqual(constructor, feedback_value);
- BranchIf(is_monomorphic, &call_construct_function, &extra_checks);
-
- Bind(&extra_checks);
+ Label check_allocation_site(this), check_initialized(this),
+ initialize(this), mark_megamorphic(this);
+
+ // Check if it is a megamorphic target.
+ Comment("check if megamorphic");
+ Node* is_megamorphic = WordEqual(
+ feedback_element,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ GotoIf(is_megamorphic, &call_construct_function);
+
+ Comment("check if weak cell");
+ Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
+ LoadRoot(Heap::kWeakCellMapRootIndex));
+ GotoUnless(is_weak_cell, &check_allocation_site);
+
+ // If the weak cell is cleared, we have a new chance to become
+ // monomorphic.
+ Comment("check if weak cell is cleared");
+ Node* is_smi = TaggedIsSmi(feedback_value);
+ Branch(is_smi, &initialize, &mark_megamorphic);
+
+ Bind(&check_allocation_site);
{
- Label mark_megamorphic(this), initialize(this),
- check_allocation_site(this), check_initialized(this),
- set_alloc_feedback_and_call(this);
- {
- // Check if it is a megamorphic target
- Comment("check if megamorphic");
- Node* is_megamorphic = WordEqual(
- feedback_element,
- HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- GotoIf(is_megamorphic, &call_construct_function);
-
- Comment("check if weak cell");
- Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
- LoadRoot(Heap::kWeakCellMapRootIndex));
- GotoUnless(is_weak_cell, &check_allocation_site);
- // If the weak cell is cleared, we have a new chance to become
- // monomorphic.
- Comment("check if weak cell is cleared");
- Node* is_smi = WordIsSmi(feedback_value);
- BranchIf(is_smi, &initialize, &mark_megamorphic);
- }
+ Comment("check if it is an allocation site");
+ Node* is_allocation_site =
+ WordEqual(LoadObjectField(feedback_element, 0),
+ LoadRoot(Heap::kAllocationSiteMapRootIndex));
+ GotoUnless(is_allocation_site, &check_initialized);
- Bind(&check_allocation_site);
- {
- Comment("check if it is an allocation site");
- Node* is_allocation_site =
- WordEqual(LoadObjectField(feedback_element, 0),
- LoadRoot(Heap::kAllocationSiteMapRootIndex));
- GotoUnless(is_allocation_site, &check_initialized);
-
- // Make sure the function is the Array() function
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
- Node* is_array_function = WordEqual(context_slot, constructor);
- BranchIf(is_array_function, &set_alloc_feedback_and_call,
- &mark_megamorphic);
- }
+ // Make sure the function is the Array() function.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, constructor);
+ GotoUnless(is_array_function, &mark_megamorphic);
- Bind(&set_alloc_feedback_and_call);
- {
- allocation_feedback.Bind(feedback_element);
- Goto(&call_construct_function);
- }
+ allocation_feedback.Bind(feedback_element);
+ Goto(&call_construct_function);
+ }
- Bind(&check_initialized);
- {
- // Check if it is uninitialized.
- Comment("check if uninitialized");
- Node* is_uninitialized = WordEqual(
- feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
- BranchIf(is_uninitialized, &initialize, &mark_megamorphic);
- }
+ Bind(&check_initialized);
+ {
+ // Check if it is uninitialized.
+ Comment("check if uninitialized");
+ Node* is_uninitialized = WordEqual(
+ feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
+ Branch(is_uninitialized, &initialize, &mark_megamorphic);
+ }
- Bind(&initialize);
- {
- Label create_weak_cell(this), create_allocation_site(this);
- Comment("initialize the feedback element");
- // Check that it is the Array() function.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
- Node* is_array_function = WordEqual(context_slot, constructor);
- BranchIf(is_array_function, &create_allocation_site, &create_weak_cell);
-
- Bind(&create_allocation_site);
- {
- Node* site = CreateAllocationSiteInFeedbackVector(
- type_feedback_vector, SmiTag(slot_id));
- allocation_feedback.Bind(site);
- Goto(&call_construct_function);
- }
+ Bind(&initialize);
+ {
+ Label create_allocation_site(this), create_weak_cell(this);
+ Comment("initialize the feedback element");
+ // Create an allocation site if the function is an array function,
+ // otherwise create a weak cell.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, constructor);
+ Branch(is_array_function, &create_allocation_site, &create_weak_cell);
- Bind(&create_weak_cell);
- {
- CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
- constructor);
- Goto(&call_construct_function);
- }
+ Bind(&create_allocation_site);
+ {
+ Node* site = CreateAllocationSiteInFeedbackVector(type_feedback_vector,
+ SmiTag(slot_id));
+ allocation_feedback.Bind(site);
+ Goto(&call_construct_function);
}
- Bind(&mark_megamorphic);
+ Bind(&create_weak_cell);
{
- // MegamorphicSentinel is an immortal immovable object so
- // write-barrier is not needed.
- Comment("transition to megamorphic");
- DCHECK(
- Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
- StoreFixedArrayElement(
- type_feedback_vector, slot_id,
- HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
- SKIP_WRITE_BARRIER);
+ CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
+ constructor);
Goto(&call_construct_function);
}
}
- Bind(&call_construct_function);
+ Bind(&mark_megamorphic);
{
- Comment("call using callConstructFunction");
- IncrementCallCount(type_feedback_vector, slot_id);
- Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
- isolate(), CallableType::kJSFunction);
- return_value.Bind(CallStub(callable_function.descriptor(),
- HeapConstant(callable_function.code()),
- context, arg_count, new_target, constructor,
- allocation_feedback.value(), first_arg));
- Goto(&end);
+ // MegamorphicSentinel is an immortal immovable object so
+ // write-barrier is not needed.
+ Comment("transition to megamorphic");
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+ StoreFixedArrayElement(
+ type_feedback_vector, slot_id,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
+ SKIP_WRITE_BARRIER);
+ Goto(&call_construct_function);
}
}
@@ -1007,7 +942,7 @@ Node* InterpreterAssembler::Jump(Node* delta) {
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
Label match(this), no_match(this);
- BranchIf(condition, &match, &no_match);
+ Branch(condition, &match, &no_match);
Bind(&match);
Jump(delta);
Bind(&no_match);
@@ -1035,12 +970,12 @@ Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
Label do_inline_star(this), done(this);
- Variable var_bytecode(this, MachineRepresentation::kWord8);
+ Variable var_bytecode(this, MachineType::PointerRepresentation());
var_bytecode.Bind(target_bytecode);
Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
Node* is_star = WordEqual(target_bytecode, star_bytecode);
- BranchIf(is_star, &do_inline_star, &done);
+ Branch(is_star, &do_inline_star, &done);
Bind(&do_inline_star);
{
@@ -1161,7 +1096,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Check if the {value} is a Smi or a HeapObject.
Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
Bind(&if_valueissmi);
{
@@ -1178,7 +1113,8 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Check if {value} is a HeapNumber.
Label if_valueisheapnumber(this),
if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+ Node* value_map = LoadMap(value);
+ Branch(WordEqual(value_map, HeapNumberMapConstant()),
&if_valueisheapnumber, &if_valueisnotheapnumber);
Bind(&if_valueisheapnumber);
@@ -1193,11 +1129,36 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Bind(&if_valueisnotheapnumber);
{
- // Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
- var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
- Goto(&loop);
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(this,
+ Word32Equal(var_type_feedback->value(),
+ Int32Constant(BinaryOperationFeedback::kNone)));
+
+ Label if_valueisoddball(this),
+ if_valueisnotoddball(this, Label::kDeferred);
+ Node* is_oddball = Word32Equal(LoadMapInstanceType(value_map),
+ Int32Constant(ODDBALL_TYPE));
+ Branch(is_oddball, &if_valueisoddball, &if_valueisnotoddball);
+
+ Bind(&if_valueisoddball);
+ {
+ // Convert Oddball to a Number and perform checks again.
+ var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+ var_type_feedback->Bind(
+ Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ Goto(&loop);
+ }
+
+ Bind(&if_valueisnotoddball);
+ {
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
+ Goto(&loop);
+ }
}
}
}
@@ -1241,7 +1202,7 @@ void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) {
Label ok(this), abort(this, Label::kDeferred);
- BranchIfWordEqual(lhs, rhs, &ok, &abort);
+ Branch(WordEqual(lhs, rhs), &ok, &abort);
Bind(&abort);
Abort(bailout_reason);
@@ -1271,7 +1232,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
Node* counter_reached_max = WordEqual(
old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
- BranchIf(counter_reached_max, &counter_saturated, &counter_ok);
+ Branch(counter_reached_max, &counter_saturated, &counter_ok);
Bind(&counter_ok);
{
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 9dda20af48..aefd2bc053 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -9,6 +9,7 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frames.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
@@ -17,7 +18,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class InterpreterAssembler : public CodeStubAssembler {
+class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
public:
InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
OperandScale operand_scale);
@@ -92,15 +93,6 @@ class InterpreterAssembler : public CodeStubAssembler {
// Load and untag constant at |index| in the constant pool.
compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
- // Load |slot_index| from |context|.
- compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
- compiler::Node* LoadContextSlot(compiler::Node* context,
- compiler::Node* slot_index);
- // Stores |value| into |slot_index| of |context|.
- compiler::Node* StoreContextSlot(compiler::Node* context,
- compiler::Node* slot_index,
- compiler::Node* value);
-
// Load the TypeFeedbackVector for the current function.
compiler::Node* LoadTypeFeedbackVector();
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.cc b/deps/v8/src/interpreter/interpreter-intrinsics.cc
index 600b9c086f..b46ca878cc 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.cc
@@ -125,7 +125,7 @@ Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
return_false(assembler_), end(assembler_);
Node* arg = __ LoadRegister(input);
- __ GotoIf(__ WordIsSmi(arg), &return_false);
+ __ GotoIf(__ TaggedIsSmi(arg), &return_false);
Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
__ Branch(condition, &return_true, &return_false);
@@ -154,7 +154,7 @@ Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count,
end(assembler_);
Node* arg = __ LoadRegister(input);
- __ GotoIf(__ WordIsSmi(arg), &return_false);
+ __ GotoIf(__ TaggedIsSmi(arg), &return_false);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
@@ -202,7 +202,7 @@ Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
Node* arg = __ LoadRegister(input);
- __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
+ __ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi);
__ Bind(&if_smi);
{
return_value.Bind(__ BooleanConstant(true));
@@ -249,12 +249,6 @@ Node* IntrinsicsHelper::NumberToString(Node* input, Node* arg_count,
CodeFactory::NumberToString(isolate()));
}
-Node* IntrinsicsHelper::RegExpConstructResult(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsStubCall(input, context,
- CodeFactory::RegExpConstructResult(isolate()));
-}
-
Node* IntrinsicsHelper::RegExpExec(Node* input, Node* arg_count,
Node* context) {
return IntrinsicAsStubCall(input, context,
@@ -321,7 +315,7 @@ Node* IntrinsicsHelper::ValueOf(Node* args_reg, Node* arg_count,
return_value.Bind(object);
// If the object is a smi return the object.
- __ GotoIf(__ WordIsSmi(object), &done);
+ __ GotoIf(__ TaggedIsSmi(object), &done);
// If the object is not a value type, return the object.
Node* condition =
@@ -346,7 +340,7 @@ Node* IntrinsicsHelper::ClassOf(Node* args_reg, Node* arg_count,
Node* object = __ LoadRegister(args_reg);
// If the object is not a JSReceiver, we return null.
- __ GotoIf(__ WordIsSmi(object), &null);
+ __ GotoIf(__ TaggedIsSmi(object), &null);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Node* is_js_receiver = CompareInstanceType(object, FIRST_JS_RECEIVER_TYPE,
kInstanceTypeGreaterThanOrEqual);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 11fe4a0a8e..70ff291df3 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -35,7 +35,6 @@ namespace interpreter {
V(IsTypedArray, is_typed_array, 1) \
V(NewObject, new_object, 2) \
V(NumberToString, number_to_string, 1) \
- V(RegExpConstructResult, reg_exp_construct_result, 3) \
V(RegExpExec, reg_exp_exec, 4) \
V(SubString, sub_string, 3) \
V(ToString, to_string, 1) \
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 410030247f..81aecafecf 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -53,8 +53,8 @@ Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
}
void Interpreter::Initialize() {
- if (IsDispatchTableInitialized()) return;
- Zone zone(isolate_->allocator());
+ if (!ShouldInitializeDispatchTable()) return;
+ Zone zone(isolate_->allocator(), ZONE_NAME);
HandleScope scope(isolate_);
if (FLAG_trace_ignition_dispatches) {
@@ -103,6 +103,9 @@ void Interpreter::Initialize() {
dispatch_table_[index] = dispatch_table_[illegal_index];
}
}
+
+ // Initialization should have been successful.
+ DCHECK(IsDispatchTableInitialized());
}
Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
@@ -197,6 +200,8 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
return FAILED;
}
+ CodeGenerator::MakeCodePrologue(info(), "interpreter");
+
if (FLAG_print_bytecode) {
OFStream os(stdout);
bytecodes->Print(os);
@@ -213,13 +218,17 @@ CompilationJob* Interpreter::NewCompilationJob(CompilationInfo* info) {
}
bool Interpreter::IsDispatchTableInitialized() {
+ return dispatch_table_[0] != nullptr;
+}
+
+bool Interpreter::ShouldInitializeDispatchTable() {
if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
FLAG_trace_ignition_dispatches) {
// Regenerate table to add bytecode tracing operations, print the assembly
// code generated by TurboFan or instrument handlers with dispatch counters.
- return false;
+ return true;
}
- return dispatch_table_[0] != nullptr;
+ return !IsDispatchTableInitialized();
}
void Interpreter::TraceCodegen(Handle<Code> code) {
@@ -343,17 +352,6 @@ void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// LdrUndefined <reg>
-//
-// Loads undefined into the accumulator and |reg|.
-void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
- Node* undefined_value =
- __ HeapConstant(isolate_->factory()->undefined_value());
- Node* destination = __ BytecodeOperandReg(0);
- __ StoreRegister(undefined_value, destination);
- __ Dispatch();
-}
-
// LdaNull
//
// Load Null into the accumulator.
@@ -451,23 +449,6 @@ void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// LdrGlobal <slot> <reg>
-//
-// Load the global with name in constant pool entry <name_index> into
-// register <reg> using FeedBackVector slot <slot> outside of a typeof.
-void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
-
- Node* context = __ GetContext();
-
- Node* raw_slot = __ BytecodeOperandIdx(0);
- Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
- Node* destination = __ BytecodeOperandReg(1);
- __ StoreRegister(result, destination);
- __ Dispatch();
-}
-
// LdaGlobalInsideTypeof <slot>
//
// Load the global with name in constant pool entry <name_index> into the
@@ -488,9 +469,9 @@ void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
typedef StoreWithVectorDescriptor Descriptor;
// Get the global object.
Node* context = __ GetContext();
- Node* native_context =
- __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
- Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
+ Node* native_context = __ LoadNativeContext(context);
+ Node* global =
+ __ LoadContextElement(native_context, Context::EXTENSION_INDEX);
// Store the global via the StoreIC.
Node* code_target = __ HeapConstant(ic.code());
@@ -525,34 +506,29 @@ void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
DoStaGlobal(ic, assembler);
}
-compiler::Node* Interpreter::BuildLoadContextSlot(
- InterpreterAssembler* assembler) {
- Node* reg_index = __ BytecodeOperandReg(0);
- Node* context = __ LoadRegister(reg_index);
- Node* slot_index = __ BytecodeOperandIdx(1);
- Node* depth = __ BytecodeOperandUImm(2);
- Node* slot_context = __ GetContextAtDepth(context, depth);
- return __ LoadContextSlot(slot_context, slot_index);
-}
-
// LdaContextSlot <context> <slot_index> <depth>
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
- Node* result = BuildLoadContextSlot(assembler);
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* depth = __ BytecodeOperandUImm(2);
+ Node* slot_context = __ GetContextAtDepth(context, depth);
+ Node* result = __ LoadContextElement(slot_context, slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrContextSlot <context> <slot_index> <depth> <reg>
+// LdaCurrentContextSlot <slot_index>
//
-// Load the object in |slot_index| of the context at |depth| in the context
-// chain of |context| into register |reg|.
-void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
- Node* result = BuildLoadContextSlot(assembler);
- Node* destination = __ BytecodeOperandReg(3);
- __ StoreRegister(result, destination);
+// Load the object in |slot_index| of the current context into the accumulator.
+void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* slot_context = __ GetContext();
+ Node* result = __ LoadContextElement(slot_context, slot_index);
+ __ SetAccumulator(result);
__ Dispatch();
}
@@ -567,7 +543,19 @@ void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
Node* slot_index = __ BytecodeOperandIdx(1);
Node* depth = __ BytecodeOperandUImm(2);
Node* slot_context = __ GetContextAtDepth(context, depth);
- __ StoreContextSlot(slot_context, slot_index, value);
+ __ StoreContextElement(slot_context, slot_index, value);
+ __ Dispatch();
+}
+
+// StaCurrentContextSlot <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of the current
+// context.
+void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* slot_context = __ GetContext();
+ __ StoreContextElement(slot_context, slot_index, value);
__ Dispatch();
}
@@ -612,7 +600,7 @@ void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id,
// Fast path does a normal load context.
{
Node* slot_context = __ GetContextAtDepth(context, depth);
- Node* result = __ LoadContextSlot(slot_context, slot_index);
+ Node* result = __ LoadContextElement(slot_context, slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -724,9 +712,13 @@ void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
DoStaLookupSlot(LanguageMode::STRICT, assembler);
}
-Node* Interpreter::BuildLoadNamedProperty(Callable ic,
- InterpreterAssembler* assembler) {
+// LdaNamedProperty <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
typedef LoadWithVectorDescriptor Descriptor;
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* register_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(register_index);
@@ -736,38 +728,21 @@ Node* Interpreter::BuildLoadNamedProperty(Callable ic,
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- return __ CallStub(
+ Node* result = __ CallStub(
ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
Arg(Descriptor::kVector, type_feedback_vector));
-}
-
-// LdaNamedProperty <object> <name_index> <slot>
-//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadNamedProperty(ic, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrNamedProperty <object> <name_index> <slot> <reg>
+// KeyedLoadIC <object> <slot>
//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index> and puts the result into register <reg>.
-void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadNamedProperty(ic, assembler);
- Node* destination = __ BytecodeOperandReg(3);
- __ StoreRegister(result, destination);
- __ Dispatch();
-}
-
-Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
- InterpreterAssembler* assembler) {
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
typedef LoadWithVectorDescriptor Descriptor;
+ Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
@@ -776,35 +751,14 @@ Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- return __ CallStub(
+ Node* result = __ CallStub(
ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
Arg(Descriptor::kVector, type_feedback_vector));
-}
-
-// KeyedLoadIC <object> <slot>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator.
-void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadKeyedProperty(ic, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrKeyedProperty <object> <slot> <reg>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator and puts the result in register <reg>.
-void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadKeyedProperty(ic, assembler);
- Node* destination = __ BytecodeOperandReg(2);
- __ StoreRegister(result, destination);
- __ Dispatch();
-}
-
void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
@@ -881,6 +835,88 @@ void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
DoKeyedStoreIC(ic, assembler);
}
+// LdaModuleVariable <cell_index> <depth>
+//
+// Load the contents of a module variable into the accumulator. The variable is
+// identified by <cell_index>. <depth> is the depth of the current context
+// relative to the module context.
+void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
+ Node* cell_index = __ BytecodeOperandImm(0);
+ Node* depth = __ BytecodeOperandUImm(1);
+
+ Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
+ Node* module =
+ __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
+
+ Label if_export(assembler), if_import(assembler), end(assembler);
+ __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
+ &if_import);
+
+ __ Bind(&if_export);
+ {
+ Node* regular_exports =
+ __ LoadObjectField(module, Module::kRegularExportsOffset);
+ // The actual array index is (cell_index - 1).
+ Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
+ Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+ __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
+ __ Goto(&end);
+ }
+
+ __ Bind(&if_import);
+ {
+ Node* regular_imports =
+ __ LoadObjectField(module, Module::kRegularImportsOffset);
+ // The actual array index is (-cell_index - 1).
+ Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index);
+ Node* cell = __ LoadFixedArrayElement(regular_imports, import_index);
+ __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
+// StaModuleVariable <cell_index> <depth>
+//
+// Store accumulator to the module variable identified by <cell_index>.
+// <depth> is the depth of the current context relative to the module context.
+void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* cell_index = __ BytecodeOperandImm(0);
+ Node* depth = __ BytecodeOperandUImm(1);
+
+ Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
+ Node* module =
+ __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
+
+ Label if_export(assembler), if_import(assembler), end(assembler);
+ __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
+ &if_import);
+
+ __ Bind(&if_export);
+ {
+ Node* regular_exports =
+ __ LoadObjectField(module, Module::kRegularExportsOffset);
+ // The actual array index is (cell_index - 1).
+ Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
+ Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+ __ StoreObjectField(cell, Cell::kValueOffset, value);
+ __ Goto(&end);
+ }
+
+ __ Bind(&if_import);
+ {
+ // Not supported (probably never).
+ __ Abort(kUnsupportedModuleOperation);
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
@@ -904,14 +940,24 @@ void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// TODO(mythria): Remove this function once all BinaryOps record type feedback.
-template <class Generator>
-void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
+// TODO(mythria): Remove this function once all CompareOps record type feedback.
+void Interpreter::DoCompareOp(Token::Value compare_op,
+ InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* lhs = __ LoadRegister(reg_index);
Node* rhs = __ GetAccumulator();
Node* context = __ GetContext();
- Node* result = Generator::Generate(assembler, lhs, rhs, context);
+ Node* result;
+ switch (compare_op) {
+ case Token::IN:
+ result = assembler->HasProperty(rhs, lhs, context);
+ break;
+ case Token::INSTANCEOF:
+ result = assembler->InstanceOf(lhs, rhs, context);
+ break;
+ default:
+ UNREACHABLE();
+ }
__ SetAccumulator(result);
__ Dispatch();
}
@@ -930,8 +976,8 @@ void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
__ Dispatch();
}
-template <class Generator>
-void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
+void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
+ InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* lhs = __ LoadRegister(reg_index);
Node* rhs = __ GetAccumulator();
@@ -950,7 +996,7 @@ void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
gather_rhs_type(assembler), do_compare(assembler);
- __ Branch(__ WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+ __ Branch(__ TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
__ Bind(&lhs_is_smi);
var_type_feedback.Bind(
@@ -976,7 +1022,7 @@ void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
__ Bind(&gather_rhs_type);
{
Label rhs_is_smi(assembler);
- __ GotoIf(__ WordIsSmi(rhs), &rhs_is_smi);
+ __ GotoIf(__ TaggedIsSmi(rhs), &rhs_is_smi);
Node* rhs_map = __ LoadMap(rhs);
Node* rhs_type =
@@ -999,7 +1045,39 @@ void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
__ Goto(&skip_feedback_update);
__ Bind(&skip_feedback_update);
- Node* result = Generator::Generate(assembler, lhs, rhs, context);
+ Node* result;
+ switch (compare_op) {
+ case Token::EQ:
+ result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs,
+ context);
+ break;
+ case Token::NE:
+ result =
+ assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context);
+ break;
+ case Token::EQ_STRICT:
+ result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult, lhs,
+ rhs, context);
+ break;
+ case Token::LT:
+ result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
+ lhs, rhs, context);
+ break;
+ case Token::GT:
+ result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan,
+ lhs, rhs, context);
+ break;
+ case Token::LTE:
+ result = assembler->RelationalComparison(
+ CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context);
+ break;
+ case Token::GTE:
+ result = assembler->RelationalComparison(
+ CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
+ break;
+ default:
+ UNREACHABLE();
+ }
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1089,13 +1167,13 @@ void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
}
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
if (FLAG_debug_code) {
Label ok(assembler);
- __ GotoIf(__ WordIsSmi(result), &ok);
+ __ GotoIf(__ TaggedIsSmi(result), &ok);
Node* result_map = __ LoadMap(result);
__ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
kExpectedHeapNumber);
@@ -1180,21 +1258,22 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi take the fast path.
- __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi addition first.
- Node* pair = __ SmiAddWithOverflow(left, right);
+ Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left),
+ __ BitcastTaggedToWord(right));
Node* overflow = __ Projection(1, pair);
// Check if the Smi additon overflowed.
Label if_notoverflow(assembler);
- __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
- var_result.Bind(__ Projection(0, pair));
+ var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
}
}
@@ -1233,21 +1312,22 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi take the fast path.
- __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi subtraction first.
- Node* pair = __ SmiSubWithOverflow(left, right);
+ Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left),
+ __ BitcastTaggedToWord(right));
Node* overflow = __ Projection(1, pair);
// Check if the Smi subtraction overflowed.
Label if_notoverflow(assembler);
- __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
- var_result.Bind(__ Projection(0, pair));
+ var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
}
}
@@ -1287,7 +1367,7 @@ void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32Or(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1315,7 +1395,7 @@ void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32And(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1345,7 +1425,7 @@ void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32Shl(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1375,7 +1455,7 @@ void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32Sar(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1393,15 +1473,6 @@ Node* Interpreter::BuildUnaryOp(Callable callable,
}
template <class Generator>
-void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
- Node* value = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* result = Generator::Generate(assembler, value, context);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-template <class Generator>
void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* context = __ GetContext();
@@ -1495,7 +1566,7 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
Label if_true(assembler), if_false(assembler), end(assembler);
Node* true_value = __ BooleanConstant(true);
Node* false_value = __ BooleanConstant(false);
- __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
+ __ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
__ Bind(&if_true);
{
result.Bind(false_value);
@@ -1520,7 +1591,11 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
// Load the accumulator with the string representating type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
- DoUnaryOp<TypeofStub>(assembler);
+ Node* value = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = assembler->Typeof(value, context);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
void Interpreter::DoDelete(Runtime::FunctionId function_id,
@@ -1578,6 +1653,17 @@ void Interpreter::DoCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kDisallow);
}
+// CallProperty <callable> <receiver> <arg_count> <feedback_slot_id>
+//
+// Call a JSfunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers. Collect type feedback into
+// |feedback_slot_id|. The callable is known to be a property of the receiver.
+void Interpreter::DoCallProperty(InterpreterAssembler* assembler) {
+ // TODO(leszeks): Look into making the interpreter use the fact that the
+ // receiver is non-null.
+ DoJSCall(assembler, TailCallMode::kDisallow);
+}
+
// TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
//
// Tail call a JSfunction or Callable in |callable| with the |receiver| and
@@ -1660,9 +1746,8 @@ void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
// Get the function to call from the native context.
Node* context = __ GetContext();
- Node* native_context =
- __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
- Node* function = __ LoadContextSlot(native_context, context_index);
+ Node* native_context = __ LoadNativeContext(context);
+ Node* function = __ LoadContextElement(native_context, context_index);
// Call the function.
Node* result = __ CallJS(function, context, first_arg, args_count,
@@ -1698,35 +1783,35 @@ void Interpreter::DoNew(InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<EqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::EQ, assembler);
}
// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<NotEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::NE, assembler);
}
// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<StrictEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler);
}
// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<LessThanStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::LT, assembler);
}
// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<GreaterThanStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::GT, assembler);
}
// TestLessThanOrEqual <src>
@@ -1734,7 +1819,7 @@ void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<LessThanOrEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::LTE, assembler);
}
// TestGreaterThanOrEqual <src>
@@ -1742,7 +1827,7 @@ void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<GreaterThanOrEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::GTE, assembler);
}
// TestIn <src>
@@ -1750,7 +1835,7 @@ void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
- DoBinaryOp<HasPropertyStub>(assembler);
+ DoCompareOp(Token::IN, assembler);
}
// TestInstanceOf <src>
@@ -1758,7 +1843,7 @@ void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
// Test if the object referenced by the <src> register is an an instance of type
// referenced by the accumulator.
void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
- DoBinaryOp<InstanceOfStub>(assembler);
+ DoCompareOp(Token::INSTANCEOF, assembler);
}
// Jump <imm>
@@ -2025,7 +2110,7 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
Node* use_fast_shallow_clone = __ Word32And(
bytecode_flags,
__ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
- __ BranchIf(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+ __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
__ Bind(&fast_shallow_clone);
{
@@ -2068,9 +2153,9 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
Label if_fast_clone(assembler),
if_not_fast_clone(assembler, Label::kDeferred);
Node* fast_clone_properties_count =
- __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
+ __ DecodeWord32<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
bytecode_flags);
- __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+ __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
__ Bind(&if_fast_clone);
{
@@ -2217,7 +2302,7 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
Node* duplicate_parameters_bit = __ Int32Constant(
1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
- __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
+ __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
__ Bind(&if_not_duplicate_parameters);
{
@@ -2273,7 +2358,7 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
Node* interrupt = __ StackCheckTriggeredInterrupt();
- __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+ __ Branch(interrupt, &stack_check_interrupt, &ok);
__ Bind(&ok);
__ Dispatch();
@@ -2363,7 +2448,7 @@ void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
Node* object_reg = __ BytecodeOperandReg(0);
Node* receiver = __ LoadRegister(object_reg);
Node* context = __ GetContext();
- Node* const zero_smi = __ SmiConstant(Smi::FromInt(0));
+ Node* const zero_smi = __ SmiConstant(Smi::kZero);
Label nothing_to_iterate(assembler, Label::kDeferred),
use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
@@ -2446,7 +2531,7 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
// Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
- __ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow);
+ __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
__ Bind(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -2483,7 +2568,7 @@ void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
// Check if {index} is at {cache_length} already.
Label if_true(assembler), if_false(assembler), end(assembler);
- __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
+ __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false);
__ Bind(&if_true);
{
__ SetAccumulator(__ BooleanConstant(false));
@@ -2554,7 +2639,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
STATIC_ASSERT(StepFrame > StepNext);
STATIC_ASSERT(LastStepAction == StepFrame);
Node* step_next = __ Int32Constant(StepNext);
- __ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok);
+ __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
__ Bind(&ok);
Node* array =
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index b646bf8313..b10ae2e451 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -78,16 +78,12 @@ class Interpreter {
// Generates code to perform the binary operation via |Generator|.
template <class Generator>
- void DoBinaryOp(InterpreterAssembler* assembler);
-
- // Generates code to perform the binary operation via |Generator|.
- template <class Generator>
void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
// Generates code to perform the comparison via |Generator| while gathering
// type feedback.
- template <class Generator>
- void DoCompareOpWithFeedback(InterpreterAssembler* assembler);
+ void DoCompareOpWithFeedback(Token::Value compare_op,
+ InterpreterAssembler* assembler);
// Generates code to perform the bitwise binary operation corresponding to
// |bitwise_op| while gathering type feedback.
@@ -99,10 +95,6 @@ class Interpreter {
template <class Generator>
void DoBinaryOpWithImmediate(InterpreterAssembler* assembler);
- // Generates code to perform the unary operation via |Generator|.
- template <class Generator>
- void DoUnaryOp(InterpreterAssembler* assembler);
-
// Generates code to perform the unary operation via |Generator| while
  // gathering type feedback.
template <class Generator>
@@ -147,22 +139,11 @@ class Interpreter {
void DoStaLookupSlot(LanguageMode language_mode,
InterpreterAssembler* assembler);
- // Generates code to load a context slot.
- compiler::Node* BuildLoadContextSlot(InterpreterAssembler* assembler);
-
// Generates code to load a global.
compiler::Node* BuildLoadGlobal(Callable ic, compiler::Node* context,
compiler::Node* feedback_slot,
InterpreterAssembler* assembler);
- // Generates code to load a named property.
- compiler::Node* BuildLoadNamedProperty(Callable ic,
- InterpreterAssembler* assembler);
-
- // Generates code to load a keyed property.
- compiler::Node* BuildLoadKeyedProperty(Callable ic,
- InterpreterAssembler* assembler);
-
// Generates code to prepare the result for ForInPrepare. Cache data
// are placed into the consecutive series of registers starting at
// |output_register|.
@@ -183,6 +164,7 @@ class Interpreter {
OperandScale operand_scale);
bool IsDispatchTableInitialized();
+ bool ShouldInitializeDispatchTable();
static const int kNumberOfWideVariants = 3;
static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
diff --git a/deps/v8/src/interpreter/mkpeephole.cc b/deps/v8/src/interpreter/mkpeephole.cc
index 270fe83ef9..62d3a77e02 100644
--- a/deps/v8/src/interpreter/mkpeephole.cc
+++ b/deps/v8/src/interpreter/mkpeephole.cc
@@ -79,33 +79,6 @@ const char* PeepholeActionTableWriter::kNamespaceElements[] = {"v8", "internal",
// static
PeepholeActionAndData PeepholeActionTableWriter::LookupActionAndData(
Bytecode last, Bytecode current) {
- // Optimize various accumulator loads followed by store accumulator
- // to an equivalent register load and loading the accumulator with
- // the register. The latter accumulator load can often be elided as
- // it is side-effect free and often followed by another accumulator
- // load so can be elided.
- if (current == Bytecode::kStar) {
- switch (last) {
- case Bytecode::kLdaNamedProperty:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrNamedProperty};
- case Bytecode::kLdaKeyedProperty:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrKeyedProperty};
- case Bytecode::kLdaGlobal:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrGlobal};
- case Bytecode::kLdaContextSlot:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrContextSlot};
- case Bytecode::kLdaUndefined:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrUndefined};
- default:
- break;
- }
- }
-
// ToName bytecodes can be replaced by Star with the same output register if
// the value in the accumulator is already a name.
if (current == Bytecode::kToName && Bytecodes::PutsNameInAccumulator(last)) {