author     Michaël Zasso <targos@protonmail.com>  2017-02-14 11:27:26 +0100
committer  Michaël Zasso <targos@protonmail.com>  2017-02-22 15:55:42 +0100
commit     7a77daf24344db7942e34c962b0f1ee729ab7af5
tree       e7cbe7bf4e2f4b802a8f5bc18336c546cd6a0d7f  /deps/v8/src/interpreter/interpreter.cc
parent     5f08871ee93ea739148cc49e0f7679e33c70295a
download   node-new-7a77daf24344db7942e34c962b0f1ee729ab7af5.tar.gz
deps: update V8 to 5.6.326.55
PR-URL: https://github.com/nodejs/node/pull/10992
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8/src/interpreter/interpreter.cc')
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc  |  399
1 file changed, 242 insertions, 157 deletions
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 410030247f..81aecafecf 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -53,8 +53,8 @@ Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
}
void Interpreter::Initialize() {
- if (IsDispatchTableInitialized()) return;
- Zone zone(isolate_->allocator());
+ if (!ShouldInitializeDispatchTable()) return;
+ Zone zone(isolate_->allocator(), ZONE_NAME);
HandleScope scope(isolate_);
if (FLAG_trace_ignition_dispatches) {
@@ -103,6 +103,9 @@ void Interpreter::Initialize() {
dispatch_table_[index] = dispatch_table_[illegal_index];
}
}
+
+ // Initialization should have been successful.
+ DCHECK(IsDispatchTableInitialized());
}
Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
@@ -197,6 +200,8 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
return FAILED;
}
+ CodeGenerator::MakeCodePrologue(info(), "interpreter");
+
if (FLAG_print_bytecode) {
OFStream os(stdout);
bytecodes->Print(os);
@@ -213,13 +218,17 @@ CompilationJob* Interpreter::NewCompilationJob(CompilationInfo* info) {
}
bool Interpreter::IsDispatchTableInitialized() {
+ return dispatch_table_[0] != nullptr;
+}
+
+bool Interpreter::ShouldInitializeDispatchTable() {
if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
FLAG_trace_ignition_dispatches) {
// Regenerate table to add bytecode tracing operations, print the assembly
// code generated by TurboFan or instrument handlers with dispatch counters.
- return false;
+ return true;
}
- return dispatch_table_[0] != nullptr;
+ return !IsDispatchTableInitialized();
}
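Note: the hunks above split the old boolean into two predicates so tracing
builds regenerate the table while normal builds initialize it once. A minimal
sketch of the resulting control flow, using only names that appear in this
diff (the elided body is the handler generation loop):

    void Interpreter::Initialize() {
      if (!ShouldInitializeDispatchTable()) return;  // built already, no tracing
      // ... generate bytecode handlers into dispatch_table_ ...
      DCHECK(IsDispatchTableInitialized());  // assertion added in this commit
    }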
void Interpreter::TraceCodegen(Handle<Code> code) {
@@ -343,17 +352,6 @@ void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// LdrUndefined <reg>
-//
-// Loads undefined into the accumulator and |reg|.
-void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
- Node* undefined_value =
- __ HeapConstant(isolate_->factory()->undefined_value());
- Node* destination = __ BytecodeOperandReg(0);
- __ StoreRegister(undefined_value, destination);
- __ Dispatch();
-}
-
// LdaNull
//
// Load Null into the accumulator.
@@ -451,23 +449,6 @@ void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// LdrGlobal <slot> <reg>
-//
-// Load the global with name in constant pool entry <name_index> into
-// register <reg> using FeedBackVector slot <slot> outside of a typeof.
-void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
-
- Node* context = __ GetContext();
-
- Node* raw_slot = __ BytecodeOperandIdx(0);
- Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
- Node* destination = __ BytecodeOperandReg(1);
- __ StoreRegister(result, destination);
- __ Dispatch();
-}
-
// LdaGlobalInsideTypeof <slot>
//
// Load the global with name in constant pool entry <name_index> into the
@@ -488,9 +469,9 @@ void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
typedef StoreWithVectorDescriptor Descriptor;
// Get the global object.
Node* context = __ GetContext();
- Node* native_context =
- __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
- Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
+ Node* native_context = __ LoadNativeContext(context);
+ Node* global =
+ __ LoadContextElement(native_context, Context::EXTENSION_INDEX);
// Store the global via the StoreIC.
Node* code_target = __ HeapConstant(ic.code());
@@ -525,34 +506,29 @@ void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
DoStaGlobal(ic, assembler);
}
-compiler::Node* Interpreter::BuildLoadContextSlot(
- InterpreterAssembler* assembler) {
- Node* reg_index = __ BytecodeOperandReg(0);
- Node* context = __ LoadRegister(reg_index);
- Node* slot_index = __ BytecodeOperandIdx(1);
- Node* depth = __ BytecodeOperandUImm(2);
- Node* slot_context = __ GetContextAtDepth(context, depth);
- return __ LoadContextSlot(slot_context, slot_index);
-}
-
// LdaContextSlot <context> <slot_index> <depth>
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
- Node* result = BuildLoadContextSlot(assembler);
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* depth = __ BytecodeOperandUImm(2);
+ Node* slot_context = __ GetContextAtDepth(context, depth);
+ Node* result = __ LoadContextElement(slot_context, slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrContextSlot <context> <slot_index> <depth> <reg>
+// LdaCurrentContextSlot <slot_index>
//
-// Load the object in |slot_index| of the context at |depth| in the context
-// chain of |context| into register |reg|.
-void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
- Node* result = BuildLoadContextSlot(assembler);
- Node* destination = __ BytecodeOperandReg(3);
- __ StoreRegister(result, destination);
+// Load the object in |slot_index| of the current context into the accumulator.
+void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* slot_context = __ GetContext();
+ Node* result = __ LoadContextElement(slot_context, slot_index);
+ __ SetAccumulator(result);
__ Dispatch();
}
@@ -567,7 +543,19 @@ void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
Node* slot_index = __ BytecodeOperandIdx(1);
Node* depth = __ BytecodeOperandUImm(2);
Node* slot_context = __ GetContextAtDepth(context, depth);
- __ StoreContextSlot(slot_context, slot_index, value);
+ __ StoreContextElement(slot_context, slot_index, value);
+ __ Dispatch();
+}
+
+// StaCurrentContextSlot <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of the current
+// context.
+void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* slot_context = __ GetContext();
+ __ StoreContextElement(slot_context, slot_index, value);
__ Dispatch();
}
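Note: LdaCurrentContextSlot and StaCurrentContextSlot added above are the
depth == 0 specialization of the generic context-slot bytecodes, skipping the
GetContextAtDepth walk. An illustrative plain-C++ sketch, not the
CodeStubAssembler API (Context::previous()/get() stand in for the real
accessors):

    Object* LdaContextSlot(Context* ctx, int slot_index, int depth) {
      while (depth-- > 0) ctx = ctx->previous();  // walk the context chain
      return ctx->get(slot_index);
    }
    Object* LdaCurrentContextSlot(Context* current, int slot_index) {
      return current->get(slot_index);  // no chain walk for the common case
    }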
@@ -612,7 +600,7 @@ void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id,
// Fast path does a normal load context.
{
Node* slot_context = __ GetContextAtDepth(context, depth);
- Node* result = __ LoadContextSlot(slot_context, slot_index);
+ Node* result = __ LoadContextElement(slot_context, slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -724,9 +712,13 @@ void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
DoStaLookupSlot(LanguageMode::STRICT, assembler);
}
-Node* Interpreter::BuildLoadNamedProperty(Callable ic,
- InterpreterAssembler* assembler) {
+// LdaNamedProperty <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
typedef LoadWithVectorDescriptor Descriptor;
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* register_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(register_index);
@@ -736,38 +728,21 @@ Node* Interpreter::BuildLoadNamedProperty(Callable ic,
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- return __ CallStub(
+ Node* result = __ CallStub(
ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
Arg(Descriptor::kVector, type_feedback_vector));
-}
-
-// LdaNamedProperty <object> <name_index> <slot>
-//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadNamedProperty(ic, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrNamedProperty <object> <name_index> <slot> <reg>
+// KeyedLoadIC <object> <slot>
//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index> and puts the result into register <reg>.
-void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadNamedProperty(ic, assembler);
- Node* destination = __ BytecodeOperandReg(3);
- __ StoreRegister(result, destination);
- __ Dispatch();
-}
-
-Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
- InterpreterAssembler* assembler) {
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
typedef LoadWithVectorDescriptor Descriptor;
+ Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
@@ -776,35 +751,14 @@ Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- return __ CallStub(
+ Node* result = __ CallStub(
ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
Arg(Descriptor::kVector, type_feedback_vector));
-}
-
-// KeyedLoadIC <object> <slot>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator.
-void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadKeyedProperty(ic, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrKeyedProperty <object> <slot> <reg>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator and puts the result in register <reg>.
-void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadKeyedProperty(ic, assembler);
- Node* destination = __ BytecodeOperandReg(2);
- __ StoreRegister(result, destination);
- __ Dispatch();
-}
-
void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
@@ -881,6 +835,88 @@ void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
DoKeyedStoreIC(ic, assembler);
}
+// LdaModuleVariable <cell_index> <depth>
+//
+// Load the contents of a module variable into the accumulator. The variable is
+// identified by <cell_index>. <depth> is the depth of the current context
+// relative to the module context.
+void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
+ Node* cell_index = __ BytecodeOperandImm(0);
+ Node* depth = __ BytecodeOperandUImm(1);
+
+ Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
+ Node* module =
+ __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
+
+ Label if_export(assembler), if_import(assembler), end(assembler);
+ __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
+ &if_import);
+
+ __ Bind(&if_export);
+ {
+ Node* regular_exports =
+ __ LoadObjectField(module, Module::kRegularExportsOffset);
+ // The actual array index is (cell_index - 1).
+ Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
+ Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+ __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
+ __ Goto(&end);
+ }
+
+ __ Bind(&if_import);
+ {
+ Node* regular_imports =
+ __ LoadObjectField(module, Module::kRegularImportsOffset);
+ // The actual array index is (-cell_index - 1).
+ Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index);
+ Node* cell = __ LoadFixedArrayElement(regular_imports, import_index);
+ __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
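Note: the cell_index encoding handled above packs exports and imports into one
signed operand, as the inline comments state. A small worked sketch of that
mapping (hypothetical helper, not part of the patch):

    // cell_index > 0: regular_exports[cell_index - 1]   (indices 1, 2, 3, ...)
    // cell_index < 0: regular_imports[-cell_index - 1]  (indices -1, -2, -3, ...)
    int ModuleArrayIndex(int cell_index) {
      return cell_index > 0 ? cell_index - 1 : -cell_index - 1;
    }

So cell_index 1 reads regular_exports[0] and cell_index -2 reads
regular_imports[1]. StaModuleVariable below reuses the same split but aborts
on the import arm, since imported bindings cannot be written.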
+
+// StaModuleVariable <cell_index> <depth>
+//
+// Store accumulator to the module variable identified by <cell_index>.
+// <depth> is the depth of the current context relative to the module context.
+void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* cell_index = __ BytecodeOperandImm(0);
+ Node* depth = __ BytecodeOperandUImm(1);
+
+ Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
+ Node* module =
+ __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
+
+ Label if_export(assembler), if_import(assembler), end(assembler);
+ __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
+ &if_import);
+
+ __ Bind(&if_export);
+ {
+ Node* regular_exports =
+ __ LoadObjectField(module, Module::kRegularExportsOffset);
+ // The actual array index is (cell_index - 1).
+ Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
+ Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+ __ StoreObjectField(cell, Cell::kValueOffset, value);
+ __ Goto(&end);
+ }
+
+ __ Bind(&if_import);
+ {
+ // Not supported (probably never).
+ __ Abort(kUnsupportedModuleOperation);
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
@@ -904,14 +940,24 @@ void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// TODO(mythria): Remove this function once all BinaryOps record type feedback.
-template <class Generator>
-void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
+// TODO(mythria): Remove this function once all CompareOps record type feedback.
+void Interpreter::DoCompareOp(Token::Value compare_op,
+ InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* lhs = __ LoadRegister(reg_index);
Node* rhs = __ GetAccumulator();
Node* context = __ GetContext();
- Node* result = Generator::Generate(assembler, lhs, rhs, context);
+ Node* result;
+ switch (compare_op) {
+ case Token::IN:
+ result = assembler->HasProperty(rhs, lhs, context);
+ break;
+ case Token::INSTANCEOF:
+ result = assembler->InstanceOf(lhs, rhs, context);
+ break;
+ default:
+ UNREACHABLE();
+ }
__ SetAccumulator(result);
__ Dispatch();
}
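Note: with the stub templates gone, the remaining non-feedback comparisons
dispatch on a Token::Value instead of a Generator type parameter. The two call
sites, as they appear later in this diff:

    void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
      DoCompareOp(Token::IN, assembler);
    }
    void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
      DoCompareOp(Token::INSTANCEOF, assembler);
    }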
@@ -930,8 +976,8 @@ void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
__ Dispatch();
}
-template <class Generator>
-void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
+void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
+ InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* lhs = __ LoadRegister(reg_index);
Node* rhs = __ GetAccumulator();
@@ -950,7 +996,7 @@ void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
gather_rhs_type(assembler), do_compare(assembler);
- __ Branch(__ WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+ __ Branch(__ TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
__ Bind(&lhs_is_smi);
var_type_feedback.Bind(
@@ -976,7 +1022,7 @@ void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
__ Bind(&gather_rhs_type);
{
Label rhs_is_smi(assembler);
- __ GotoIf(__ WordIsSmi(rhs), &rhs_is_smi);
+ __ GotoIf(__ TaggedIsSmi(rhs), &rhs_is_smi);
Node* rhs_map = __ LoadMap(rhs);
Node* rhs_type =
@@ -999,7 +1045,39 @@ void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
__ Goto(&skip_feedback_update);
__ Bind(&skip_feedback_update);
- Node* result = Generator::Generate(assembler, lhs, rhs, context);
+ Node* result;
+ switch (compare_op) {
+ case Token::EQ:
+ result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs,
+ context);
+ break;
+ case Token::NE:
+ result =
+ assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context);
+ break;
+ case Token::EQ_STRICT:
+ result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult, lhs,
+ rhs, context);
+ break;
+ case Token::LT:
+ result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
+ lhs, rhs, context);
+ break;
+ case Token::GT:
+ result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan,
+ lhs, rhs, context);
+ break;
+ case Token::LTE:
+ result = assembler->RelationalComparison(
+ CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context);
+ break;
+ case Token::GTE:
+ result = assembler->RelationalComparison(
+ CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
+ break;
+ default:
+ UNREACHABLE();
+ }
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1089,13 +1167,13 @@ void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
}
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
if (FLAG_debug_code) {
Label ok(assembler);
- __ GotoIf(__ WordIsSmi(result), &ok);
+ __ GotoIf(__ TaggedIsSmi(result), &ok);
Node* result_map = __ LoadMap(result);
__ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
kExpectedHeapNumber);
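Note: the WordIsSmi -> TaggedIsSmi and BranchIf -> Branch substitutions that
repeat through the rest of this diff are CodeStubAssembler renames, not
behavior changes. What the Smi check tests, as an illustrative one-liner
(assuming V8's zero Smi tag with a one-bit tag mask, as on 32-bit targets):

    bool TaggedIsSmi(intptr_t tagged) { return (tagged & 1) == 0; }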
@@ -1180,21 +1258,22 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi to take the fast path.
- __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi addition first.
- Node* pair = __ SmiAddWithOverflow(left, right);
+ Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left),
+ __ BitcastTaggedToWord(right));
Node* overflow = __ Projection(1, pair);
// Check if the Smi addition overflowed.
Label if_notoverflow(assembler);
- __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
- var_result.Bind(__ Projection(0, pair));
+ var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
}
}
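Note: the fast path above now adds the tagged words directly. Because a Smi is
the integer value shifted left past a zero tag bit, word addition of two
tagged Smis yields the tagged sum, and the machine overflow flag doubles as
the Smi-range check. A minimal sketch of the same trick (GCC/Clang builtin;
32-bit Smi layout assumed):

    #include <cstdint>
    // Returns false on overflow, mirroring IntPtrAddWithOverflow's projections.
    bool AddTaggedSmis(int32_t left_tagged, int32_t right_tagged,
                       int32_t* result_tagged) {
      return !__builtin_add_overflow(left_tagged, right_tagged, result_tagged);
    }

DoSubSmi below applies the identical pattern with IntPtrSubWithOverflow.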
@@ -1233,21 +1312,22 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi to take the fast path.
- __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi subtraction first.
- Node* pair = __ SmiSubWithOverflow(left, right);
+ Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left),
+ __ BitcastTaggedToWord(right));
Node* overflow = __ Projection(1, pair);
// Check if the Smi subtraction overflowed.
Label if_notoverflow(assembler);
- __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
- var_result.Bind(__ Projection(0, pair));
+ var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
}
}
@@ -1287,7 +1367,7 @@ void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32Or(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1315,7 +1395,7 @@ void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32And(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1345,7 +1425,7 @@ void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32Shl(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1375,7 +1455,7 @@ void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32Sar(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1393,15 +1473,6 @@ Node* Interpreter::BuildUnaryOp(Callable callable,
}
template <class Generator>
-void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
- Node* value = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* result = Generator::Generate(assembler, value, context);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-template <class Generator>
void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* context = __ GetContext();
@@ -1495,7 +1566,7 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
Label if_true(assembler), if_false(assembler), end(assembler);
Node* true_value = __ BooleanConstant(true);
Node* false_value = __ BooleanConstant(false);
- __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
+ __ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
__ Bind(&if_true);
{
result.Bind(false_value);
@@ -1520,7 +1591,11 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
// Load the accumulator with the string representing the type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
- DoUnaryOp<TypeofStub>(assembler);
+ Node* value = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = assembler->Typeof(value, context);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
void Interpreter::DoDelete(Runtime::FunctionId function_id,
@@ -1578,6 +1653,17 @@ void Interpreter::DoCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kDisallow);
}
+// CallProperty <callable> <receiver> <arg_count> <feedback_slot_id>
+//
+// Call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers. Collect type feedback into
+// |feedback_slot_id|. The callable is known to be a property of the receiver.
+void Interpreter::DoCallProperty(InterpreterAssembler* assembler) {
+ // TODO(leszeks): Look into making the interpreter use the fact that the
+ // receiver is non-null.
+ DoJSCall(assembler, TailCallMode::kDisallow);
+}
+
// TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
//
// Tail call a JSFunction or Callable in |callable| with the |receiver| and
@@ -1660,9 +1746,8 @@ void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
// Get the function to call from the native context.
Node* context = __ GetContext();
- Node* native_context =
- __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
- Node* function = __ LoadContextSlot(native_context, context_index);
+ Node* native_context = __ LoadNativeContext(context);
+ Node* function = __ LoadContextElement(native_context, context_index);
// Call the function.
Node* result = __ CallJS(function, context, first_arg, args_count,
@@ -1698,35 +1783,35 @@ void Interpreter::DoNew(InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<EqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::EQ, assembler);
}
// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<NotEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::NE, assembler);
}
// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<StrictEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler);
}
// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<LessThanStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::LT, assembler);
}
// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<GreaterThanStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::GT, assembler);
}
// TestLessThanOrEqual <src>
@@ -1734,7 +1819,7 @@ void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<LessThanOrEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::LTE, assembler);
}
// TestGreaterThanOrEqual <src>
@@ -1742,7 +1827,7 @@ void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<GreaterThanOrEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::GTE, assembler);
}
// TestIn <src>
@@ -1750,7 +1835,7 @@ void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
- DoBinaryOp<HasPropertyStub>(assembler);
+ DoCompareOp(Token::IN, assembler);
}
// TestInstanceOf <src>
@@ -1758,7 +1843,7 @@ void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
// Test if the object referenced by the <src> register is an instance of the type
// referenced by the accumulator.
void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
- DoBinaryOp<InstanceOfStub>(assembler);
+ DoCompareOp(Token::INSTANCEOF, assembler);
}
// Jump <imm>
@@ -2025,7 +2110,7 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
Node* use_fast_shallow_clone = __ Word32And(
bytecode_flags,
__ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
- __ BranchIf(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+ __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
__ Bind(&fast_shallow_clone);
{
@@ -2068,9 +2153,9 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
Label if_fast_clone(assembler),
if_not_fast_clone(assembler, Label::kDeferred);
Node* fast_clone_properties_count =
- __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
+ __ DecodeWord32<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
bytecode_flags);
- __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+ __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
__ Bind(&if_fast_clone);
{
@@ -2217,7 +2302,7 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
Node* duplicate_parameters_bit = __ Int32Constant(
1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
- __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
+ __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
__ Bind(&if_not_duplicate_parameters);
{
@@ -2273,7 +2358,7 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
Node* interrupt = __ StackCheckTriggeredInterrupt();
- __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+ __ Branch(interrupt, &stack_check_interrupt, &ok);
__ Bind(&ok);
__ Dispatch();
@@ -2363,7 +2448,7 @@ void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
Node* object_reg = __ BytecodeOperandReg(0);
Node* receiver = __ LoadRegister(object_reg);
Node* context = __ GetContext();
- Node* const zero_smi = __ SmiConstant(Smi::FromInt(0));
+ Node* const zero_smi = __ SmiConstant(Smi::kZero);
Label nothing_to_iterate(assembler, Label::kDeferred),
use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
@@ -2446,7 +2531,7 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
// Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
- __ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow);
+ __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
__ Bind(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -2483,7 +2568,7 @@ void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
// Check if {index} is at {cache_length} already.
Label if_true(assembler), if_false(assembler), end(assembler);
- __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
+ __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false);
__ Bind(&if_true);
{
__ SetAccumulator(__ BooleanConstant(false));
@@ -2554,7 +2639,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
STATIC_ASSERT(StepFrame > StepNext);
STATIC_ASSERT(LastStepAction == StepFrame);
Node* step_next = __ Int32Constant(StepNext);
- __ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok);
+ __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
__ Bind(&ok);
Node* array =