diff options
Diffstat (limited to 'deps/v8/src/interpreter/interpreter-assembler.cc')
-rw-r--r-- | deps/v8/src/interpreter/interpreter-assembler.cc | 592 |
1 file changed, 231 insertions(+), 361 deletions(-)
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc index f01821b565..a55e074b3a 100644 --- a/deps/v8/src/interpreter/interpreter-assembler.cc +++ b/deps/v8/src/interpreter/interpreter-assembler.cc @@ -22,8 +22,6 @@ namespace interpreter { using compiler::CodeAssemblerState; using compiler::Node; -template <class T> -using TNode = compiler::TNode<T>; InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state, Bytecode bytecode, @@ -32,19 +30,19 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state, bytecode_(bytecode), operand_scale_(operand_scale), TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_), - VARIABLE_CONSTRUCTOR( - bytecode_array_, MachineRepresentation::kTagged, - Parameter(InterpreterDispatchDescriptor::kBytecodeArray)), + TVARIABLE_CONSTRUCTOR( + bytecode_array_, + CAST(Parameter(InterpreterDispatchDescriptor::kBytecodeArray))), TVARIABLE_CONSTRUCTOR( bytecode_offset_, UncheckedCast<IntPtrT>( Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))), - VARIABLE_CONSTRUCTOR( - dispatch_table_, MachineType::PointerRepresentation(), - Parameter(InterpreterDispatchDescriptor::kDispatchTable)), - VARIABLE_CONSTRUCTOR( - accumulator_, MachineRepresentation::kTagged, - Parameter(InterpreterDispatchDescriptor::kAccumulator)), + TVARIABLE_CONSTRUCTOR( + dispatch_table_, UncheckedCast<ExternalReference>(Parameter( + InterpreterDispatchDescriptor::kDispatchTable))), + TVARIABLE_CONSTRUCTOR( + accumulator_, + CAST(Parameter(InterpreterDispatchDescriptor::kAccumulator))), accumulator_use_(AccumulatorUse::kNone), made_call_(false), reloaded_frame_ptr_(false), @@ -129,27 +127,27 @@ void InterpreterAssembler::SaveBytecodeOffset() { } } -Node* InterpreterAssembler::BytecodeArrayTaggedPointer() { +TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() { // Force a re-load of the bytecode array after every call in case the debugger // has been activated. 
if (!bytecode_array_valid_) { - bytecode_array_.Bind(LoadRegister(Register::bytecode_array())); + bytecode_array_ = CAST(LoadRegister(Register::bytecode_array())); bytecode_array_valid_ = true; } return bytecode_array_.value(); } -Node* InterpreterAssembler::DispatchTableRawPointer() { +TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() { if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ && (dispatch_table_.value() == Parameter(InterpreterDispatchDescriptor::kDispatchTable))) { - dispatch_table_.Bind(ExternalConstant( - ExternalReference::interpreter_dispatch_table_address(isolate()))); + dispatch_table_ = ExternalConstant( + ExternalReference::interpreter_dispatch_table_address(isolate())); } return dispatch_table_.value(); } -Node* InterpreterAssembler::GetAccumulatorUnchecked() { +TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() { return accumulator_.value(); } @@ -159,10 +157,11 @@ TNode<Object> InterpreterAssembler::GetAccumulator() { return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked()); } -void InterpreterAssembler::SetAccumulator(Node* value) { +// TODO(v8:6949): Remove sloppy-ness from SetAccumulator's value argument. 
+void InterpreterAssembler::SetAccumulator(SloppyTNode<Object> value) { DCHECK(Bytecodes::WritesAccumulator(bytecode_)); accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; - accumulator_.Bind(value); + accumulator_ = value; } TNode<Context> InterpreterAssembler::GetContext() { @@ -173,15 +172,14 @@ void InterpreterAssembler::SetContext(TNode<Context> value) { StoreRegister(value, Register::current_context()); } -Node* InterpreterAssembler::GetContextAtDepth(TNode<Context> context, - TNode<Uint32T> depth) { +TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context, + TNode<Uint32T> depth) { TVARIABLE(Context, cur_context, context); TVARIABLE(Uint32T, cur_depth, depth); Label context_found(this); - Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context}; - Label context_search(this, 2, context_search_loop_variables); + Label context_search(this, {&cur_depth, &cur_context}); // Fast path if the depth is 0. Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search); @@ -206,33 +204,38 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth( TVARIABLE(Context, cur_context, context); TVARIABLE(Uint32T, cur_depth, depth); - Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context}; - Label context_search(this, 2, context_search_loop_variables); + Label context_search(this, {&cur_depth, &cur_context}); + Label no_extension(this); // Loop until the depth is 0. Goto(&context_search); BIND(&context_search); { - // TODO(leszeks): We only need to do this check if the context had a sloppy - // eval, we could pass in a context chain bitmask to figure out which - // contexts actually need to be checked. + // Check if context has an extension slot + TNode<BoolT> has_extension = + LoadContextHasExtensionField(cur_context.value()); + GotoIfNot(has_extension, &no_extension); + // Jump to the target if the extension slot is not a hole. 
TNode<Object> extension_slot = LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX); + Branch(TaggedNotEqual(extension_slot, TheHoleConstant()), target, + &no_extension); - // Jump to the target if the extension slot is not a hole. - GotoIf(TaggedNotEqual(extension_slot, TheHoleConstant()), target); - - cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1))); - cur_context = - CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); + BIND(&no_extension); + { + cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1))); + cur_context = CAST( + LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); - GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)), - &context_search); + GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)), + &context_search); + } } } -TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Node* reg_index) { +TNode<IntPtrT> InterpreterAssembler::RegisterLocation( + TNode<IntPtrT> reg_index) { return Signed(WordPoisonOnSpeculation( IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)))); } @@ -241,11 +244,11 @@ TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) { return RegisterLocation(IntPtrConstant(reg.ToOperand())); } -TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(Node* index) { - return Signed(TimesSystemPointerSize(index)); +TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) { + return TimesSystemPointerSize(index); } -TNode<Object> InterpreterAssembler::LoadRegister(Node* reg_index) { +TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) { return LoadFullTagged(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index), LoadSensitivity::kCritical); @@ -281,7 +284,7 @@ std::pair<TNode<Object>, TNode<Object>> InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) { DCHECK_EQ(OperandType::kRegPair, Bytecodes::GetOperandType(bytecode_, operand_index)); - Node* 
first_reg_index = + TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index); return std::make_pair(LoadRegister(first_reg_index), @@ -300,7 +303,7 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) { return RegListNodePair(base_reg, reg_count); } -Node* InterpreterAssembler::LoadRegisterFromRegisterList( +TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList( const RegListNodePair& reg_list, int index) { TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index); // Location is already poisoned on speculation, so no need to poison here. @@ -317,29 +320,30 @@ TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList( return Signed(IntPtrSub(reg_list.base_reg_location(), offset)); } -void InterpreterAssembler::StoreRegister(Node* value, Register reg) { +void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) { StoreFullTaggedNoWriteBarrier( GetInterpretedFramePointer(), IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value); } -void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) { +void InterpreterAssembler::StoreRegister(TNode<Object> value, + TNode<IntPtrT> reg_index) { StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index), value); } -void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value, +void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value, int operand_index) { StoreRegister(value, BytecodeOperandReg(operand_index, LoadSensitivity::kSafe)); } -void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1, - Node* value2, +void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1, + TNode<Object> value2, int operand_index) { DCHECK_EQ(OperandType::kRegOutPair, Bytecodes::GetOperandType(bytecode_, operand_index)); - Node* first_reg_index = + TNode<IntPtrT> 
first_reg_index = BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); StoreRegister(value1, first_reg_index); TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index); @@ -347,10 +351,11 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1, } void InterpreterAssembler::StoreRegisterTripleAtOperandIndex( - Node* value1, Node* value2, Node* value3, int operand_index) { + TNode<Object> value1, TNode<Object> value2, TNode<Object> value3, + int operand_index) { DCHECK_EQ(OperandType::kRegOutTriple, Bytecodes::GetOperandType(bytecode_, operand_index)); - Node* first_reg_index = + TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); StoreRegister(value1, first_reg_index); TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index); @@ -359,12 +364,12 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex( StoreRegister(value3, third_reg_index); } -TNode<IntPtrT> InterpreterAssembler::NextRegister(Node* reg_index) { +TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) { // Register indexes are negative, so the next index is minus one. 
return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1))); } -Node* InterpreterAssembler::OperandOffset(int operand_index) { +TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) { return IntPtrConstant( Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale())); } @@ -374,7 +379,7 @@ TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte( DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); - Node* operand_offset = OperandOffset(operand_index); + TNode<IntPtrT> operand_offset = OperandOffset(operand_index); return Load<Uint8T>(BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning); @@ -385,7 +390,7 @@ TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte( DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); - Node* operand_offset = OperandOffset(operand_index); + TNode<IntPtrT> operand_offset = OperandOffset(operand_index); return Load<Int8T>(BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning); @@ -429,7 +434,7 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned( MachineType machine_type = (i == 0) ? 
msb_type : MachineType::Uint8(); TNode<IntPtrT> offset = IntPtrConstant(relative_offset + msb_offset + i * kStep); - TNode<WordT> array_offset = IntPtrAdd(BytecodeOffset(), offset); + TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset); bytes[i] = UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(), array_offset, needs_poisoning)); @@ -561,7 +566,7 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) { +TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag(int operand_index) { DCHECK_EQ(OperandType::kFlag8, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -578,15 +583,16 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) { +TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord( + int operand_index) { return ChangeUint32ToWord(BytecodeOperandUImm(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) { - return SmiFromInt32(Signed(BytecodeOperandUImm(operand_index))); +TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) { + return SmiFromUint32(BytecodeOperandUImm(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) { +TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) { DCHECK_EQ(OperandType::kImm, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -594,15 +600,17 @@ Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) { return BytecodeSignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) { +TNode<IntPtrT> 
InterpreterAssembler::BytecodeOperandImmIntPtr( + int operand_index) { return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) { +TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) { return SmiFromInt32(BytecodeOperandImm(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) { +TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32( + int operand_index) { DCHECK_EQ(OperandType::kIdx, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -610,15 +618,15 @@ Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) { +TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) { return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) { - return SmiTag(BytecodeOperandIdx(operand_index)); +TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) { + return SmiTag(Signed(BytecodeOperandIdx(operand_index))); } -Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx( +TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx( int operand_index, LoadSensitivity needs_poisoning) { DCHECK_EQ(OperandType::kIdx, Bytecodes::GetOperandType(bytecode_, operand_index)); @@ -628,7 +636,7 @@ Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx( BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning)); } -Node* InterpreterAssembler::BytecodeOperandReg( +TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg( int operand_index, LoadSensitivity needs_poisoning) { DCHECK(Bytecodes::IsRegisterOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); @@ -638,7 +646,8 @@ Node* InterpreterAssembler::BytecodeOperandReg( 
BytecodeSignedOperand(operand_index, operand_size, needs_poisoning)); } -Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) { +TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId( + int operand_index) { DCHECK_EQ(OperandType::kRuntimeId, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -647,7 +656,7 @@ Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandNativeContextIndex( +TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex( int operand_index) { DCHECK_EQ(OperandType::kNativeContextIndex, Bytecodes::GetOperandType(bytecode_, operand_index)); @@ -657,7 +666,8 @@ Node* InterpreterAssembler::BytecodeOperandNativeContextIndex( BytecodeUnsignedOperand(operand_index, operand_size)); } -Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) { +TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId( + int operand_index) { DCHECK_EQ(OperandType::kIntrinsicId, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -666,7 +676,7 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) { +TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) { TNode<FixedArray> constant_pool = CAST(LoadObjectField( BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset)); return UnsafeLoadFixedArrayElement( @@ -674,13 +684,13 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) { } TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry( - Node* index) { - return SmiUntag(LoadConstantPoolEntry(index)); + TNode<WordT> index) { + return SmiUntag(CAST(LoadConstantPoolEntry(index))); } -Node* 
InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex( +TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex( int operand_index) { - Node* index = + TNode<UintPtrT> index = BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe); return LoadConstantPoolEntry(index); } @@ -688,7 +698,7 @@ Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex( TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex( int operand_index) { - return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index)); + return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index))); } TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() { @@ -713,151 +723,15 @@ void InterpreterAssembler::CallPrologue() { void InterpreterAssembler::CallEpilogue() { } -void InterpreterAssembler::IncrementCallCount(Node* feedback_vector, - Node* slot_id) { - Comment("increment call count"); - TNode<Smi> call_count = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize)); - // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call - // count are used as flags. To increment the call count by 1 we hence - // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}. - TNode<Smi> new_count = SmiAdd( - call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift)); - // Count is Smi, so we don't need a write barrier. - StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count, - SKIP_WRITE_BARRIER, kTaggedSize); -} - -void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context, - Node* feedback_vector, - Node* slot_id) { - Label extra_checks(this, Label::kDeferred), done(this); - - // Check if we have monomorphic {target} feedback already. 
- TNode<MaybeObject> feedback = - LoadFeedbackVectorSlot(feedback_vector, slot_id); - Comment("check if monomorphic"); - TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, CAST(target)); - GotoIf(is_monomorphic, &done); - - // Check if it is a megamorphic {target}. - Comment("check if megamorphic"); - TNode<BoolT> is_megamorphic = TaggedEqual( - feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate()))); - Branch(is_megamorphic, &done, &extra_checks); - - BIND(&extra_checks); - { - Label initialize(this), mark_megamorphic(this); - - Comment("check if weak reference"); - TNode<BoolT> is_uninitialized = TaggedEqual( - feedback, - HeapConstant(FeedbackVector::UninitializedSentinel(isolate()))); - GotoIf(is_uninitialized, &initialize); - CSA_ASSERT(this, IsWeakOrCleared(feedback)); - - // If the weak reference is cleared, we have a new chance to become - // monomorphic. - Comment("check if weak reference is cleared"); - Branch(IsCleared(feedback), &initialize, &mark_megamorphic); - - BIND(&initialize); - { - // Check if {target} is a JSFunction in the current native context. - Comment("check if function in same native context"); - GotoIf(TaggedIsSmi(target), &mark_megamorphic); - // Check if the {target} is a JSFunction or JSBoundFunction - // in the current native context. - VARIABLE(var_current, MachineRepresentation::kTagged, target); - Label loop(this, &var_current), done_loop(this); - Goto(&loop); - BIND(&loop); - { - Label if_boundfunction(this), if_function(this); - Node* current = var_current.value(); - CSA_ASSERT(this, TaggedIsNotSmi(current)); - TNode<Uint16T> current_instance_type = LoadInstanceType(current); - GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), - &if_boundfunction); - Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE), - &if_function, &mark_megamorphic); - - BIND(&if_function); - { - // Check that the JSFunction {current} is in the current native - // context. 
- TNode<Context> current_context = - CAST(LoadObjectField(current, JSFunction::kContextOffset)); - TNode<Context> current_native_context = - LoadNativeContext(current_context); - Branch( - TaggedEqual(LoadNativeContext(context), current_native_context), - &done_loop, &mark_megamorphic); - } - - BIND(&if_boundfunction); - { - // Continue with the [[BoundTargetFunction]] of {target}. - var_current.Bind(LoadObjectField( - current, JSBoundFunction::kBoundTargetFunctionOffset)); - Goto(&loop); - } - } - BIND(&done_loop); - StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id, - CAST(target)); - ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize"); - Goto(&done); - } - - BIND(&mark_megamorphic); - { - // MegamorphicSentinel is an immortal immovable object so - // write-barrier is not needed. - Comment("transition to megamorphic"); - DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol)); - StoreFeedbackVectorSlot( - feedback_vector, slot_id, - HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())), - SKIP_WRITE_BARRIER); - ReportFeedbackUpdate(feedback_vector, slot_id, - "Call:TransitionMegamorphic"); - Goto(&done); - } - } - - BIND(&done); -} - -void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context, - Node* maybe_feedback_vector, - Node* slot_id) { - Label feedback_done(this); - // If feedback_vector is not valid, then nothing to do. - GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done); - - CSA_SLOW_ASSERT(this, IsFeedbackVector(maybe_feedback_vector)); - - // Increment the call count. - IncrementCallCount(maybe_feedback_vector, slot_id); - - // Collect the callable {target} feedback. 
- CollectCallableFeedback(target, context, maybe_feedback_vector, slot_id); - Goto(&feedback_done); - - BIND(&feedback_done); -} - void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, const RegListNodePair& args, + TNode<Object> function, TNode<Context> context, const RegListNodePair& args, ConvertReceiverMode receiver_mode) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) || bytecode_ == Bytecode::kInvokeIntrinsic); DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode); - Node* args_count; + TNode<Word32T> args_count; if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { // The receiver is implied, so it is not in the argument list. args_count = args.reg_count(); @@ -879,8 +753,9 @@ void InterpreterAssembler::CallJSAndDispatch( } template <class... TArgs> -void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context, - Node* arg_count, +void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function, + TNode<Context> context, + TNode<Word32T> arg_count, ConvertReceiverMode receiver_mode, TArgs... args) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); @@ -892,9 +767,9 @@ void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context, if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { // The first argument parameter (the receiver) is implied to be undefined. 
- TailCallStubThenBytecodeDispatch( - callable.descriptor(), code_target, context, function, arg_count, - static_cast<Node*>(UndefinedConstant()), args...); + TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, + context, function, arg_count, + UndefinedConstant(), args...); } else { TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context, function, arg_count, args...); @@ -906,21 +781,22 @@ void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context, // Instantiate CallJSAndDispatch() for argument counts used by interpreter // generator. template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, Node* arg_count, + TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count, ConvertReceiverMode receiver_mode); template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, Node* arg_count, - ConvertReceiverMode receiver_mode, Node*); + TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count, + ConvertReceiverMode receiver_mode, TNode<Object>); template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, Node* arg_count, - ConvertReceiverMode receiver_mode, Node*, Node*); + TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count, + ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>); template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, Node* arg_count, - ConvertReceiverMode receiver_mode, Node*, Node*, Node*); + TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count, + ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>, + TNode<Object>); void InterpreterAssembler::CallJSWithSpreadAndDispatch( - Node* function, Node* context, const RegListNodePair& args, Node* slot_id, - Node* maybe_feedback_vector) { + TNode<Object> function, TNode<Context> 
context, const RegListNodePair& args, + TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny); CollectCallFeedback(function, context, maybe_feedback_vector, slot_id); @@ -939,16 +815,18 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch( accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; } -Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, - SloppyTNode<Object> new_target, - const RegListNodePair& args, - Node* slot_id, Node* feedback_vector) { +TNode<Object> InterpreterAssembler::Construct( + TNode<Object> target, TNode<Context> context, TNode<Object> new_target, + const RegListNodePair& args, TNode<UintPtrT> slot_id, + TNode<HeapObject> maybe_feedback_vector) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); - VARIABLE(var_result, MachineRepresentation::kTagged); - VARIABLE(var_site, MachineRepresentation::kTagged); + TVARIABLE(Object, var_result); + TVARIABLE(AllocationSite, var_site); Label extra_checks(this, Label::kDeferred), return_result(this, &var_result), construct(this), construct_array(this, &var_site); - GotoIf(IsUndefined(feedback_vector), &construct); + GotoIf(IsUndefined(maybe_feedback_vector), &construct); + + TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector); // Increment the call count. IncrementCallCount(feedback_vector, slot_id); @@ -956,7 +834,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, // Check if we have monomorphic {new_target} feedback already. 
TNode<MaybeObject> feedback = LoadFeedbackVectorSlot(feedback_vector, slot_id); - Branch(IsWeakReferenceTo(feedback, new_target), &construct, &extra_checks); + Branch(IsWeakReferenceToObject(feedback, new_target), &construct, + &extra_checks); BIND(&extra_checks); { @@ -989,7 +868,7 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX); GotoIfNot(TaggedEqual(target, array_function), &mark_megamorphic); GotoIfNot(TaggedEqual(new_target, array_function), &mark_megamorphic); - var_site.Bind(strong_feedback); + var_site = CAST(strong_feedback); Goto(&construct_array); } @@ -1008,14 +887,13 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, GotoIf(TaggedIsSmi(new_target), &mark_megamorphic); // Check if the {new_target} is a JSFunction or JSBoundFunction // in the current native context. - VARIABLE(var_current, MachineRepresentation::kTagged, new_target); + TVARIABLE(HeapObject, var_current, CAST(new_target)); Label loop(this, &var_current), done_loop(this); Goto(&loop); BIND(&loop); { Label if_boundfunction(this), if_function(this); - Node* current = var_current.value(); - CSA_ASSERT(this, TaggedIsNotSmi(current)); + TNode<HeapObject> current = var_current.value(); TNode<Uint16T> current_instance_type = LoadInstanceType(current); GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), &if_boundfunction); @@ -1028,7 +906,7 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, // context. 
TNode<Context> current_context = CAST(LoadObjectField(current, JSFunction::kContextOffset)); - TNode<Context> current_native_context = + TNode<NativeContext> current_native_context = LoadNativeContext(current_context); Branch( TaggedEqual(LoadNativeContext(context), current_native_context), @@ -1038,8 +916,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, BIND(&if_boundfunction); { // Continue with the [[BoundTargetFunction]] of {current}. - var_current.Bind(LoadObjectField( - current, JSBoundFunction::kBoundTargetFunctionOffset)); + var_current = LoadObjectField<HeapObject>( + current, JSBoundFunction::kBoundTargetFunctionOffset); Goto(&loop); } } @@ -1056,8 +934,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, BIND(&create_allocation_site); { - var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector, - SmiTag(slot_id))); + var_site = + CreateAllocationSiteInFeedbackVector(feedback_vector, slot_id); ReportFeedbackUpdate(feedback_vector, slot_id, "Construct:CreateAllocationSite"); Goto(&construct_array); @@ -1097,9 +975,9 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, Callable callable = CodeFactory::InterpreterPushArgsThenConstruct( isolate(), InterpreterPushArgsMode::kArrayFunction); TNode<Code> code_target = HeapConstant(callable.code()); - var_result.Bind(CallStub(callable.descriptor(), code_target, context, - args.reg_count(), args.base_reg_location(), target, - new_target, var_site.value())); + var_result = CallStub(callable.descriptor(), code_target, context, + args.reg_count(), args.base_reg_location(), target, + new_target, var_site.value()); Goto(&return_result); } @@ -1110,9 +988,9 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, Callable callable = CodeFactory::InterpreterPushArgsThenConstruct( isolate(), InterpreterPushArgsMode::kOther); TNode<Code> code_target = HeapConstant(callable.code()); - 
var_result.Bind(CallStub(callable.descriptor(), code_target, context, - args.reg_count(), args.base_reg_location(), target, - new_target, UndefinedConstant())); + var_result = CallStub(callable.descriptor(), code_target, context, + args.reg_count(), args.base_reg_location(), target, + new_target, UndefinedConstant()); Goto(&return_result); } @@ -1120,17 +998,18 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, return var_result.value(); } -Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, - Node* new_target, - const RegListNodePair& args, - Node* slot_id, - Node* feedback_vector) { +TNode<Object> InterpreterAssembler::ConstructWithSpread( + TNode<Object> target, TNode<Context> context, TNode<Object> new_target, + const RegListNodePair& args, TNode<UintPtrT> slot_id, + TNode<HeapObject> maybe_feedback_vector) { // TODO(bmeurer): Unify this with the Construct bytecode feedback // above once we have a way to pass the AllocationSite to the Array // constructor _and_ spread the last argument at the same time. DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); Label extra_checks(this, Label::kDeferred), construct(this); - GotoIf(IsUndefined(feedback_vector), &construct); + GotoIf(IsUndefined(maybe_feedback_vector), &construct); + + TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector); // Increment the call count. IncrementCallCount(feedback_vector, slot_id); @@ -1138,7 +1017,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, // Check if we have monomorphic {new_target} feedback already. 
TNode<MaybeObject> feedback = LoadFeedbackVectorSlot(feedback_vector, slot_id); - Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct, + Branch(IsWeakReferenceToObject(feedback, new_target), &construct, &extra_checks); BIND(&extra_checks); @@ -1174,14 +1053,13 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, GotoIf(TaggedIsSmi(new_target), &mark_megamorphic); // Check if the {new_target} is a JSFunction or JSBoundFunction // in the current native context. - VARIABLE(var_current, MachineRepresentation::kTagged, new_target); + TVARIABLE(HeapObject, var_current, CAST(new_target)); Label loop(this, &var_current), done_loop(this); Goto(&loop); BIND(&loop); { Label if_boundfunction(this), if_function(this); - Node* current = var_current.value(); - CSA_ASSERT(this, TaggedIsNotSmi(current)); + TNode<HeapObject> current = var_current.value(); TNode<Uint16T> current_instance_type = LoadInstanceType(current); GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), &if_boundfunction); @@ -1194,7 +1072,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, // context. TNode<Context> current_context = CAST(LoadObjectField(current, JSFunction::kContextOffset)); - TNode<Context> current_native_context = + TNode<NativeContext> current_native_context = LoadNativeContext(current_context); Branch( TaggedEqual(LoadNativeContext(context), current_native_context), @@ -1204,8 +1082,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, BIND(&if_boundfunction); { // Continue with the [[BoundTargetFunction]] of {current}. 
- var_current.Bind(LoadObjectField( - current, JSBoundFunction::kBoundTargetFunctionOffset)); + var_current = LoadObjectField<HeapObject>( + current, JSBoundFunction::kBoundTargetFunctionOffset); Goto(&loop); } } @@ -1243,7 +1121,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, UndefinedConstant()); } -Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context, +Node* InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id, + TNode<Context> context, const RegListNodePair& args, int result_size) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); @@ -1252,22 +1131,22 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context, TNode<Code> code_target = HeapConstant(callable.code()); // Get the function entry from the function id. - Node* function_table = ExternalConstant( - ExternalReference::runtime_function_table_address(isolate())); + TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant( + ExternalReference::runtime_function_table_address(isolate()))); TNode<Word32T> function_offset = Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function))); TNode<WordT> function = IntPtrAdd(function_table, ChangeUint32ToWord(function_offset)); - Node* function_entry = - Load(MachineType::Pointer(), function, - IntPtrConstant(offsetof(Runtime::Function, entry))); + TNode<RawPtrT> function_entry = Load<RawPtrT>( + function, IntPtrConstant(offsetof(Runtime::Function, entry))); return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(), result_size, code_target, context, args.reg_count(), args.base_reg_location(), function_entry); } -void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) { +void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight, + bool backward) { Comment("[ UpdateInterruptBudget"); // Assert that the weight is positive (negative weights should be implemented @@ -1289,7 +1168,7 @@ void 
InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) { TVARIABLE(Int32T, new_budget); if (backward) { // Update budget by |weight| and check if it reaches zero. - new_budget = Signed(Int32Sub(budget_after_bytecode, weight)); + new_budget = Int32Sub(budget_after_bytecode, weight); TNode<BoolT> condition = Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0)); Label ok(this), interrupt_check(this, Label::kDeferred); @@ -1303,7 +1182,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) { } else { // For a forward jump, we know we only increase the interrupt budget, so // no need to check if it's below zero. - new_budget = Signed(Int32Add(budget_after_bytecode, weight)); + new_budget = Int32Add(budget_after_bytecode, weight); } // Update budget. @@ -1323,7 +1202,7 @@ TNode<IntPtrT> InterpreterAssembler::Advance(int delta) { return Advance(IntPtrConstant(delta)); } -TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta, +TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta, bool backward) { #ifdef V8_TRACE_IGNITION TraceBytecode(Runtime::kInterpreterTraceBytecodeExit); @@ -1334,45 +1213,51 @@ TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta, return next_offset; } -Node* InterpreterAssembler::Jump(Node* delta, bool backward) { +void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset, bool backward) { DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_)); - UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward); - Node* new_bytecode_offset = Advance(delta, backward); - TNode<WordT> target_bytecode = LoadBytecode(new_bytecode_offset); - return DispatchToBytecode(target_bytecode, new_bytecode_offset); + UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward); + TNode<IntPtrT> new_bytecode_offset = Advance(jump_offset, backward); + TNode<RawPtrT> target_bytecode = + UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset)); + 
DispatchToBytecode(target_bytecode, new_bytecode_offset); } -Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); } +void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) { + Jump(jump_offset, false); +} -Node* InterpreterAssembler::JumpBackward(Node* delta) { - return Jump(delta, true); +void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) { + Jump(jump_offset, true); } -void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) { +void InterpreterAssembler::JumpConditional(TNode<BoolT> condition, + TNode<IntPtrT> jump_offset) { Label match(this), no_match(this); Branch(condition, &match, &no_match); BIND(&match); - Jump(delta); + Jump(jump_offset); BIND(&no_match); Dispatch(); } void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs, - TNode<Object> rhs, Node* delta) { - JumpConditional(TaggedEqual(lhs, rhs), delta); + TNode<Object> rhs, + TNode<IntPtrT> jump_offset) { + JumpConditional(TaggedEqual(lhs, rhs), jump_offset); } void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs, TNode<Object> rhs, - Node* delta) { - JumpConditional(TaggedNotEqual(lhs, rhs), delta); + TNode<IntPtrT> jump_offset) { + JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset); } -TNode<WordT> InterpreterAssembler::LoadBytecode(Node* bytecode_offset) { - Node* bytecode = - Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset); +TNode<WordT> InterpreterAssembler::LoadBytecode( + TNode<IntPtrT> bytecode_offset) { + TNode<Uint8T> bytecode = + Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset); return ChangeUint32ToWord(bytecode); } @@ -1418,51 +1303,39 @@ void InterpreterAssembler::InlineStar() { accumulator_use_ = previous_acc_use; } -Node* InterpreterAssembler::Dispatch() { +void InterpreterAssembler::Dispatch() { Comment("========= Dispatch"); DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_); - Node* target_offset = Advance(); + TNode<IntPtrT> 
target_offset = Advance(); TNode<WordT> target_bytecode = LoadBytecode(target_offset); if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) { target_bytecode = StarDispatchLookahead(target_bytecode); } - return DispatchToBytecode(target_bytecode, BytecodeOffset()); + DispatchToBytecode(target_bytecode, BytecodeOffset()); } -Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode, - Node* new_bytecode_offset) { +void InterpreterAssembler::DispatchToBytecode( + TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) { if (FLAG_trace_ignition_dispatches) { TraceBytecodeDispatch(target_bytecode); } - Node* target_code_entry = - Load(MachineType::Pointer(), DispatchTableRawPointer(), - TimesSystemPointerSize(target_bytecode)); - - return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset, - target_bytecode); -} + TNode<RawPtrT> target_code_entry = Load<RawPtrT>( + DispatchTablePointer(), TimesSystemPointerSize(target_bytecode)); -Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler, - Node* bytecode_offset, - Node* target_bytecode) { - // TODO(ishell): Add CSA::CodeEntryPoint(code). - TNode<IntPtrT> handler_entry = - IntPtrAdd(BitcastTaggedToWord(handler), - IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)); - return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset, - target_bytecode); + DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset); } -Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry( - Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) { +void InterpreterAssembler::DispatchToBytecodeHandlerEntry( + TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) { // Propagate speculation poisoning. 
- TNode<WordT> poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry); - return TailCallBytecodeDispatch( - InterpreterDispatchDescriptor{}, poisoned_handler_entry, - GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(), - DispatchTableRawPointer()); + TNode<RawPtrT> poisoned_handler_entry = + UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry)); + TailCallBytecodeDispatch(InterpreterDispatchDescriptor{}, + poisoned_handler_entry, GetAccumulatorUnchecked(), + bytecode_offset, BytecodeArrayTaggedPointer(), + DispatchTablePointer()); } void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { @@ -1474,14 +1347,14 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { // Indices 256-511 correspond to bytecodes with operand_scale == 1 // Indices 512-767 correspond to bytecodes with operand_scale == 2 DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_); - Node* next_bytecode_offset = Advance(1); + TNode<IntPtrT> next_bytecode_offset = Advance(1); TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset); if (FLAG_trace_ignition_dispatches) { TraceBytecodeDispatch(next_bytecode); } - Node* base_index; + TNode<IntPtrT> base_index; switch (operand_scale) { case OperandScale::kDouble: base_index = IntPtrConstant(1 << kBitsPerByte); @@ -1493,12 +1366,10 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { UNREACHABLE(); } TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode); - Node* target_code_entry = - Load(MachineType::Pointer(), DispatchTableRawPointer(), - TimesSystemPointerSize(target_index)); + TNode<RawPtrT> target_code_entry = Load<RawPtrT>( + DispatchTablePointer(), TimesSystemPointerSize(target_index)); - DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset, - next_bytecode); + DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset); } void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { @@ 
-1527,10 +1398,9 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { UpdateInterruptBudget(profiling_weight, true); } -Node* InterpreterAssembler::LoadOsrNestingLevel() { - return LoadObjectField(BytecodeArrayTaggedPointer(), - BytecodeArray::kOsrNestingLevelOffset, - MachineType::Int8()); +TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() { + return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(), + BytecodeArray::kOsrNestingLevelOffset); } void InterpreterAssembler::Abort(AbortReason abort_reason) { @@ -1551,7 +1421,7 @@ void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs, BIND(&ok); } -void InterpreterAssembler::MaybeDropFrames(Node* context) { +void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) { TNode<ExternalReference> restart_fp_address = ExternalConstant(ExternalReference::debug_restart_fp_address(isolate())); @@ -1576,7 +1446,7 @@ void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) { SmiTag(BytecodeOffset()), GetAccumulatorUnchecked()); } -void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) { +void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) { TNode<ExternalReference> counters_table = ExternalConstant( ExternalReference::interpreter_dispatch_counters(isolate())); TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant( @@ -1616,8 +1486,8 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() { } void InterpreterAssembler::AbortIfRegisterCountInvalid( - Node* parameters_and_registers, Node* formal_parameter_count, - Node* register_count) { + TNode<FixedArrayBase> parameters_and_registers, + TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count) { TNode<IntPtrT> array_size = LoadAndUntagFixedArrayBaseLength(parameters_and_registers); @@ -1633,13 +1503,13 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid( BIND(&ok); } -Node* InterpreterAssembler::ExportParametersAndRegisterFile( +TNode<FixedArray> 
InterpreterAssembler::ExportParametersAndRegisterFile( TNode<FixedArray> array, const RegListNodePair& registers, TNode<Int32T> formal_parameter_count) { // Store the formal parameters (without receiver) followed by the // registers into the generator's internal parameters_and_registers field. TNode<IntPtrT> formal_parameter_count_intptr = - ChangeInt32ToIntPtr(formal_parameter_count); + Signed(ChangeUint32ToWord(formal_parameter_count)); TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count()); if (FLAG_debug_code) { CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(), @@ -1649,8 +1519,8 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile( } { - Variable var_index(this, MachineType::PointerRepresentation()); - var_index.Bind(IntPtrConstant(0)); + TVARIABLE(IntPtrT, var_index); + var_index = IntPtrConstant(0); // Iterate over parameters and write them into the array. Label loop(this, &var_index), done_loop(this); @@ -1662,16 +1532,16 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile( Goto(&loop); BIND(&loop); { - Node* index = var_index.value(); + TNode<IntPtrT> index = var_index.value(); GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr), &done_loop); - TNode<WordT> reg_index = IntPtrSub(reg_base, index); + TNode<IntPtrT> reg_index = IntPtrSub(reg_base, index); TNode<Object> value = LoadRegister(reg_index); StoreFixedArrayElement(array, index, value); - var_index.Bind(IntPtrAdd(index, IntPtrConstant(1))); + var_index = IntPtrAdd(index, IntPtrConstant(1)); Goto(&loop); } BIND(&done_loop); @@ -1681,25 +1551,25 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile( // Iterate over register file and write values into array. // The mapping of register to array index must match that used in // BytecodeGraphBuilder::VisitResumeGenerator. 
- Variable var_index(this, MachineType::PointerRepresentation()); - var_index.Bind(IntPtrConstant(0)); + TVARIABLE(IntPtrT, var_index); + var_index = IntPtrConstant(0); Label loop(this, &var_index), done_loop(this); Goto(&loop); BIND(&loop); { - Node* index = var_index.value(); + TNode<IntPtrT> index = var_index.value(); GotoIfNot(UintPtrLessThan(index, register_count), &done_loop); - TNode<WordT> reg_index = + TNode<IntPtrT> reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index); TNode<Object> value = LoadRegister(reg_index); - TNode<WordT> array_index = + TNode<IntPtrT> array_index = IntPtrAdd(formal_parameter_count_intptr, index); StoreFixedArrayElement(array, array_index, value); - var_index.Bind(IntPtrAdd(index, IntPtrConstant(1))); + var_index = IntPtrAdd(index, IntPtrConstant(1)); Goto(&loop); } BIND(&done_loop); @@ -1708,11 +1578,11 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile( return array; } -Node* InterpreterAssembler::ImportRegisterFile( +TNode<FixedArray> InterpreterAssembler::ImportRegisterFile( TNode<FixedArray> array, const RegListNodePair& registers, TNode<Int32T> formal_parameter_count) { TNode<IntPtrT> formal_parameter_count_intptr = - ChangeInt32ToIntPtr(formal_parameter_count); + Signed(ChangeUint32ToWord(formal_parameter_count)); TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count()); if (FLAG_debug_code) { CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(), @@ -1758,8 +1628,8 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) { TNode<Object> object = GetAccumulator(); TNode<Context> context = GetContext(); - Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned); - Variable var_result(this, MachineRepresentation::kTagged); + TVARIABLE(Smi, var_type_feedback); + TVARIABLE(Numeric, var_result); Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this), if_objectisother(this, Label::kDeferred); @@ -1768,15 +1638,15 @@ void 
InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) { BIND(&if_objectissmi); { - var_result.Bind(object); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall)); + var_result = CAST(object); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall); Goto(&if_done); } BIND(&if_objectisheapnumber); { - var_result.Bind(object); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber)); + var_result = CAST(object); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber); Goto(&if_done); } @@ -1789,23 +1659,23 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) { Label not_bigint(this); GotoIfNot(IsBigInt(CAST(object)), ¬_bigint); { - var_result.Bind(object); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt)); + var_result = CAST(object); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt); Goto(&if_done); } BIND(¬_bigint); } // Convert {object} by calling out to the appropriate builtin. - var_result.Bind(CallBuiltin(builtin, context, object)); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny)); + var_result = CAST(CallBuiltin(builtin, context, object)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny); Goto(&if_done); } BIND(&if_done); // Record the type feedback collected for {object}. - Node* slot_index = BytecodeOperandIdx(0); + TNode<UintPtrT> slot_index = BytecodeOperandIdx(0); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index); |