diff options
Diffstat (limited to 'deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h')
-rw-r--r-- | deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h | 184 |
1 file changed, 45 insertions(+), 139 deletions(-)
diff --git a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h index 90a06006bb..74a3db7ca3 100644 --- a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h +++ b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h @@ -49,92 +49,6 @@ class BaselineAssembler::ScratchRegisterScope { int registers_used_; }; -// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler. -enum class Condition : uint32_t { - kEqual, - kNotEqual, - - kLessThan, - kGreaterThan, - kLessThanEqual, - kGreaterThanEqual, - - kUnsignedLessThan, - kUnsignedGreaterThan, - kUnsignedLessThanEqual, - kUnsignedGreaterThanEqual, - - kOverflow, - kNoOverflow, - - kZero, - kNotZero -}; - -inline internal::Condition AsMasmCondition(Condition cond) { - static_assert(sizeof(internal::Condition) == sizeof(Condition)); - switch (cond) { - case Condition::kEqual: - return eq; - case Condition::kNotEqual: - return ne; - case Condition::kLessThan: - return lt; - case Condition::kGreaterThan: - return gt; - case Condition::kLessThanEqual: - return le; - case Condition::kGreaterThanEqual: - return ge; - - case Condition::kUnsignedLessThan: - return lt; - case Condition::kUnsignedGreaterThan: - return gt; - case Condition::kUnsignedLessThanEqual: - return le; - case Condition::kUnsignedGreaterThanEqual: - return ge; - - case Condition::kOverflow: - return overflow; - case Condition::kNoOverflow: - return nooverflow; - - case Condition::kZero: - return eq; - case Condition::kNotZero: - return ne; - default: - UNREACHABLE(); - } -} - -inline bool IsSignedCondition(Condition cond) { - switch (cond) { - case Condition::kEqual: - case Condition::kNotEqual: - case Condition::kLessThan: - case Condition::kGreaterThan: - case Condition::kLessThanEqual: - case Condition::kGreaterThanEqual: - case Condition::kOverflow: - case Condition::kNoOverflow: - case Condition::kZero: - case Condition::kNotZero: - return true; - - case 
Condition::kUnsignedLessThan: - case Condition::kUnsignedGreaterThan: - case Condition::kUnsignedLessThanEqual: - case Condition::kUnsignedGreaterThanEqual: - return false; - - default: - UNREACHABLE(); - } -} - #define __ assm-> // ppc helper template <int width = 64> @@ -143,19 +57,19 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs, static_assert(width == 64 || width == 32, "only support 64 and 32 bit compare"); if (width == 64) { - if (IsSignedCondition(cc)) { + if (is_signed(cc)) { __ CmpS64(lhs, rhs); } else { __ CmpU64(lhs, rhs); } } else { - if (IsSignedCondition(cc)) { + if (is_signed(cc)) { __ CmpS32(lhs, rhs); } else { __ CmpU32(lhs, rhs); } } - __ b(AsMasmCondition(cc), target); + __ b(to_condition(cc), target); } #undef __ @@ -221,18 +135,27 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); __ AndU64(r0, value, Operand(mask), ip, SetRC); - __ b(AsMasmCondition(cc), target, cr0); + __ b(to_condition(cc), target, cr0); } void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); - if (IsSignedCondition(cc)) { + if (is_signed(cc)) { __ CmpS64(lhs, rhs, r0); } else { __ CmpU64(lhs, rhs, r0); } - __ b(AsMasmCondition(cc), target); + __ b(to_condition(cc), target); +} + +void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object, + InstanceType instance_type, + Label* target, + Label::Distance distance) { + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + JumpIfObjectType(cc, object, instance_type, scratch, target, distance); } void BaselineAssembler::JumpIfObjectType(Condition cc, Register object, @@ -292,7 +215,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value, MemOperand operand, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); - __ LoadTaggedPointerField(ip, operand, r0); + __ 
LoadTaggedField(ip, operand, r0); JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target); } @@ -300,7 +223,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand, Register value, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); - __ LoadTaggedPointerField(ip, operand, r0); + __ LoadTaggedField(ip, operand, r0); JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target); } @@ -460,10 +383,10 @@ void BaselineAssembler::Pop(T... registers) { detail::PopAllHelper<T...>::Pop(this, registers...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { ASM_CODE_COMMENT(masm_); - __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0); + __ LoadTaggedField(output, FieldMemOperand(source, offset), r0); } void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, @@ -479,12 +402,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, SmiUntag(output); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - ASM_CODE_COMMENT(masm_); - __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0); -} - void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { ASM_CODE_COMMENT(masm_); @@ -529,15 +446,15 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance) { Label fallthrough; - LoadTaggedPointerField(scratch_and_result, feedback_vector, - FeedbackVector::OffsetOfElementAt(slot.ToInt())); + LoadTaggedField(scratch_and_result, feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt())); __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough); // Is it marked_for_deoptimization? If yes, clear the slot. 
{ ScratchRegisterScope temps(this); Register scratch = temps.AcquireScratch(); - __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch, r0); + __ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch, r0); __ beq(on_result, cr0); __ mov(scratch, __ ClearedValue()); StoreTaggedFieldNoWriteBarrier( @@ -555,8 +472,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( @@ -580,8 +497,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( @@ -599,23 +516,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, uint32_t depth) { ASM_CODE_COMMENT(masm_); for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { ASM_CODE_COMMENT(masm_); - if (depth > 0) { - for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); - } 
- if (COMPRESS_POINTERS_BOOL) { - // Decompress tagged pointer. - __ AddS64(context, context, kPtrComprCageBaseRegister); - } + for (; depth > 0; --depth) { + LoadTaggedField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); @@ -625,34 +536,30 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, uint32_t depth) { ASM_CODE_COMMENT(masm_); for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); if (cell_index > 0) { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; } else { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). 
cell_index = -cell_index - 1; } LoadFixedArrayElement(context, context, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth) { ASM_CODE_COMMENT(masm_); for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -680,8 +587,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base, } // Mostly copied from code-generator-arm.cc - JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels), - &fallthrough); + JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough); // Ensure to emit the constant pool first if necessary. int entry_size_log2 = 3; __ ShiftLeftU32(reg, reg, Operand(entry_size_log2)); @@ -743,8 +649,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // If actual is bigger than formal, then we should use it to free up the stack // arguments. 
Label corrected_args_count; - JumpIfHelper(__ masm(), Condition::kGreaterThanEqual, params_size, - actual_params_size, &corrected_args_count); + JumpIfHelper(__ masm(), kGreaterThanEqual, params_size, actual_params_size, + &corrected_args_count); __ masm()->mr(params_size, actual_params_size); __ Bind(&corrected_args_count); @@ -752,8 +658,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } |