Diffstat (limited to 'deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h')
-rw-r--r--  deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h  103
1 file changed, 36 insertions(+), 67 deletions(-)
diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
index 5cb855e416..b63499c85c 100644
--- a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -39,35 +39,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
-// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
-enum class Condition : uint32_t {
- kEqual = static_cast<uint32_t>(eq),
- kNotEqual = static_cast<uint32_t>(ne),
-
- kLessThan = static_cast<uint32_t>(lt),
- kGreaterThan = static_cast<uint32_t>(gt),
- kLessThanEqual = static_cast<uint32_t>(le),
- kGreaterThanEqual = static_cast<uint32_t>(ge),
-
- kUnsignedLessThan = static_cast<uint32_t>(lo),
- kUnsignedGreaterThan = static_cast<uint32_t>(hi),
- kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
- kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
-
- kOverflow = static_cast<uint32_t>(vs),
- kNoOverflow = static_cast<uint32_t>(vc),
-
- kZero = static_cast<uint32_t>(eq),
- kNotZero = static_cast<uint32_t>(ne),
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- // This is important for arm, where the internal::Condition where each value
- // represents an encoded bit field value.
- static_assert(sizeof(internal::Condition) == sizeof(Condition));
- return static_cast<internal::Condition>(cond);
-}
-
namespace detail {
#ifdef DEBUG
@@ -132,13 +103,21 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
__ tst(value, Operand(mask));
- __ b(AsMasmCondition(cc), target);
+ __ b(cc, target);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
__ cmp(lhs, Operand(rhs));
- __ b(AsMasmCondition(cc), target);
+ __ b(cc, target);
+}
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@@ -338,8 +317,8 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
@@ -355,11 +334,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- __ ldr(output, FieldMemOperand(source, offset));
-}
-
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ ldrh(output, FieldMemOperand(source, offset));
@@ -401,15 +375,15 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+ LoadTaggedField(scratch_and_result, feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
- __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch);
+ __ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch);
__ b(eq, on_result);
__ mov(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
@@ -427,8 +401,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ ldr(interrupt_budget,
@@ -450,8 +424,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ ldr(interrupt_budget,
@@ -466,16 +440,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@@ -484,33 +458,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -536,8 +506,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-arm.cc
ScratchRegisterScope scope(this);
- JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels),
- &fallthrough);
+ JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough);
// Ensure to emit the constant pool first if necessary.
__ CheckConstPool(true, true);
__ BlockConstPoolFor(num_labels);
@@ -591,8 +560,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
- __ JumpIf(Condition::kGreaterThanEqual, params_size,
- Operand(actual_params_size), &corrected_args_count);
+ __ JumpIf(kGreaterThanEqual, params_size, Operand(actual_params_size),
+ &corrected_args_count);
__ masm()->mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
@@ -600,8 +569,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
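
For context: the first hunk above (@@ -39,35 +39,6 @@) deletes the baseline assembler's private Condition enum together with its AsMasmCondition() cast, so call sites such as TestAndBranch and JumpIf now pass the MacroAssembler's shared Condition straight through (__ b(cc, target) instead of __ b(AsMasmCondition(cc), target)). Below is a minimal standalone sketch of that before/after pattern; it uses hypothetical stand-in names (MasmCondition) rather than V8's real internal::Condition, and is a toy model, not V8 code.

// Sketch of the duplicated-enum pattern this diff removes: a baseline-layer
// Condition enum kept bit-identical to the backend's encoded conditions so
// that AsMasmCondition() could be a plain cast. After the change, one shared
// Condition enum is used directly and the shim disappears.
#include <cstdint>
#include <cstdio>

namespace before {
// Stand-in for ARM's encoded condition values (hypothetical name).
enum MasmCondition : uint32_t { eq = 0x0, ne = 0x1 };
// Baseline duplicate, deliberately bit-identical to MasmCondition.
enum class Condition : uint32_t {
  kEqual = static_cast<uint32_t>(eq),
  kNotEqual = static_cast<uint32_t>(ne),
};
inline MasmCondition AsMasmCondition(Condition cond) {
  // Safe only because the two enums share the same size and encoding.
  static_assert(sizeof(MasmCondition) == sizeof(Condition));
  return static_cast<MasmCondition>(cond);
}
}  // namespace before

namespace after {
// Single shared enum: call sites pass kEqual straight to the branch
// emitter, i.e. __ b(cc, target) with no conversion step.
enum class Condition : uint32_t { kEqual = 0x0, kNotEqual = 0x1 };
}  // namespace after

int main() {
  std::printf("before: %u\n",
              static_cast<unsigned>(
                  before::AsMasmCondition(before::Condition::kEqual)));
  std::printf("after:  %u\n",
              static_cast<unsigned>(after::Condition::kEqual));
  return 0;
}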