Diffstat (limited to 'deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h')
-rw-r--r--  deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h  150
1 file changed, 63 insertions(+), 87 deletions(-)
diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index 08a9491ce8..6aa1419e92 100644
--- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -38,32 +38,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
-// TODO(v8:11461): Unify condition names in the MacroAssembler.
-enum class Condition : uint32_t {
- kEqual = eq,
- kNotEqual = ne,
-
- kLessThan = lt,
- kGreaterThan = gt,
- kLessThanEqual = le,
- kGreaterThanEqual = ge,
-
- kUnsignedLessThan = lo,
- kUnsignedGreaterThan = hi,
- kUnsignedLessThanEqual = ls,
- kUnsignedGreaterThanEqual = hs,
-
- kOverflow = vs,
- kNoOverflow = vc,
-
- kZero = eq,
- kNotZero = ne,
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- return static_cast<internal::Condition>(cond);
-}
-
namespace detail {
#ifdef DEBUG
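The hunk above deletes the arm64-local Condition wrapper and its AsMasmCondition() cast, so the shared macro-assembler Condition names (kEqual, kZero, ...) are used directly in the rest of the diff. A minimal standalone sketch of the removed pattern, with hypothetical RawCondition/WrappedCondition types standing in for V8's; the numeric values mirror the AArch64 condition encodings (eq = 0, ne = 1):

```cpp
#include <cstdint>
#include <iostream>

// Stand-in types (not V8's) modelling the deleted wrapper.
enum RawCondition : uint32_t { eq = 0, ne = 1 };

enum class WrappedCondition : uint32_t {
  kEqual = eq,     // the old BaselineAssembler-local aliases
  kNotEqual = ne,
};

// The shim this diff removes: every branch site had to cast back down.
inline RawCondition AsMasmCondition(WrappedCondition cond) {
  return static_cast<RawCondition>(cond);
}

int main() {
  // Before: __ B(AsMasmCondition(cc), target). After: __ B(cc, target).
  std::cout << AsMasmCondition(WrappedCondition::kEqual) << '\n';  // prints 0
}
```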
@@ -125,19 +99,42 @@ void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
- if (cc == Condition::kZero) {
+ if (cc == kZero) {
__ TestAndBranchIfAllClear(value, mask, target);
- } else if (cc == Condition::kNotZero) {
+ } else if (cc == kNotZero) {
__ TestAndBranchIfAnySet(value, mask, target);
} else {
__ Tst(value, Immediate(mask));
- __ B(AsMasmCondition(cc), target);
+ __ B(cc, target);
}
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
- __ CompareAndBranch(lhs, rhs, AsMasmCondition(cc), target);
+ __ CompareAndBranch(lhs, rhs, cc, target);
+}
+#if V8_STATIC_ROOTS_BOOL
+void BaselineAssembler::JumpIfJSAnyIsPrimitive(Register heap_object,
+ Label* target,
+ Label::Distance distance) {
+ __ AssertNotSmi(heap_object);
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ JumpIfJSAnyIsPrimitive(heap_object, scratch, target, distance);
+}
+#endif // V8_STATIC_ROOTS_BOOL
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ if (cc == eq || cc == ne) {
+ __ IsObjectType(object, scratch, scratch, instance_type);
+ __ B(cc, target);
+ return;
+ }
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
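The new JumpIfObjectTypeFast above splits on the condition: eq/ne against a single InstanceType uses IsObjectType plus one branch, while ordered comparisons defer to the general JumpIfObjectType. A standalone model of that dispatch, with hypothetical stand-in types (not the V8 API):

```cpp
#include <iostream>

enum Condition { kEqual, kNotEqual, kGreaterThanEqual };

void JumpIfObjectType(Condition cc, int instance_type) {
  // General path: compare the instance type and branch on any condition.
  std::cout << "general path, cc=" << cc << '\n';
}

void JumpIfObjectTypeFast(Condition cc, int instance_type) {
  if (cc == kEqual || cc == kNotEqual) {
    // Fast path: a single type check and branch suffice for eq/ne.
    std::cout << "fast eq/ne path\n";
    return;
  }
  JumpIfObjectType(cc, instance_type);  // ordered checks take the slow path
}

int main() {
  JumpIfObjectTypeFast(kEqual, 42);             // fast eq/ne path
  JumpIfObjectTypeFast(kGreaterThanEqual, 42);  // general path, cc=2
}
```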
@@ -173,14 +170,14 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance distance) {
__ AssertSmi(value);
- __ CompareTaggedAndBranch(value, smi, AsMasmCondition(cc), target);
+ __ CompareTaggedAndBranch(value, smi, cc, target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
- __ CompareTaggedAndBranch(lhs, rhs, AsMasmCondition(cc), target);
+ __ CompareTaggedAndBranch(lhs, rhs, cc, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -188,7 +185,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
- __ CompareTaggedAndBranch(value, tmp, AsMasmCondition(cc), target);
+ __ CompareTaggedAndBranch(value, tmp, cc, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
@@ -196,7 +193,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
- __ CompareTaggedAndBranch(tmp, value, AsMasmCondition(cc), target);
+ __ CompareTaggedAndBranch(tmp, value, cc, target);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
@@ -395,9 +392,9 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
- __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
+ __ LoadTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -412,11 +409,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
-}
-
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Ldrh(output, FieldMemOperand(source, offset));
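From here on, the diff folds LoadTaggedPointerField and LoadTaggedAnyField into a single LoadTaggedField, presumably because the pointer/any distinction no longer changes the load the arm64 backend emits. A hypothetical facade (stand-in types, not V8's) showing the shape of the call-site migration:

```cpp
#include <cstdint>
#include <iostream>

struct Slot { uint64_t raw; };  // hypothetical tagged slot

// The single surviving entry point.
uint64_t LoadTaggedField(const Slot& s) { return s.raw; }

// The two old spellings, kept here only to show the migration:
// both reduce to the same load.
uint64_t LoadTaggedPointerField(const Slot& s) { return LoadTaggedField(s); }
uint64_t LoadTaggedAnyField(const Slot& s) { return LoadTaggedField(s); }

int main() {
  Slot s{0x2a};
  std::cout << (LoadTaggedPointerField(s) == LoadTaggedAnyField(s)) << '\n';  // 1
}
```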
@@ -456,27 +448,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
FeedbackSlot slot,
Label* on_result,
Label::Distance) {
- Label fallthrough, clear_slot;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
- __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
-
- // Is it marked_for_deoptimization? If yes, clear the slot.
- {
- ScratchRegisterScope temps(this);
- __ JumpIfCodeTIsMarkedForDeoptimization(
- scratch_and_result, temps.AcquireScratch(), &clear_slot);
- __ B(on_result);
- }
-
- __ bind(&clear_slot);
- __ Mov(scratch_and_result, __ ClearedValue());
- StoreTaggedFieldNoWriteBarrier(
- feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
- scratch_and_result);
-
- __ bind(&fallthrough);
- Move(scratch_and_result, 0);
+ __ TryLoadOptimizedOsrCode(scratch_and_result, feedback_vector, slot,
+ on_result, Label::Distance::kFar);
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
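The hand-rolled sequence deleted above (load the weak slot, bail to fallthrough if cleared, clear the slot if the code is marked for deoptimization, otherwise jump to on_result) now lives in a shared MacroAssembler::TryLoadOptimizedOsrCode helper. A standalone model of that control flow, using std::optional in place of weak references (assumed types, not V8 code):

```cpp
#include <iostream>
#include <optional>

// Hypothetical feedback slot: a weak code reference plus a deopt mark.
struct Slot {
  std::optional<int> code;  // nullopt once the weak reference is cleared
  bool marked_for_deopt;
};

std::optional<int> TryLoadOptimizedOsrCode(Slot& slot) {
  if (!slot.code) return std::nullopt;  // cleared weak value: fall through
  if (slot.marked_for_deopt) {          // stale code: clear the slot too
    slot.code.reset();
    return std::nullopt;
  }
  return slot.code;                     // valid: branch to on_result
}

int main() {
  Slot stale{123, true}, good{456, false};
  std::cout << TryLoadOptimizedOsrCode(stale).has_value() << '\n';  // 0
  std::cout << stale.code.has_value() << '\n';                      // 0 (cleared)
  std::cout << TryLoadOptimizedOsrCode(good).value() << '\n';       // 456
}
```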
@@ -485,8 +458,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
@@ -508,8 +481,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
@@ -524,16 +497,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
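Lda/StaContextSlot walk the context chain one kPreviousOffset hop per level of depth before touching the slot at index. A minimal sketch of that walk with a hypothetical Context struct:

```cpp
#include <iostream>
#include <vector>

// Hypothetical context record: a previous-context link plus slots.
struct Context {
  Context* previous;
  std::vector<int> slots;
};

int LdaContextSlot(Context* context, int index, int depth) {
  for (; depth > 0; --depth) context = context->previous;  // one hop per level
  return context->slots[index];
}

int main() {
  Context script_ctx{nullptr, {10, 20}};
  Context function_ctx{&script_ctx, {30}};
  // depth 1 from the function context reads the script context's slot 1.
  std::cout << LdaContextSlot(&function_ctx, 1, 1) << '\n';  // 20
}
```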
@@ -542,33 +515,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
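LdaModuleVariable and StaModuleVariable share the cell-index convention visible above: positive indices address regular_exports at (cell_index - 1), non-positive ones address regular_imports at (-cell_index - 1). A quick standalone check of that arithmetic (hypothetical names):

```cpp
#include <cassert>
#include <cstdio>

struct CellLocation { const char* array; int index; };  // hypothetical

CellLocation ResolveModuleCell(int cell_index) {
  if (cell_index > 0) {
    return {"regular_exports", cell_index - 1};  // exports count from 1
  }
  return {"regular_imports", -cell_index - 1};   // imports count from -1
}

int main() {
  CellLocation e = ResolveModuleCell(1);   // first export
  CellLocation i = ResolveModuleCell(-1);  // first import
  std::printf("%s[%d], %s[%d]\n", e.array, e.index, i.array, i.index);
  assert(e.index == 0 && i.index == 0);    // both land on element 0
}
```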
@@ -601,16 +570,23 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
ScratchRegisterScope scope(this);
Register temp = scope.AcquireScratch();
Label table;
- JumpIf(Condition::kUnsignedGreaterThanEqual, reg, num_labels, &fallthrough);
+ JumpIf(kUnsignedGreaterThanEqual, reg, num_labels, &fallthrough);
__ Adr(temp, &table);
int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
++entry_size_log2; // Account for BTI.
+ constexpr int instructions_per_jump_target = 1;
+#else
+ constexpr int instructions_per_jump_target = 0;
#endif
+ constexpr int instructions_per_label = 1 + instructions_per_jump_target;
__ Add(temp, temp, Operand(reg, UXTW, entry_size_log2));
__ Br(temp);
{
- TurboAssembler::BlockPoolsScope block_pools(masm_, num_labels * kInstrSize);
+ const int instruction_count =
+ num_labels * instructions_per_label + instructions_per_jump_target;
+ MacroAssembler::BlockPoolsScope block_pools(masm_,
+ instruction_count * kInstrSize);
__ Bind(&table);
for (int i = 0; i < num_labels; ++i) {
__ JumpTarget();
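The Switch change sizes the constant-pool blackout from the real table footprint: one branch per label, plus one BTI landing pad per entry when V8_ENABLE_CONTROL_FLOW_INTEGRITY is set (and one extra jump target, matching the diff's trailing + instructions_per_jump_target term), with entry_size_log2 bumped to 3 so each entry spans two instructions. A standalone sketch of that sizing arithmetic:

```cpp
#include <iostream>

int main() {
  constexpr bool kCFI = true;    // stands in for V8_ENABLE_CONTROL_FLOW_INTEGRITY
  constexpr int kInstrSize = 4;  // AArch64 instructions are fixed-width

  const int instructions_per_jump_target = kCFI ? 1 : 0;  // BTI landing pad
  const int instructions_per_label = 1 + instructions_per_jump_target;
  const int entry_size_log2 = 2 + (kCFI ? 1 : 0);  // 4- or 8-byte entries

  const int num_labels = 8;
  const int instruction_count =
      num_labels * instructions_per_label + instructions_per_jump_target;

  std::cout << "table entry: " << (1 << entry_size_log2) << " bytes, "
            << "pools blocked for " << instruction_count * kInstrSize
            << " bytes\n";  // 8 bytes/entry, 68 bytes blocked
}
```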
@@ -659,7 +635,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
- __ JumpIf(Condition::kGreaterThanEqual, params_size, actual_params_size,
+ __ JumpIf(kGreaterThanEqual, params_size, actual_params_size,
&corrected_args_count);
__ masm()->Mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
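The corrected_args_count block is effectively max(params_size, actual_params_size): when the caller pushed more arguments than the formal count, the larger value decides how many slots the return sequence drops. A tiny standalone rendering of that choice:

```cpp
#include <algorithm>
#include <iostream>

int main() {
  int params_size = 3;         // formal parameter count
  int actual_params_size = 5;  // what the caller actually pushed
  // JumpIf(kGreaterThanEqual, ...) skips the Mov when formal >= actual,
  // so the drop count below matches the assembly's two-branch version.
  int dropped = std::max(params_size, actual_params_size);
  std::cout << "stack slots dropped: " << dropped << '\n';  // 5
}
```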
@@ -668,7 +644,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIncludesReceiver);
+ __ masm()->DropArguments(params_size, MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}