Diffstat (limited to 'deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc')
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 73
1 file changed, 49 insertions(+), 24 deletions(-)
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 53864ad2e9..c71a63cc3d 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -224,6 +224,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
+ case Constant::kCompressedHeapObject: // Fall through.
case Constant::kHeapObject:
return Operand(constant.ToHeapObject());
case Constant::kDelayedStringConstant:
@@ -375,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ Arm64OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -621,8 +622,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
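Note on the rename above: the input register now carries a builtin index rather than a raw code pointer, and the call is resolved through a per-isolate table of builtin entry points. A hedged, illustrative sketch of that indirection in plain C++ (not the real TurboAssembler::CallBuiltinByIndex; the table and helper below are hypothetical stand-ins):

#include <cstdint>

using BuiltinEntry = void (*)();

// Hypothetical helper: look the builtin's entry point up by index in a table
// and call it, instead of treating the register value as a code address.
void CallBuiltinByIndexSketch(const BuiltinEntry* entry_table, int builtin_index) {
  entry_table[builtin_index]();
}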
@@ -793,19 +794,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0).is(x1));
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ Debug("kArchDebugAbort", 0, BREAK);
+ __ Debug("kArchAbortCSAAssert", 0, BREAK);
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
@@ -867,9 +866,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
__ StoreTaggedField(value, MemOperand(object, offset));
- if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(object, object);
- }
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
eq, ool->entry());
__ Bind(ool->exit());
@@ -1629,6 +1625,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64DmbIsh:
+ __ Dmb(InnerShareable, BarrierAll);
+ break;
case kArm64DsbIsb:
__ Dsb(FullSystem, BarrierAll);
__ Isb();
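The new kArm64DmbIsh case emits a full data memory barrier restricted to the inner-shareable domain ("dmb ish"). As a hedged point of reference (plain C++, not V8 code), the closest portable analogue is a sequentially consistent fence, which compilers lower to "dmb ish" on AArch64:

#include <atomic>

// Illustrative only: at the C++ memory-model level, a seq_cst fence is what
// "dmb ish" typically implements on ARM64.
void FullMemoryFence() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}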
@@ -2200,6 +2199,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister temp = scope.AcquireV(format); \
__ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
__ Umov(i.OutputRegister32(), temp, 0); \
+ __ Cmp(i.OutputRegister32(), 0); \
+ __ Cset(i.OutputRegister32(), ne); \
break; \
}
SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
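The two added instructions canonicalize the reduction result: the reduce op (e.g. Umaxv for AnyTrue) leaves some arbitrary non-zero lane value in the output register, and the appended Cmp/Cset pair collapses it to exactly 0 or 1. A minimal sketch of the intended semantics (plain C++, mirroring only the added Cmp/Cset):

#include <cstdint>

// After the vector reduction, 'reduced' is non-zero iff the predicate held.
// Cmp(reg, 0) followed by Cset(reg, ne) turns that into a canonical boolean.
uint32_t NormalizeReduceResult(uint32_t reduced) {
  return reduced != 0 ? 1u : 0u;
}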
@@ -2399,12 +2400,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Adr(temp, &table);
__ Add(temp, temp, Operand(input, UXTW, 2));
__ Br(temp);
- __ StartBlockPools();
- __ Bind(&table);
- for (size_t index = 0; index < case_count; ++index) {
- __ B(GetLabel(i.InputRpo(index + 2)));
+ {
+ TurboAssembler::BlockPoolsScope block_pools(tasm(),
+ case_count * kInstrSize);
+ __ Bind(&table);
+ for (size_t index = 0; index < case_count; ++index) {
+ __ B(GetLabel(i.InputRpo(index + 2)));
+ }
}
- __ EndBlockPools();
}
void CodeGenerator::FinishFrame(Frame* frame) {
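The jump-table hunk above replaces the explicit StartBlockPools()/EndBlockPools() pair with an RAII scope sized to the table (case_count * kInstrSize bytes), so pool emission cannot be left blocked on an early exit and the table stays contiguous. A hedged sketch of the pattern, with the removed calls standing in for whatever the real TurboAssembler::BlockPoolsScope does internally:

// RAII wrapper: constant/veneer pool emission is blocked for the lifetime of
// the scope and re-enabled automatically when the scope is destroyed.
class BlockPoolsScopeSketch {
 public:
  explicit BlockPoolsScopeSketch(TurboAssembler* tasm) : tasm_(tasm) {
    tasm_->StartBlockPools();  // stand-in for the pre-change call removed above
  }
  ~BlockPoolsScopeSketch() { tasm_->EndBlockPools(); }

 private:
  TurboAssembler* tasm_;
};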
@@ -2437,8 +2440,8 @@ void CodeGenerator::AssembleConstructFrame() {
// The frame has been previously padded in CodeGenerator::FinishFrame().
DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
call_descriptor->CalleeSavedRegisters());
@@ -2577,7 +2580,17 @@ void CodeGenerator::AssembleConstructFrame() {
MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
} break;
case CallDescriptor::kCallAddress:
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ required_slots += 2; // marker + saved c_entry_fp.
+ }
__ Claim(required_slots);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
+ __ Str(scratch,
+ MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ }
break;
default:
UNREACHABLE();
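In the kCallAddress case above, two extra stack slots are claimed when building a C_WASM_ENTRY frame: one for the frame-type marker written at kFrameTypeOffset and one for the saved c_entry_fp. A trivial restatement of that arithmetic (assuming a slot is one pointer-sized stack word):

// Hedged sketch of the size bump in the hunk above.
constexpr int kFrameTypeMarkerSlots = 1;  // marker stored at kFrameTypeOffset
constexpr int kSavedCEntryFpSlots = 1;    // saved c_entry_fp
constexpr int kExtraCWasmEntrySlots =
    kFrameTypeMarkerSlots + kSavedCEntryFpSlots;  // == 2, matching required_slots += 2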
@@ -2654,7 +2667,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
+void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2669,6 +2682,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
__ Mov(dst, src_object);
}
+ } else if (src.type() == Constant::kCompressedHeapObject) {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ // TODO(v8:8977): Even though this mov happens on 32 bits (Note the
+ // .W()) and we are passing along the RelocInfo, we still haven't made
+ // the address embedded in the code-stream actually be compressed.
+ __ Mov(dst.W(),
+ Immediate(src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
+ }
} else {
__ Mov(dst, g.ToImmediate(source));
}
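The final hunk materializes a kCompressedHeapObject constant into only the low 32 bits of the destination register (note the dst.W()). With pointer compression, a tagged value is stored as a 32-bit offset from a per-isolate base, roughly as in this plain C++ sketch (a hedged model, not V8's actual helpers):

#include <cstdint>

// Compression keeps only the low 32 bits of the full address; decompression
// adds the isolate's cage base back on.
uint32_t CompressTagged(uint64_t full_address) {
  return static_cast<uint32_t>(full_address);
}

uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
  return cage_base + compressed;
}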