summary | refs | log | tree | commit | diff
path: root/deps/v8/src/arm/macro-assembler-arm.cc
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/arm/macro-assembler-arm.cc')
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc  584
1 files changed, 183 insertions, 401 deletions
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 86eac5db1a..2950de0a0c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -11,6 +11,7 @@
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
+#include "src/callable.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/debug/debug.h"
@@ -30,45 +31,94 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
-void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
+TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+}
+
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- stm(db_w, sp, (kCallerSaved | lr.bit()) & ~exclusions);
+ RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
+
+ bytes += NumRegs(list) * kPointerSize;
if (fp_mode == kSaveFPRegs) {
- SaveFPRegs(sp, lr);
+ bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
}
+
+ return bytes;
}
-void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
+ stm(db_w, sp, list);
+
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == kSaveFPRegs) {
+ SaveFPRegs(sp, lr);
+ bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
if (fp_mode == kSaveFPRegs) {
RestoreFPRegs(sp, lr);
+ bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
}
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~exclusions);
+ RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
+ ldm(ia_w, sp, list);
+
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
}
void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
@@ -234,7 +284,7 @@ void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch,
Condition cond) {
- if (scratch.is(no_reg)) {
+ if (scratch == no_reg) {
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
@@ -268,33 +318,33 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
}
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
- if (!dst.is(src)) {
+ if (dst != src) {
mov(dst, src, LeaveCC, cond);
}
}
void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
Condition cond) {
- if (!dst.is(src)) {
+ if (dst != src) {
vmov(dst, src, cond);
}
}
void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
Condition cond) {
- if (!dst.is(src)) {
+ if (dst != src) {
vmov(dst, src, cond);
}
}
void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
- if (!dst.is(src)) {
+ if (dst != src) {
vmov(dst, src);
}
}
void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
- if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
+ if (srcdst0 == srcdst1) return; // Swapping aliased registers emits nothing.
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
@@ -302,8 +352,8 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
if (CpuFeatures::IsSupported(NEON)) {
vswp(srcdst0, srcdst1);
} else {
- DCHECK(!srcdst0.is(kScratchDoubleReg));
- DCHECK(!srcdst1.is(kScratchDoubleReg));
+ DCHECK(srcdst0 != kScratchDoubleReg);
+ DCHECK(srcdst1 != kScratchDoubleReg);
vmov(kScratchDoubleReg, srcdst0);
vmov(srcdst0, srcdst1);
vmov(srcdst1, kScratchDoubleReg);
@@ -311,7 +361,7 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
}
void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
- if (!srcdst0.is(srcdst1)) {
+ if (srcdst0 != srcdst1) {
vswp(srcdst0, srcdst1);
}
}
@@ -324,7 +374,7 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!srcA.is(scratch));
+ DCHECK(srcA != scratch);
mul(scratch, src1, src2, LeaveCC, cond);
sub(dst, srcA, scratch, LeaveCC, cond);
}
@@ -448,17 +498,12 @@ void MacroAssembler::InNewSpace(Register object,
CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -481,14 +526,8 @@ void MacroAssembler::RecordWriteField(
bind(&ok);
}
- RecordWrite(object,
- dst,
- value,
- lr_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK,
- pointers_to_here_check_for_value);
+ RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK);
bind(&done);
@@ -500,94 +539,78 @@ void MacroAssembler::RecordWriteField(
}
}
-// Will clobber 3 registers: object, map and dst. The register 'object' contains
-// a heap object pointer. A scratch register also needs to be available.
-void MacroAssembler::RecordWriteForMap(Register object,
- Register map,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode) {
- if (emit_debug_code()) {
- ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
- cmp(dst, Operand(isolate()->factory()->meta_map()));
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
- if (!FLAG_incremental_marking) {
- return;
- }
+ stm(db_w, sp, regs);
+}
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- cmp(scratch, map);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
+ ldm(ia_w, sp, regs);
+}
- Label done;
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
+ // i.e. always emit remember set and save FP registers in RecordWriteStub. If
+ // large performance regression is observed, we should use these values to
+ // avoid unnecessary work.
- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- CheckPageFlag(map,
- map, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
+ RegList registers = callable.descriptor().allocatable_registers();
- add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- tst(dst, Operand(kPointerSize - 1));
- b(eq, &ok);
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
+ SaveRegisters(registers);
- // Record the actual write.
- if (lr_status == kLRHasNotBeenSaved) {
- push(lr);
- }
- RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
- fp_mode);
- CallStub(&stub);
- if (lr_status == kLRHasNotBeenSaved) {
- pop(lr);
- }
+ Register object_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register isolate_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kIsolate));
+ Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kFPMode));
- bind(&done);
+ Push(object);
+ Push(address);
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- {
- UseScratchRegisterScope temps(this);
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
- temps.Acquire(), dst);
- }
+ Pop(slot_parameter);
+ Pop(object_parameter);
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
- mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
- }
+ Move(isolate_parameter,
+ Operand(ExternalReference::isolate_address(isolate())));
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(callable.code(), RelocInfo::CODE_TARGET);
+
+ RestoreRegisters(registers);
}
// Will clobber 3 registers: object, address, and value. The register 'object'
// contains a heap object pointer. The heap object tag is shifted away.
// A scratch register also needs to be available.
-void MacroAssembler::RecordWrite(
- Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
- DCHECK(!object.is(value));
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ DCHECK(object != value);
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -609,13 +632,9 @@ void MacroAssembler::RecordWrite(
JumpIfSmi(value, &done);
}
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
- }
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -626,9 +645,13 @@ void MacroAssembler::RecordWrite(
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
+#ifdef V8_CSA_WRITE_BARRIER
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
+#endif
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
}
@@ -652,10 +675,8 @@ void MacroAssembler::RecordWrite(
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
+ Register address, Register scratch,
+ SaveFPRegsMode fp_mode) {
Label done;
if (emit_debug_code()) {
Label ok;
@@ -677,20 +698,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Call stub on end of buffer.
// Check for end of buffer.
tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
- if (and_then == kFallThroughAtEnd) {
- b(ne, &done);
- } else {
- DCHECK(and_then == kReturnAtEnd);
- Ret(ne);
- }
+ Ret(ne);
push(lr);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(lr);
bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
+ Ret();
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
@@ -854,8 +868,8 @@ void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
void TurboAssembler::VmovExtended(Register dst, int src_code) {
- DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code);
- DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
+ DCHECK_LE(SwVfpRegister::kNumRegisters, src_code);
+ DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code);
if (src_code & 0x1) {
VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
} else {
@@ -864,8 +878,8 @@ void TurboAssembler::VmovExtended(Register dst, int src_code) {
}
void TurboAssembler::VmovExtended(int dst_code, Register src) {
- DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code);
- DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
+ DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code);
+ DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code);
if (dst_code & 0x1) {
VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
} else {
@@ -876,8 +890,8 @@ void TurboAssembler::VmovExtended(int dst_code, Register src) {
void TurboAssembler::VmovExtended(int dst_code, int src_code) {
if (src_code == dst_code) return;
- if (src_code < SwVfpRegister::kMaxNumRegisters &&
- dst_code < SwVfpRegister::kMaxNumRegisters) {
+ if (src_code < SwVfpRegister::kNumRegisters &&
+ dst_code < SwVfpRegister::kNumRegisters) {
// src and dst are both s-registers.
vmov(SwVfpRegister::from_code(dst_code),
SwVfpRegister::from_code(src_code));
@@ -896,13 +910,13 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
src_offset = dst_offset ^ 1;
}
if (dst_offset) {
- if (dst_d_reg.is(src_d_reg)) {
+ if (dst_d_reg == src_d_reg) {
vdup(Neon32, dst_d_reg, src_d_reg, 0);
} else {
vsli(Neon64, dst_d_reg, src_d_reg, 32);
}
} else {
- if (dst_d_reg.is(src_d_reg)) {
+ if (dst_d_reg == src_d_reg) {
vdup(Neon32, dst_d_reg, src_d_reg, 1);
} else {
vsri(Neon64, dst_d_reg, src_d_reg, 32);
@@ -915,13 +929,13 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
// s-registers.
int scratchSCode = kScratchDoubleReg.low().code();
int scratchSCode2 = kScratchDoubleReg2.low().code();
- if (src_code < SwVfpRegister::kMaxNumRegisters) {
+ if (src_code < SwVfpRegister::kNumRegisters) {
// src is an s-register, dst is not.
vmov(kScratchDoubleReg, dst_d_reg);
vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
SwVfpRegister::from_code(src_code));
vmov(dst_d_reg, kScratchDoubleReg);
- } else if (dst_code < SwVfpRegister::kMaxNumRegisters) {
+ } else if (dst_code < SwVfpRegister::kNumRegisters) {
// dst is an s-register, src is not.
vmov(kScratchDoubleReg, src_d_reg);
vmov(SwVfpRegister::from_code(dst_code),
@@ -938,7 +952,7 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
}
void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
- if (dst_code < SwVfpRegister::kMaxNumRegisters) {
+ if (dst_code < SwVfpRegister::kNumRegisters) {
vldr(SwVfpRegister::from_code(dst_code), src);
} else {
// TODO(bbudge) If Neon supported, use load single lane form of vld1.
@@ -950,7 +964,7 @@ void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
}
void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
- if (src_code < SwVfpRegister::kMaxNumRegisters) {
+ if (src_code < SwVfpRegister::kNumRegisters) {
vstr(SwVfpRegister::from_code(src_code), dst);
} else {
// TODO(bbudge) If Neon supported, use store single lane form of vst1.
@@ -1181,19 +1195,6 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type) {
return frame_ends;
}
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- Push(lr, fp, context, target);
- add(fp, sp, Operand(2 * kPointerSize));
- Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(argc);
- Pop(lr, fp, context, target);
-}
-
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
@@ -1229,7 +1230,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
SaveFPRegs(sp, scratch);
// Note that d0 will be accessible at
// fp - ExitFrameConstants::kFrameSize -
- // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
+ // DwVfpRegister::kNumRegisters * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
}
@@ -1276,8 +1277,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
- sub(r3, fp,
- Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
+ sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
RestoreFPRegs(r3, scratch);
}
@@ -1401,8 +1401,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
- DCHECK(actual.is_immediate() || actual.reg().is(r0));
- DCHECK(expected.is_immediate() || expected.reg().is(r2));
+ DCHECK(actual.is_immediate() || actual.reg() == r0);
+ DCHECK(expected.is_immediate() || expected.reg() == r2);
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1496,8 +1496,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function.is(r1));
- DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
+ DCHECK(function == r1);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
@@ -1537,7 +1537,7 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r1.
- DCHECK(fun.is(r1));
+ DCHECK(fun == r1);
Register expected_reg = r2;
Register temp_reg = r4;
@@ -1560,7 +1560,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r1.
- DCHECK(function.is(r1));
+ DCHECK(function == r1);
// Get the function and setup the context.
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@@ -1615,118 +1615,12 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
- }
- jmp(gc_required);
- return;
- }
-
- DCHECK(!AreAliased(result, scratch1, scratch2));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK_EQ(0, object_size & kObjectAlignmentMask);
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDM.
- // Also, assert that the registers are numbered such that the values
- // are loaded in the correct order.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
- DCHECK((limit - top) == kPointerSize);
-
- UseScratchRegisterScope temps(this);
-
- // Set up allocation top address register.
- Register top_address = scratch1;
- Register alloc_limit = temps.Acquire();
- Register result_end = scratch2;
- mov(top_address, Operand(allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into alloc_limit.
- ldm(ia, top_address, result.bit() | alloc_limit.bit());
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- ldr(alloc_limit, MemOperand(top_address));
- cmp(result, alloc_limit);
- Check(eq, kUnexpectedAllocationTop);
- }
- // Load allocation limit. Result already contains allocation top.
- ldr(alloc_limit, MemOperand(top_address, limit - top));
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
- Label aligned;
- b(eq, &aligned);
- if ((flags & PRETENURE) != 0) {
- cmp(result, Operand(alloc_limit));
- b(hs, gc_required);
- }
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. We have already acquired the scratch register at
- // this point, so we cannot just use add().
- DCHECK(object_size > 0);
- Register source = result;
- int shift = 0;
- while (object_size != 0) {
- if (((object_size >> shift) & 0x03) == 0) {
- shift += 2;
- } else {
- int bits = object_size & (0xff << shift);
- object_size -= bits;
- shift += 8;
- Operand bits_operand(bits);
- DCHECK(bits_operand.InstructionsRequired(this) == 1);
- add(result_end, source, bits_operand);
- source = result_end;
- }
- }
-
- cmp(result_end, Operand(alloc_limit));
- b(hi, gc_required);
-
- str(result_end, MemOperand(top_address));
-
- // Tag object.
- add(result, result, Operand(kHeapObjectTag));
-}
-
void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
UseScratchRegisterScope temps(this);
- const Register temp = type_reg.is(no_reg) ? temps.Acquire() : type_reg;
+ const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, temp, type);
@@ -1745,57 +1639,11 @@ void MacroAssembler::CompareRoot(Register obj,
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!obj.is(scratch));
+ DCHECK(obj != scratch);
LoadRoot(scratch, index);
cmp(obj, scratch);
}
-void MacroAssembler::CompareMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success) {
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(scratch, map, early_success);
-}
-
-
-void MacroAssembler::CompareMap(Register obj_map,
- Handle<Map> map,
- Label* early_success) {
- cmp(obj_map, Operand(map));
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, scratch, map, &success);
- b(ne, fail);
- bind(&success);
-}
-
-void MacroAssembler::CheckMap(Register obj, Register scratch,
- Heap::RootListIndex index, Label* fail,
- SmiCheckType smi_check_type) {
- UseScratchRegisterScope temps(this);
- Register root_register = temps.Acquire();
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(root_register, index);
- cmp(scratch, root_register);
- b(ne, fail);
-}
-
-
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, Operand(cell));
ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
@@ -1884,7 +1732,7 @@ void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
LowDwVfpRegister double_scratch) {
- DCHECK(!double_input.is(double_scratch));
+ DCHECK(double_input != double_scratch);
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_f64_s32(double_scratch, double_scratch.low());
@@ -2066,21 +1914,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
}
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- b(&ok);
- bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- bind(&ok);
- }
-}
-
void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
@@ -2219,20 +2052,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential one-byte strings.
- // Assume that they are non-smis.
- ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
- scratch2, failure);
-}
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -2245,28 +2064,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
bind(&succeed);
}
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch1,
- Register scratch2, Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
- LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset));
- str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
- str(value, FieldMemOperand(result, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
void TurboAssembler::CheckFor32DRegs(Register scratch) {
mov(scratch, Operand(ExternalReference::cpu_features()));
ldr(scratch, MemOperand(scratch));
@@ -2294,7 +2091,7 @@ void TurboAssembler::FloatMaxHelper(T result, T left, T right,
Label* out_of_line) {
// This trivial case is caught sooner, so that the out-of-line code can be
// completely avoided.
- DCHECK(!left.is(right));
+ DCHECK(left != right);
if (CpuFeatures::IsSupported(ARMv8)) {
CpuFeatureScope scope(this, ARMv8);
@@ -2306,7 +2103,7 @@ void TurboAssembler::FloatMaxHelper(T result, T left, T right,
VFPCompareAndSetFlags(left, right);
b(vs, out_of_line);
// Avoid a conditional instruction if the result register is unique.
- bool aliased_result_reg = result.is(left) || result.is(right);
+ bool aliased_result_reg = result == left || result == right;
Move(result, right, aliased_result_reg ? mi : al);
Move(result, left, gt);
b(ne, &done);
@@ -2322,7 +2119,7 @@ void TurboAssembler::FloatMaxHelper(T result, T left, T right,
template <typename T>
void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
- DCHECK(!left.is(right));
+ DCHECK(left != right);
// ARMv8: At least one of left and right is a NaN.
// Anything else: At least one of left and right is a NaN, or both left and
@@ -2338,7 +2135,7 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right,
Label* out_of_line) {
// This trivial case is caught sooner, so that the out-of-line code can be
// completely avoided.
- DCHECK(!left.is(right));
+ DCHECK(left != right);
if (CpuFeatures::IsSupported(ARMv8)) {
CpuFeatureScope scope(this, ARMv8);
@@ -2350,7 +2147,7 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right,
VFPCompareAndSetFlags(left, right);
b(vs, out_of_line);
// Avoid a conditional instruction if the result register is unique.
- bool aliased_result_reg = result.is(left) || result.is(right);
+ bool aliased_result_reg = result == left || result == right;
Move(result, left, aliased_result_reg ? mi : al);
Move(result, right, gt);
b(ne, &done);
@@ -2364,13 +2161,13 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right,
// We could use a single 'vorr' instruction here if we had NEON support.
// The algorithm used is -((-L) + (-R)), which is most efficiently expressed
// as -((-L) - R).
- if (left.is(result)) {
- DCHECK(!right.is(result));
+ if (left == result) {
+ DCHECK(right != result);
vneg(result, left);
vsub(result, result, right);
vneg(result, result);
} else {
- DCHECK(!left.is(result));
+ DCHECK(left != result);
vneg(result, right);
vsub(result, result, left);
vneg(result, result);
@@ -2381,7 +2178,7 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right,
template <typename T>
void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
- DCHECK(!left.is(right));
+ DCHECK(left != right);
// At least one of left and right is a NaN. Use vadd to propagate the NaN
// appropriately. +/-0 is handled inline.
@@ -2428,21 +2225,6 @@ void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
FloatMinOutOfLineHelper(result, left, right);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- and_(scratch1, first, Operand(kFlatOneByteStringMask));
- and_(scratch2, second, Operand(kFlatOneByteStringMask));
- cmp(scratch1, Operand(kFlatOneByteStringTag));
- // Ignore second test if first test failed.
- cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
- b(ne, failure);
-}
-
static const int kRegisterPassedArguments = 4;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -2488,7 +2270,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
}
void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
- DCHECK(src.is(d0));
+ DCHECK(src == d0);
if (!use_eabi_hardfloat()) {
vmov(r0, r1, src);
}
@@ -2502,8 +2284,8 @@ void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
DwVfpRegister src2) {
- DCHECK(src1.is(d0));
- DCHECK(src2.is(d1));
+ DCHECK(src1 == d0);
+ DCHECK(src2 == d1);
if (!use_eabi_hardfloat()) {
vmov(r0, r1, src1);
vmov(r2, r3, src2);