Diffstat (limited to 'deps/v8/src/x64/macro-assembler-x64.cc')
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc  177
1 file changed, 172 insertions(+), 5 deletions(-)
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index dc77c89485..96b45e8427 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -178,6 +178,11 @@ void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
Register smi_index) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are rsi.
+ ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
+
// First, check if a remembered set write is even needed. The tests below
// catch stores of smis and stores into young gen (which does not have space
// for the remembered set bits).
@@ -186,6 +191,17 @@ void MacroAssembler::RecordWrite(Register object,
RecordWriteNonSmi(object, offset, value, smi_index);
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors. This clobbering repeats the
+ // clobbering done inside RecordWriteNonSmi, but it is necessary so
+ // that the fast case for smis does not leave the registers
+ // unchanged.
+ if (FLAG_debug_code) {
+ movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
}
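
For context, a minimal usage sketch of the contract the new ASSERT and the zapping enforce (the registers and field offset here are illustrative, not taken from this patch):

    // Illustrative sketch only: store into an object field, then emit the
    // write barrier. rsi must not be among the inputs (see the ASSERT above).
    masm->movq(FieldOperand(rdx, JSObject::kHeaderSize), rcx);
    masm->RecordWrite(rdx, JSObject::kHeaderSize, rcx, rbx);
    // Under --debug-code the inputs now hold kZapValue, so reload rdx, rcx
    // and rbx before any further use rather than assuming they survived.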
@@ -194,6 +210,14 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
Register scratch,
Register smi_index) {
Label done;
+
+ if (FLAG_debug_code) {
+ Label okay;
+ JumpIfNotSmi(object, &okay);
+ Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
+ bind(&okay);
+ }
+
// Test that the object address is not in the new space. We cannot
// set remembered set bits in the new space.
movq(scratch, object);
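
The debug-only guard added above is a reusable idiom: branch over an Abort when the invariant holds. A hedged sketch of the same idiom for an arbitrary invariant (the register and message are illustrative):

    // Illustrative sketch only: assert in generated code, under
    // --debug-code, that a register holds a smi.
    if (FLAG_debug_code) {
      Label ok;
      masm->JumpIfSmi(value_reg, &ok);   // expected case: fall through aborts
      masm->Abort("Expected a smi");     // hypothetical message
      masm->bind(&ok);
    }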
@@ -243,6 +267,14 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
}
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(scratch, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
}
@@ -344,10 +376,14 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
return;
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
- CallStub(&stub);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ movq(rax, Immediate(num_arguments));
+ movq(rbx, ExternalReference(f));
+ CEntryStub ces(f->result_size);
+ CallStub(&ces);
}
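
A hedged caller's-eye sketch of the new path (the runtime function and argument registers are chosen for illustration): arguments are pushed on the stack, only their count goes in rax, and CEntryStub performs the actual transition via the entry point placed in rbx.

    // Illustrative sketch only.
    masm->push(rax);  // argument 0
    masm->push(rbx);  // argument 1 (rax and rbx are clobbered by CallRuntime)
    masm->CallRuntime(Runtime::FunctionForId(Runtime::kStringAdd), 2);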
@@ -581,6 +617,31 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
}
+Condition MacroAssembler::CheckBothPositiveSmi(Register first,
+ Register second) {
+ if (first.is(second)) {
+ return CheckPositiveSmi(first);
+ }
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ rol(kScratchRegister, Immediate(1));
+ testl(kScratchRegister, Immediate(0x03));
+ return zero;
+}
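
The rotate trick above is dense, so here is a standalone C++ model of the same bit manipulation (plain integers, not V8 code): a positive smi has both the tag bit (bit 0) and the sign bit (bit 31) clear, and a rotate-left-by-one brings those two bits of the OR together so a single test against 0x3 checks both registers at once.

    #include <cstdint>
    // Standalone model of CheckBothPositiveSmi's bit logic.
    static bool BothPositiveSmi(uint32_t a, uint32_t b) {
      uint32_t x = a | b;            // any offending bit survives the OR
      x = (x << 1) | (x >> 31);      // rol x, 1: sign -> bit 0, tag -> bit 1
      return (x & 0x3) == 0;         // the "zero" condition returned above
    }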
+
+
+
+Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
+ if (first.is(second)) {
+ return CheckSmi(first);
+ }
+ movl(kScratchRegister, first);
+ andl(kScratchRegister, second);
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ return zero;
+}
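
CheckEitherSmi relies on kSmiTag being 0: a smi has bit 0 clear, so if either operand is a smi the AND of the two has bit 0 clear as well. A one-line standalone model (not V8 code):

    // Standalone model of CheckEitherSmi's bit logic (1 == kSmiTagMask).
    static bool EitherSmi(uint32_t a, uint32_t b) {
      return ((a & b) & 1) == 0;
    }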
+
+
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
movq(kScratchRegister, src);
@@ -649,7 +710,17 @@ void MacroAssembler::SmiSub(Register dst,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
+ if (on_not_smi_result == NULL) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ if (dst.is(src1)) {
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ }
+ Assert(no_overflow, "Smi subtraction overflow");
+ } else if (dst.is(src1)) {
subq(dst, src2);
Label smi_result;
j(no_overflow, &smi_result);
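
A hedged usage sketch of the new NULL case (registers illustrative): when the caller can prove the result stays in smi range, for example after checking that both operands are positive smis, it passes NULL and skips the bailout path entirely.

    // Illustrative sketch only: both operands known to be positive smis,
    // so the subtraction cannot leave the smi range.
    masm->SmiSub(rax, rax, rcx, NULL);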
@@ -1281,6 +1352,46 @@ void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
}
+void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
+ Label* on_not_both_smi) {
+ Condition both_smi = CheckBothPositiveSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ Label* on_fail) {
+ // Check that both objects are not smis.
+ Condition either_smi = CheckEitherSmi(first_object, second_object);
+ j(either_smi, on_fail);
+
+ // Load instance type for both strings.
+ movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail);
+}
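
The lea-based interleaving deserves a closer look. A standalone C++ model of the packing arithmetic (not V8 code, constants passed in for clarity): because the mask and its copy shifted left by three do not overlap, which is exactly what the ASSERT_EQ checks, the addition below cannot carry and simply juxtaposes two disjoint bit fields, so one compare checks both strings.

    #include <cstdint>
    // Standalone model of the times_8 interleave. Requires
    // (mask & (mask << 3)) == 0, guaranteed by the ASSERT_EQ above.
    static bool BothMatchTag(uint32_t type1, uint32_t type2,
                             uint32_t mask, uint32_t tag) {
      type1 &= mask;
      type2 &= mask;
      uint32_t packed = type1 + (type2 << 3);  // lea(s1, Operand(s1, s2, times_8, 0))
      return packed == tag + (tag << 3);       // one cmpl checks both strings
    }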
+
+
void MacroAssembler::Move(Register dst, Handle<Object> source) {
ASSERT(!source->IsFailure());
if (source->IsSmi()) {
@@ -1474,6 +1585,17 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+Condition MacroAssembler::IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ testb(instance_type, Immediate(kIsNotStringMask));
+ return zero;
+}
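
A hedged usage sketch (registers and label are illustrative): the helper returns a condition instead of branching, so the caller chooses the jump. Note that heap_object must already be known not to be a smi, since the map load above would otherwise read from a non-pointer.

    // Illustrative sketch only.
    Label not_string;
    Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
    masm->j(NegateCondition(is_string), &not_string);  // fall through: a string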
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Label* miss) {
@@ -2387,6 +2509,51 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
+int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
+ // On Windows stack slots are reserved by the caller for all arguments
+ // including the ones passed in registers. On Linux the first 6 arguments
+ // are passed in registers and the caller does not reserve stack slots for them.
+ ASSERT(num_arguments >= 0);
+#ifdef _WIN64
+ static const int kArgumentsWithoutStackSlot = 0;
+#else
+ static const int kArgumentsWithoutStackSlot = 6;
+#endif
+ return num_arguments > kArgumentsWithoutStackSlot ?
+ num_arguments - kArgumentsWithoutStackSlot : 0;
+}
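
A quick worked example (argument count chosen for illustration): with num_arguments == 8, the Win64 branch reserves all 8 slots, since the caller provides stack space even for register arguments, while the Linux/SysV branch reserves 8 - 6 == 2; with 4 arguments the Linux path reserves none at all.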
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ ASSERT(frame_alignment != 0);
+ ASSERT(num_arguments >= 0);
+ // Make stack end at alignment and allocate space for arguments and old rsp.
+ movq(kScratchRegister, rsp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ int argument_slots_on_stack =
+ ArgumentStackSlotsForCFunctionCall(num_arguments);
+ subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
+ and_(rsp, Immediate(-frame_alignment));
+ movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ movq(rax, function);
+ CallCFunction(rax, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ call(function);
+ ASSERT(OS::ActivationFrameAlignment() != 0);
+ ASSERT(num_arguments >= 0);
+ int argument_slots_on_stack =
+ ArgumentStackSlotsForCFunctionCall(num_arguments);
+ movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
+}
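
A hedged sketch of the intended calling sequence (the argument sources and the external reference are placeholders; on SysV the first two integer arguments go in rdi and rsi):

    // Illustrative sketch only.
    masm->PrepareCallCFunction(2);           // align rsp, stash the old rsp
    masm->movq(rdi, Operand(rbp, 16));       // hypothetical argument slots
    masm->movq(rsi, Operand(rbp, 24));
    masm->CallCFunction(my_c_function, 2);   // my_c_function: hypothetical ExternalReference
    // CallCFunction then restores rsp from the slot saved above.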
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {