Diffstat (limited to 'chromium/v8/src/x64/macro-assembler-x64.cc')
-rw-r--r--  chromium/v8/src/x64/macro-assembler-x64.cc  810
1 file changed, 525 insertions(+), 285 deletions(-)
diff --git a/chromium/v8/src/x64/macro-assembler-x64.cc b/chromium/v8/src/x64/macro-assembler-x64.cc
index 69abc5454f0..6c3f50163ef 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/macro-assembler-x64.cc
@@ -37,6 +37,7 @@
#include "serialize.h"
#include "debug.h"
#include "heap.h"
+#include "isolate-inl.h"
namespace v8 {
namespace internal {
@@ -44,7 +45,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false),
root_array_available_(true) {
if (isolate() != NULL) {
@@ -79,7 +79,7 @@ Operand MacroAssembler::ExternalOperand(ExternalReference target,
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
}
- movq(scratch, target);
+ Move(scratch, target);
return Operand(scratch, 0);
}
@@ -97,7 +97,7 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
if (destination.is(rax)) {
load_rax(source);
} else {
- movq(kScratchRegister, source);
+ Move(kScratchRegister, source);
movq(destination, Operand(kScratchRegister, 0));
}
}
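
Throughout the patch, raw movq(reg, ExternalReference) loads are being replaced
by the Move macro, keeping movq a thin wrapper over the hardware instruction.
A minimal sketch of what the new overload presumably looks like (its actual
definition is outside this hunk, so treat this as illustrative):

  void MacroAssembler::Move(Register dst, ExternalReference ext) {
    // Emit the 64-bit address with an EXTERNAL_REFERENCE reloc entry so the
    // serializer can still patch it.
    movq(dst, ext.address(), RelocInfo::EXTERNAL_REFERENCE);
  }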
@@ -116,7 +116,7 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
if (source.is(rax)) {
store_rax(destination);
} else {
- movq(kScratchRegister, destination);
+ Move(kScratchRegister, destination);
movq(Operand(kScratchRegister, 0), source);
}
}
@@ -133,7 +133,7 @@ void MacroAssembler::LoadAddress(Register destination,
}
}
// Safe code.
- movq(destination, source);
+ Move(destination, source);
}
@@ -163,7 +163,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
if (is_int32(address) && !Serializer::enabled()) {
if (emit_debug_code()) {
- movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(kScratchRegister, kZapValue, RelocInfo::NONE64);
}
push(Immediate(static_cast<int32_t>(address)));
return;
@@ -275,20 +275,21 @@ void MacroAssembler::InNewSpace(Register object,
// case the size of the new space is different between the snapshot maker
// and the running system.
if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
+ Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
and_(scratch, kScratchRegister);
} else {
- movq(scratch, ExternalReference::new_space_mask(isolate()));
+ Move(scratch, ExternalReference::new_space_mask(isolate()));
and_(scratch, object);
}
- movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
+ Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpq(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
intptr_t new_space_start =
reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
+ movq(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
+ RelocInfo::NONE64);
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
@@ -309,11 +310,6 @@ void MacroAssembler::RecordWriteField(
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !dst.is(rsi));
-
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -344,8 +340,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(value, kZapValue, RelocInfo::NONE64);
+ movq(dst, kZapValue, RelocInfo::NONE64);
}
}
@@ -378,8 +374,8 @@ void MacroAssembler::RecordWriteArray(Register object,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(value, kZapValue, RelocInfo::NONE64);
+ movq(index, kZapValue, RelocInfo::NONE64);
}
}
@@ -390,11 +386,6 @@ void MacroAssembler::RecordWrite(Register object,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !address.is(rsi));
-
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
@@ -413,6 +404,10 @@ void MacroAssembler::RecordWrite(Register object,
bind(&ok);
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
@@ -444,8 +439,8 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(address, kZapValue, RelocInfo::NONE64);
+ movq(value, kZapValue, RelocInfo::NONE64);
}
}
@@ -533,10 +528,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
#endif
push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE64);
+ movq(kScratchRegister, reinterpret_cast<Smi*>(p0), RelocInfo::NONE64);
push(kScratchRegister);
- movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
+ movq(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
RelocInfo::NONE64);
push(kScratchRegister);
@@ -560,8 +554,6 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -573,8 +565,7 @@ void MacroAssembler::StubReturn(int argc) {
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -605,22 +596,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(rax, function->nargs);
- LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1, kSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments matches the
// expectation.
@@ -635,7 +613,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(rax, num_arguments);
LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
+ CEntryStub ces(f->result_size, save_doubles);
CallStub(&ces);
}
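
The CallRuntime(Runtime::FunctionId) and CallRuntimeSaveDoubles overloads
deleted above are folded into this single entry point; thin wrappers presumably
move to the header. A sketch of what such wrappers would look like, derived
from the deleted bodies (names and defaults assumed, not confirmed by this hunk):

  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, kDontSaveFPRegs);
  }

  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }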
@@ -691,13 +669,16 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Register thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
Label prologue;
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label write_back;
@@ -719,7 +700,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
Register base_reg = r15;
- movq(base_reg, next_address);
+ Move(base_reg, next_address);
movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
@@ -750,7 +731,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&profiler_disabled);
// Call the api function!
- movq(rax, reinterpret_cast<int64_t>(function_address),
+ movq(rax, reinterpret_cast<Address>(function_address),
RelocInfo::EXTERNAL_REFERENCE);
bind(&end_profiler_check);
@@ -768,7 +749,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
}
// Load the value from ReturnValue
- movq(rax, Operand(rbp, return_value_offset * kPointerSize));
+ movq(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
@@ -780,9 +761,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&leave_exit_frame);
// Check if the function scheduled an exception.
- movq(rsi, scheduled_exception_address);
+ Move(rsi, scheduled_exception_address);
Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -819,11 +801,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ movq(rsi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -936,6 +926,50 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
}
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
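
The xorps in these Cvtlsi2sd wrappers breaks a false dependency: cvtlsi2sd
writes only the low 64 bits of dst and merges the rest, so without the clear
each conversion inherits a dependency on whatever last wrote the register.
The same merge is visible at the intrinsics level (a sketch, assuming SSE2):

  #include <emmintrin.h>

  __m128d convert(__m128d prev, int x) {
    // Low lane becomes (double)x; the high lane is carried over from prev.
    // That carried-over lane is the dependency the xorps removes.
    return _mm_cvtsi32_sd(prev, x);
  }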
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsxbq(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzxbl(dst, src);
+ } else if (r.IsInteger16()) {
+ movsxwq(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzxwl(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ movb(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ movw(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -944,7 +978,7 @@ void MacroAssembler::Set(Register dst, int64_t x) {
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movq(dst, x, RelocInfo::NONE64);
+ movq(dst, x);
}
}
@@ -1009,18 +1043,9 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
- movq(dst,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE64);
+ movq(dst, Smi::FromInt(kSmiConstantRegisterValue), RelocInfo::NONE64);
cmpq(dst, kSmiConstantRegister);
- if (allow_stub_calls()) {
- Assert(equal, kUninitializedKSmiConstantRegister);
- } else {
- Label ok;
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
+ Assert(equal, kUninitializedKSmiConstantRegister);
}
int value = source->value();
if (value == 0) {
@@ -1058,7 +1083,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
UNREACHABLE();
return;
default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
+ movq(dst, source, RelocInfo::NONE64);
return;
}
if (negative) {
@@ -1081,11 +1106,7 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
testb(dst, Immediate(0x01));
Label ok;
j(zero, &ok, Label::kNear);
- if (allow_stub_calls()) {
- Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
- } else {
- int3();
- }
+ Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
bind(&ok);
}
ASSERT(kSmiShift % kBitsPerByte == 0);
@@ -1423,28 +1444,6 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
}
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result, near_jump);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result, near_jump);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1504,7 +1503,8 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
void MacroAssembler::SmiAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1512,15 +1512,32 @@ void MacroAssembler::SmiAddConstant(Register dst,
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
+ addq(dst, kScratchRegister);
+ if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ j(no_overflow, bailout_label, near_jump);
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ subq(dst, kScratchRegister);
+ } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+ if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ Label done;
+ j(no_overflow, &done, Label::kNear);
+ subq(dst, kScratchRegister);
+ jmp(bailout_label, near_jump);
+ bind(&done);
+ } else {
+      // Bail out on overflow without preserving src.
+ j(overflow, bailout_label, near_jump);
+ }
+ } else {
+ CHECK(mode.IsEmpty());
+ }
} else {
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
+ j(overflow, bailout_label, near_jump);
}
}
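
The in-place bailout paths above lean on two's-complement add and sub being
exact inverses even when the signed-overflow flag fires, which is why dst can
be restored by simply subtracting (or adding) kScratchRegister again instead
of staging the result in a scratch register. The invariant, as a standalone
sketch:

  #include <cstdint>

  // (a + k) - k == a in wrapping arithmetic, whether or not the add overflowed.
  bool restore_is_exact(int64_t a, int64_t k) {
    uint64_t sum = static_cast<uint64_t>(a) + static_cast<uint64_t>(k);
    return static_cast<int64_t>(sum - static_cast<uint64_t>(k)) == a;
  }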
@@ -1552,7 +1569,8 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiSubConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1560,35 +1578,40 @@ void MacroAssembler::SmiSubConstant(Register dst,
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ j(no_overflow, bailout_label, near_jump);
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ addq(dst, kScratchRegister);
+ } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+ if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ Label done;
+ j(no_overflow, &done, Label::kNear);
+ addq(dst, kScratchRegister);
+ jmp(bailout_label, near_jump);
+ bind(&done);
+ } else {
+      // Bail out on overflow without preserving src.
+ j(overflow, bailout_label, near_jump);
+ }
} else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
+ CHECK(mode.IsEmpty());
}
} else {
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ ASSERT(!dst.is(kScratchRegister));
+ movq(dst, src);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
+ j(overflow, bailout_label, near_jump);
}
}
}
@@ -1616,6 +1639,29 @@ void MacroAssembler::SmiNeg(Register dst,
}
+template<class T>
+static void SmiAddHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (dst.is(src1)) {
+ Label done;
+ masm->addq(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->subq(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
+ } else {
+ masm->movq(dst, src1);
+ masm->addq(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
void MacroAssembler::SmiAdd(Register dst,
Register src1,
Register src2,
@@ -1623,16 +1669,7 @@ void MacroAssembler::SmiAdd(Register dst,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1642,17 +1679,8 @@ void MacroAssembler::SmiAdd(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- ASSERT(!src2.AddressUsesRegister(dst));
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ ASSERT(!src2.AddressUsesRegister(dst));
+ SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1675,34 +1703,37 @@ void MacroAssembler::SmiAdd(Register dst,
}
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
+template<class T>
+static void SmiSubHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- subq(dst, src2);
+ Label done;
+ masm->subq(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->addq(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
} else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
+ masm->movq(dst, src1);
+ masm->subq(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
}
}
-void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, kSmiSubtractionOverflow);
+ SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1712,29 +1743,36 @@ void MacroAssembler::SmiSub(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result, near_jump);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ ASSERT(!src2.AddressUsesRegister(dst));
+ SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2) {
+template<class T>
+static void SmiSubNoOverflowHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
if (!dst.is(src1)) {
- movq(dst, src1);
+ masm->movq(dst, src1);
}
- subq(dst, src2);
- Assert(no_overflow, kSmiSubtractionOverflow);
+ masm->subq(dst, src2);
+ masm->Assert(no_overflow, kSmiSubtractionOverflow);
+}
+
+
+void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
+ ASSERT(!dst.is(src2));
+ SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ const Operand& src2) {
+ SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}
@@ -2129,10 +2167,8 @@ void MacroAssembler::SelectNonSmi(Register dst,
ASSERT(!dst.is(src2));
// Both operands must not be smis.
#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
- }
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
@@ -2240,6 +2276,90 @@ void MacroAssembler::Test(const Operand& src, Smi* source) {
// ----------------------------------------------------------------------------
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+  // Register usage: result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shrl(mask, Immediate(1));
+ subq(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ isolate()->factory()->heap_number_map(),
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ and_(scratch, mask);
+  // Each entry in the string cache consists of two pointer-sized fields,
+  // but the times_twice_pointer_size (multiply by 16) scale factor is not
+  // supported by the addressing mode on x64, so the entry index has to be
+  // premultiplied before the lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ Register index = scratch;
+ Register probe = mask;
+ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache);
+
+ bind(&is_smi);
+ SmiToInteger32(scratch, object);
+ and_(scratch, mask);
+  // Each entry in the string cache consists of two pointer-sized fields,
+  // but the times_twice_pointer_size (multiply by 16) scale factor is not
+  // supported by the addressing mode on x64, so the entry index has to be
+  // premultiplied before the lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ cmpq(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ movq(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
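
The index arithmetic above, restated in C for reference (a sketch; on x64
kPointerSizeLog2 is 3, so the shift scales by 16, the byte size of a
two-pointer cache entry):

  uint32_t mask = (cache_length >> 1) - 1;  // length/2 entries, a power of two
  uint32_t hash = hi_word ^ lo_word;        // double case; a smi hashes to its value
  uint32_t byte_offset = (hash & mask) << (kPointerSizeLog2 + 1);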
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
@@ -2376,8 +2496,7 @@ void MacroAssembler::Move(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(dst, source);
}
}
@@ -2387,8 +2506,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
movq(dst, kScratchRegister);
}
}
@@ -2399,8 +2517,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
}
}
@@ -2411,8 +2528,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
}
}
@@ -2423,47 +2539,22 @@ void MacroAssembler::Push(Handle<Object> source) {
if (source->IsSmi()) {
Push(Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
push(kScratchRegister);
}
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
+void MacroAssembler::MoveHeapObject(Register result,
+ Handle<Object> object) {
AllowDeferredHandleDereference using_raw_address;
+ ASSERT(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
movq(result, cell, RelocInfo::CELL);
movq(result, Operand(result, 0));
} else {
- Move(result, object);
- }
-}
-
-
-void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- movq(kScratchRegister, cell, RelocInfo::CELL);
- cmpq(reg, Operand(kScratchRegister, 0));
- } else {
- Cmp(reg, object);
- }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- movq(kScratchRegister, cell, RelocInfo::CELL);
- movq(kScratchRegister, Operand(kScratchRegister, 0));
- push(kScratchRegister);
- } else {
- Push(object);
+ movq(result, object, RelocInfo::EMBEDDED_OBJECT);
}
}
@@ -2548,7 +2639,8 @@ void MacroAssembler::Call(Handle<Code> code_object,
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+ rmode == RelocInfo::CODE_AGE_SEQUENCE);
call(code_object, rmode, ast_id);
#ifdef DEBUG
CHECK_EQ(end_position, pc_offset());
@@ -2651,7 +2743,8 @@ Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2710,7 +2803,8 @@ void MacroAssembler::JumpToHandlerEntry() {
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2750,7 +2844,8 @@ void MacroAssembler::Throw(Register value) {
void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2917,7 +3012,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Value is a smi. Convert to a double and store.
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
- cvtlsi2sd(xmm_scratch, kScratchRegister);
+ Cvtlsi2sd(xmm_scratch, kScratchRegister);
movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
@@ -2925,9 +3020,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
}
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -2940,10 +3033,8 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail);
}
- Label success;
- CompareMap(obj, map, &success);
+ CompareMap(obj, map);
j(not_equal, fail);
- bind(&success);
}
@@ -3028,9 +3119,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister,
- V8_INT64_C(0x8000000000000000),
- RelocInfo::NONE64);
+ movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
cmpq(result_reg, kScratchRegister);
j(not_equal, &done, Label::kNear);
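
On overflow or NaN, cvttsd2siq produces the x64 "integer indefinite" value
0x8000000000000000, which is what this comparison detects before taking the
slow path; a genuine INT64_MIN input also matches and is merely re-converted,
which is safe. In rough C terms (hardware semantics, not portable C++):

  #include <cstdint>

  int64_t fast = static_cast<int64_t>(value);  // cvttsd2siq; INT64_MIN on bad input
  bool needs_slow_path = (fast == INT64_MIN);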
@@ -3050,7 +3139,7 @@ void MacroAssembler::DoubleToI(Register result_reg,
Label* conversion_failed,
Label::Distance dst) {
cvttsd2si(result_reg, input_reg);
- cvtlsi2sd(xmm0, result_reg);
+ Cvtlsi2sd(xmm0, result_reg);
ucomisd(xmm0, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -3087,7 +3176,7 @@ void MacroAssembler::TaggedToI(Register result_reg,
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, xmm0);
- cvtlsi2sd(temp, result_reg);
+ Cvtlsi2sd(temp, result_reg);
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, dst);
@@ -3104,6 +3193,39 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
+void MacroAssembler::Throw(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ push(rax);
+ Push(Smi::FromInt(reason));
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // Control will not return here.
+ int3();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ j(NegateCondition(cc), &L);
+ Throw(reason);
+  // Control will not return here.
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
@@ -3180,7 +3302,7 @@ void MacroAssembler::AssertSmi(const Operand& object) {
void MacroAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
+ movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
cmpq(kScratchRegister, int32_register);
Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
}
@@ -3462,7 +3584,7 @@ void MacroAssembler::InvokeFunction(Register function,
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -3471,18 +3593,27 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
- // Get the function and setup the context.
- LoadHeapObject(rdi, function);
- movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
+ ASSERT(function.is(rdi));
+ movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ // Advances rdx to the end of the Code object header, to the start of
+ // the executable code.
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+
InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ Move(rdi, function);
+ InvokeFunction(rdi, expected, actual, flag, call_wrapper, call_kind);
+}
+
+
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
@@ -3559,6 +3690,30 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(rbp); // Caller's frame pointer.
+ movq(rbp, rsp);
+ push(rsi); // Callee's context.
+ Push(Smi::FromInt(StackFrame::STUB));
+ } else {
+    PredictableCodeSizeScope predictable_code_size_scope(this,
+                                                         kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
+ } else {
+ push(rbp); // Caller's frame pointer.
+ movq(rbp, rsp);
+ push(rsi); // Callee's context.
+ push(rdi); // Callee's JS function.
+ }
+ }
+}
+
+
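
Two notes on Prologue: the pre-aged branch must occupy exactly
kNoCodeAgeSequenceLength bytes so the GC can later patch the sequence in
place, hence the Nop padding after the short call; and the else branch builds
the standard JavaScript frame, whose slot layout relative to the new rbp is:

  //   rbp + 8  : return address
  //   rbp + 0  : saved caller rbp
  //   rbp - 8  : rsi (callee's context)
  //   rbp - 16 : rdi (callee's JS function)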
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(rbp);
movq(rbp, rsp);
@@ -3590,9 +3745,10 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
+ kFPOnStackSize + kPCOnStackSize);
+ ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
+ ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(rbp);
movq(rbp, rsp);
@@ -3620,7 +3776,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
@@ -3683,23 +3839,25 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
PushReturnAddressFrom(rcx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
movq(rsp, rbp);
pop(rbp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
- movq(rsi, context_operand);
+ if (restore_context) {
+ movq(rsi, context_operand);
+ }
#ifdef DEBUG
movq(context_operand, Immediate(0));
#endif
@@ -3777,6 +3935,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and with KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
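
For reference, the shift-and-xor integer hash this code mirrors, as a sketch
of ComputeIntegerHash (the authoritative copy is in utils.h; treat this one as
illustrative):

  uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
    uint32_t hash = key ^ seed;
    hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
    hash = hash ^ (hash >> 16);
    return hash;
  }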
@@ -3851,8 +4012,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
decl(r1);
// Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
movq(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -3870,7 +4030,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
+ if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
@@ -4280,18 +4440,27 @@ void MacroAssembler::CopyBytes(Register destination,
cmpl(length, Immediate(min_length));
Assert(greater_equal, kInvalidMinLength);
}
- Label loop, done, short_string, short_loop;
+ Label short_loop, len8, len16, len24, done, short_string;
- const int kLongStringLimit = 20;
+ const int kLongStringLimit = 4 * kPointerSize;
if (min_length <= kLongStringLimit) {
- cmpl(length, Immediate(kLongStringLimit));
- j(less_equal, &short_string);
+ cmpl(length, Immediate(kPointerSize));
+ j(below, &short_string, Label::kNear);
}
ASSERT(source.is(rsi));
ASSERT(destination.is(rdi));
ASSERT(length.is(rcx));
+ if (min_length <= kLongStringLimit) {
+ cmpl(length, Immediate(2 * kPointerSize));
+ j(below_equal, &len8, Label::kNear);
+ cmpl(length, Immediate(3 * kPointerSize));
+ j(below_equal, &len16, Label::kNear);
+ cmpl(length, Immediate(4 * kPointerSize));
+ j(below_equal, &len24, Label::kNear);
+ }
+
// Because source is 8-byte aligned in our uses of this function,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
@@ -4305,25 +4474,38 @@ void MacroAssembler::CopyBytes(Register destination,
addq(destination, scratch);
if (min_length <= kLongStringLimit) {
- jmp(&done);
+ jmp(&done, Label::kNear);
+ bind(&len24);
+ movq(scratch, Operand(source, 2 * kPointerSize));
+ movq(Operand(destination, 2 * kPointerSize), scratch);
+ bind(&len16);
+ movq(scratch, Operand(source, kPointerSize));
+ movq(Operand(destination, kPointerSize), scratch);
+ bind(&len8);
+ movq(scratch, Operand(source, 0));
+ movq(Operand(destination, 0), scratch);
+ // Move remaining bytes of length.
+ movq(scratch, Operand(source, length, times_1, -kPointerSize));
+ movq(Operand(destination, length, times_1, -kPointerSize), scratch);
+ addq(destination, length);
+ jmp(&done, Label::kNear);
bind(&short_string);
if (min_length == 0) {
testl(length, length);
- j(zero, &done);
+ j(zero, &done, Label::kNear);
}
- lea(scratch, Operand(destination, length, times_1, 0));
bind(&short_loop);
- movb(length, Operand(source, 0));
- movb(Operand(destination, 0), length);
+ movb(scratch, Operand(source, 0));
+ movb(Operand(destination, 0), scratch);
incq(source);
incq(destination);
- cmpq(destination, scratch);
- j(not_equal, &short_loop);
-
- bind(&done);
+ decl(length);
+ j(not_zero, &short_loop);
}
+
+ bind(&done);
}
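
The rewritten fast path copies 8-to-32-byte strings with one to three aligned
word moves plus a final word move that deliberately overlaps to cover the
unaligned tail, avoiding rep movs startup cost. Behaviorally it matches this C
sketch (assuming kPointerSize == 8; some bytes may be copied twice, as in the
assembly):

  #include <cstring>
  #include <cstddef>

  void copy_8_to_32(char* dst, const char* src, size_t len) {  // 8 <= len <= 32
    for (size_t i = 0; i + 8 <= len; i += 8)
      std::memcpy(dst + i, src + i, 8);            // the len8/len16/len24 moves
    std::memcpy(dst + len - 8, src + len - 8, 8);  // overlapping tail
  }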
@@ -4478,6 +4660,39 @@ int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object);
+ Throw(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmpq(value, Immediate(encoding_mask));
+ pop(value);
+ ThrowIf(not_equal, kUnexpectedStringType);
+
+  // The index is assumed to come in untagged. Tag it to compare against the
+  // string length without using a temp register; it is restored at the end
+  // of this function.
+ Integer32ToSmi(index, index);
+ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
+ ThrowIf(greater_equal, kIndexIsTooLarge);
+
+ SmiCompare(index, Smi::FromInt(0));
+ ThrowIf(less, kIndexIsNegative);
+
+  // Restore the index.
+ SmiToInteger32(index, index);
+}
+
+
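
The tag/compare/untag dance above works because an x64 smi is the 32-bit value
shifted into the upper word (kSmiShift == 32), so tagging is order-preserving
and the index can be compared against the smi-encoded string length in place.
The encoding assumed, as a sketch:

  #include <cstdint>

  int64_t tag(int32_t i) { return static_cast<int64_t>(i) << 32; }
  // tag(i) < tag(len) iff i < len, so SmiCompare on tagged values is sound.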
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);
@@ -4762,7 +4977,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
EnumLength(rdx, rbx);
- Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
+ Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
j(equal, call_runtime);
jmp(&start);
@@ -4791,8 +5006,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -4800,14 +5015,39 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- movq(kScratchRegister, new_space_start);
+ Move(kScratchRegister, new_space_start);
cmpq(scratch_reg, kScratchRegister);
- j(less, &no_memento_available);
+ j(less, no_memento_found);
cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
- j(greater, &no_memento_available);
+ j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
- bind(&no_memento_available);
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
+ ASSERT(!scratch1.is(scratch0));
+ Register current = scratch0;
+ Label loop_again;
+
+ movq(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ movq(current, FieldOperand(current, HeapObject::kMapOffset));
+ movq(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ and_(scratch1, Immediate(Map::kElementsKindMask));
+ shr(scratch1, Immediate(Map::kElementsKindShift));
+ cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ movq(current, FieldOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ j(not_equal, &loop_again);
}
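
The loop's bit twiddling on Map::kBitField2Offset, restated in C (a sketch;
the mask and shift constants live in the Map class):

  int elements_kind = (bit_field2 & Map::kElementsKindMask)
                      >> Map::kElementsKindShift;
  bool found_dictionary = (elements_kind == DICTIONARY_ELEMENTS);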