Diffstat (limited to 'chromium/v8/src/ia32/macro-assembler-ia32.cc')
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.cc  456
1 file changed, 363 insertions(+), 93 deletions(-)
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.cc b/chromium/v8/src/ia32/macro-assembler-ia32.cc
index b65d328435e..52d42f6ca87 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.cc
@@ -33,6 +33,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
#include "serialize.h"
@@ -45,7 +46,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
// TODO(titzer): should we just use a null handle here instead?
@@ -55,6 +55,34 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsx_b(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzx_b(dst, src);
+ } else if (r.IsInteger16()) {
+ movsx_w(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzx_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ mov_b(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ mov_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
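Note: the new Load/Store helpers above pick a sign- or zero-extending move based on the value's Representation. A minimal C++ analogue of the byte cases (an illustrative sketch, not part of the patch):

    #include <cstdint>
    // movsx_b sign-extends the loaded byte; movzx_b zero-extends it.
    int32_t  LoadInt8(const void* slot)  { return *static_cast<const int8_t*>(slot); }
    uint32_t LoadUint8(const void* slot) { return *static_cast<const uint8_t*>(slot); }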
@@ -232,7 +260,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
j(not_equal, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), input_reg);
+ movsd(MemOperand(esp, 0), input_reg);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
bind(&done);
@@ -253,8 +281,8 @@ void MacroAssembler::X87TOSToI(Register result_reg,
Label::Distance dst) {
Label done;
sub(esp, Immediate(kPointerSize));
- fist_s(MemOperand(esp, 0));
fld(0);
+ fist_s(MemOperand(esp, 0));
fild_s(MemOperand(esp, 0));
pop(result_reg);
FCmp();
@@ -283,7 +311,7 @@ void MacroAssembler::DoubleToI(Register result_reg,
Label::Distance dst) {
ASSERT(!input_reg.is(scratch));
cvttsd2si(result_reg, Operand(input_reg));
- cvtsi2sd(scratch, Operand(result_reg));
+ Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -344,7 +372,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
}
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
cmp(result_reg, 0x80000000u);
j(not_equal, &done, Label::kNear);
@@ -361,7 +389,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
if (input_reg.is(result_reg)) {
// Input is clobbered. Restore number from double scratch.
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), xmm0);
+ movsd(MemOperand(esp, 0), xmm0);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
} else {
@@ -390,9 +418,9 @@ void MacroAssembler::TaggedToI(Register result_reg,
ASSERT(!temp.is(no_xmm_reg));
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cvtsi2sd(temp, Operand(result_reg));
+ Cvtsi2sd(temp, Operand(result_reg));
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, Label::kNear);
@@ -445,25 +473,36 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
-
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
Label done;
cmp(src, Immediate(0));
- movdbl(scratch,
- Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
- cvtsi2sd(dst, src);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ movsd(scratch, Operand::StaticVariable(uint32_bias));
+ Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, scratch);
bind(&done);
}
+
+
+void MacroAssembler::LoadUint32NoSSE2(Register src) {
+ Label done;
+ push(src);
+ fild_s(Operand(esp, 0));
+ cmp(src, Immediate(0));
+ j(not_sign, &done, Label::kNear);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ fld_d(Operand::StaticVariable(uint32_bias));
+ faddp(1);
+ bind(&done);
+ add(esp, Immediate(kPointerSize));
+}
+
+
void MacroAssembler::RecordWriteArray(Register object,
Register value,
Register index,
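Note: cvtsi2sd (and fild_s) interpret the 32-bit source as signed, so an input with the sign bit set converts to a result exactly 2^32 too small; both LoadUint32 paths above compensate by adding a 2^32 bias, now loaded via ExternalReference::address_of_uint32_bias rather than through a raw pointer baked into the generated code. A sketch of the arithmetic:

    #include <cstdint>
    double LoadUint32(uint32_t value) {
      double result = static_cast<int32_t>(value);  // signed convert (cvtsi2sd)
      if (static_cast<int32_t>(value) < 0) {        // not_sign branch not taken
        result += 4294967296.0;                     // uint32 bias == 2^32
      }
      return result;
    }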
@@ -574,6 +613,10 @@ void MacroAssembler::RecordWriteForMap(
return;
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// A single check of the map's pages interesting flag suffices, since it is
// only set during incremental collection, and then it's also guaranteed that
// the from object's page's interesting flag is also set. This optimization
@@ -630,6 +673,10 @@ void MacroAssembler::RecordWrite(Register object,
bind(&ok);
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
@@ -676,6 +723,12 @@ void MacroAssembler::DebugBreak() {
#endif
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtsi2sd(dst, src);
+}
+
+
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, dst); // Shorter than mov.
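Note: the new Cvtsi2sd wrapper zeroes the destination with xorps first because cvtsi2sd writes only the low 64 bits of the XMM register, which otherwise leaves a false dependency on whatever last wrote the register's upper bits; xorps of a register with itself is recognized as dependency-breaking. A rough intrinsics analogue (illustrative only; _mm_cvtsi32_sd likewise merges into its first operand):

    #include <emmintrin.h>
    __m128d IntToDouble(int x) {
      __m128d dst = _mm_setzero_pd();  // xorps dst,dst: break the dependency
      return _mm_cvtsi32_sd(dst, x);   // cvtsi2sd writes only the low lane
    }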
@@ -799,9 +852,9 @@ void MacroAssembler::StoreNumberToDoubleElements(
ExternalReference::address_of_canonical_non_hole_nan();
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4,
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -821,7 +874,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&is_nan);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+ movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
} else {
fld_d(Operand::StaticVariable(canonical_nan_reference));
}
@@ -834,8 +887,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
SmiUntag(scratch1);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope fscope(this, SSE2);
- cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4,
+ Cvtsi2sd(scratch2, scratch1);
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -849,9 +902,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
}
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -864,10 +915,8 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail);
}
- Label success;
- CompareMap(obj, map, &success);
+ CompareMap(obj, map);
j(not_equal, fail);
- bind(&success);
}
@@ -996,6 +1045,30 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ } else {
+ PredictableCodeSizeScope predictable_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+ } else {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(edi); // Callee's JS function.
+ }
+ }
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
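Note: both arms of the age-able prologue in Prologue() above must occupy exactly kNoCodeAgeSequenceLength bytes so the collector can patch one over the other in place. Assuming the usual ia32 encodings, the byte budget works out as:

    // young: push ebp (1) + mov ebp,esp (2) + push esi (1) + push edi (1) = 5 bytes
    // aged:  call rel32 (5 bytes, Assembler::kCallInstructionLength)
    //        + Nop(kNoCodeAgeSequenceLength - kCallInstructionLength) padding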
@@ -1033,10 +1106,8 @@ void MacroAssembler::EnterExitFramePrologue() {
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- ExternalReference context_address(Isolate::kContextAddress,
- isolate());
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
}
@@ -1051,7 +1122,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
@@ -1095,7 +1166,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
}
@@ -1109,14 +1180,16 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Push the return address to get ready to return.
push(ecx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
+ if (restore_context) {
+ mov(esi, Operand::StaticVariable(context_address));
+ }
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
@@ -1128,11 +1201,11 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
mov(esp, ebp);
pop(ebp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
@@ -1344,8 +1417,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
-// Compute the hash code from the untagged key. This must be kept in sync
-// with ComputeIntegerHash in utils.h.
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
//
// Note: r0 will contain hash code
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
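For reference, the hash this must match is the Thomas Wang-style 32-bit integer mix V8 used at the time; a sketch (utils.h is authoritative):

    uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }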
@@ -1421,8 +1495,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
dec(r1);
// Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -1440,7 +1513,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
+ if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
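Note: the unrolled loop probes the dictionary at quadratically spaced slots, matching the runtime's SeededNumberDictionary lookup; the masked index for probe i is:

    uint32_t ProbeIndex(uint32_t hash, uint32_t mask, int i) {
      return (hash + i + i * i) & mask;  // "Compute the masked index" above
    }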
@@ -1942,30 +2015,48 @@ void MacroAssembler::CopyBytes(Register source,
Register destination,
Register length,
Register scratch) {
- Label loop, done, short_string, short_loop;
- // Experimentation shows that the short string loop is faster if length < 10.
- cmp(length, Immediate(10));
- j(less_equal, &short_string);
-
+ Label short_loop, len4, len8, len12, done, short_string;
ASSERT(source.is(esi));
ASSERT(destination.is(edi));
ASSERT(length.is(ecx));
+ cmp(length, Immediate(4));
+ j(below, &short_string, Label::kNear);
// Because source is 4-byte aligned in our uses of this function,
// we keep source aligned for the rep_movs call by copying the odd bytes
// at the end of the ranges.
mov(scratch, Operand(source, length, times_1, -4));
mov(Operand(destination, length, times_1, -4), scratch);
+
+ cmp(length, Immediate(8));
+ j(below_equal, &len4, Label::kNear);
+ cmp(length, Immediate(12));
+ j(below_equal, &len8, Label::kNear);
+ cmp(length, Immediate(16));
+ j(below_equal, &len12, Label::kNear);
+
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
and_(scratch, Immediate(0x3));
add(destination, scratch);
- jmp(&done);
+ jmp(&done, Label::kNear);
+
+ bind(&len12);
+ mov(scratch, Operand(source, 8));
+ mov(Operand(destination, 8), scratch);
+ bind(&len8);
+ mov(scratch, Operand(source, 4));
+ mov(Operand(destination, 4), scratch);
+ bind(&len4);
+ mov(scratch, Operand(source, 0));
+ mov(Operand(destination, 0), scratch);
+ add(destination, length);
+ jmp(&done, Label::kNear);
bind(&short_string);
test(length, length);
- j(zero, &done);
+ j(zero, &done, Label::kNear);
bind(&short_loop);
mov_b(scratch, Operand(source, 0));
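Note: the rewrite replaces the single "short string" cutoff at length 10 with a dispatch on length: a byte loop below 4 bytes, fully unrolled dword copies up to 16 bytes, and rep_movs beyond that. In every path of 4 bytes or more, the last dword is copied first with a store that may overlap the dword-granular copies, which both covers the unaligned tail and keeps source 4-byte aligned for rep_movs. A plain-C++ sketch of the control flow, assuming the same non-overlapping-buffer preconditions:

    #include <cstdint>
    #include <cstring>
    void CopyBytes(uint8_t* dst, const uint8_t* src, uint32_t len) {
      if (len < 4) {                                   // short_string: byte loop
        while (len--) *dst++ = *src++;
        return;
      }
      std::memcpy(dst + len - 4, src + len - 4, 4);    // tail dword, copied first
      if (len <= 16) {                                 // len12/len8/len4 fall through
        if (len > 12) std::memcpy(dst + 8, src + 8, 4);
        if (len > 8)  std::memcpy(dst + 4, src + 4, 4);
        std::memcpy(dst, src, 4);
        return;
      }
      std::memcpy(dst, src, len & ~3u);                // rep_movs of len/4 dwords
    }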
@@ -2096,8 +2187,6 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -2109,8 +2198,7 @@ void MacroAssembler::StubReturn(int argc) {
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2141,23 +2229,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
- : kDontSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -2172,7 +2246,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
+ CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
+ : kDontSaveFPRegs);
CallStub(&ces);
}
@@ -2221,11 +2296,13 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Operand thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
ExternalReference limit_address =
@@ -2281,9 +2358,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Label prologue;
// Load the value from ReturnValue
- mov(eax, Operand(ebp, return_value_offset * kPointerSize));
+ mov(eax, return_value_operand);
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -2303,6 +2381,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -2339,11 +2418,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ mov(esi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
@@ -2543,7 +2630,7 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -2552,18 +2639,25 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
- // Get the function and setup the context.
- LoadHeapObject(edi, function);
+ ASSERT(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
expected, actual, flag, call_wrapper, call_kind);
}
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ LoadHeapObject(edi, function);
+ InvokeFunction(edi, expected, actual, flag, call_wrapper, call_kind);
+}
+
+
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -2980,6 +3074,40 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
+void MacroAssembler::Throw(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ push(eax);
+ push(Immediate(Smi::FromInt(reason)));
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // will not return here
+ int3();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ j(NegateCondition(cc), &L);
+ Throw(reason);
+ // will not return here
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
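Note: ThrowIf branches around a Throw on the negated condition, so call sites read like checked assertions; EmitSeqStringSetCharCheck below uses it as, for example:

    cmp(index, FieldOperand(string, String::kLengthOffset));
    ThrowIf(greater_equal, kIndexIsTooLarge);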
@@ -3003,6 +3131,88 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ sub(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ mov(scratch, object);
+ SmiUntag(scratch);
+ jmp(&smi_hash_calculated, Label::kNear);
+ bind(&not_smi);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(this, SSE2);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ } else {
+ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ FCmp();
+ }
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache, Label::kNear);
+
+ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ // Check if the entry is the smi we are looking for.
+ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
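Note: as the comments in LookupNumberStringCache say, the cache index for a heap number is the xor of the double's two 32-bit halves masked to the table size, while a smi hashes to its untagged value. A sketch of the heap-number case:

    #include <cstdint>
    #include <cstring>
    uint32_t HeapNumberCacheIndex(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t hash = static_cast<uint32_t>(bits) ^
                      static_cast<uint32_t>(bits >> 32);
      return hash & mask;  // mask == (cache length / 2) - 1
    }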
@@ -3063,6 +3273,42 @@ void MacroAssembler::JumpIfNotUniqueName(Operand operand,
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object, Label::kNear);
+ Throw(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmp(value, Immediate(encoding_mask));
+ pop(value);
+ ThrowIf(not_equal, kUnexpectedStringType);
+
+ // The index is assumed to come in untagged; tag it to compare against the
+ // string length without using a temp register. It is untagged again at the
+ // end of this function.
+ SmiTag(index);
+ // Can't use the overflow flag directly here; the compiler can't disambiguate.
+ ThrowIf(NegateCondition(no_overflow), kIndexIsTooLarge);
+
+ cmp(index, FieldOperand(string, String::kLengthOffset));
+ ThrowIf(greater_equal, kIndexIsTooLarge);
+
+ cmp(index, Immediate(Smi::FromInt(0)));
+ ThrowIf(less, kIndexIsNegative);
+
+ // Restore the index
+ SmiUntag(index);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
@@ -3379,7 +3625,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
+ cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
j(equal, call_runtime);
jmp(&start);
@@ -3408,9 +3654,8 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
-
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -3419,15 +3664,40 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Immediate(new_space_start));
- j(less, &no_memento_available);
+ j(less, no_memento_found);
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, &no_memento_available);
+ j(greater, no_memento_found);
cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- bind(&no_memento_available);
+ Immediate(isolate()->factory()->allocation_memento_map()));
}
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // Start the walk at the object itself.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ and_(scratch1, Map::kElementsKindMask);
+ shr(scratch1, Map::kElementsKindShift);
+ cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ mov(current, FieldOperand(current, Map::kPrototypeOffset));
+ cmp(current, Immediate(factory->null_value()));
+ j(not_equal, &loop_again);
+}
+
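Note: JumpIfDictionaryInPrototypeChain walks map -> prototype -> map until it reaches null, jumping to 'found' as soon as any map's elements kind is DICTIONARY_ELEMENTS. In pseudo-C++ (hypothetical accessor names, for illustration only):

    bool HasDictionaryElementsInChain(Object* object) {
      Map* map = object->map();
      for (;;) {                       // loop_again
        if (map->elements_kind() == DICTIONARY_ELEMENTS) return true;
        Object* prototype = map->prototype();
        if (prototype->IsNull()) return false;
        map = prototype->map();
      }
    }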
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32