Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h            |    5
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc               |  123
-rw-r--r--  deps/v8/src/x64/assembler-x64.h                |    4
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc                |  185
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc              |  597
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h               |    4
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                 |   22
-rw-r--r--  deps/v8/src/x64/codegen-x64.h                  |   73
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc                     |   12
-rw-r--r--  deps/v8/src/x64/debug-x64.cc                   |   19
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc             |  206
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc                  |   74
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc            |   47
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                      |   37
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc         |  368
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h          |   14
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.h     |    2
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                 |   85
-rw-r--r--  deps/v8/src/x64/lithium-x64.h                  |  480
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc         |  229
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h          |   35
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc  |    7
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc              |  867
23 files changed, 1493 insertions, 2002 deletions
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 826c06e5ba..07d07033e9 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -448,7 +448,7 @@ Object** RelocInfo::call_object_address() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@@ -463,12 +463,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 8969d89a6a..41bf297b38 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -63,98 +63,32 @@ void CpuFeatures::Probe() {
return; // No features if we might serialize.
}
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
+ uint64_t probed_features = 0;
+ CPU cpu;
+ if (cpu.has_sse41()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE4_1;
}
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
+ if (cpu.has_sse3()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE3;
}
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old rsp, since we are going to modify the stack.
- __ push(rbp);
- __ pushfq();
- __ push(rdi);
- __ push(rcx);
- __ push(rbx);
- __ movq(rbp, rsp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfq();
- __ pop(rax);
- __ movq(rdx, rax);
- __ xor_(rax, Immediate(0x200000)); // Flip bit 21.
- __ push(rax);
- __ popfq();
- __ pushfq();
- __ pop(rax);
- __ xor_(rax, rdx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in rax.
- __ xor_(rax, rax);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ movl(rax, Immediate(1));
- supported_ = kDefaultCpuFeatures | (1 << CPUID);
- { CpuFeatureScope fscope(&assm, CPUID);
- __ cpuid();
- // Move the result from ecx:edx to rdi.
- __ movl(rdi, rdx); // Zero-extended to 64 bits.
- __ shl(rcx, Immediate(32));
- __ or_(rdi, rcx);
-
- // Get the sahf supported flag, from CPUID(0x80000001)
- __ movq(rax, 0x80000001, RelocInfo::NONE64);
- __ cpuid();
+ // SSE2 must be available on every x64 CPU.
+ ASSERT(cpu.has_sse2());
+ probed_features |= static_cast<uint64_t>(1) << SSE2;
+
+ // CMOV must be available on every x64 CPU.
+ ASSERT(cpu.has_cmov());
+ probed_features |= static_cast<uint64_t>(1) << CMOV;
+
+ // SAHF is not generally available in long mode.
+ if (cpu.has_sahf()) {
+ probed_features |= static_cast<uint64_t>(1) << SAHF;
}
- supported_ = kDefaultCpuFeatures;
- // Put the CPU flags in rax.
- // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
- __ movl(rax, Immediate(1));
- __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported.
- __ not_(rax);
- __ and_(rax, rdi);
- __ or_(rax, rcx);
- __ or_(rax, Immediate(1 << CPUID));
-
- // Done.
- __ bind(&done);
- __ movq(rsp, rbp);
- __ pop(rbx);
- __ pop(rcx);
- __ pop(rdi);
- __ popfq();
- __ pop(rbp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
-
- uint64_t probed_features = probe();
uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
supported_ = probed_features | platform_features;
found_by_runtime_probing_only_
= probed_features & ~kDefaultCpuFeatures & ~platform_features;
-
- // CMOV must be available on an X64 CPU.
- ASSERT(IsSupported(CPUID));
- ASSERT(IsSupported(CMOV));
-
- delete memory;
}
@@ -462,7 +396,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
+ (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -987,7 +921,6 @@ void Assembler::cmpb_al(Immediate imm8) {
void Assembler::cpuid() {
- ASSERT(IsEnabled(CPUID));
EnsureSpace ensure_space(this);
emit(0x0F);
emit(0xA2);
@@ -1600,7 +1533,7 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
} else {
EnsureSpace ensure_space(this);
ASSERT(value->IsHeapObject());
- ASSERT(!HEAP->InNewSpace(*value));
+ ASSERT(!isolate()->heap()->InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitp(value.location(), mode);
@@ -1919,13 +1852,6 @@ void Assembler::pushfq() {
}
-void Assembler::rdtsc() {
- EnsureSpace ensure_space(this);
- emit(0x0F);
- emit(0x31);
-}
-
-
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
ASSERT(is_uint16(imm16));
@@ -2992,6 +2918,17 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(0x01); // LT == 1
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src,
Assembler::RoundingMode mode) {
ASSERT(IsEnabled(SSE4_1));
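For reference, the new cmpltsd helper above emits the SSE2 CMPSD instruction (F2 0F C2 /r ib) with the predicate immediate hard-coded to 1 (LT). A generalized emitter taking the predicate as a parameter could look like the sketch below; the cmpsd name and signature are an assumption for illustration, not part of this patch, while the predicate encodings are fixed by the instruction set (0=EQ, 1=LT, 2=LE, 3=UNORD, 4=NEQ, 5=NLT, 6=NLE, 7=ORD).

// Sketch only (hypothetical helper, not in this patch).
void Assembler::cmpsd(XMMRegister dst, XMMRegister src, uint8_t predicate) {
  EnsureSpace ensure_space(this);
  emit(0xF2);                       // scalar double-precision prefix
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0xC2);                       // CMPSD opcode
  emit_sse_operand(dst, src);
  emit(predicate);                  // selects the comparison, e.g. 1 for LT
}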
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 4e36b6e4bc..f2e37fe863 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -475,7 +475,6 @@ class CpuFeatures : public AllStatic {
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
@@ -1176,7 +1175,6 @@ class Assembler : public AssemblerBase {
void hlt();
void int3();
void nop();
- void rdtsc();
void ret(int imm16);
void setcc(Condition cc, Register reg);
@@ -1386,6 +1384,8 @@ class Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
void movmskps(Register dst, XMMRegister src);
+ void cmpltsd(XMMRegister dst, XMMRegister src);
+
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 18a6e566c6..81721c25e1 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -73,6 +73,24 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
+ // Function is also the parameter to the runtime call.
+ __ push(rdi);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
+ __ pop(rdi);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ movq(kScratchRegister,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -84,57 +102,27 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
-}
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -586,26 +574,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
@@ -613,26 +582,7 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
@@ -708,7 +658,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(r10, Operand(rsp, kPCOnStackSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
@@ -717,7 +667,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
__ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -782,8 +732,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack, check
// if it is a function.
Label slow, non_function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, rax);
+ __ movq(rdi, args.GetReceiverOperand());
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
@@ -808,7 +758,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(not_zero, &shift_arguments);
// Compute the receiver in non-strict mode.
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+ __ movq(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
@@ -837,7 +787,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
// Restore the function to rdi.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ movq(rdi, args.GetReceiverOperand());
__ jmp(&patch_receiver, Label::kNear);
// Use the global receiver object from the called function as the
@@ -851,7 +801,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+ __ movq(args.GetArgumentOperand(1), rbx);
__ jmp(&shift_arguments);
}
@@ -868,7 +818,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
+ __ movq(args.GetArgumentOperand(1), rdi);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -1178,10 +1128,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the first argument into rax and get rid of the rest
// (including the receiver).
+ StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
__ testq(rax, rax);
__ j(zero, &no_arguments);
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+ __ movq(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
@@ -1407,32 +1358,46 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Lookup and calculate pc offset.
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+ __ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
+ __ Integer32ToSmi(rdx, rdx);
+
+ // Pass both function and pc offset as arguments.
__ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(rdx);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
Label skip;
- __ SmiCompare(rax, Smi::FromInt(-1));
+ // If the code object is null, just return to the unoptimized code.
+ __ cmpq(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiToInteger32(rax, rax);
- __ push(rax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+
+ // Load deoptimization data from the code object.
+ __ movq(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+ // Overwrite the return address on the stack.
+ __ movq(Operand(rsp, 0), rax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
}
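The rewritten Generate_OnStackReplacement above no longer goes through the deoptimizer's OSR entry generator; it reads the OSR pc offset out of the optimized code's deoptimization data and jumps straight to that entry point. In plain C++ arithmetic, the address produced by the final lea is (sketch with assumed variable names, not code from this patch):

// Illustrative sketch: the value stored over the saved return address.
Address osr_entry =
    reinterpret_cast<Address>(code_obj)    // tagged Code object pointer (rax)
    + Code::kHeaderSize - kHeapObjectTag   // skip the heap object header/tag
    + osr_pc_offset;                       // from DeoptimizationInputData (rbx)
// The stub then executes ret, so control resumes at the OSR entry of the
// optimized code instead of returning to the unoptimized frame.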
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 787a501829..51e1a5395c 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -39,6 +39,17 @@ namespace v8 {
namespace internal {
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rbx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -295,140 +306,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
- __ Allocate(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
-
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
-
- int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ movq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ testq(rbx, rbx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
- rdi); // Initialize with undefined.
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
-
- // rcx holds native context, rbx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into edx.
- __ movq(rdx, FieldOperand(rbx, SharedFunctionInfo::kFirstCodeSlot));
- __ cmpq(rcx, FieldOperand(rbx, SharedFunctionInfo::kFirstContextSlot));
- __ j(equal, &install_optimized);
-
- // Iterate through the rest of map backwards. rdx holds an index.
- Label loop;
- Label restore;
- __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ SmiToInteger32(rdx, rdx);
- __ bind(&loop);
- // Do not double check first entry.
- __ cmpq(rdx, Immediate(SharedFunctionInfo::kSecondEntryIndex));
- __ j(equal, &restore);
- __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
- __ cmpq(rcx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ movq(rdx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Now link a function into a list of optimized functions.
- __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
- // No need for write barrier as JSFunction (rax) is in the new space.
-
- __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
- // Store JSFunction (rax) into rdx before issuing write barrier as
- // it clobbers all the registers passed.
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- rdx,
- rbx,
- kDontSaveFPRegs);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&restore);
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ PopReturnAddressTo(rcx);
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@@ -437,7 +314,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
@@ -483,10 +361,10 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(1));
// Get the serialized scope info from the stack.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rbx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
@@ -560,7 +438,6 @@ class FloatingPointHelper : public AllStatic {
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// NumberOperands assumes both are smis or heap numbers.
static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
static void LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers);
@@ -569,9 +446,6 @@ class FloatingPointHelper : public AllStatic {
static void LoadAsIntegers(MacroAssembler* masm,
Label* operand_conversion_failure,
Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
// Tries to convert two values to smis losslessly.
// This fails if either argument is not a Smi nor a HeapNumber,
@@ -1262,8 +1136,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
Label input_not_smi, loaded;
+
// Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx.
@@ -1324,7 +1200,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference::transcendental_cache_array_address(masm->isolate());
__ movq(rax, cache_array);
int cache_array_index =
- type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
+ type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
__ movq(rax, Operand(rax, cache_array_index));
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
@@ -1548,40 +1424,6 @@ void TranscendentalCacheStub::GenerateOperation(
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- DoubleToIStub stub1(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- DoubleToIStub stub2(rdx, rdx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ JumpIfNotSmi(rax, &rax_is_object);
-
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label* conversion_failure,
@@ -1605,10 +1447,8 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ bind(&arg1_is_object);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in rcx.
- DoubleToIStub stub1(rdx, r8, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ // Get the untagged integer version of the rdx heap number in r8.
+ __ TruncateHeapNumberToI(r8, rdx);
// Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
@@ -1628,9 +1468,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the rax heap number in rcx.
- DoubleToIStub stub2(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ TruncateHeapNumberToI(rcx, rax);
__ bind(&done);
__ movl(rax, r8);
@@ -1645,30 +1483,6 @@ void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
}
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@@ -1796,8 +1610,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
- __ movq(base, Operand(rsp, 2 * kPointerSize));
- __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(base, args.GetArgumentOperand(0));
+ __ movq(exponent, args.GetArgumentOperand(1));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
@@ -1830,16 +1645,17 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
if (exponent_type_ != INTEGER) {
- Label fast_power;
+ Label fast_power, try_arithmetic_simplification;
// Detect integer exponents stored as double.
+ __ DoubleToI(exponent, double_exponent, double_scratch,
+ TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+ __ jmp(&int_exponent);
+
+ __ bind(&try_arithmetic_simplification);
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmpl(exponent, Immediate(0x80000000u));
__ j(equal, &call_runtime);
- __ cvtlsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -2230,7 +2046,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Factory* factory = masm->isolate()->factory();
- __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ SmiToInteger64(rbx, args.GetArgumentOperand(2));
// rbx = parameter count (untagged)
// Check if the calling frame is an arguments adaptor frame.
@@ -2252,7 +2069,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
@@ -2313,7 +2130,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(0));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize),
rdx);
@@ -2364,7 +2181,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, Operand(rsp, 1 * kPointerSize));
+ __ addq(r8, args.GetArgumentOperand(2));
__ subq(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movq(rdx, rdi);
@@ -2403,7 +2220,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label arguments_loop, arguments_test;
__ movq(r8, rbx);
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
__ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
@@ -2430,7 +2247,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
- __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
+ __ movq(args.GetArgumentOperand(2), rcx); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -2449,12 +2266,13 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ movq(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -2475,18 +2293,19 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(2));
__ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ movq(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
@@ -2516,7 +2335,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(2));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
@@ -2527,7 +2346,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(zero, &done);
// Get the parameters pointer from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(1));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -3010,7 +2829,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
const int kMaxInlineLength = 100;
Label slowcase;
Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(r8, args.GetArgumentOperand(0));
__ JumpIfNotSmi(r8, &slowcase);
__ SmiToInteger32(rbx, r8);
__ cmpl(rbx, Immediate(kMaxInlineLength));
@@ -3048,11 +2868,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set input, index and length fields from arguments.
- __ movq(r8, Operand(rsp, kPointerSize * 1));
+ __ movq(r8, args.GetArgumentOperand(2));
__ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 2));
+ __ movq(r8, args.GetArgumentOperand(1));
__ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ __ movq(r8, args.GetArgumentOperand(0));
__ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
// Fill out the elements FixedArray.
@@ -3183,7 +3003,8 @@ void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
- __ movq(rbx, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rbx, args.GetArgumentOperand(0));
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime);
@@ -3497,16 +3318,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
@@ -3596,6 +3407,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function;
+ StackArgumentsAccessor args(rsp, argc_);
// The receiver might implicitly be the global object. This is
// indicated by passing the hole as the receiver to the call
@@ -3603,15 +3415,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (ReceiverMightBeImplicit()) {
Label call;
// Get the receiver from the stack.
- // +1 ~ return address
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+ __ movq(rax, args.GetReceiverOperand());
// Call as function is indicated with the hole.
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &call, Label::kNear);
// Patch the receiver on the stack with the global receiver object.
__ movq(rcx, GlobalObjectOperand());
__ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
+ __ movq(args.GetReceiverOperand(), rcx);
__ bind(&call);
}
@@ -3673,13 +3484,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
__ bind(&non_function);
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ movq(args.GetReceiverOperand(), rdi);
__ Set(rax, argc_);
__ Set(rbx, 0);
__ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
- Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
+ isolate->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
@@ -3734,7 +3545,7 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
#ifdef _WIN64
return result_size_ == 1;
#else
@@ -4208,12 +4019,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const unsigned int kWordBeforeResultValue = 0x458B4909;
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
- int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
+ int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
// Get the object - go slow case if it's a smi.
Label slow;
-
- __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
+ StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
@@ -4223,7 +4035,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
+ __ movq(rdx, args.GetArgumentOperand(1));
// rdx is function, rax is map.
// If there is a call site cache don't look in the global cache, but do the
@@ -4258,8 +4070,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
} else {
// Get return address and delta to inlined map check.
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
@@ -4299,8 +4111,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Assert it is a 1-byte signed value.
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -4309,7 +4121,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ Set(rax, 0);
}
- __ ret(2 * kPointerSize + extra_stack_space);
+ __ ret((2 + extra_argument_offset) * kPointerSize);
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
@@ -4322,8 +4134,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Assert it is a 1-byte signed value.
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -4331,7 +4143,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
}
- __ ret(2 * kPointerSize + extra_stack_space);
+ __ ret((2 + extra_argument_offset) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
@@ -4489,8 +4301,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0)); // First argument (left).
+ __ movq(rdx, args.GetArgumentOperand(1)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
// Otherwise, at least one of the arguments is definitely a string,
@@ -4831,7 +4644,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ j(below, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@@ -4839,22 +4651,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch1,
scratch2,
scratch3,
- &not_cached);
+ slow);
__ movq(arg, scratch1);
__ movq(Operand(rsp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(zero, slow);
- __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ movq(Operand(rsp, stack_offset), arg);
-
__ bind(&done);
}
@@ -5497,8 +5296,9 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// rsp[8] : right string
// rsp[16] : left string
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rdx, args.GetArgumentOperand(0)); // left
+ __ movq(rax, args.GetArgumentOperand(1)); // right
// Check for identity.
Label not_same;
@@ -6011,9 +5811,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the null value).
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER,
+ kPointerSize);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, Operand(rsp, 2 * kPointerSize));
+ __ movq(scratch, args.GetArgumentOperand(1));
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
@@ -6033,7 +5835,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
+ __ cmpq(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
@@ -6083,8 +5885,6 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
{ REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
@@ -6121,7 +5921,7 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -6385,8 +6185,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
// Get array literal index, array literal and its map.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rdx, args.GetArgumentOperand(1));
+ __ movq(rbx, args.GetArgumentOperand(0));
__ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ CheckFastElements(rdi, &double_elements);
@@ -6511,96 +6312,133 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmpl(rdx, Immediate(kind));
- __ j(not_equal, &next);
- T stub(kind);
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmpl(rdx, Immediate(kind));
+ __ j(not_equal, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
- // rbx - type info cell
- // rdx - kind
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // rbx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
// rax - number of arguments
// rdi - constructor?
// rsp[0] - return address
// rsp[8] - last argument
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
- // is the low bit set? If so, we are holey and that is good.
- __ testb(rdx, Immediate(1));
Label normal_sequence;
- __ j(not_zero, &normal_sequence);
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ testb(rdx, Immediate(1));
+ __ j(not_zero, &normal_sequence);
+ }
// look at the first argument
- __ movq(rcx, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
- __ incl(rdx);
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &normal_sequence);
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(not_equal, &normal_sequence);
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
- // Save the resulting elements kind in type info
- __ Integer32ToSmi(rdx, rdx);
- __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
- __ SmiToInteger32(rdx, rdx);
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
- __ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmpl(rdx, Immediate(kind));
- __ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ incl(rdx);
+ __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ Assert(equal, kExpectedAllocationSiteInCell);
+ }
+
+ // Save the resulting elements kind in type info
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
+ __ SmiToInteger32(rdx, rdx);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmpl(rdx, Immediate(kind));
+ __ j(not_equal, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -6633,6 +6471,34 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ testq(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmpl(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -6668,50 +6534,22 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
- Label no_info, switch_ready;
- // Get the elements kind and case on that.
+ Label no_info;
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
__ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
-
- // The type cell may have undefined in its value.
- __ Cmp(rdx, undefined_sentinel);
- __ j(equal, &no_info);
-
- // The type cell has either an AllocationSite or a JSFunction
__ Cmp(FieldOperand(rdx, 0),
Handle<Map>(masm->isolate()->heap()->allocation_site_map()));
__ j(not_equal, &no_info);
__ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ movq(rdx, Immediate(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
-
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ testq(rax, rax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmpl(rax, Immediate(1));
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
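
Condensed, the rewritten type-cell check amounts to a small decision; a sketch of that decision under the same conditions the generated code tests:

enum AllocationSiteOverrideModeSketch { DONT_OVERRIDE, DISABLE_ALLOCATION_SITES };

// cell_is_undefined: rbx compared against the undefined sentinel.
// cell_holds_allocation_site: the map check against allocation_site_map().
AllocationSiteOverrideModeSketch ChooseDispatchMode(bool cell_is_undefined,
                                                    bool cell_holds_allocation_site) {
  if (cell_is_undefined || !cell_holds_allocation_site) {
    return DISABLE_ALLOCATION_SITES;  // no usable feedback: generic stubs
  }
  return DONT_OVERRIDE;  // use the kind recorded in the AllocationSite
}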
@@ -6732,7 +6570,8 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument
- __ movq(rcx, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index e430bf2c80..41678ecd20 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -69,7 +69,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -321,7 +321,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index a39f14b075..24773c2595 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -744,6 +744,28 @@ void Code::PatchPlatformCodeAge(byte* sequence,
}
+Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
+ ASSERT(index >= 0);
+ ASSERT(base_reg_.is(rsp) || base_reg_.is(rbp));
+ int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
+ int displacement_to_last_argument = base_reg_.is(rsp) ?
+ kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
+ displacement_to_last_argument += extra_displacement_to_last_argument_;
+ if (argument_count_reg_.is(no_reg)) {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // (argument_count_immediate_ + receiver - 1) * kPointerSize.
+ ASSERT(argument_count_immediate_ + receiver > 0);
+ return Operand(base_reg_, displacement_to_last_argument +
+ (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
+ } else {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
+ return Operand(base_reg_, argument_count_reg_, times_pointer_size,
+ displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
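
For the rsp-based, immediate-count case, the displacement math in GetArgumentOperand can be checked with a small sketch; kPCOnStackSize and kPointerSize are assumed to both be 8 on x64:

#include <cassert>

const int kPointerSizeSketch = 8;
const int kPCOnStackSizeSketch = 8;

int ArgumentDisplacement(int argc, int index, bool args_contain_receiver) {
  assert(index >= 0);
  int receiver = args_contain_receiver ? 1 : 0;
  // argument[0] is the slot farthest from the saved return address.
  return kPCOnStackSizeSketch +
         (argc + receiver - 1 - index) * kPointerSizeSketch;
}

// With ARGUMENTS_CONTAIN_RECEIVER, index 0 is the receiver:
//   ArgumentDisplacement(argc, 0, true) == (argc + 1) * 8,
// matching the "(argc + 1) * kPointerSize" operands this patch replaces below.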
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 5747e0bc6f..7d1f59ad5f 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -44,8 +44,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
- CodeGenerator() {
- InitializeAstVisitor();
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
}
static bool MakeCode(CompilationInfo* info);
@@ -61,7 +61,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static bool RecordPositions(MacroAssembler* masm,
int pos,
@@ -103,6 +103,73 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
+
+enum StackArgumentsAccessorReceiverMode {
+ ARGUMENTS_CONTAIN_RECEIVER,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER
+};
+
+
+class StackArgumentsAccessor BASE_EMBEDDED {
+ public:
+ StackArgumentsAccessor(
+ Register base_reg,
+ int argument_count_immediate,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(no_reg),
+ argument_count_immediate_(argument_count_immediate),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ StackArgumentsAccessor(
+ Register base_reg,
+ Register argument_count_reg,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(argument_count_reg),
+ argument_count_immediate_(0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ StackArgumentsAccessor(
+ Register base_reg,
+ const ParameterCount& parameter_count,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(parameter_count.is_reg() ?
+ parameter_count.reg() : no_reg),
+ argument_count_immediate_(parameter_count.is_immediate() ?
+ parameter_count.immediate() : 0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ Operand GetArgumentOperand(int index);
+ Operand GetReceiverOperand() {
+ ASSERT(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
+    return GetArgumentOperand(0);
+ }
+
+ private:
+ const Register base_reg_;
+ const Register argument_count_reg_;
+ const int argument_count_immediate_;
+ const StackArgumentsAccessorReceiverMode receiver_mode_;
+ const int extra_displacement_to_last_argument_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
+};
+
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
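
A minimal usage sketch for the new accessor, written as a commented fragment that mirrors how the later hunks in this patch use it (register-count form, receiver included):

// Inside a stub generator, with the argument count in rax:
//   StackArgumentsAccessor args(rsp, rax, ARGUMENTS_CONTAIN_RECEIVER);
//   __ movq(rdx, args.GetReceiverOperand());   // receiver (index 0)
//   __ movq(rbx, args.GetArgumentOperand(1));  // first explicit argument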
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 96c5330832..4fa290a8b5 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -72,18 +72,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index e6bc92950a..6612242a03 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -50,7 +50,7 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_return()->entry(),
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
}
@@ -80,7 +80,7 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_slot()->entry(),
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
}
@@ -123,14 +123,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((object_regs & (1 << r)) != 0) {
__ push(reg);
}
- // Store the 64-bit value as two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
+ __ PushInt64AsTwoSmis(reg);
}
}
@@ -155,12 +149,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
+ __ PopInt64AsTwoSmis(reg);
}
}
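
The removed sequences show what the new PushInt64AsTwoSmis/PopInt64AsTwoSmis helpers do: split a raw 64-bit value into two halves and smi-tag each so the GC sees only smis on the stack. A round-trip sketch, assuming the x64 smi encoding keeps the 32-bit payload in the upper word half (tag == shift left by 32):

#include <cstdint>

uint64_t ToSmiSketch(uint32_t half)  { return static_cast<uint64_t>(half) << 32; }
uint32_t FromSmiSketch(uint64_t smi) { return static_cast<uint32_t>(smi >> 32); }

void SplitInt64(uint64_t value, uint64_t* low_smi, uint64_t* high_smi) {
  *low_smi  = ToSmiSketch(static_cast<uint32_t>(value));        // Integer32ToSmi
  *high_smi = ToSmiSketch(static_cast<uint32_t>(value >> 32));  // sar 32, then tag
}

uint64_t JoinInt64(uint64_t low_smi, uint64_t high_smi) {
  return (static_cast<uint64_t>(FromSmiSketch(high_smi)) << 32) |
         FromSmiSketch(low_smi);
}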
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index e9cf567f7e..303b756cac 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -105,12 +105,7 @@ static const byte kNopByteTwo = 0x90;
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
@@ -126,12 +121,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
@@ -146,195 +136,33 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- return true;
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return PATCHED_FOR_OSR;
} else {
- ASSERT_EQ(interrupt_code->entry(),
+      // Get the interrupt builtin code object from the cache to match against.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
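
The assertion-only helper above distinguishes the two states of a back-edge site by the byte three positions before the call's 32-bit target: still the original jns opcode, or the first of the two nops written when the site is redirected to on-stack replacement. A sketch of just that decision:

#include <cstdint>

enum InterruptPatchStateSketch { NOT_PATCHED, PATCHED_FOR_OSR };

InterruptPatchStateSketch GetPatchStateSketch(const uint8_t* call_target_address,
                                              uint8_t nop_byte_one) {
  return call_target_address[-3] == nop_byte_one ? PATCHED_FOR_OSR
                                                 : NOT_PATCHED;
}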
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
- "(fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
- output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- intptr_t pc = reinterpret_cast<intptr_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -531,9 +359,7 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(rbx, FrameDescription::state_offset()));
- }
+ __ push(Operand(rbx, FrameDescription::state_offset()));
__ push(Operand(rbx, FrameDescription::pc_offset()));
__ push(Operand(rbx, FrameDescription::continuation_offset()));
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index eefa70372e..9984a46307 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -332,10 +332,10 @@ class DisassemblerX64 {
private:
enum OperandSize {
- BYTE_SIZE = 0,
- WORD_SIZE = 1,
- DOUBLEWORD_SIZE = 2,
- QUADWORD_SIZE = 3
+ OPERAND_BYTE_SIZE = 0,
+ OPERAND_WORD_SIZE = 1,
+ OPERAND_DOUBLEWORD_SIZE = 2,
+ OPERAND_QUADWORD_SIZE = 3
};
const NameConverter& converter_;
@@ -369,10 +369,10 @@ class DisassemblerX64 {
bool rex_w() { return (rex_ & 0x08) != 0; }
OperandSize operand_size() {
- if (byte_size_operand_) return BYTE_SIZE;
- if (rex_w()) return QUADWORD_SIZE;
- if (operand_size_ != 0) return WORD_SIZE;
- return DOUBLEWORD_SIZE;
+ if (byte_size_operand_) return OPERAND_BYTE_SIZE;
+ if (rex_w()) return OPERAND_QUADWORD_SIZE;
+ if (operand_size_ != 0) return OPERAND_WORD_SIZE;
+ return OPERAND_DOUBLEWORD_SIZE;
}
char operand_size_code() {
@@ -562,19 +562,19 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
int64_t value;
int count;
switch (size) {
- case BYTE_SIZE:
+ case OPERAND_BYTE_SIZE:
value = *data;
count = 1;
break;
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
value = *reinterpret_cast<int16_t*>(data);
count = 2;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
value = *reinterpret_cast<uint32_t*>(data);
count = 4;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
value = *reinterpret_cast<int32_t*>(data);
count = 4;
break;
@@ -682,7 +682,8 @@ int DisassemblerX64::PrintImmediateOp(byte* data) {
AppendToBuffer("%s%c ", mnem, operand_size_code());
int count = PrintRightOperand(data + 1);
AppendToBuffer(",0x");
- OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
+ OperandSize immediate_size =
+ byte_size_immediate ? OPERAND_BYTE_SIZE : operand_size();
count += PrintImmediate(data + 1 + count, immediate_size);
return 1 + count;
}
@@ -1153,6 +1154,25 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xC2) {
+ // Intel manual 2A, Table 3-18.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {
+ "cmpeqsd",
+ "cmpltsd",
+ "cmplesd",
+ "cmpunordsd",
+ "cmpneqsd",
+ "cmpnltsd",
+ "cmpnlesd",
+ "cmpordsd"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[current[1]],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ current += 2;
} else {
UnimplementedInstruction();
}
@@ -1229,8 +1249,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0xA2 || opcode == 0x31) {
- // RDTSC or CPUID
+ } else if (opcode == 0xA2) {
+ // CPUID
AppendToBuffer("%s", mnemonic);
} else if ((opcode & 0xF0) == 0x40) {
@@ -1294,14 +1314,14 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "nop";
case 0x2A: // F2/F3 prefix.
return "cvtsi2s";
- case 0x31:
- return "rdtsc";
case 0x51: // F2 prefix.
return "sqrtsd";
case 0x58: // F2 prefix.
return "addsd";
case 0x59: // F2 prefix.
return "mulsd";
+ case 0x5A: // F2 prefix.
+ return "cvtsd2ss";
case 0x5C: // F2 prefix.
return "subsd";
case 0x5E: // F2 prefix.
@@ -1398,15 +1418,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case MOVE_REG_INSTR: {
byte* addr = NULL;
switch (operand_size()) {
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
data += 3;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
data += 5;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
data += 9;
break;
@@ -1611,11 +1631,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("mov%c %s, ",
operand_size_code(),
NameOfCPURegister(reg));
- data += PrintImmediate(data, DOUBLEWORD_SIZE);
+ data += PrintImmediate(data, OPERAND_DOUBLEWORD_SIZE);
} else {
AppendToBuffer("movb %s, ",
NameOfByteCPURegister(reg));
- data += PrintImmediate(data, BYTE_SIZE);
+ data += PrintImmediate(data, OPERAND_BYTE_SIZE);
}
break;
}
@@ -1644,7 +1664,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA1: // Fall through.
case 0xA3:
switch (operand_size()) {
- case DOUBLEWORD_SIZE: {
+ case OPERAND_DOUBLEWORD_SIZE: {
const char* memory_location = NameOfAddress(
reinterpret_cast<byte*>(
*reinterpret_cast<int32_t*>(data + 1)));
@@ -1656,7 +1676,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 5;
break;
}
- case QUADWORD_SIZE: {
+ case OPERAND_QUADWORD_SIZE: {
// New x64 instruction mov rax,(imm_64).
const char* memory_location = NameOfAddress(
*reinterpret_cast<byte**>(data + 1));
@@ -1682,15 +1702,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA9: {
int64_t value = 0;
switch (operand_size()) {
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
value = *reinterpret_cast<uint16_t*>(data + 1);
data += 3;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
value = *reinterpret_cast<uint32_t*>(data + 1);
data += 5;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
value = *reinterpret_cast<int32_t*>(data + 1);
data += 5;
break;
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 6333e87bea..c24512ecae 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -280,8 +280,7 @@ void FullCodeGenerator::Generate() {
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -341,8 +340,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -388,8 +386,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(rax);
EmitProfilingCounterReset();
@@ -1292,7 +1290,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ Push(info);
+ __ Move(rbx, info);
__ CallStub(&stub);
} else {
__ push(rsi);
@@ -2937,7 +2935,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2951,7 +2949,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(not_zero, if_true);
+ __ j(not_zero, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
__ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
@@ -2969,7 +2967,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmpq(rcx, Immediate(0));
__ j(equal, &done);
- __ LoadInstanceDescriptors(rbx, rbx);
+ __ LoadInstanceDescriptors(rbx, r8);
   // r8: descriptor array (the map stays in rbx).
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
@@ -2977,24 +2975,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
__ lea(rcx,
Operand(
- rbx, index.reg, index.scale, DescriptorArray::kFirstOffset));
+ r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
+ __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// internalized string "valueOf" the result is false.
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, FieldOperand(rbx, 0));
+ __ movq(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
- __ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(rbx, rcx);
+ __ cmpq(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
- // Reload map as register rbx was used as temporary above.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+
+ __ bind(&skip_lookup);
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
@@ -3006,14 +3008,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ cmpq(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
@@ -3249,7 +3246,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 4837b9aa9a..4a7c68a53c 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -822,8 +822,8 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- rax);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, rax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -859,8 +859,8 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, no_reg);
__ bind(&miss);
}
@@ -904,8 +904,8 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// -----------------------------------
Label miss;
- // Get the receiver of the function from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
@@ -940,8 +940,8 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
__ IncrementCounter(counters->keyed_call_miss(), 1);
}
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Enter an internal frame.
{
@@ -965,7 +965,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
+ __ movq(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &invoke);
__ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
@@ -975,7 +975,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
// Patch the receiver on the stack.
__ bind(&global);
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
__ bind(&invoke);
}
@@ -1005,8 +1005,8 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
GenerateMiss(masm, argc, extra_ic_state);
}
@@ -1023,8 +1023,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
Label do_call, slow_call, slow_load;
Label check_number_dictionary, check_name, lookup_monomorphic_cache;
@@ -1302,7 +1302,8 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label slow, notin;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
__ movq(rdi, mapped_location);
@@ -1331,7 +1332,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
GenerateMiss(masm);
@@ -1452,8 +1453,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 4fbcbcdfc9..9dca6b3e20 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -32,6 +32,7 @@
#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -39,7 +40,7 @@ namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -47,13 +48,13 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const {
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {
codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -257,6 +258,21 @@ bool LCodeGen::GeneratePrologue() {
}
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ subq(rsp, Immediate(slots * kPointerSize));
+}
+
+
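
The size adjustment in GenerateOsrPrologue only allocates the slots the optimized frame needs beyond the unoptimized frame that is already on the stack. A sketch of the arithmetic, with kPointerSize assumed to be 8 on x64:

int ExtraOsrFrameBytes(int optimized_stack_slots, int unoptimized_frame_slots) {
  int extra_slots = optimized_stack_slots - unoptimized_frame_slots;  // >= 0
  return extra_slots * 8;  // amount subtracted from rsp
}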
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@@ -357,6 +373,7 @@ bool LCodeGen::GenerateDeferredCode() {
}
code->Generate();
if (NeedsDeferredFrame()) {
+ __ bind(code->done());
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
frame_is_built_ = false;
@@ -450,7 +467,7 @@ ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle();
+ return constant->handle(isolate());
}
@@ -582,7 +599,7 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -979,8 +996,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Record the address of the first unknown OSR value as the place to enter.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
@@ -1299,7 +1315,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movl(kScratchRegister, left);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ movq(kScratchRegister, left);
+ } else {
+ __ movl(kScratchRegister, left);
+ }
}
bool can_overflow =
@@ -1347,14 +1367,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ SmiToInteger32(left, left);
+ __ SmiToInteger64(left, left);
__ imul(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ SmiToInteger32(left, left);
+ __ SmiToInteger64(left, left);
__ imul(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
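
The switch from SmiToInteger32 to SmiToInteger64 keeps the smi multiply in 64-bit arithmetic. A sketch of why one fully untagged operand suffices, assuming the x64 encoding smi(v) == v << 32; overflow is handled separately by the surrounding code:

#include <cstdint>

int64_t SmiTagSketch(int32_t v) { return static_cast<int64_t>(v) << 32; }

// (a << 32) * b == (a * b) << 32, so multiplying one tagged and one untagged
// operand yields the correctly tagged product directly.
int64_t SmiMulSketch(int64_t tagged_left, int64_t untagged_right) {
  return tagged_left * untagged_right;  // still smi-tagged
}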
@@ -1368,9 +1388,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
Label done;
- __ testl(left, left);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ testq(left, left);
+ } else {
+ __ testl(left, left);
+ }
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
+ // Constant can't be represented as Smi due to immediate size limit.
+ ASSERT(!instr->hydrogen_value()->representation().IsSmi());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
@@ -1378,11 +1404,19 @@ void LCodeGen::DoMulI(LMulI* instr) {
DeoptimizeIf(less, instr->environment());
}
} else if (right->IsStackSlot()) {
- __ orl(kScratchRegister, ToOperand(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ or_(kScratchRegister, ToOperand(right));
+ } else {
+ __ orl(kScratchRegister, ToOperand(right));
+ }
DeoptimizeIf(sign, instr->environment());
} else {
// Test the non-zero operand for negative sign.
- __ orl(kScratchRegister, ToRegister(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ or_(kScratchRegister, ToRegister(right));
+ } else {
+ __ orl(kScratchRegister, ToRegister(right));
+ }
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
@@ -1580,7 +1614,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
+ Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(ToRegister(instr->result()), value);
}
@@ -2467,15 +2501,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -3401,14 +3435,14 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3633,90 +3667,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-
- // Choose the right register for the first argument depending on
- // calling convention.
-#ifdef _WIN64
- ASSERT(ToRegister(instr->global_object()).is(rcx));
- Register global_object = rcx;
-#else
- ASSERT(ToRegister(instr->global_object()).is(rdi));
- Register global_object = rdi;
-#endif
-
+ // Assert that register size is twice the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
- __ movq(global_object,
- FieldOperand(global_object, GlobalObject::kNativeContextOffset));
+ // Load native context
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ movq(native_context, FieldOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
- // rbx: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ movq(state, FieldOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ testl(rax, rax);
- __ j(zero, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ movl(state0, FieldOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
+ Register state1 = ToRegister(instr->scratch2());
+ __ movl(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- // Only operate on the lower 32 bit of rax.
- __ movzxwl(rdx, rax);
- __ imull(rdx, rdx, Immediate(18273));
- __ shrl(rax, Immediate(16));
- __ addl(rax, rdx);
+ Register scratch3 = ToRegister(instr->scratch3());
+ __ movzxwl(scratch3, state0);
+ __ imull(scratch3, scratch3, Immediate(18273));
+ __ shrl(state0, Immediate(16));
+ __ addl(state0, scratch3);
// Save state[0].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
+ __ movl(FieldOperand(state, ByteArray::kHeaderSize), state0);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzxwl(rdx, rcx);
- __ imull(rdx, rdx, Immediate(36969));
- __ shrl(rcx, Immediate(16));
- __ addl(rcx, rdx);
+ __ movzxwl(scratch3, state1);
+ __ imull(scratch3, scratch3, Immediate(36969));
+ __ shrl(state1, Immediate(16));
+ __ addl(state1, scratch3);
// Save state[1].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
+ __ movl(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shll(rax, Immediate(14));
- __ andl(rcx, Immediate(0x3FFFF));
- __ addl(rax, rcx);
+ Register random = state0;
+ __ shll(random, Immediate(14));
+ __ andl(state1, Immediate(0x3FFFF));
+ __ addl(random, state1);
- __ bind(deferred->exit());
// Convert 32 random bits in rax to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ movq(rcx, V8_INT64_C(0x4130000000000000),
+ XMMRegister result = ToDoubleRegister(instr->result());
+ // We use xmm0 as fixed scratch register here.
+ XMMRegister scratch4 = xmm0;
+ __ movq(scratch3, V8_INT64_C(0x4130000000000000),
RelocInfo::NONE64); // 1.0 x 2^20 as double
- __ movq(xmm2, rcx);
- __ movd(xmm1, rax);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Return value is in rax.
+ __ movq(scratch4, scratch3);
+ __ movd(result, random);
+ __ xorps(result, scratch4);
+ __ subsd(result, scratch4);
}
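
A plain C++ sketch of the inlined sequence above, following the formulas stated in its comments: two 16-bit multiply-with-carry style state updates, 32 combined random bits, then the bit-pattern trick against 1.0 x 2^20 to land in [0, 1):

#include <cstdint>
#include <cstring>

double RandomSketch(uint32_t* state0, uint32_t* state1) {
  *state0 = 18273 * (*state0 & 0xFFFF) + (*state0 >> 16);
  *state1 = 36969 * (*state1 & 0xFFFF) + (*state1 >> 16);
  uint32_t random_bits = (*state0 << 14) + (*state1 & 0x3FFFF);

  // Build 1.(20 zero bits)(32 random bits) x 2^20, then subtract 1.0 x 2^20.
  uint64_t bit_pattern = UINT64_C(0x4130000000000000) | random_bits;
  double as_double;
  std::memcpy(&as_double, &bit_pattern, sizeof(as_double));
  return as_double - 1048576.0;  // 1048576.0 == 1.0 x 2^20
}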
@@ -3906,6 +3914,14 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -4323,12 +4339,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4375,12 +4393,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4469,14 +4489,14 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagU(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4532,12 +4552,14 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4651,60 +4673,47 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
+ Label heap_number;
Register input_reg = ToRegister(instr->value());
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
if (instr->truncating()) {
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
__ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr->environment());
__ Set(input_reg, 0);
- __ jmp(&done, Label::kNear);
+ __ jmp(done);
__ bind(&heap_number);
-
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2siq(input_reg, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(input_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
+ __ TruncateHeapNumberToI(input_reg, input_reg);
} else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
-
+ Label bailout;
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, xmm0);
- __ cvtlsi2sd(xmm_temp, input_reg);
- __ ucomisd(xmm0, xmm_temp);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(input_reg, input_reg);
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- }
+ __ TaggedToI(input_reg, input_reg, xmm_temp,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(done);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
}
- __ bind(&done);
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_, done());
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4752,34 +4761,16 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(result);
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2siq(result_reg, input_reg);
- __ movq(kScratchRegister,
- V8_INT64_C(0x8000000000000000),
- RelocInfo::NONE64);
- __ cmpq(result_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
+ __ TruncateDoubleToI(result_reg, input_reg);
} else {
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
+ Label bailout, done;
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
}
}
@@ -4789,31 +4780,19 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
- Label done;
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ Label bailout, done;
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
__ Integer32ToSmi(result_reg, result_reg);
DeoptimizeIf(overflow, instr->environment());
}
@@ -4881,10 +4860,10 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- __ CmpHeapObject(reg, target);
+ Handle<HeapObject> object = instr->hydrogen()->object();
+ __ CmpHeapObject(reg, object);
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4901,17 +4880,17 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps: public LDeferredCode {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5001,12 +4980,14 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5157,7 +5138,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
- __ Push(instr->hydrogen()->shared_info());
+ __ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(rsi);
@@ -5362,12 +5343,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5381,8 +5364,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done, Label::kNear);
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
@@ -5418,9 +5402,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- // Normally we record the first unknown OSR value as the entrypoint to the OSR
- // code, but if there were none, record the entrypoint here.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
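GenerateOsrPrologue (declared in the header diff below) takes over recording the OSR entry point and, together with the chunk-builder change further down, accounts for the unoptimized frame slots that the optimized frame subsumes. A minimal sketch of what it plausibly does; the real body lives in lithium-codegen-x64.cc and may differ in detail:

    void LCodeGen::GenerateOsrPrologue() {
      // Emit the OSR entry at the first unknown OSR value; do nothing if the
      // entry point has already been recorded.
      if (osr_pc_offset_ >= 0) return;
      osr_pc_offset_ = masm()->pc_offset();
      // The unoptimized frame is already on the stack at the OSR entry;
      // allocate only the additional slots the optimized frame needs.
      int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
      __ subq(rsp, Immediate(slots * kPointerSize));
    }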
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index a74ec7982c..f994645019 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -44,7 +44,7 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@@ -123,10 +123,9 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagU(LNumberTagU* instr);
- void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -191,6 +190,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS
@@ -384,7 +386,7 @@ class LCodeGen BASE_EMBEDDED {
int old_position_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
@@ -420,13 +422,14 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
int instruction_index() const { return instruction_index_; }
protected:
@@ -437,6 +440,7 @@ class LDeferredCode: public ZoneObject {
LCodeGen* codegen_;
Label entry_;
Label exit_;
+ Label done_;
Label* external_exit_;
int instruction_index_;
};
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.h b/deps/v8/src/x64/lithium-gap-resolver-x64.h
index d828455921..f218455b67 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index ce5d50c116..d9daaacca0 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -32,6 +32,7 @@
#include "lithium-allocator-inl.h"
#include "x64/lithium-x64.h"
#include "x64/lithium-codegen-x64.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -263,6 +264,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -431,6 +440,15 @@ LPlatformChunk* LChunkBuilder::Build() {
chunk_ = new(zone()) LPlatformChunk(info(), graph());
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(false);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -734,12 +752,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
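The per-use loop is folded into HValue::CheckUsesForFlag. Judging from this call site, the helper presumably reduces to the same scan (a sketch, not the hydrogen-instructions source):

    bool HValue::CheckUsesForFlag(Flag f) const {
      for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
        // One use lacking the flag fails the whole check.
        if (!it.value()->CheckFlag(f)) return false;
      }
      return true;  // Every use carries the flag.
    }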
@@ -1088,6 +1101,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@@ -1592,9 +1613,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), arg_reg_1);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, xmm1);
}
@@ -1825,8 +1850,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
bool truncating = instr->CanTruncateToInt32();
@@ -1921,9 +1947,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
@@ -2347,10 +2373,18 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
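A worked example of the new mapping (all numbers hypothetical):

    // Hypothetical environment: 2 parameters, first_local_index() == 4.
    // env_index == 1 (parameter) -> spill_index = GetParameterStackSlot(1)
    // env_index == 6 (local)     -> spill_index = 6 - 4 = 2, i.e. the slot it
    //                               already occupies in the unoptimized frame
    //                               that the optimized frame subsumes.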
@@ -2372,6 +2406,8 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
// There are no real uses of a captured object.
return NULL;
}
@@ -2418,20 +2454,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
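DoSimulate (and, above, DoCapturedObject) now delegate environment replay to the hydrogen instruction itself. From the loop removed here, HSimulate::ReplayEnvironment presumably amounts to the following; the HCapturedObject variant is not shown in this diff:

    void HSimulate::ReplayEnvironment(HEnvironment* env) {
      ASSERT(env != NULL);
      env->set_ast_id(ast_id());
      env->Drop(pop_count());
      // Replay the pushes and binds in reverse order, as the removed loop did.
      for (int i = values()->length() - 1; i >= 0; --i) {
        HValue* value = values()->at(i);
        if (HasAssignedIndexAt(i)) {
          env->Bind(GetAssignedIndexAt(i), value);
        } else {
          env->Push(value);
        }
      }
    }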
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 77bebe64bd..b3d08c8a4c 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -62,12 +62,12 @@ class LCodeGen;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
@@ -160,6 +160,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -186,13 +187,17 @@ class LCodeGen;
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
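Expanding the updated macro for a concrete instruction shows the net effect; for LGoto, DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") now expands to roughly:

    virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {
      return LInstruction::kGoto;
    }
    virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE;
    virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { return "goto"; }
    static LGoto* cast(LInstruction* instr) {
      ASSERT(instr->IsGoto());
      return reinterpret_cast<LGoto*>(instr);
    }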
@@ -202,7 +207,7 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
: environment_(NULL),
@@ -211,7 +216,7 @@ class LInstruction: public ZoneObject {
set_position(RelocInfo::kNoPosition);
}
- virtual ~LInstruction() { }
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -310,11 +315,13 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0 && result() != NULL; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
@@ -325,15 +332,15 @@ class LTemplateInstruction: public LInstruction {
private:
// Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -344,8 +351,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -382,11 +389,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const {
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
return !IsRedundant();
}
@@ -394,14 +401,14 @@ class LInstructionGap: public LGap {
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(int block_id) : block_id_(block_id) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const;
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_id_; }
@@ -410,7 +417,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -426,7 +433,7 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -435,22 +442,24 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -466,14 +475,16 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -484,19 +495,21 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const { return true; }
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -535,7 +548,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -549,7 +562,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -570,7 +583,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -584,11 +597,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -600,14 +613,14 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 1> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -624,7 +637,7 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
};
-class LDivI: public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -643,7 +656,7 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -662,7 +675,7 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 0> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -677,7 +690,7 @@ class LMulI: public LTemplateInstruction<1, 2, 0> {
};
-class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -696,11 +709,11 @@ class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LMathFloor: public LTemplateInstruction<1, 1, 0> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -713,7 +726,7 @@ class LMathFloor: public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound: public LTemplateInstruction<1, 1, 0> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathRound(LOperand* value) {
inputs_[0] = value;
@@ -726,7 +739,7 @@ class LMathRound: public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathAbs(LOperand* value) {
inputs_[0] = value;
@@ -739,7 +752,7 @@ class LMathAbs: public LTemplateInstruction<1, 1, 0> {
};
-class LMathLog: public LTemplateInstruction<1, 1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -751,7 +764,7 @@ class LMathLog: public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin: public LTemplateInstruction<1, 1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
@@ -763,7 +776,7 @@ class LMathSin: public LTemplateInstruction<1, 1, 0> {
};
-class LMathCos: public LTemplateInstruction<1, 1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
@@ -775,7 +788,7 @@ class LMathCos: public LTemplateInstruction<1, 1, 0> {
};
-class LMathTan: public LTemplateInstruction<1, 1, 0> {
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
@@ -787,7 +800,7 @@ class LMathTan: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -804,7 +817,7 @@ class LMathExp: public LTemplateInstruction<1, 1, 2> {
};
-class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -816,7 +829,7 @@ class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf: public LTemplateInstruction<1, 1, 0> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathPowHalf(LOperand* value) {
inputs_[0] = value;
@@ -828,7 +841,7 @@ class LMathPowHalf: public LTemplateInstruction<1, 1, 0> {
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -842,7 +855,7 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -855,7 +868,7 @@ class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch: public LControlInstruction<1, 0> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -866,11 +879,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -883,7 +896,7 @@ class LIsNumberAndBranch: public LControlInstruction<1, 0> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -896,11 +909,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -911,11 +924,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -929,11 +942,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -947,13 +960,13 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -965,11 +978,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -982,7 +995,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -994,11 +1008,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -1014,11 +1028,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1035,7 +1049,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1049,7 +1063,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1067,7 +1081,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1076,7 +1091,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
@@ -1089,7 +1104,7 @@ class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1104,7 +1119,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1121,7 +1136,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1142,7 +1157,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1157,7 +1172,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1166,7 +1181,7 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1175,7 +1190,7 @@ class LConstantS: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
@@ -1190,7 +1205,7 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
};
-class LConstantE: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1201,16 +1216,18 @@ class LConstantE: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1221,17 +1238,17 @@ class LBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCmpMapAndBranch: public LControlInstruction<1, 0> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1246,7 +1263,7 @@ class LCmpMapAndBranch: public LControlInstruction<1, 0> {
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1258,7 +1275,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1271,7 +1288,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 0> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LValueOf(LOperand* value) {
inputs_[0] = value;
@@ -1284,7 +1301,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 0> {
};
-class LDateField: public LTemplateInstruction<1, 1, 0> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1301,7 +1318,7 @@ class LDateField: public LTemplateInstruction<1, 1, 0> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
@@ -1325,7 +1342,7 @@ class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1337,7 +1354,7 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
};
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1357,7 +1374,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1372,7 +1389,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1387,20 +1404,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
LOperand* global_object() { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1412,16 +1438,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1433,16 +1461,18 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1465,7 +1495,7 @@ class LReturn: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1478,7 +1508,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1492,7 +1522,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1505,7 +1535,8 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1518,7 +1549,7 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1533,7 +1564,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
@@ -1541,7 +1572,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
@@ -1555,14 +1586,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1577,7 +1608,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1592,7 +1623,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
@@ -1611,7 +1642,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1624,11 +1655,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1645,11 +1676,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1661,7 +1692,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1674,7 +1705,24 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
@@ -1683,28 +1731,28 @@ class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
LOperand* base_object() { return inputs_[0]; }
int offset() { return hydrogen()->offset(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1716,20 +1764,20 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1741,7 +1789,7 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
@@ -1753,7 +1801,7 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
@@ -1764,13 +1812,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
@@ -1781,25 +1829,25 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
LOperand* key() { return inputs_[0]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallFunction(LOperand* function) {
inputs_[0] = function;
@@ -1813,30 +1861,30 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const {return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1847,13 +1895,13 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNewArray(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1864,13 +1912,13 @@ class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1880,7 +1928,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1892,7 +1940,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1905,7 +1953,7 @@ class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1919,7 +1967,7 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1931,7 +1979,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1945,7 +1993,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1961,7 +2009,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
@@ -1976,7 +2024,7 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
};
-class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1990,7 +2038,7 @@ class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2007,7 +2055,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
};
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2019,7 +2067,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2032,7 +2080,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2049,7 +2097,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2064,7 +2112,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
@@ -2073,7 +2121,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
@@ -2086,14 +2134,14 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2110,13 +2158,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2131,13 +2179,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
@@ -2155,7 +2203,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
@@ -2164,7 +2212,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
};
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2180,7 +2228,7 @@ class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2195,7 +2243,7 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -2210,7 +2258,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
@@ -2223,20 +2271,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2249,7 +2297,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2262,7 +2310,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2274,7 +2322,7 @@ class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2286,7 +2334,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2298,7 +2346,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped,
LOperand* temp_xmm) {
@@ -2313,7 +2361,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2326,7 +2374,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocate: public LTemplateInstruction<1, 1, 1> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LAllocate(LOperand* size, LOperand* temp) {
inputs_[0] = size;
@@ -2341,21 +2389,21 @@ class LAllocate: public LTemplateInstruction<1, 1, 1> {
};
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2368,7 +2416,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -2380,7 +2428,7 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2393,11 +2441,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2411,16 +2459,18 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2432,7 +2482,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
@@ -2444,7 +2494,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2460,7 +2510,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2474,7 +2524,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2489,7 +2539,7 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2499,7 +2549,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
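The lithium-x64.h hunks above are mechanical: every concrete Lithium instruction class gains V8_FINAL and each redefined virtual method gains V8_OVERRIDE. A minimal sketch of how such compatibility macros are commonly defined (illustrative only; the real definitions live in V8's configuration headers and use compiler-specific feature detection):

// Sketch, not V8's actual header: expand to the C++11 keywords when the
// compiler supports them, and to nothing otherwise, so the annotated
// classes still compile on older toolchains.
#if defined(__cplusplus) && __cplusplus >= 201103L
# define V8_FINAL final
# define V8_OVERRIDE override
#else
# define V8_FINAL
# define V8_OVERRIDE
#endif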
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 74e3fcc33a..69abc5454f 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -285,16 +285,17 @@ void MacroAssembler::InNewSpace(Register object,
cmpq(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
- ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+ ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
intptr_t new_space_start =
- reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
+ reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+ and_(scratch,
+ Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
}
@@ -524,7 +525,13 @@ void MacroAssembler::Abort(BailoutReason reason) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
#endif
+
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE64);
push(kScratchRegister);
@@ -679,22 +686,8 @@ static int Offset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::PrepareCallApiFunction(int arg_stack_space,
- bool returns_handle) {
-#if defined(_WIN64) && !defined(__MINGW64__)
- if (!returns_handle) {
- EnterApiExitFrame(arg_stack_space);
- return;
- }
- // We need to prepare a slot for result handle on stack and put
- // a pointer to it into 1st arg register.
- EnterApiExitFrame(arg_stack_space + 1);
-
- // rcx must be used to pass the pointer to the return value slot.
- lea(rcx, StackSpaceOperand(arg_stack_space));
-#else
+void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
EnterApiExitFrame(arg_stack_space);
-#endif
}
@@ -702,7 +695,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Address thunk_address,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset) {
Label prologue;
Label promote_scheduled_exception;
@@ -775,23 +767,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
PopSafepointRegisters();
}
- // Can skip the result check for new-style callbacks
- // TODO(dcarney): may need to pass this information down
- // as some function_addresses might not have been registered
- if (returns_handle) {
- Label empty_result;
-#if defined(_WIN64) && !defined(__MINGW64__)
- // rax keeps a pointer to v8::Handle, unpack it.
- movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- testq(rax, rax);
- j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- movq(rax, Operand(rax, 0));
- jmp(&prologue);
- bind(&empty_result);
- }
// Load the value from ReturnValue
movq(rax, Operand(rbp, return_value_offset * kPointerSize));
bind(&prologue);
@@ -984,7 +959,10 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
-bool MacroAssembler::IsUnsafeInt(const int x) {
+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+bool MacroAssembler::IsUnsafeInt(const int32_t x) {
static const int kMaxBits = 17;
return !is_intn(x, kMaxBits);
}
@@ -992,7 +970,7 @@ bool MacroAssembler::IsUnsafeInt(const int x) {
void MacroAssembler::SafeMove(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
@@ -1004,7 +982,7 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
void MacroAssembler::SafePush(Smi* src) {
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Push(Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
@@ -1015,9 +993,6 @@ void MacroAssembler::SafePush(Smi* src) {
}
-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
-
Register MacroAssembler::GetSmiConstant(Smi* source) {
int value = source->value();
if (value == 0) {
@@ -2222,6 +2197,49 @@ void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
}
+void MacroAssembler::Push(Smi* source) {
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ push(Immediate(static_cast<int32_t>(smi)));
+ } else {
+ Register constant = GetSmiConstant(source);
+ push(constant);
+ }
+}
+
+
+void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
+ movq(scratch, src);
+ // High bits.
+ shr(src, Immediate(64 - kSmiShift));
+ shl(src, Immediate(kSmiShift));
+ push(src);
+ // Low bits.
+ shl(scratch, Immediate(kSmiShift));
+ push(scratch);
+}
+
+
+void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
+ pop(scratch);
+ // Low bits.
+ shr(scratch, Immediate(kSmiShift));
+ pop(dst);
+ shr(dst, Immediate(kSmiShift));
+ // High bits.
+ shl(dst, Immediate(64 - kSmiShift));
+ or_(dst, scratch);
+}
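PushInt64AsTwoSmis and PopInt64AsTwoSmis above hide a raw 64-bit value from the garbage collector by storing each half shifted into the upper bits of its own stack word, so both words carry a valid smi tag. A hedged sketch of the same split-and-reassemble arithmetic in portable C++ (assuming the 32-bit smi layout, i.e. kSmiShift == 32):

#include <cstdint>

// Sketch of the shift pattern used above: the low kSmiShift bits of each
// word stay zero, which is exactly what makes the GC treat them as smis.
const int kSmiShift = 32;

void SplitAsTwoSmis(uint64_t value, uint64_t* high, uint64_t* low) {
  *high = (value >> (64 - kSmiShift)) << kSmiShift;  // top half, smi-tagged
  *low = value << kSmiShift;                         // bottom half, smi-tagged
}

uint64_t JoinFromTwoSmis(uint64_t high, uint64_t low) {
  return ((high >> kSmiShift) << (64 - kSmiShift)) | (low >> kSmiShift);
}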
+
+
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+}
+
+
+// ----------------------------------------------------------------------------
+
+
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
@@ -2461,17 +2479,6 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
}
-void MacroAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- push(constant);
- }
-}
-
-
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
addq(rsp, Immediate(stack_elements * kPointerSize));
@@ -2479,11 +2486,6 @@ void MacroAssembler::Drop(int stack_elements) {
}
-void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
-}
-
-
void MacroAssembler::TestBit(const Operand& src, int bits) {
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
@@ -2991,6 +2993,117 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
}
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(input_reg, result_reg, offset, true);
+ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+ Label done;
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2siq(result_reg, xmm0);
+ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
+ cmpq(result_reg, kScratchRegister);
+ j(not_equal, &done, Label::kNear);
+
+ // Slow case.
+ if (input_reg.is(result_reg)) {
+ subq(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), xmm0);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addq(rsp, Immediate(kDoubleSize));
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result_reg,
+ XMMRegister input_reg) {
+ Label done;
+ cvttsd2siq(result_reg, input_reg);
+ movq(kScratchRegister,
+ V8_INT64_C(0x8000000000000000),
+ RelocInfo::NONE64);
+ cmpq(result_reg, kScratchRegister);
+ j(not_equal, &done, Label::kNear);
+
+ subq(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), input_reg);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addq(rsp, Immediate(kDoubleSize));
+
+ bind(&done);
+}
+
+
+void MacroAssembler::DoubleToI(Register result_reg,
+ XMMRegister input_reg,
+ XMMRegister scratch,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ cvttsd2si(result_reg, input_reg);
+ cvtlsi2sd(xmm0, result_reg);
+ ucomisd(xmm0, input_reg);
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ Label done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ testl(result_reg, result_reg);
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // jump to conversion_failed.
+ andl(result_reg, Immediate(1));
+ j(not_zero, conversion_failed, dst);
+ bind(&done);
+ }
+}
+
+
+void MacroAssembler::TaggedToI(Register result_reg,
+ Register input_reg,
+ XMMRegister temp,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision,
+ Label::Distance dst) {
+ Label done;
+ ASSERT(!temp.is(xmm0));
+
+ // Heap number map check.
+ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ j(not_equal, lost_precision, dst);
+
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, xmm0);
+ cvtlsi2sd(temp, result_reg);
+ ucomisd(xmm0, temp);
+ RecordComment("Deferred TaggedToI: lost precision");
+ j(not_equal, lost_precision, dst);
+ RecordComment("Deferred TaggedToI: NaN");
+ j(parity_even, lost_precision, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ testl(result_reg, result_reg);
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, xmm0);
+ andl(result_reg, Immediate(1));
+ j(not_zero, lost_precision, dst);
+ }
+ bind(&done);
+}
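DoubleToI and TaggedToI above share one technique: truncate with cvttsd2si, convert the result back to a double, and treat any mismatch (including the unordered comparison produced by NaN) as lost precision; a zero result is additionally checked against the sign bit so -0 can be rejected. A hedged, portable C++ sketch of the same checks (the stub truncates first and detects failure afterwards, whereas portable code must screen NaN and out-of-range values up front):

#include <cmath>
#include <cstdint>

// Sketch only: returns false where DoubleToI would jump to conversion_failed.
bool DoubleToInt32Exact(double input, bool fail_on_minus_zero, int32_t* out) {
  if (std::isnan(input) || input < INT32_MIN || input > INT32_MAX) return false;
  int32_t truncated = static_cast<int32_t>(input);            // truncation toward zero
  if (static_cast<double>(truncated) != input) return false;  // lost precision
  if (fail_on_minus_zero && truncated == 0 && std::signbit(input)) return false;
  *out = truncated;
  return true;
}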
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 61abc206e1..09c8a800cc 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -375,6 +375,11 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
+ // Support for constant splitting.
+ bool IsUnsafeInt(const int32_t x);
+ void SafeMove(Register dst, Smi* src);
+ void SafePush(Smi* src);
+
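IsUnsafeInt, SafeMove and SafePush implement constant splitting: when a JIT cookie is active, a smi immediate that does not fit in 17 bits is never emitted verbatim; the XOR-masked value and the cookie are emitted separately and recombined in a register. A hedged sketch of the idea (names and the plain-integer form are illustrative; the real code works on tagged smis and registers):

#include <cstdint>

// Sketch: keep attacker-chosen constants out of executable memory by
// emitting value ^ cookie and the cookie as two separate immediates.
struct SplitConstant { int32_t masked; int32_t cookie; };

SplitConstant SplitWithCookie(int32_t value, int32_t jit_cookie) {
  return SplitConstant{ value ^ jit_cookie, jit_cookie };
}

int32_t Recombine(const SplitConstant& c) {
  return c.masked ^ c.cookie;  // (value ^ cookie) ^ cookie == value
}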
void InitializeSmiConstantRegister() {
movq(kSmiConstantRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
@@ -720,6 +725,14 @@ class MacroAssembler: public Assembler {
}
void Push(Smi* smi);
+
+ // Save away a 64-bit integer on the stack as two 32-bit integers
+ // masquerading as smis so that the garbage collector skips visiting them.
+ void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister);
+ // Reconstruct a 64-bit integer from two 32-bit integers masquerading as
+ // smis on the top of stack.
+ void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister);
+
void Test(const Operand& dst, Smi* source);
@@ -774,11 +787,6 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
- // Support for constant splitting.
- bool IsUnsafeInt(const int x);
- void SafeMove(Register dst, Smi* src);
- void SafePush(Smi* src);
-
// Bit-field support.
void TestBit(const Operand& dst, int bit_index);
@@ -967,6 +975,20 @@ class MacroAssembler: public Assembler {
XMMRegister temp_xmm_reg,
Register result_reg);
+ void SlowTruncateToI(Register result_reg, Register input_reg,
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag);
+
+ void TruncateHeapNumberToI(Register result_reg, Register input_reg);
+ void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
+
+ void DoubleToI(Register result_reg, XMMRegister input_reg,
+ XMMRegister scratch, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+
+ void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
+ MinusZeroMode minus_zero_mode, Label* lost_precision,
+ Label::Distance dst = Label::kFar);
+
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1242,7 +1264,7 @@ class MacroAssembler: public Assembler {
// rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves
// context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
// inside the exit frame (not GCed) accessible via StackSpaceOperand.
- void PrepareCallApiFunction(int arg_stack_space, bool returns_handle);
+ void PrepareCallApiFunction(int arg_stack_space);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers r14, r15, rbx and
@@ -1252,7 +1274,6 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_rbp);
// Before calling a C-function from generated code, align arguments on stack.
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index dcd317c666..ca834e2771 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -761,7 +761,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
-#ifdef WIN32
+#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
@@ -771,7 +771,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ movq(register_location(i), rax); // One write every page.
}
-#endif // WIN32
+#endif // V8_OS_WIN
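The Windows-only block above writes one word to every 4 KB page of the register area, in order, because Windows commits stack pages through a single guard page and faults if a page is skipped. A minimal sketch of the same page-touching loop (4096 is the page size assumed by the comment above):

#include <cstddef>

// Sketch: touch one byte per 4 KB page, in order, so a guard-page based
// lazy-commit scheme never sees an out-of-order stack access.
void TouchPagesInOrder(char* base, size_t size) {
  const size_t kPageSize = 4096;
  for (size_t offset = 0; offset < size; offset += kPageSize) {
    base[offset] = 0;
  }
}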
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -998,7 +998,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_.GetCode(&code_desc);
- Isolate* isolate = ISOLATE;
+ Isolate* isolate = this->isolate();
Handle<Code> code = isolate->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP),
masm_.CodeObject());
@@ -1188,7 +1188,6 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 34a557bd1a..95276d530d 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -29,6 +29,7 @@
#if V8_TARGET_ARCH_X64
+#include "arguments.h"
#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"
@@ -366,6 +367,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -373,8 +379,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(kScratchRegister);
__ push(receiver);
__ push(holder);
- __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
- __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
}
@@ -389,7 +393,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ Set(rax, 6);
+ __ Set(rax, StubCache::kInterceptorArgsLength);
__ LoadAddress(rbx, ref);
CEntryStub stub(1);
@@ -414,8 +418,10 @@ static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
__ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
__ movq(StackOperandForReturnAddress(0), scratch);
__ Move(scratch, Smi::FromInt(0));
- for (int i = 1; i <= kFastApiCallArguments; i++) {
- __ movq(Operand(rsp, i * kPointerSize), scratch);
+ StackArgumentsAccessor args(rsp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ for (int i = 0; i < kFastApiCallArguments; i++) {
+ __ movq(args.GetArgumentOperand(i), scratch);
}
}
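Several hunks in this file replace hand-written Operand(rsp, (argc + 1) * kPointerSize) expressions with a StackArgumentsAccessor. Judging from the substitutions visible in this diff, the accessor encapsulates the following offset arithmetic (a hedged sketch; the real class and its receiver-handling flags live elsewhere in the tree):

// Sketch of the offsets implied by the replacements below: the return
// address sits at rsp[0], argument i (1-based) at (argc - i + 1) slots
// above it, and the receiver one slot higher still.
const int kPointerSize = 8;

int ReceiverOffset(int argc) { return (argc + 1) * kPointerSize; }
int ArgumentOffset(int argc, int i) { return (argc - i + 1) * kPointerSize; }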
@@ -464,23 +470,26 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ LoadHeapObject(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ int api_call_argc = argc + kFastApiCallArguments;
+ StackArgumentsAccessor args(rsp, api_call_argc);
+
// Pass the additional arguments.
- __ movq(Operand(rsp, 2 * kPointerSize), rdi);
+ __ movq(args.GetArgumentOperand(api_call_argc - 1), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+ __ movq(args.GetArgumentOperand(api_call_argc - 2), rbx);
} else {
- __ Move(Operand(rsp, 3 * kPointerSize), call_data);
+ __ Move(args.GetArgumentOperand(api_call_argc - 2), call_data);
}
__ movq(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
- __ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 3), kScratchRegister);
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, 5 * kPointerSize), kScratchRegister);
- __ movq(Operand(rsp, 6 * kPointerSize), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 4), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 5), kScratchRegister);
// Prepare arguments.
STATIC_ASSERT(kFastApiCallArguments == 6);
@@ -488,16 +497,10 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
-#if defined(__MINGW64__)
+#if defined(__MINGW64__) || defined(_WIN64)
Register arguments_arg = rcx;
Register callback_arg = rdx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register arguments_arg = returns_handle ? rdx : rcx;
- Register callback_arg = returns_handle ? r8 : rdx;
#else
Register arguments_arg = rdi;
Register callback_arg = rsi;
@@ -507,7 +510,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiStackSpace, returns_handle);
+ __ PrepareCallApiFunction(kApiStackSpace);
__ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
__ addq(rbx, Immediate(argc * kPointerSize));
@@ -519,19 +522,49 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeInvocationCallback)
- : FUNCTION_ADDR(&InvokeFunctionCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
callback_arg,
- argc + kFastApiCallArguments + 1,
- returns_handle,
+ api_call_argc + 1,
kFastApiCallArguments + 1);
}
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+ // Copy return value.
+ __ movq(scratch, Operand(rsp, 0));
+ // Assign stack space for the call arguments.
+ __ subq(rsp, Immediate(stack_space * kPointerSize));
+ // Move the return address on top of the stack.
+ __ movq(Operand(rsp, 0), scratch);
+ // Write holder to stack frame.
+ __ movq(Operand(rsp, 1 * kPointerSize), receiver);
+ // Write receiver to stack frame.
+ int index = stack_space;
+ __ movq(Operand(rsp, index-- * kPointerSize), receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ movq(Operand(rsp, index-- * kPointerSize), values[i]);
+ }
+
+ GenerateFastApiCall(masm, optimization, argc);
+}
+
+
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -690,7 +723,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
@@ -1075,7 +1108,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int depth = 0;
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), object_reg);
+ __ movq(Operand(rsp, kPCOnStackSize), object_reg);
}
// Check the maps in the prototype chain.
@@ -1135,7 +1168,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), reg);
+ __ movq(Operand(rsp, kPCOnStackSize), reg);
}
// Go to the next object in the prototype chain.
@@ -1195,7 +1228,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1280,42 +1313,50 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
void BaseLoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch4().is(reg));
__ PopReturnAddressTo(scratch4());
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
__ push(receiver()); // receiver
- __ push(reg); // holder
if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch1(), callback);
- __ push(FieldOperand(scratch1(),
+ ASSERT(!scratch2().is(reg));
+ __ Move(scratch2(), callback);
+ __ push(FieldOperand(scratch2(),
ExecutableAccessorInfo::kDataOffset)); // data
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
+ ASSERT(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ push(kScratchRegister); // return value
__ push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
+ __ push(reg); // holder
__ push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const ExecutableAccessorInfo& to the C++ callback.
Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(), getter_address);
-#if defined(__MINGW64__)
+#if defined(__MINGW64__) || defined(_WIN64)
Register getter_arg = r8;
Register accessor_info_arg = rdx;
Register name_arg = rcx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register getter_arg = returns_handle ? r9 : r8;
- Register accessor_info_arg = returns_handle ? r8 : rdx;
- Register name_arg = returns_handle ? rdx : rcx;
#else
Register getter_arg = rdx;
Register accessor_info_arg = rsi;
@@ -1332,7 +1373,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// Allocate v8::AccessorInfo in non-GCed stack space.
const int kArgStackSpace = 1;
- __ PrepareCallApiFunction(kArgStackSpace, returns_handle);
+ __ PrepareCallApiFunction(kArgStackSpace);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
__ lea(rax, Operand(name_arg, 6 * kPointerSize));
@@ -1343,16 +1384,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// could be used to pass arguments.
__ lea(accessor_info_arg, StackSpaceOperand(0));
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeAccessorGetter)
- : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
__ CallApiFunctionAndReturn(getter_address,
thunk_address,
getter_arg,
kStackSpace,
- returns_handle,
- 5);
+ 6);
}
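The STATIC_ASSERTs added to GenerateLoadCallback pin down the order in which the property callback arguments must be pushed, since the C++ side indexes them relative to the receiver slot. A hedged sketch of that layout as an enumeration (slot names are illustrative; only the order is taken from the asserts and pushes above):

// Sketch of the push order above, receiver first, name pushed last.
enum PropertyCallbackSlot {
  kThisSlot = 0,            // receiver
  kDataSlot,                // callback data
  kReturnValueSlot,         // undefined, may be overwritten by the callback
  kReturnValueDefaultSlot,  // undefined
  kIsolateSlot,             // raw isolate address
  kHolderSlot,              // holder object
  kNameSlot                 // property name
};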
@@ -1451,7 +1489,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -1470,11 +1508,8 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
Label* miss) {
ASSERT(holder->IsGlobalObject());
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the maps haven't changed.
@@ -1538,9 +1573,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1561,7 +1595,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Invoke the function.
@@ -1591,11 +1625,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
// Check that function is still array
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1647,9 +1681,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label miss;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1688,7 +1722,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(1));
__ JumpIfNotSmi(rcx, &with_write_barrier);
// Save new length.
@@ -1723,7 +1757,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ cmpl(rax, rcx);
__ j(greater, &call_builtin);
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(1));
__ StoreNumberToDoubleElements(
rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
@@ -1800,7 +1834,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&call_builtin);
}
- __ movq(rbx, Operand(rsp, argc * kPointerSize));
+ __ movq(rbx, args.GetArgumentOperand(1));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
@@ -1849,7 +1883,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
// Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
@@ -1898,9 +1932,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
Label miss, return_undefined, call_builtin;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1978,6 +2012,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
Label miss;
Label name_miss;
@@ -2003,9 +2038,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Register receiver = rbx;
Register index = rdi;
Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(receiver, args.GetReceiverOperand());
if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ __ movq(index, args.GetArgumentOperand(1));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
@@ -2059,6 +2094,8 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
+
Label miss;
Label name_miss;
Label index_out_of_range;
@@ -2084,9 +2121,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Register index = rdi;
Register scratch = rdx;
Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(receiver, args.GetReceiverOperand());
if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ __ movq(index, args.GetArgumentOperand(1));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
@@ -2139,13 +2176,14 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(argc - 1));
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -2158,7 +2196,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Load the char code argument.
Register code = rbx;
- __ movq(code, Operand(rsp, 1 * kPointerSize));
+ __ movq(code, args.GetArgumentOperand(argc));
// Check the code is a smi.
Label slow;
@@ -2200,8 +2238,123 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // TODO(872): implement this.
- return Handle<Code>::null();
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) {
+ return Handle<Code>::null();
+ }
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell.is_null()) {
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rdx, &miss);
+
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into rax.
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rax, &smi);
+
+ // Check if the argument is a heap number and load its value into xmm0.
+ Label slow;
+ __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+
+ // Check if the argument is strictly positive. Note this also discards NaN.
+ __ xorpd(xmm1, xmm1);
+ __ ucomisd(xmm0, xmm1);
+ __ j(below_equal, &slow);
+
+ // Do a truncating conversion.
+ __ cvttsd2si(rax, xmm0);
+
+ // Checks for 0x80000000 which signals a failed conversion.
+ Label conversion_failure;
+ __ cmpl(rax, Immediate(0x80000000));
+ __ j(equal, &conversion_failure);
+
+ // Smi tag and return.
+ __ Integer32ToSmi(rax, rax);
+ __ bind(&smi);
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is < 2^kMantissaBits.
+ Label already_round;
+ __ bind(&conversion_failure);
+ int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
+ __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
+ __ movq(xmm1, rbx);
+ __ ucomisd(xmm0, xmm1);
+ __ j(above_equal, &already_round);
+
+ // Save a copy of the argument.
+ __ movaps(xmm2, xmm0);
+
+ // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
+ __ addsd(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
+
+ // Compare the argument and the tentative result to get the right mask:
+ // if xmm2 < xmm0:
+ // xmm2 = 1...1
+ // else:
+ // xmm2 = 0...0
+ __ cmpltsd(xmm2, xmm0);
+
+ // Subtract 1 if the argument was less than the tentative result.
+ int64_t kOne = V8_INT64_C(0x3ff0000000000000);
+ __ movq(rbx, kOne, RelocInfo::NONE64);
+ __ movq(xmm1, rbx);
+ __ andpd(xmm1, xmm2);
+ __ subsd(xmm0, xmm1);
+
+ // Return a new heap number.
+ __ AllocateHeapNumber(rax, rbx, &slow);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ ret(2 * kPointerSize);
+
+ // Return the argument (when it's an already round heap number).
+ __ bind(&already_round);
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // rcx: function name.
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
}
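CompileMathFloorCall above rounds with the classic 2^52 trick: for a positive double below 2^52, adding and then subtracting 0x4330000000000000 (2^52) leaves the value rounded to an integer, and one is subtracted whenever that rounding overshot the argument. A hedged C++ sketch of the same arithmetic for the range the stub actually handles (it bails out to the generic call for everything else):

// Sketch: floor(x) for 0 < x < 2^52 via the add/subtract-2^52 trick,
// assuming the default round-to-nearest FPU mode, as the stub does.
double FloorViaTwoToThe52(double x) {
  const double kTwoTo52 = 4503599627370496.0;  // bit pattern 0x4330000000000000
  double rounded = (x + kTwoTo52) - kTwoTo52;  // now an integer-valued double
  if (x < rounded) rounded -= 1.0;             // undo upward rounding
  return rounded;
}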
@@ -2223,13 +2376,14 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(argc - 1));
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -2240,7 +2394,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into rax.
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rax, args.GetArgumentOperand(argc));
// Check if the argument is a smi.
Label not_smi;
@@ -2330,9 +2484,9 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
Label miss, miss_before_stack_reserved;
GenerateNameCheck(name, &miss_before_stack_reserved);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss_before_stack_reserved);
@@ -2384,9 +2538,8 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
Label miss;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
@@ -2410,7 +2563,7 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
break;
@@ -2530,21 +2683,20 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Label miss;
GenerateNameCheck(name, &miss);
- // Get the number of arguments.
- const int argc = arguments().immediate();
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
&miss);
// Restore receiver.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the function really is a function.
__ JumpIfSmi(rax, &miss);
@@ -2555,7 +2707,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Invoke the function.
@@ -2602,15 +2754,14 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Label miss;
GenerateNameCheck(name, &miss);
- // Get the number of arguments.
- const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, arguments());
GenerateGlobalReceiverCheck(object, holder, name, &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Set up the context (function already in rdi).
@@ -2666,6 +2817,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -2733,48 +2902,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Name> name) {
- Label miss;
-
- // Check that the map of the global has not changed.
- __ Cmp(FieldOperand(receiver(), HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ Move(scratch1(), cell);
- Operand cell_operand =
- FieldOperand(scratch1(), PropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ movq(cell_operand, value());
- // Cells are always rescanned, so no write barrier here.
-
- // Return the value (register rax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
-}
-
-
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -3037,484 +3164,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi or a heap number containing a smi and branch
- // if the check fails.
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- masm->isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
- __ movsd(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, xmm_scratch0);
- __ cvtlsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- __ Integer32ToSmi(key, scratch);
- __ bind(&key_ok);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpq(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- Label check_heap_number;
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Float to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(rax, &slow);
- } else {
- __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
- }
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- { // Clamp the value to [0..255].
- Label done;
- __ testl(rdx, Immediate(0xFFFFFF00));
- __ j(zero, &done, Label::kNear);
- __ setcc(negative, rdx); // 1 if negative, 0 if positive.
- __ decb(rdx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2sd(xmm0, rdx);
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
- // Fast path: use machine instruction to convert to int64. If that
- // fails (out-of-range or NaN), go into the runtime.
- __ cvttsd2siq(r8, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(r8, kScratchRegister);
- __ j(equal, &slow);
-
- // r8: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), r8);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), r8);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), r8);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
- }
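// A hedged plain-C++ sketch (not from the V8 sources) of the truncation
// fast path above: cvttsd2si produces the "integer indefinite" value
// 0x8000000000000000 for NaN and out-of-range inputs, which the stub uses
// as its cue to fall back to the runtime. The helper name is ours.
#include <cstdint>
#include <emmintrin.h>  // _mm_set_sd, _mm_cvttsd_si64 (SSE2, x86-64)

inline bool TruncateDoubleToInt64(double value, int64_t* out) {
  int64_t result = _mm_cvttsd_si64(_mm_set_sd(value));  // round toward zero
  if (result == static_cast<int64_t>(UINT64_C(0x8000000000000000))) {
    return false;  // NaN or out of range: the stub jumps to the slow path
  }
  *out = result;
  return true;
}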
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
-
- // Miss case: call runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store, grow;
- Label check_capacity, slow;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(rax, &transition_elements_kind);
- }
-
- // Get the elements array and make sure it is a fast element array, not
- // copy-on-write (COW).
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- } else {
- // Do the store and update the write barrier.
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ SmiToInteger32(rcx, rcx);
- __ lea(rcx,
- FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- // Make sure to preserve the value in register rax.
- __ movq(rbx, rax);
- __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
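// A conceptual plain-C++ sketch (ours, not V8's API) of why the object
// branch above calls RecordWrite while the smi branch does not: smis are
// immediate integers, never heap pointers, so there is nothing for the
// generational GC to track.
#include <cstdint>
#include <unordered_set>

struct RememberedSet {
  std::unordered_set<void*> slots;
  void Record(void* slot) { slots.insert(slot); }
};

// Assumption for illustration: the smi tag is the low bit being zero.
inline bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

inline void StoreTagged(RememberedSet& rs, uintptr_t* slot, uintptr_t value) {
  *slot = value;
  if (!IsSmi(value)) rs.Record(slot);  // pointer store: remember the slot
}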
-
- // Handle store cache miss.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element; anything else
- // must be handled by the runtime. Flags are already set by the previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_array_map());
- __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
- }
-
- // Store the element at index zero.
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ ret(0);
-
- __ bind(&check_capacity);
- // Check for copy-on-write (COW) elements; in general they are not
- // handled by this stub.
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &miss_force_generic);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
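// A hedged plain-C++ sketch (ours, not V8's types) of the grow path above:
// an empty array gets a small preallocated backing store with hole-filled
// tail slots; otherwise the store may grow the array by exactly one element
// and only within the existing capacity, and everything else goes to the
// runtime. kPreallocatedElements is a placeholder value.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Element { bool is_hole; intptr_t value; };
constexpr std::size_t kPreallocatedElements = 4;  // placeholder for kPreallocatedArrayElements

struct ArrayLike {
  std::vector<Element> elements;  // backing store (capacity)
  std::size_t length = 0;         // JS-visible length
};

// Returns false where the stub would jump to the slow (runtime) path.
inline bool StoreGrowingByOne(ArrayLike& a, std::size_t index, intptr_t value) {
  if (a.elements.empty() && index == 0) {
    a.elements.assign(kPreallocatedElements, Element{true, 0});  // hole-fill
    a.elements[0] = Element{false, value};
    a.length = 1;
    return true;
  }
  if (index != a.length || index >= a.elements.size()) return false;
  a.elements[index] = Element{false, value};
  a.length += 1;
  return true;
}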
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store;
- Label grow, slow, check_capacity, restore_key_transition_elements_kind;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rdi);
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- // Store the value; StoreNumberToDoubleElements handles both smis and
- // heap numbers.
- __ bind(&finish_store);
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
- __ ret(0);
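// A hedged plain-C++ sketch (ours, not V8's API) of what the
// StoreNumberToDoubleElements call above does: a smi is widened to a double,
// a heap number has its double value copied, and any other value forces an
// elements-kind transition.
#include <cstddef>
#include <cstdint>

enum class StoreResult { kStored, kNeedsTransition };

struct TaggedValue {  // stand-in for a tagged V8 value
  bool is_smi;
  bool is_heap_number;
  int32_t smi_value;
  double heap_number_value;
};

inline StoreResult StoreNumberToDoubles(double* elements, std::size_t index,
                                        const TaggedValue& value) {
  if (value.is_smi) {
    elements[index] = static_cast<double>(value.smi_value);
    return StoreResult::kStored;
  }
  if (value.is_heap_number) {
    elements[index] = value.heap_number_value;
    return StoreResult::kStored;
  }
  return StoreResult::kNeedsTransition;  // stub: jump to the transition label
}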
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&restore_key_transition_elements_kind);
- // Restore smi-tagging of rcx.
- __ Integer32ToSmi(rcx, rcx);
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element; anything else
- // must be handled by the runtime. Flags are already set by the previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(rax, &value_is_smi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Initialize the new FixedDoubleArray. Leave the elements uninitialized
- // for efficiency; they are guaranteed to be initialized before use.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_double_array_map());
- __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
-
- // Store the element at index zero.
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
-
- __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
- }
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ ret(0);
-
- __ bind(&check_capacity);
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
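// A hedged plain-C++ sketch (ours) of the hole marker used when hole-filling
// the new FixedDoubleArray above: "the hole" is one specific NaN bit pattern
// that can be distinguished from NaNs produced by arithmetic. The constant
// below is a placeholder, not V8's kHoleNanInt64.
#include <cstdint>
#include <cstring>

constexpr uint64_t kHoleBits = UINT64_C(0x7FF7FFFFFFF7FFFF);  // placeholder

inline void WriteHole(double* slot) {
  std::memcpy(slot, &kHoleBits, sizeof kHoleBits);  // raw 64-bit store, like the movq above
}

inline bool IsHole(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return bits == kHoleBits;  // exact bit compare; ordinary NaNs compare false
}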
-
-
#undef __
} } // namespace v8::internal