author     Michaël Zasso <targos@protonmail.com>   2017-09-12 11:34:59 +0200
committer  Anna Henningsen <anna@addaleax.net>     2017-09-13 16:15:18 +0200
commit     d82e1075dbc2cec2d6598ade10c1f43805f690fd (patch)
tree       ccd242b9b491dfc341d1099fe11b0ef528839877 /deps/v8/src/builtins
parent     b4b7ac6ae811b2b5a3082468115dfb5a5246fe3f (diff)
download   node-new-d82e1075dbc2cec2d6598ade10c1f43805f690fd.tar.gz
deps: update V8 to 6.1.534.36
PR-URL: https://github.com/nodejs/node/pull/14730
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8/src/builtins')
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc  1165
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc  1245
-rw-r--r--  deps/v8/src/builtins/builtins-arguments-gen.cc  23
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc  515
-rw-r--r--  deps/v8/src/builtins/builtins-array.cc  112
-rw-r--r--  deps/v8/src/builtins/builtins-async-function-gen.cc  28
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.cc  174
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.h  7
-rw-r--r--  deps/v8/src/builtins/builtins-async-generator-gen.cc  80
-rw-r--r--  deps/v8/src/builtins/builtins-async-iterator-gen.cc  9
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc  392
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.h  31
-rw-r--r--  deps/v8/src/builtins/builtins-call.cc  75
-rw-r--r--  deps/v8/src/builtins/builtins-callsite.cc  2
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc  1357
-rw-r--r--  deps/v8/src/builtins/builtins-collections.cc  29
-rw-r--r--  deps/v8/src/builtins/builtins-console.cc  139
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc  440
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.h  5
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc  97
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.h  32
-rw-r--r--  deps/v8/src/builtins/builtins-date-gen.cc  22
-rw-r--r--  deps/v8/src/builtins/builtins-date.cc  6
-rw-r--r--  deps/v8/src/builtins/builtins-debug-gen.cc (renamed from deps/v8/src/builtins/builtins-debug.cc)  0
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h  226
-rw-r--r--  deps/v8/src/builtins/builtins-error.cc  44
-rw-r--r--  deps/v8/src/builtins/builtins-forin-gen.cc  5
-rw-r--r--  deps/v8/src/builtins/builtins-function-gen.cc  12
-rw-r--r--  deps/v8/src/builtins/builtins-function.cc  3
-rw-r--r--  deps/v8/src/builtins/builtins-generator-gen.cc  115
-rw-r--r--  deps/v8/src/builtins/builtins-global-gen.cc  20
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc  21
-rw-r--r--  deps/v8/src/builtins/builtins-internal.cc  13
-rw-r--r--  deps/v8/src/builtins/builtins-interpreter-gen.cc  25
-rw-r--r--  deps/v8/src/builtins/builtins-interpreter.cc  40
-rw-r--r--  deps/v8/src/builtins/builtins-intl-gen.cc  45
-rw-r--r--  deps/v8/src/builtins/builtins-intl.cc  269
-rw-r--r--  deps/v8/src/builtins/builtins-intl.h  30
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc  184
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.h  49
-rw-r--r--  deps/v8/src/builtins/builtins-math-gen.cc  30
-rw-r--r--  deps/v8/src/builtins/builtins-number-gen.cc  1260
-rw-r--r--  deps/v8/src/builtins/builtins-number.cc  24
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc  91
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc  552
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h  39
-rw-r--r--  deps/v8/src/builtins/builtins-promise.cc  20
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc  215
-rw-r--r--  deps/v8/src/builtins/builtins-proxy.cc  33
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc  241
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer.cc  5
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc  420
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.h  11
-rw-r--r--  deps/v8/src/builtins/builtins-string.cc  4
-rw-r--r--  deps/v8/src/builtins/builtins-typedarray-gen.cc  67
-rw-r--r--  deps/v8/src/builtins/builtins-typedarray.cc  2
-rw-r--r--  deps/v8/src/builtins/builtins-wasm-gen.cc  7
-rw-r--r--  deps/v8/src/builtins/builtins.cc  81
-rw-r--r--  deps/v8/src/builtins/builtins.h  35
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc  1037
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc  980
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc  992
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc  1027
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc  1017
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc  16
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc  984
-rw-r--r--  deps/v8/src/builtins/x87/OWNERS  2
-rw-r--r--  deps/v8/src/builtins/x87/builtins-x87.cc  3183
68 files changed, 8541 insertions, 10920 deletions
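Editor's note (not part of the commit): the most visible structural change in the ARM port below is that the old per-builtin entry points (Generate_InOptimizationQueue, Generate_CompileOptimized, Generate_CompileOptimizedConcurrent) are folded into a single MaybeTailCallOptimizedCodeSlot helper that dispatches on the feedback vector's optimized-code slot. The following is a minimal, compilable C++ sketch of that dispatch order only, assuming simplified stand-in types (Slot, Marker, Action) rather than V8's real classes.

// Illustrative sketch of the dispatch implemented by MaybeTailCallOptimizedCodeSlot
// in the diff below. All types here are hypothetical stand-ins, not V8 API.
#include <cstdio>

enum class Marker { kNone, kCompileOptimized, kCompileOptimizedConcurrent, kInOptimizationQueue };

struct Slot {
  bool is_marker;         // the slot holds a Smi optimization marker
  Marker marker;          // valid only when is_marker is true
  bool cell_cleared;      // the WeakCell to the code object has been cleared
  bool marked_for_deopt;  // Code::kMarkedForDeoptimizationBit in the real code
};

enum class Action { kFallThrough, kCompileNotConcurrent, kCompileConcurrent,
                    kTryInstallOptimized, kEvictSlot, kJumpToOptimizedCode };

Action Dispatch(const Slot& slot, bool stack_interrupt_pending) {
  if (slot.is_marker) {
    switch (slot.marker) {
      case Marker::kNone:
        return Action::kFallThrough;           // no trigger: run the interpreter entry
      case Marker::kCompileOptimized:
        return Action::kCompileNotConcurrent;  // Runtime::kCompileOptimized_NotConcurrent
      case Marker::kCompileOptimizedConcurrent:
        return Action::kCompileConcurrent;     // Runtime::kCompileOptimized_Concurrent
      case Marker::kInOptimizationQueue:
        // Only poll for a finished concurrent job when an interrupt is due,
        // mirroring the stack-limit check in the assembly.
        return stack_interrupt_pending ? Action::kTryInstallOptimized
                                       : Action::kFallThrough;
    }
    return Action::kFallThrough;  // unreachable; keeps compilers quiet
  }
  // Otherwise the slot is a WeakCell to a code object.
  if (slot.cell_cleared) return Action::kFallThrough;
  if (slot.marked_for_deopt) return Action::kEvictSlot;   // Runtime::kEvictOptimizedCodeSlot
  return Action::kJumpToOptimizedCode;                    // link closure, tail-call the code
}

int main() {
  Slot s{true, Marker::kCompileOptimizedConcurrent, false, false};
  std::printf("action=%d\n", static_cast<int>(Dispatch(s, false)));
}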
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 286df2eea7..b739170eb5 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -226,7 +226,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r6);
__ EnterBuiltinFrame(cp, r1, r6);
__ Push(r2); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r2);
__ LeaveBuiltinFrame(cp, r1, r6);
@@ -374,7 +374,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r6);
__ EnterBuiltinFrame(cp, r1, r6);
__ Push(r2); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r2);
__ LeaveBuiltinFrame(cp, r1, r6);
@@ -427,23 +427,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(r2);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -456,6 +439,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ Register scratch = r2;
+
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
@@ -486,8 +471,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
__ b(&entry);
__ bind(&loop);
- __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
- __ push(ip);
+ __ ldr(scratch, MemOperand(r4, r5, LSL, kPointerSizeLog2));
+ __ push(scratch);
__ bind(&entry);
__ sub(r5, r5, Operand(1), SetCC);
__ b(ge, &loop);
@@ -503,13 +488,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore context from the frame.
__ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ ldr(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(sp, sp, Operand(scratch, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
}
@@ -543,15 +528,14 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldrb(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorMask));
__ b(ne, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
r4, r5);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ b(&post_instantiation_deopt_entry);
@@ -610,9 +594,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- sp[4*kPointerSize]: context
// -----------------------------------
__ b(&entry);
+
__ bind(&loop);
- __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
- __ push(ip);
+ __ ldr(r6, MemOperand(r4, r5, LSL, kPointerSizeLog2));
+ __ push(r6);
__ bind(&entry);
__ sub(r5, r5, Operand(1), SetCC);
__ b(ge, &loop);
@@ -657,18 +642,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
__ b(ge, &leave_frame);
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldrb(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ b(eq, &use_receiver);
-
} else {
+ __ b(ne, &use_receiver);
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ b(&use_receiver);
}
@@ -715,33 +702,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- r0 : the value to pass to the generator
// -- r1 : the JSGeneratorObject to resume
// -- r2 : the resume mode (tagged)
- // -- r3 : the SuspendFlags of the earlier suspend call (tagged)
// -- lr : return address
// -----------------------------------
- __ SmiUntag(r3);
- __ AssertGeneratorObject(r1, r3);
+ __ AssertGeneratorObject(r1);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ And(r3, r3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ cmp(r3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ b(eq, &async_await);
-
__ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, r3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ jmp(&done_store_input);
-
- __ bind(&async_await);
- __ str(r0, FieldMemOperand(
- r1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
- __ RecordWriteField(r1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- r0, r3, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ jmp(&done_store_input);
-
- __ bind(&done_store_input);
- // `r3` no longer holds SuspendFlags
// Store resume mode into generator object.
__ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
@@ -750,28 +718,31 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
__ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
+ Register scratch = r5;
+
+ // Flood function if we are stepping.
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ mov(ip, Operand(debug_hook));
- __ ldrsb(ip, MemOperand(ip));
- __ cmp(ip, Operand(0));
+ __ mov(scratch, Operand(debug_hook));
+ __ ldrsb(scratch, MemOperand(scratch));
+ __ cmp(scratch, Operand(0));
__ b(ne, &prepare_step_in_if_stepping);
- // Flood function if we need to continue stepping in the suspended generator.
+ // Flood function if we need to continue stepping in the suspended
+ // generator.
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ mov(ip, Operand(debug_suspended_generator));
- __ ldr(ip, MemOperand(ip));
- __ cmp(ip, Operand(r1));
+ __ mov(scratch, Operand(debug_suspended_generator));
+ __ ldr(scratch, MemOperand(scratch));
+ __ cmp(scratch, Operand(r1));
__ b(eq, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
// Push receiver.
- __ ldr(ip, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
- __ Push(ip);
+ __ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
+ __ Push(scratch);
// ----------- S t a t e -------------
// -- r1 : the JSGeneratorObject to resume
@@ -792,7 +763,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label done_loop, loop;
__ bind(&loop);
- __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
+ __ sub(r3, r3, Operand(1), SetCC);
__ b(mi, &done_loop);
__ PushRoot(Heap::kTheHoleValueRootIndex);
__ b(&loop);
@@ -812,14 +783,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(
r0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(r0);
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
__ Move(r3, r1);
__ Move(r1, r4);
- __ ldr(r5, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ Jump(r5);
+ __ ldr(scratch, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Jump(scratch);
}
__ bind(&prepare_step_in_if_stepping);
@@ -893,7 +863,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ mov(cp, Operand(context_address));
__ ldr(cp, MemOperand(cp));
@@ -1011,6 +981,118 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ add(sp, sp, args_count, LeaveCC);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ cmp(smi_entry, Operand(Smi::FromEnum(marker)));
+ __ b(ne, &no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (preserved for callee if needed, and caller)
+ // -- r3 : new target (preserved for callee if needed, and caller)
+ // -- r1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, r0, r1, r3, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = r1;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ ldr(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ cmp(optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ b(eq, &fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ cmp(
+ optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+ // Checking whether the queued function is ready for install is
+ // optional, since we come across interrupts and stack checks elsewhere.
+ // However, not checking may delay installing ready functions, and
+ // always checking would be quite expensive. A good compromise is to
+ // first check against stack limit as a cue for an interrupt signal.
+ __ LoadRoot(scratch2, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(scratch2));
+ __ b(hs, &fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ ldr(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ ldr(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ b(ne, &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1029,38 +1111,33 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = r1;
+ Register feedback_vector = r2;
+
+ // Load the feedback vector from the closure.
+ __ ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r1);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = r4;
- __ ldr(r0, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
- __ ldr(r0, FieldMemOperand(r0, Cell::kValueOffset));
- __ ldr(
- optimized_code_entry,
- FieldMemOperand(r0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ ldr(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- Register debug_info = kInterpreterBytecodeArrayRegister;
- DCHECK(!debug_info.is(r0));
- __ ldr(debug_info, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
- __ SmiTst(debug_info);
- // Load original bytecode array or the debug copy.
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset), eq);
- __ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex), ne);
+ FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+ __ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
+ __ SmiTst(r4);
+ __ b(ne, &maybe_load_debug_bytecode_array);
+ __ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
// TODO(rmcilroy) Remove self healing once liveedit only has to deal with
@@ -1071,15 +1148,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
- __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
- __ ldr(r9, FieldMemOperand(
- r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ ldr(r9,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ add(r9, r9, Operand(Smi::FromInt(1)));
- __ str(r9, FieldMemOperand(
- r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ str(r9,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1141,50 +1218,37 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Dispatch to the first bytecode handler for the function.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+ __ ldr(r4, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
- __ Call(ip);
+ __ Call(r4);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with
+ // SharedFunctionInfo::kFunctionDataOffset.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
+ __ SmiUntag(r9);
+ __ tst(r9, Operand(DebugInfo::kHasBreakInfo));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
+ __ b(&bytecode_array_loaded);
+
// If the shared code is no longer this entry trampoline, then the underlying
// function has been switched to a different kind of code and we heal the
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
__ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(r1, r4, r5);
+ __ str(r4, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, r4, r5);
__ Jump(r4);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ ldr(r5, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
-
- __ b(ne, &gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r1, r6, r5,
- r2);
- __ Jump(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1223,7 +1287,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r2 : the address of the first argument to be pushed. Subsequent
@@ -1246,17 +1310,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments. r2, r4, r5 will be modified.
Generate_InterpreterPushArgs(masm, r3, r2, r4, r5);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r2); // Pass the spread in a register
+ __ sub(r0, r0, Operand(1)); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1281,15 +1349,21 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Label stack_overflow;
// Push a slot for the receiver to be constructed.
- __ mov(ip, Operand::Zero());
- __ push(ip);
+ __ mov(r5, Operand::Zero());
+ __ push(r5);
Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
// Push the arguments. r5, r4, r6 will be modified.
Generate_InterpreterPushArgs(masm, r0, r4, r5, r6);
- __ AssertUndefinedOrAllocationSite(r2, r5);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r2); // Pass the spread in a register
+ __ sub(r0, r0, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(r2, r5);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r1);
@@ -1329,8 +1403,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructArray(
Label stack_overflow;
// Push a slot for the receiver to be constructed.
- __ mov(ip, Operand::Zero());
- __ push(ip);
+ __ mov(r5, Operand::Zero());
+ __ push(r5);
Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
@@ -1387,9 +1461,11 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ mov(pc, ip);
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ ldr(scratch, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+ kPointerSizeLog2));
+ __ Jump(scratch);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1415,6 +1491,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (preserved for callee)
+ // -- r3 : new target (preserved for callee)
+ // -- r1 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = r1;
+
+ // Get the feedback vector.
+ Register feedback_vector = r2;
+ __ ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1423,43 +1526,24 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = r1;
- Register index = r2;
+ Register feedback_vector = r2;
// Do we have a valid feedback vector?
- __ ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ ldr(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = r4;
- __ ldr(entry, FieldMemOperand(
- index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ ldr(r5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ b(ne, &gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r6, r5, r2);
- __ Jump(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = r4;
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ ldrb(r5, FieldMemOperand(entry,
- SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ b(ne, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1477,15 +1561,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1568,7 +1643,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(2, 0, r2);
+ __ PrepareCallCFunction(2, 0);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
@@ -1596,7 +1671,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(2, 0, r2);
+ __ PrepareCallCFunction(2, 0);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
@@ -1619,30 +1694,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+ // Preserve possible return result from lazy deopt.
+ __ push(r0);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(r0);
}
__ add(sp, sp, Operand(kPointerSize)); // Ignore state
- __ mov(pc, lr); // Jump to miss handler
+ __ mov(pc, lr); // Jump to ContinueToBuiltin stub
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ str(r0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+ __ ldr(fp, MemOperand(
+ sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Pop(scratch);
+ __ add(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(lr);
+ __ add(pc, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
}
+} // namespace
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1761,45 +1876,39 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : receiver
// -----------------------------------
- // 1. Load receiver into r1, argArray into r0 (if present), remove all
+ // 1. Load receiver into r1, argArray into r2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ mov(r3, r2);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ mov(r2, r5);
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
__ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArg
+ __ ldr(r5, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArg
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argArray
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argArray
__ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r2, MemOperand(sp, 0));
- __ mov(r0, r3);
+ __ str(r5, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r0 : argArray
+ // -- r2 : argArray
// -- r1 : receiver
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(r1, &receiver_not_callable);
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsCallable));
- __ b(eq, &receiver_not_callable);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r0, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1808,13 +1917,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ mov(r0, Operand(0));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ str(r1, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1840,13 +1942,14 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r0: actual number of arguments
// r1: callable
{
+ Register scratch = r3;
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ bind(&loop);
- __ ldr(ip, MemOperand(r2, -kPointerSize));
- __ str(ip, MemOperand(r2));
+ __ ldr(scratch, MemOperand(r2, -kPointerSize));
+ __ str(scratch, MemOperand(r2));
__ sub(r2, r2, Operand(kPointerSize));
__ cmp(r2, sp);
__ b(ne, &loop);
@@ -1869,49 +1972,36 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+ // 1. Load target into r1 (if present), argumentsList into r2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ mov(r5, r1);
__ mov(r2, r1);
- __ mov(r3, r1);
__ sub(r4, r0, Operand(1), SetCC);
__ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // target
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArgument
+ __ ldr(r5, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArgument
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
__ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r2, MemOperand(sp, 0));
- __ mov(r0, r3);
+ __ str(r5, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r0 : argumentsList
+ // -- r2 : argumentsList
// -- r1 : target
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(r1, &target_not_callable);
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsCallable));
- __ b(eq, &target_not_callable);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ str(r1, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1923,7 +2013,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+ // 1. Load target into r1 (if present), argumentsList into r2 (if present),
// new.target into r3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -1939,48 +2029,26 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ sub(r4, r4, Operand(1), SetCC, ge);
__ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // new.target
__ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ mov(r0, r2);
}
// ----------- S t a t e -------------
- // -- r0 : argumentsList
+ // -- r2 : argumentsList
// -- r3 : new.target
// -- r1 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(r1, &target_not_constructor);
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsConstructor));
- __ b(eq, &target_not_constructor);
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(r3, &new_target_not_constructor);
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsConstructor));
- __ b(eq, &new_target_not_constructor);
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ str(r1, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ str(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2007,154 +2075,61 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r0 : argumentsList
- // -- r1 : target
- // -- r3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- r1 : target
+ // -- r0 : number of parameters on the stack (not including the receiver)
+ // -- r2 : arguments list (a FixedArray)
+ // -- r4 : len (number of elements to push from args)
+ // -- r3 : new.target (for [[Construct]])
// -----------------------------------
+ __ AssertFixedArray(r2);
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(r0, &create_runtime);
-
- // Load the map of argumentsList into r2.
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Load native context into r4.
- __ ldr(r4, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ ldr(ip, ContextMemOperand(r4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ cmp(ip, r2);
- __ b(eq, &create_arguments);
- __ ldr(ip, ContextMemOperand(r4, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ cmp(ip, r2);
- __ b(eq, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(r2, ip, JS_ARRAY_TYPE);
- __ b(eq, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r3, r0);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(r1, r3);
- __ ldr(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ SmiUntag(r2);
- }
- __ jmp(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ ldr(r2, FieldMemOperand(r0, JSArgumentsObject::kLengthOffset));
- __ ldr(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ ldr(ip, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ cmp(r2, ip);
- __ b(ne, &create_runtime);
- __ SmiUntag(r2);
- __ mov(r0, r4);
- __ b(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
- __ ldr(r4, ContextMemOperand(r4, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ cmp(r2, r4);
- __ b(ne, &create_runtime);
- __ LoadRoot(r4, Heap::kArrayProtectorRootIndex);
- __ ldr(r2, FieldMemOperand(r4, PropertyCell::kValueOffset));
- __ cmp(r2, Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ b(ne, &create_runtime);
- __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
- __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
- __ SmiUntag(r2);
- __ b(&done_create);
-
- // Try to create the list from a JSArray object.
- // -- r2 and r4 must be preserved till bne create_holey_array.
- __ bind(&create_array);
- __ ldr(r5, FieldMemOperand(r2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r5);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmp(r5, Operand(FAST_HOLEY_ELEMENTS));
- __ b(hi, &create_runtime);
- // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
- __ tst(r5, Operand(1));
- __ b(ne, &create_holey_array);
- // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
- __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
- __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
- __ SmiUntag(r2);
-
- __ bind(&done_create);
- }
+ Register scratch = r8;
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
- __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
- // Make ip the space we have left. The stack might already be overflowed
- // here which will cause ip to become negative.
- __ sub(ip, sp, ip);
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // The stack might already be overflowed here which will cause 'scratch' to
+ // become negative.
+ __ sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
- __ cmp(ip, Operand(r2, LSL, kPointerSizeLog2));
+ __ cmp(scratch, Operand(r4, LSL, kPointerSizeLog2));
__ b(gt, &done); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- r1 : target
- // -- r0 : args (a FixedArray built from argumentsList)
- // -- r2 : len (number of elements to push from args)
- // -- r3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ mov(r4, Operand(0));
+ __ mov(r6, Operand(0));
__ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
Label done, loop;
__ bind(&loop);
- __ cmp(r4, r2);
+ __ cmp(r6, r4);
__ b(eq, &done);
- __ add(ip, r0, Operand(r4, LSL, kPointerSizeLog2));
- __ ldr(ip, FieldMemOperand(ip, FixedArray::kHeaderSize));
- __ cmp(r5, ip);
- __ mov(ip, r6, LeaveCC, eq);
- __ Push(ip);
- __ add(r4, r4, Operand(1));
+ __ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
+ __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ cmp(scratch, r5);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
+ __ Push(scratch);
+ __ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done);
- __ Move(r0, r4);
+ __ add(r0, r0, r6);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r3 : the new.target (for [[Construct]] calls)
@@ -2162,11 +2137,15 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
// -- r2 : start index (to support rest parameters)
// -----------------------------------
+ Register scratch = r6;
+
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ ldr(scratch,
+ MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ cmp(scratch,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
__ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -2180,11 +2159,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Load the length from the ArgumentsAdaptorFrame.
__ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(r5);
}
__ bind(&arguments_done);
Label stack_done, stack_overflow;
- __ SmiUntag(r5);
__ sub(r5, r5, r2, SetCC);
__ b(le, &stack_done);
{
@@ -2198,8 +2177,8 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ add(r0, r0, r5);
__ bind(&loop);
{
- __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
- __ push(ip);
+ __ ldr(scratch, MemOperand(r4, r5, LSL, kPointerSizeLog2));
+ __ push(scratch);
__ sub(r5, r5, Operand(1), SetCC);
__ b(ne, &loop);
}
@@ -2214,103 +2193,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
- __ ldrb(scratch1, MemOperand(scratch1));
- __ cmp(scratch1, Operand(0));
- __ b(eq, &done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ ldr(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ b(ne, &no_interpreter_frame);
- __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ ldr(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ ldr(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSFunction)
@@ -2321,21 +2206,19 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorMask));
__ b(ne, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kNativeByteOffset));
- __ tst(r3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r3, Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ b(ne, &done_convert);
{
// ----------- S t a t e -------------
@@ -2398,13 +2281,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r0, r3, r4, r5);
- }
-
__ ldr(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(r2);
ParameterCount actual(r0);
ParameterCount expected(r2);
__ InvokeFunctionCode(r1, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2463,6 +2341,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+ Register scratch = r6;
+
// Relocate arguments down the stack.
{
Label loop, done_loop;
@@ -2470,8 +2350,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ cmp(r5, r0);
__ b(gt, &done_loop);
- __ ldr(ip, MemOperand(sp, r4, LSL, kPointerSizeLog2));
- __ str(ip, MemOperand(sp, r5, LSL, kPointerSizeLog2));
+ __ ldr(scratch, MemOperand(sp, r4, LSL, kPointerSizeLog2));
+ __ str(scratch, MemOperand(sp, r5, LSL, kPointerSizeLog2));
__ add(r4, r4, Operand(1));
__ add(r5, r5, Operand(1));
__ b(&loop);
@@ -2486,8 +2366,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ sub(r4, r4, Operand(1), SetCC);
- __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2));
- __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ ldr(scratch, MemOperand(r2, r4, LSL, kPointerSizeLog2));
+ __ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ add(r0, r0, Operand(1));
__ b(gt, &loop);
}
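The two loops above first slide the existing stack arguments down to make room and then copy [[BoundArguments]] into the gap; together with the [[BoundThis]] patch in Generate_CallBoundFunctionImpl below, the result is the usual bound-call argument layout. A rough sketch in plain C++ (hypothetical types, heap and stack details omitted):

#include <string>
#include <vector>

struct BoundFunction {
  std::string bound_this;               // [[BoundThis]]
  std::vector<std::string> bound_args;  // [[BoundArguments]]
};

// receiver <- [[BoundThis]], then [[BoundArguments]], then the call's own args.
std::vector<std::string> BuildBoundCallArguments(
    const BoundFunction& f, const std::vector<std::string>& call_args) {
  std::vector<std::string> out;
  out.reserve(1 + f.bound_args.size() + call_args.size());
  out.push_back(f.bound_this);
  out.insert(out.end(), f.bound_args.begin(), f.bound_args.end());
  out.insert(out.end(), call_args.begin(), call_args.end());
  return out;
}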
@@ -2498,36 +2378,31 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r1);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r0, r3, r4, r5);
- }
-
// Patch the receiver to [[BoundThis]].
- __ ldr(ip, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
- __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
+ __ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
- __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+
+ __ mov(r3, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
masm->isolate())));
- __ ldr(ip, MemOperand(ip));
- __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r3));
+ __ add(pc, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the target to call (can be any Object).
@@ -2537,10 +2412,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(r1, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
__ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
@@ -2548,22 +2423,13 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ tst(r4, Operand(1 << Map::kIsCallable));
__ b(eq, &non_callable);
+  // Check if the target is a proxy and, if so, call the external CallProxy builtin.
__ cmp(r5, Operand(JS_PROXY_TYPE));
__ b(ne, &non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r0, r3, r4, r5);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(r1);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ add(r0, r0, Operand(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ mov(r5, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ ldr(r5, MemOperand(r5));
+ __ add(pc, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2573,7 +2439,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2585,161 +2451,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = r0;
- Register constructor = r1;
- Register new_target = r3;
-
- Register scratch = r2;
- Register scratch2 = r6;
-
- Register spread = r4;
- Register spread_map = r5;
-
- Register spread_len = r5;
-
- Label runtime_call, push_args;
- __ ldr(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
- __ b(ne, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ ldr(scratch2, NativeContextMemOperand());
- __ ldr(scratch2,
- ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ cmp(scratch, scratch2);
- __ b(ne, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ b(ne, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ ldr(scratch2, NativeContextMemOperand());
- __ ldr(scratch,
- ContextMemOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ ldr(scratch2,
- ContextMemOperand(
- scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ cmp(scratch, scratch2);
- __ b(ne, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ cmp(scratch, Operand(FAST_HOLEY_ELEMENTS));
- __ b(hi, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmp(scratch, Operand(FAST_SMI_ELEMENTS));
- __ b(eq, &no_protector_check);
- __ cmp(scratch, Operand(FAST_ELEMENTS));
- __ b(eq, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ b(ne, &runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ ldr(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ b(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor);
- __ Push(new_target);
- __ Push(argc);
- __ Push(spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, r0);
- __ Pop(argc);
- __ Pop(new_target);
- __ Pop(constructor);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ ldr(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ add(argc, argc, spread_len);
- __ sub(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
- __ b(gt, &done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ mov(scratch, Operand(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(scratch, spread_len);
- __ b(eq, &done);
- __ add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
- __ ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ add(scratch, scratch, Operand(1));
- __ b(&loop);
- __ bind(&done);
- }
-}
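The checks in the removed CheckSpreadAndPushToStack boil down to one question: may the spread skip the iterator protocol and read the array's backing store directly? A condensed sketch of that decision, with hypothetical flags standing in for the map, protector-cell, and elements-kind reads above:

enum ElementsKind {
  FAST_SMI_ELEMENTS,
  FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS,
  FAST_HOLEY_ELEMENTS,
  OTHER_ELEMENTS
};

bool CanSpreadWithoutIterating(bool is_js_array, bool has_original_array_prototype,
                               bool array_iterator_protector_valid,
                               bool array_protector_valid, ElementsKind kind) {
  if (!is_js_array || !has_original_array_prototype) return false;
  if (!array_iterator_protector_valid) return false;   // iteration may be customized
  if (kind == FAST_SMI_ELEMENTS || kind == FAST_ELEMENTS) return true;  // packed: no holes
  if (kind == FAST_HOLEY_SMI_ELEMENTS || kind == FAST_HOLEY_ELEMENTS)
    return array_protector_valid;                       // holes would read Array.prototype
  return false;                                         // anything slower goes to the runtime
}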
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
- // -- r1 : the constructor to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push r3 to save it.
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2779,9 +2490,10 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
- __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
- __ ldr(ip, MemOperand(ip));
- __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ __ mov(r2, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ ldr(r2, MemOperand(r2));
+ __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
}
// static
@@ -2855,19 +2567,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
- // -- r1 : the constructor to call (can be any Object)
- // -- r3 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (untagged)
@@ -2919,10 +2618,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
+ Register scratch = r5;
+
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
// Calculate copy start address into r0 and copy end address into r4.
// r0: actual number of arguments as a smi
@@ -2943,8 +2644,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
- __ ldr(ip, MemOperand(r0, 0));
- __ push(ip);
+ __ ldr(scratch, MemOperand(r0, 0));
+ __ push(scratch);
__ cmp(r0, r4); // Compare before moving to next argument.
__ sub(r0, r0, Operand(kPointerSize));
__ b(ne, &copy);
@@ -2955,7 +2656,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r0: actual number of arguments as a smi
@@ -2971,9 +2672,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: new target (passed through to callee)
Label copy;
__ bind(&copy);
+
// Adjust load for return address and receiver.
- __ ldr(ip, MemOperand(r0, 2 * kPointerSize));
- __ push(ip);
+ __ ldr(scratch, MemOperand(r0, 2 * kPointerSize));
+ __ push(scratch);
+
__ cmp(r0, fp); // Compare before moving to next argument.
__ sub(r0, r0, Operand(kPointerSize));
__ b(ne, &copy);
@@ -2982,7 +2685,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: new target (passed through to callee)
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
// Adjust for frame.
__ sub(r4, r4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
@@ -2990,7 +2693,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label fill;
__ bind(&fill);
- __ push(ip);
+ __ push(scratch);
__ cmp(sp, r4);
__ b(ne, &fill);
}
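The fill loop above pushes the undefined root until sp reaches the computed end address, which is the "too few parameters" half of argument adaptation. In plain C++ the adaptor's job looks roughly like this (hypothetical value type):

#include <cstddef>
#include <string>
#include <vector>

std::vector<std::string> AdaptArguments(const std::vector<std::string>& actual,
                                        std::size_t expected) {
  std::vector<std::string> adapted;
  // Enough parameters: pass the first `expected` arguments through unchanged.
  for (std::size_t i = 0; i < expected && i < actual.size(); ++i)
    adapted.push_back(actual[i]);
  // Too few parameters: pad the remainder with undefined.
  while (adapted.size() < expected) adapted.push_back("undefined");
  return adapted;
}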
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 7e96dc4fb3..619c5de97b 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -227,7 +227,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(x6);
__ EnterBuiltinFrame(cp, x1, x6);
__ Push(x2); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(x2);
__ LeaveBuiltinFrame(cp, x1, x6);
@@ -379,7 +379,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(x6);
__ EnterBuiltinFrame(cp, x1, x6);
__ Push(x2); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(x2);
__ LeaveBuiltinFrame(cp, x1, x6);
@@ -428,22 +428,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Br(x2);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However, not
- // checking may delay installing ready functions, and always checking would be
- // quite expensive. A good compromise is to first check against stack limit as
- // a cue for an interrupt signal.
- Label ok;
- __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ Bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -555,15 +539,14 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrb(x4,
- FieldMemOperand(x4, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(x4, Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(w4, Operand(SharedFunctionInfo::kDerivedConstructorMask));
__ B(ne, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
x4, x5);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ B(&post_instantiation_deopt_entry);
@@ -677,18 +660,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_frame, ge);
- __ Bind(&other_result);
// The result is now neither undefined nor an object.
+ __ Bind(&other_result);
+ __ Ldr(x4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ldr(x4, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(w4, Operand(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ Ldr(x4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ Ldr(x4, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrb(x4,
- FieldMemOperand(x4, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(x4, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ B(eq, &use_receiver);
-
} else {
+ __ B(ne, &use_receiver);
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ B(&use_receiver);
}
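The derived-constructor and class-constructor checks in this stub now read a single 32-bit compiler-hints word and test it against a mask, rather than loading individual flag bytes. A small illustration of that pattern (the bit positions below are made up for the example; only the masking idea matches the code):

#include <cstdint>

constexpr uint32_t kIsDerivedConstructorBit = 1u << 4;  // example position only
constexpr uint32_t kIsClassConstructorBit   = 1u << 5;  // example position only

// Derived class constructors do not allocate an implicit receiver.
bool CreatesImplicitReceiver(uint32_t compiler_hints) {
  return (compiler_hints & kIsDerivedConstructorBit) == 0;
}

// Only class constructors are restricted in what their construct call may return.
bool IsClassConstructor(uint32_t compiler_hints) {
  return (compiler_hints & kIsClassConstructorBit) != 0;
}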
@@ -741,32 +726,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- x0 : the value to pass to the generator
// -- x1 : the JSGeneratorObject to resume
// -- x2 : the resume mode (tagged)
- // -- x3 : the SuspendFlags of the earlier suspend call (tagged)
// -- lr : return address
// -----------------------------------
- __ SmiUntag(x3);
- __ AssertGeneratorObject(x1, x3);
+ __ AssertGeneratorObject(x1);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ And(x3, x3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ Cmp(x3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ B(eq, &async_await);
-
__ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ Bind(&async_await);
- __ Str(x0, FieldMemOperand(
- x1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
- __ RecordWriteField(x1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- x0, x3, kLRHasNotBeenSaved, kDontSaveFPRegs);
-
- __ Bind(&done_store_input);
- // `x3` no longer holds SuspendFlags
// Store resume mode into generator object.
__ Str(x2, FieldMemOperand(x1, JSGeneratorObject::kResumeModeOffset));
@@ -915,7 +882,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- __ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ __ Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
masm->isolate())));
__ Ldr(cp, MemOperand(scratch));
@@ -1033,6 +1000,117 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Drop(args_count, 1);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (preserved for callee if needed, and caller)
+ // -- x3 : new target (preserved for callee if needed, and caller)
+ // -- x1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, x0, x1, x3, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = x1;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ Ldr(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+  // optimization marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ CompareAndBranch(optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
+ &fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ Cmp(
+ optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ Ldr(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ TestAndBranchIfAnySet(scratch2, 1 << Code::kMarkedForDeoptimizationBit,
+ &found_deoptimized_code);
+
+    // The optimized code is good; get it into the closure, link the closure
+    // into the optimized functions list, and then tail-call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
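Summarized in plain C++, the new helper's decision tree looks like the sketch below (the types and the variant are hypothetical; in the generated code the slot is a tagged word that is either a Smi marker or a WeakCell):

#include <variant>

enum class OptimizationMarker {
  kNone, kCompileOptimized, kCompileOptimizedConcurrent, kInOptimizationQueue
};
struct Code { bool marked_for_deoptimization; };
struct WeakCell { Code* value; };  // value == nullptr once the cell has been cleared

enum class Action {
  kFallThroughToInterpreter, kCompileNotConcurrent, kCompileConcurrent,
  kMaybeInstallQueuedCode, kJumpToOptimizedCode, kEvictAndReenterClosureCode
};

Action DispatchOptimizedCodeSlot(const std::variant<OptimizationMarker, WeakCell>& slot) {
  if (const auto* marker = std::get_if<OptimizationMarker>(&slot)) {
    switch (*marker) {
      case OptimizationMarker::kNone:
        return Action::kFallThroughToInterpreter;
      case OptimizationMarker::kCompileOptimized:
        return Action::kCompileNotConcurrent;
      case OptimizationMarker::kCompileOptimizedConcurrent:
        return Action::kCompileConcurrent;
      case OptimizationMarker::kInOptimizationQueue:
        return Action::kMaybeInstallQueuedCode;  // only if not at the stack limit
    }
  }
  const WeakCell& cell = std::get<WeakCell>(slot);
  if (cell.value == nullptr) return Action::kFallThroughToInterpreter;
  if (cell.value->marked_for_deoptimization) return Action::kEvictAndReenterClosureCode;
  return Action::kJumpToOptimizedCode;
}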
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1051,37 +1129,32 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = x1;
+ Register feedback_vector = x2;
+
+ // Load the feedback vector from the closure.
+ __ Ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ Push(lr, fp, cp, x1);
+ __ Push(lr, fp, cp, closure);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = x7;
- __ Ldr(x0, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
- __ Ldr(x0, FieldMemOperand(x0, Cell::kValueOffset));
- __ Ldr(
- optimized_code_entry,
- FieldMemOperand(x0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ Ldr(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
-
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- Register debug_info = kInterpreterBytecodeArrayRegister;
- Label load_debug_bytecode_array, bytecode_array_loaded;
- DCHECK(!debug_info.is(x0));
- __ Ldr(debug_info, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
- __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ldr(x11, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
+ __ JumpIfNotSmi(x11, &maybe_load_debug_bytecode_array);
__ Bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -1093,7 +1166,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ Ldr(x11, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(x10, FieldMemOperand(
x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -1170,10 +1243,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, x2);
__ Ret();
- // Load debug copy of the bytecode array.
- __ Bind(&load_debug_bytecode_array);
+ // Load debug copy of the bytecode array if it exists.
+  // kInterpreterBytecodeArrayRegister is already loaded with the value at
+  // SharedFunctionInfo::kFunctionDataOffset.
+ __ Bind(&maybe_load_debug_bytecode_array);
+ __ Ldr(x10, FieldMemOperand(x11, DebugInfo::kFlagsOffset));
+ __ SmiUntag(x10);
+ __ TestAndBranchIfAllClear(x10, DebugInfo::kHasBreakInfo,
+ &bytecode_array_loaded);
__ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldMemOperand(x11, DebugInfo::kDebugBytecodeArrayOffset));
__ B(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -1181,35 +1260,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ Ldr(x7, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x7, FieldMemOperand(x7, SharedFunctionInfo::kCodeOffset));
__ Add(x7, x7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Str(x7, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(x1, x7, x5);
+ __ Str(x7, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, x7, x5);
__ Jump(x7);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ Ldr(w8, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
- &gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, x1, x4, x5,
- x13);
- __ Jump(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1255,7 +1311,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x2 : the address of the first argument to be pushed. Subsequent
@@ -1280,17 +1336,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments. x2, x4, x5, x6 will be modified.
Generate_InterpreterPushArgs(masm, x3, x2, x4, x5, x6);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(x2); // Pass the spread in a register
+ __ Sub(x0, x0, 1); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1322,7 +1382,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Push the arguments. x5, x4, x6, x7 will be modified.
Generate_InterpreterPushArgs(masm, x0, x4, x5, x6, x7);
- __ AssertUndefinedOrAllocationSite(x2, x6);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(x2); // Pass the spread in a register
+ __ Sub(x0, x0, 1); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(x2, x6);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(x1);
@@ -1446,6 +1512,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (preserved for callee)
+ // -- x3 : new target (preserved for callee)
+ // -- x1 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = x1;
+
+ // Get the feedback vector.
+ Register feedback_vector = x2;
+ __ Ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1454,50 +1547,29 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = x1;
- Register index = x2;
+ Register feedback_vector = x2;
// Do we have a valid feedback vector?
- __ Ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ Ldr(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ Ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = x7;
- __ Ldr(entry, FieldMemOperand(
- index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ Ldr(w8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
- &gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, x4, x5, x13);
- __ Jump(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// We found no optimized code.
- Register temp = x5;
- __ Bind(&try_shared);
+ Register entry = x7;
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ Ldrb(temp, FieldMemOperand(
- entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ TestAndBranchIfAnySet(
- temp, 1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte,
- &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ Move(temp, masm->CodeObject());
- __ Cmp(entry, temp);
+ __ Move(x5, masm->CodeObject());
+ __ Cmp(entry, x5);
__ B(eq, &gotta_call_runtime);
// Install the SFI's code entry.
@@ -1510,15 +1582,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1657,37 +1720,73 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- // TODO(jbramley): Is it correct (and appropriate) to use safepoint
- // registers here? According to the comment above, we should only need to
- // preserve the registers with parameters.
- __ PushXRegList(kSafepointSavedRegisters);
+ // Preserve possible return result from lazy deopt.
+ __ Push(x0);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ PopXRegList(kSafepointSavedRegisters);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ Pop(x0);
}
// Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
__ Drop(1);
- // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
- // into lr before it jumps here.
+ // Jump to the ContinueToBuiltin stub. Deoptimizer::EntryGenerator::Generate
+ // loads this into lr before it jumps here.
__ Br(lr);
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ Str(x0, MemOperand(
+ jssp,
+ config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+ __ ldr(fp,
+ MemOperand(jssp,
+ BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ip0);
+ __ Add(jssp, jssp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(lr);
+ __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Br(ip0);
+}
+} // namespace
+
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
}
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1807,16 +1906,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
Register argc = x0;
- Register arg_array = x0;
+ Register arg_array = x2;
Register receiver = x1;
- Register this_arg = x2;
+ Register this_arg = x0;
Register undefined_value = x3;
Register null_value = x4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- // 1. Load receiver into x1, argArray into x0 (if present), remove all
+ // 1. Load receiver into x1, argArray into x2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
@@ -1841,19 +1940,14 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- x0 : argArray
+ // -- x2 : argArray
// -- x1 : receiver
- // -- x3 : undefined root value
// -- jssp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(receiver, &receiver_not_callable);
- __ Ldr(x10, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(w10, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable,
- &receiver_not_callable);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
@@ -1861,10 +1955,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Ccmp(arg_array, undefined_value, ZFlag, ne);
__ B(eq, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target in x3).
- DCHECK(undefined_value.Is(x3));
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1874,13 +1967,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
DCHECK(receiver.Is(x1));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ Bind(&receiver_not_callable);
- {
- __ Poke(receiver, 0);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1940,14 +2026,14 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ReflectApply");
Register argc = x0;
- Register arguments_list = x0;
+ Register arguments_list = x2;
Register target = x1;
- Register this_argument = x2;
+ Register this_argument = x4;
Register undefined_value = x3;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+ // 1. Load target into x1 (if present), argumentsList into x2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
@@ -1974,29 +2060,18 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- x0 : argumentsList
+ // -- x2 : argumentsList
// -- x1 : target
// -- jssp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(target, &target_not_callable);
- __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
- __ Ldr(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable, &target_not_callable);
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target in x3).
- DCHECK(undefined_value.Is(x3));
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ Bind(&target_not_callable);
- {
- __ Poke(target, 0);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -2010,14 +2085,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
Register argc = x0;
- Register arguments_list = x0;
+ Register arguments_list = x2;
Register target = x1;
Register new_target = x3;
Register undefined_value = x4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+ // 1. Load target into x1 (if present), argumentsList into x2 (if present),
// new.target into x3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -2045,53 +2120,33 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- x0 : argumentsList
+ // -- x2 : argumentsList
// -- x1 : target
// -- x3 : new.target
// -- jssp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(target, &target_not_constructor);
- __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
- __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
- &target_not_constructor);
-
- // 3. Make sure the new.target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(new_target, &new_target_not_constructor);
- __ Ldr(x10, FieldMemOperand(new_target, HeapObject::kMapOffset));
- __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
- &new_target_not_constructor);
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ Bind(&target_not_constructor);
- {
- __ Poke(target, 0);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ Bind(&new_target_not_constructor);
- {
- __ Poke(new_target, 0);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ SmiTag(x10, x0);
- __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Push(lr, fp);
- __ Push(x11, x1, x10);
- __ Add(fp, jssp,
- StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Push(x11, x1); // x1: function
+ // We do not yet push the number of arguments, to maintain a 16-byte aligned
+ // stack pointer. This is done in step (3) in
+ // Generate_ArgumentsAdaptorTrampoline.
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
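Deferring the argument-count push keeps jssp 16-byte aligned, because arm64 frames must always claim an even number of 8-byte slots. A tiny check that captures the invariant (the values are assumed for illustration, not taken from the tree):

#include <cstddef>

constexpr std::size_t kPointerSize = 8;
constexpr std::size_t RoundUpToEvenSlots(std::size_t slots) {
  return (slots + 1) & ~std::size_t{1};
}

static_assert(RoundUpToEvenSlots(3) * kPointerSize % 16 == 0,
              "an even slot count keeps the stack pointer 16-byte aligned");
static_assert(RoundUpToEvenSlots(4) == 4, "even counts are left alone");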
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2104,118 +2159,30 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
kPointerSize)));
__ Mov(jssp, fp);
__ Pop(fp, lr);
+
+ // Drop actual parameters and receiver.
+ // TODO(all): This will need to be rounded up to a multiple of two when using
+ // the CSP, as we will have claimed an even number of slots in total for the
+ // parameters.
__ DropBySMI(x10, kXRegSize);
__ Drop(1);
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- x0 : argumentsList
- // -- x1 : target
- // -- x3 : new.target (checked to be constructor or undefined)
- // -- jssp[0] : thisArgument
+ // -- x1 : target
+ // -- x0 : number of parameters on the stack (not including the receiver)
+ // -- x2 : arguments list (a FixedArray)
+ // -- x4 : len (number of elements to push from args)
+ // -- x3 : new.target (for [[Construct]])
// -----------------------------------
+ __ AssertFixedArray(x2);
- Register arguments_list = x0;
- Register target = x1;
- Register new_target = x3;
-
- Register args = x0;
- Register len = x2;
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(arguments_list, &create_runtime);
-
- // Load native context.
- Register native_context = x4;
- __ Ldr(native_context, NativeContextMemOperand());
-
- // Load the map of argumentsList.
- Register arguments_list_map = x2;
- __ Ldr(arguments_list_map,
- FieldMemOperand(arguments_list, HeapObject::kMapOffset));
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ Ldr(x10, ContextMemOperand(native_context,
- Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Ldr(x11, ContextMemOperand(native_context,
- Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ Cmp(arguments_list_map, x10);
- __ Ccmp(arguments_list_map, x11, ZFlag, ne);
- __ B(eq, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(arguments_list_map, x10, JS_ARRAY_TYPE);
- __ B(eq, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ Bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(target, new_target, arguments_list);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(new_target, target);
- __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
- FixedArray::kLengthOffset));
- }
- __ B(&done_create);
-
- // Try to create the list from an arguments object.
- __ Bind(&create_arguments);
- __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
- JSArgumentsObject::kLengthOffset));
- __ Ldr(x10, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
- __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
- __ CompareAndBranch(len, x11, ne, &create_runtime);
- __ Mov(args, x10);
- __ B(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ Bind(&create_holey_array);
- // -- x2 : arguments_list_map
- // -- x4 : native_context
- Register arguments_list_prototype = x2;
- __ Ldr(arguments_list_prototype,
- FieldMemOperand(arguments_list_map, Map::kPrototypeOffset));
- __ Ldr(x10, ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Cmp(arguments_list_prototype, x10);
- __ B(ne, &create_runtime);
- __ LoadRoot(x10, Heap::kArrayProtectorRootIndex);
- __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, PropertyCell::kValueOffset));
- __ Cmp(x11, Isolate::kProtectorValid);
- __ B(ne, &create_runtime);
- __ Ldrsw(len,
- UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
- __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
- __ B(&done_create);
-
- // Try to create the list from a JSArray object.
- __ Bind(&create_array);
- __ Ldr(x10, FieldMemOperand(arguments_list_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(x10);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- // Check if it is a holey array, the order of the cmp is important as
- // anything higher than FAST_HOLEY_ELEMENTS will fall back to runtime.
- __ Cmp(x10, FAST_HOLEY_ELEMENTS);
- __ B(hi, &create_runtime);
- // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
- __ Tbnz(x10, 0, &create_holey_array);
- // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
- __ Ldrsw(len,
- UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
- __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
-
- __ Bind(&done_create);
- }
+ Register arguments_list = x2;
+ Register argc = x0;
+ Register len = x4;
// Check for stack overflow.
{
@@ -2233,21 +2200,13 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Bind(&done);
}
- // ----------- S t a t e -------------
- // -- x0 : args (a FixedArray built from argumentsList)
- // -- x1 : target
- // -- x2 : len (number of elements to push from args)
- // -- x3 : new.target (checked to be constructor or undefined)
- // -- jssp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
Label done, push, loop;
- Register src = x4;
+ Register src = x5;
- __ Add(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Mov(x0, len); // The 'len' argument for Call() or Construct().
+ __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(argc, argc, len); // The 'len' argument for Call() or Construct().
__ Cbz(len, &done);
Register the_hole_value = x11;
Register undefined_value = x12;
@@ -2266,28 +2225,13 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Bind(&done);
}
- // ----------- S t a t e -------------
- // -- x0 : argument count (len)
- // -- x1 : target
- // -- x3 : new.target (checked to be constructor or undefined)
- // -- jssp[0] : args[len-1]
- // -- jssp[8] : args[len-2]
- // ... : ...
- // -- jssp[8*(len-2)] : args[1]
- // -- jssp[8*(len-1)] : args[0]
- // -----------------------------------
-
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(new_target, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
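A minimal sketch (not part of the patch) of what the argument-push loop in the builtin above does, modeled in plain C++: holes in the backing FixedArray are replaced with undefined while pushing, and the argument count is bumped by the number of pushed elements. The integer encodings below are purely illustrative stand-ins for tagged values.

#include <cstddef>
#include <vector>

// Stand-ins for tagged values; -1 plays the role of the hole and 0 the role
// of undefined. These encodings are assumptions for illustration only.
constexpr int kTheHole = -1;
constexpr int kUndefined = 0;

inline void PushVarargs(std::vector<int>& stack, const int* args,
                        size_t len, size_t& argc) {
  argc += len;  // mirrors "__ Add(argc, argc, len)" in the builtin
  for (size_t i = 0; i < len; ++i) {
    // Mirrors the load / compare-against-hole / Csel-undefined / push sequence.
    stack.push_back(args[i] == kTheHole ? kUndefined : args[i]);
  }
}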
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x3 : the new.target (for [[Construct]] calls)
@@ -2346,100 +2290,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ Mov(scratch1, Operand(is_tail_call_elimination_enabled));
- __ Ldrb(scratch1, MemOperand(scratch1));
- __ Cmp(scratch1, Operand(0));
- __ B(eq, &done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ Ldr(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ B(ne, &no_interpreter_frame);
- __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(ne, &no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ Ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ B(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
ASM_LOCATION("Builtins::Generate_CallFunction");
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
@@ -2452,8 +2305,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w3, FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
+ __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::kClassConstructorMask,
&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2463,8 +2315,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ TestAndBranchIfAnySet(w3,
- (1 << SharedFunctionInfo::kNative) |
- (1 << SharedFunctionInfo::kStrictModeFunction),
+ SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask,
&done_convert);
{
// ----------- S t a t e -------------
@@ -2527,10 +2379,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, x0, x3, x4, x5);
- }
-
__ Ldrsw(
x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(x0);
@@ -2625,18 +2473,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(x1);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, x0, x3, x4, x5);
- }
-
// Patch the receiver to [[BoundThis]].
__ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
__ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
@@ -2654,8 +2497,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the target to call (can be any Object).
@@ -2665,32 +2507,24 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(x1, &non_callable);
__ Bind(&non_smi);
__ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
__ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
__ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
__ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
+    // Check if the target is a proxy and, if so, tail-call the CallProxy builtin.
__ Cmp(x5, JS_PROXY_TYPE);
__ B(ne, &non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, x0, x3, x4, x5);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(x1);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ Add(x0, x0, Operand(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ Mov(x5, ExternalReference(Builtins::kCallProxy, masm->isolate()));
+ __ Ldr(x5, MemOperand(x5));
+ __ Add(x6, x5, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x6);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2700,7 +2534,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2712,155 +2546,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = x0;
- Register constructor = x1;
- Register new_target = x3;
-
- Register scratch = x2;
- Register scratch2 = x6;
-
- Register spread = x4;
- Register spread_map = x5;
-
- Register spread_len = x5;
-
- Label runtime_call, push_args;
- __ Peek(spread, 0);
- __ JumpIfSmi(spread, &runtime_call);
- __ Ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
- __ B(ne, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ Ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ Ldr(scratch2, NativeContextMemOperand());
- __ Ldr(scratch2,
- ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Cmp(scratch, scratch2);
- __ B(ne, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
- __ B(ne, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ Ldr(scratch2, NativeContextMemOperand());
- __ Ldr(scratch,
- ContextMemOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ Ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ Ldr(scratch2,
- ContextMemOperand(
- scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ Cmp(scratch, scratch2);
- __ B(ne, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ Ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ Cmp(scratch, FAST_HOLEY_ELEMENTS);
- __ B(hi, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ Cmp(scratch, FAST_SMI_ELEMENTS);
- __ B(eq, &no_protector_check);
- __ Cmp(scratch, FAST_ELEMENTS);
- __ B(eq, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
- __ B(ne, &runtime_call);
-
- __ Bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ Ldrsw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
- __ Ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ B(&push_args);
-
- __ Bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ Mov(spread, x0);
- __ Pop(argc, new_target, constructor);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ Ldrsw(spread_len,
- UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
-
- __ Bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ Add(argc, argc, spread_len);
- __ Sub(argc, argc, 1);
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ Sub(scratch, masm->StackPointer(), scratch);
- // Check if the arguments will overflow the stack.
- __ Cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
- __ B(gt, &done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ Bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ Mov(scratch, 0);
- Label done, push, loop;
- __ Bind(&loop);
- __ Cmp(scratch, spread_len);
- __ B(eq, &done);
- __ Add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
- __ Ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ Add(scratch, scratch, Operand(1));
- __ B(&loop);
- __ Bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
- // -- x1 : the constructor to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push r3 to save it.
- __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2982,19 +2667,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
- // -- x1 : the constructor to call (can be any Object)
- // -- x3 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
// ----------- S t a t e -------------
@@ -3043,115 +2715,152 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- x3 : new target (passed through to callee)
// -----------------------------------
+ // The frame we are about to construct will look like:
+ //
+ // slot Adaptor frame
+ // +-----------------+--------------------------------
+ // -n-1 | receiver | ^
+ // | (parameter 0) | |
+ // |- - - - - - - - -| |
+ // -n | | Caller
+ // ... | ... | frame slots --> actual args
+ // -2 | parameter n-1 | |
+ // |- - - - - - - - -| |
+ // -1 | parameter n | v
+ // -----+-----------------+--------------------------------
+ // 0 | return addr | ^
+ // |- - - - - - - - -| |
+ // 1 | saved frame ptr | <-- frame ptr |
+ // |- - - - - - - - -| |
+ // 2 |Frame Type Marker| |
+ // |- - - - - - - - -| |
+ // 3 | function | Callee
+ // |- - - - - - - - -| frame slots
+ // 4 | num of | |
+ // | actual args | |
+ // |- - - - - - - - -| |
+ // [5] | [padding] | |
+ // |-----------------+---- |
+ // 5+pad | receiver | ^ |
+ // | (parameter 0) | | |
+ // |- - - - - - - - -| | |
+ // 6+pad | parameter 1 | | |
+ // |- - - - - - - - -| Frame slots ----> expected args
+ // 7+pad | parameter 2 | | |
+ // |- - - - - - - - -| | |
+ // | | | |
+ // ... | ... | | |
+ // | parameter m | | |
+ // |- - - - - - - - -| | |
+ // | [undefined] | | |
+ // |- - - - - - - - -| | |
+ // | | | |
+ // | ... | | |
+ // | [undefined] | v <-- stack ptr v
+ // -----+-----------------+---------------------------------
+ //
+ // There is an optional slot of padding to ensure stack alignment.
+ // If the number of expected arguments is larger than the number of actual
+ // arguments, the remaining expected slots will be filled with undefined.
+
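A minimal sketch (not part of the patch) of the slot arithmetic the adaptor uses below when claiming its callee frame: the expected arguments plus two extra slots (actual-argument count and receiver), rounded up to an even count. This assumes 8-byte slots and the usual 16-byte arm64 stack alignment.

#include <cstdint>

// Mirrors "Add(scratch1, argc_expected, 2)" followed by
// "Add(scratch1, scratch1, 1); Bic(scratch1, scratch1, 1)".
inline uint64_t AdaptorFrameSlots(uint64_t expected_args) {
  uint64_t slots = expected_args + 2;   // args + argc slot + receiver
  return (slots + 1) & ~uint64_t{1};    // round up to an even slot count
}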
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
Register code_entry = x10;
- Label invoke, dont_adapt_arguments, stack_overflow;
+ Label dont_adapt_arguments, stack_overflow;
- Label enough, too_few;
- __ Cmp(argc_actual, argc_expected);
- __ B(lt, &too_few);
+ Label enough_arguments;
__ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
- { // Enough parameters: actual >= expected
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, x2, x10, &stack_overflow);
-
- Register copy_start = x10;
- Register copy_end = x11;
- Register copy_to = x12;
- Register scratch1 = x13, scratch2 = x14;
-
- __ Lsl(scratch2, argc_expected, kPointerSizeLog2);
-
- // Adjust for fp, lr, and the receiver.
- __ Add(copy_start, fp, 3 * kPointerSize);
- __ Add(copy_start, copy_start, Operand(argc_actual, LSL, kPointerSizeLog2));
- __ Sub(copy_end, copy_start, scratch2);
- __ Sub(copy_end, copy_end, kPointerSize);
- __ Mov(copy_to, jssp);
-
- // Claim space for the arguments, the receiver, and one extra slot.
- // The extra slot ensures we do not write under jssp. It will be popped
- // later.
- __ Add(scratch1, scratch2, 2 * kPointerSize);
- __ Claim(scratch1, 1);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- Label copy_2_by_2;
- __ Bind(&copy_2_by_2);
- __ Ldp(scratch1, scratch2,
- MemOperand(copy_start, -2 * kPointerSize, PreIndex));
- __ Stp(scratch1, scratch2,
- MemOperand(copy_to, -2 * kPointerSize, PreIndex));
- __ Cmp(copy_start, copy_end);
- __ B(hi, &copy_2_by_2);
-
- // Correct the space allocated for the extra slot.
- __ Drop(1);
-
- __ B(&invoke);
- }
-
- { // Too few parameters: Actual < expected
- __ Bind(&too_few);
-
- Register copy_from = x10;
- Register copy_end = x11;
- Register copy_to = x12;
- Register scratch1 = x13, scratch2 = x14;
-
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, x2, x10, &stack_overflow);
-
- __ Lsl(scratch2, argc_expected, kPointerSizeLog2);
- __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
-
- // Adjust for fp, lr, and the receiver.
- __ Add(copy_from, fp, 3 * kPointerSize);
- __ Add(copy_from, copy_from, argc_actual);
- __ Mov(copy_to, jssp);
- __ Sub(copy_end, copy_to, 1 * kPointerSize); // Adjust for the receiver.
- __ Sub(copy_end, copy_end, argc_actual);
-
- // Claim space for the arguments, the receiver, and one extra slot.
- // The extra slot ensures we do not write under jssp. It will be popped
- // later.
- __ Add(scratch1, scratch2, 2 * kPointerSize);
- __ Claim(scratch1, 1);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- Label copy_2_by_2;
- __ Bind(&copy_2_by_2);
- __ Ldp(scratch1, scratch2,
- MemOperand(copy_from, -2 * kPointerSize, PreIndex));
- __ Stp(scratch1, scratch2,
- MemOperand(copy_to, -2 * kPointerSize, PreIndex));
- __ Cmp(copy_to, copy_end);
- __ B(hi, &copy_2_by_2);
-
- __ Mov(copy_to, copy_end);
-
- // Fill the remaining expected arguments with undefined.
- __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
- __ Add(copy_end, jssp, kPointerSize);
-
- Label fill;
- __ Bind(&fill);
- __ Stp(scratch1, scratch1,
- MemOperand(copy_to, -2 * kPointerSize, PreIndex));
- __ Cmp(copy_to, copy_end);
- __ B(hi, &fill);
-
- // Correct the space allocated for the extra slot.
- __ Drop(1);
- }
+ EnterArgumentsAdaptorFrame(masm);
+
+ Register copy_from = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register argc_to_copy = x13;
+ Register argc_unused_actual = x14;
+ Register scratch1 = x15, scratch2 = x16;
+
+ // We need slots for the expected arguments, with two extra slots for the
+ // number of actual arguments and the receiver.
+ __ RecordComment("-- Stack check --");
+ __ Add(scratch1, argc_expected, 2);
+ Generate_StackOverflowCheck(masm, scratch1, scratch2, &stack_overflow);
+
+ // Round up number of slots to be even, to maintain stack alignment.
+ __ RecordComment("-- Allocate callee frame slots --");
+ __ Add(scratch1, scratch1, 1);
+ __ Bic(scratch1, scratch1, 1);
+ __ Claim(scratch1, kPointerSize);
+
+ __ Mov(copy_to, jssp);
+
+ // Preparing the expected arguments is done in four steps, the order of
+ // which is chosen so we can use LDP/STP and avoid conditional branches as
+ // much as possible.
+
+ // (1) If we don't have enough arguments, fill the remaining expected
+  // arguments with undefined; otherwise, skip this step.
+ __ Subs(scratch1, argc_actual, argc_expected);
+ __ Csel(argc_unused_actual, xzr, scratch1, lt);
+ __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
+ __ B(ge, &enough_arguments);
+
+ // Fill the remaining expected arguments with undefined.
+ __ RecordComment("-- Fill slots with undefined --");
+ __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kPointerSizeLog2));
+ __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+
+ Label fill;
+ __ Bind(&fill);
+ __ Stp(scratch1, scratch1, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
+ // We might write one slot extra, but that is ok because we'll overwrite it
+ // below.
+ __ Cmp(copy_end, copy_to);
+ __ B(hi, &fill);
+
+ // Correct copy_to, for the case where we wrote one additional slot.
+ __ Mov(copy_to, copy_end);
+
+ __ Bind(&enough_arguments);
+ // (2) Copy all of the actual arguments, or as many as we need.
+ __ RecordComment("-- Copy actual arguments --");
+ __ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2));
+ __ Add(copy_from, fp, 2 * kPointerSize);
+ // Adjust for difference between actual and expected arguments.
+ __ Add(copy_from, copy_from,
+ Operand(argc_unused_actual, LSL, kPointerSizeLog2));
+
+ // Copy arguments. We use load/store pair instructions, so we might overshoot
+ // by one slot, but since we copy the arguments starting from the last one, if
+ // we do overshoot, the extra slot will be overwritten later by the receiver.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_from, 2 * kPointerSize, PostIndex));
+ __ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
+ __ Cmp(copy_end, copy_to);
+ __ B(hi, &copy_2_by_2);
+
+ // (3) Store number of actual arguments and padding. The padding might be
+ // unnecessary, in which case it will be overwritten by the receiver.
+ __ RecordComment("-- Store number of args and padding --");
+ __ SmiTag(scratch1, argc_actual);
+ __ Stp(xzr, scratch1, MemOperand(fp, -4 * kPointerSize));
+
+ // (4) Store receiver. Calculate target address from jssp to avoid checking
+ // for padding. Storing the receiver will overwrite either the extra slot
+ // we copied with the actual arguments, if we did copy one, or the padding we
+ // stored above.
+ __ RecordComment("-- Store receiver --");
+ __ Add(copy_from, fp, 2 * kPointerSize);
+ __ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
+ __ Str(scratch1, MemOperand(jssp, argc_expected, LSL, kPointerSizeLog2));
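A minimal sketch (not part of the patch) of the Subs/Csel selection in step (1) above, expressed in plain C++. The struct and function names are hypothetical.

#include <algorithm>
#include <cstdint>

struct ArgSplit {
  uint64_t to_copy;        // actual arguments that fit into the expected slots
  uint64_t unused_actual;  // actual arguments beyond the expected count
};

// Mirrors "Subs(scratch1, argc_actual, argc_expected)" plus the two Csels:
// argc_to_copy = min(actual, expected), argc_unused_actual = max(0, actual - expected).
inline ArgSplit SplitArguments(uint64_t actual, uint64_t expected) {
  return {std::min(actual, expected),
          actual > expected ? actual - expected : 0};
}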
// Arguments have been adapted. Now call the entry point.
- __ Bind(&invoke);
+ __ RecordComment("-- Call entry point --");
__ Mov(argc_actual, argc_expected);
// x0 : expected number of arguments
// x1 : function (passed through to callee)
@@ -3167,11 +2876,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Ret();
// Call the entry point without adapting the arguments.
+ __ RecordComment("-- Call without adapting args --");
__ Bind(&dont_adapt_arguments);
__ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
__ Jump(code_entry);
__ Bind(&stack_overflow);
+ __ RecordComment("-- Stack overflow --");
{
FrameScope frame(masm, StackFrame::MANUAL);
__ CallRuntime(Runtime::kThrowStackOverflow);
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 1875958d64..571d562422 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -39,8 +39,12 @@ ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
MachineType::Pointer());
Node* shared =
LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
- Node* formal_parameter_count = LoadSharedFunctionInfoSpecialField(
- shared, SharedFunctionInfo::kFormalParameterCountOffset, mode);
+ CSA_SLOW_ASSERT(this, HasInstanceType(shared, SHARED_FUNCTION_INFO_TYPE));
+ Node* formal_parameter_count =
+ LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
+ MachineType::Int32());
+ formal_parameter_count = Word32ToParameter(formal_parameter_count, mode);
+
argument_count.Bind(formal_parameter_count);
Node* marker_or_function = LoadBufferObject(
frame_ptr_above, CommonFrameConstants::kContextOrFrameTypeOffset);
@@ -77,13 +81,13 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
DCHECK_IMPLIES(empty, parameter_map_count == nullptr);
Node* size =
empty ? IntPtrConstant(base_size)
- : ElementOffsetFromIndex(element_count, FAST_ELEMENTS, mode,
+ : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, mode,
base_size + FixedArray::kHeaderSize);
Node* result = Allocate(size);
Comment("Initialize arguments object");
StoreMapNoWriteBarrier(result, map);
Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
- StoreObjectField(result, JSArray::kPropertiesOffset, empty_fixed_array);
+ StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array);
Node* smi_arguments_count = ParameterToTagged(arguments_count, mode);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
smi_arguments_count);
@@ -98,7 +102,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
Node* parameter_map = nullptr;
if (parameter_map_count != nullptr) {
Node* parameter_map_offset = ElementOffsetFromIndex(
- arguments_count, FAST_ELEMENTS, mode, FixedArray::kHeaderSize);
+ arguments_count, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize);
parameter_map = InnerAllocate(arguments, parameter_map_offset);
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
parameter_map);
@@ -165,7 +169,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
Node* rest_count =
IntPtrOrSmiSub(argument_count, formal_parameter_count, mode);
Node* const native_context = LoadNativeContext(context);
- Node* const array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+ Node* const array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode),
&no_rest_parameters);
@@ -314,10 +319,10 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
Comment("Fill in non-mapped parameters");
Node* argument_offset =
- ElementOffsetFromIndex(argument_count, FAST_ELEMENTS, mode,
+ ElementOffsetFromIndex(argument_count, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
Node* mapped_offset =
- ElementOffsetFromIndex(mapped_count, FAST_ELEMENTS, mode,
+ ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
CodeStubArguments arguments(this, argument_count, frame_ptr, mode);
VARIABLE(current_argument, MachineType::PointerRepresentation());
@@ -355,7 +360,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
BitcastTaggedToWord(map_array),
IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
Node* zero_offset = ElementOffsetFromIndex(
- zero, FAST_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
+ zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
BuildFastLoop(var_list2, mapped_offset, zero_offset,
[this, the_hole, elements, adjusted_map_array, &context_index,
mode](Node* offset) {
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 8c95007622..f1a07ceff0 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -84,9 +84,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
void ReducePostLoopAction() {
Label ok(this);
GotoIf(WordNotEqual(a(), TheHoleConstant()), &ok);
- CallRuntime(Runtime::kThrowTypeError, context(),
- SmiConstant(MessageTemplate::kReduceNoInitial));
- Unreachable();
+ ThrowTypeError(context(), MessageTemplate::kReduceNoInitial);
BIND(&ok);
}
@@ -117,29 +115,30 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&fast);
{
+ GotoIf(SmiNotEqual(LoadJSArrayLength(a()), to_.value()), &runtime);
kind = EnsureArrayPushable(a(), &runtime);
- GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
+ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
- BuildAppendJSArray(FAST_SMI_ELEMENTS, a(), k_value, &runtime);
+ BuildAppendJSArray(HOLEY_SMI_ELEMENTS, a(), k_value, &runtime);
Goto(&after_work);
}
BIND(&object_push_pre);
{
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
- &double_push, &object_push);
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &double_push,
+ &object_push);
}
BIND(&object_push);
{
- BuildAppendJSArray(FAST_ELEMENTS, a(), k_value, &runtime);
+ BuildAppendJSArray(HOLEY_ELEMENTS, a(), k_value, &runtime);
Goto(&after_work);
}
BIND(&double_push);
{
- BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, a(), k_value, &runtime);
+ BuildAppendJSArray(HOLEY_DOUBLE_ELEMENTS, a(), k_value, &runtime);
Goto(&after_work);
}
@@ -168,11 +167,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// 6. Let A be ? TypedArraySpeciesCreate(O, len).
Node* a = TypedArraySpeciesCreateByLength(context(), o(), len_);
// In the Spec and our current implementation, the length check is already
- // performed in TypedArraySpeciesCreate. Repeating the check here to
- // keep this invariant local.
- // TODO(tebbi): Change this to a release mode check.
- CSA_ASSERT(
- this, WordEqual(len_, LoadObjectField(a, JSTypedArray::kLengthOffset)));
+ // performed in TypedArraySpeciesCreate.
+ CSA_ASSERT(this,
+ SmiLessThanOrEqual(
+ len_, LoadObjectField(a, JSTypedArray::kLengthOffset)));
fast_typed_array_target_ = Word32Equal(LoadInstanceType(LoadElements(o_)),
LoadInstanceType(LoadElements(a)));
a_.Bind(a);
@@ -181,70 +179,101 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Node* SpecCompliantMapProcessor(Node* k_value, Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// SpecCompliantMapProcessor.
- // ii. Let mappedValue be ? Call(callbackfn, T, kValue, k, O).
- Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
+ // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
+ Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
+ callbackfn(), this_arg(), k_value, k, o());
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
- CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mappedValue);
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mapped_value);
return a();
}
Node* FastMapProcessor(Node* k_value, Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// FastMapProcessor.
- // ii. Let mappedValue be ? Call(callbackfn, T, kValue, k, O).
- Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
-
- Label finished(this);
- Node* kind = nullptr;
- Node* elements = nullptr;
+ // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
+ Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
+ callbackfn(), this_arg(), k_value, k, o());
- // If a() is a JSArray, we can have a fast path.
// mode is SMI_PARAMETERS because k has tagged representation.
ParameterMode mode = SMI_PARAMETERS;
- Label fast(this);
- Label runtime(this);
- Label object_push_pre(this), object_push(this), double_push(this);
- BranchIfFastJSArray(a(), context(), FastJSArrayAccessMode::ANY_ACCESS,
- &fast, &runtime);
+ Label runtime(this), finished(this);
+ Label transition_pre(this), transition_smi_fast(this),
+ transition_smi_double(this);
+ Label array_not_smi(this), array_fast(this), array_double(this);
+
+ Node* kind = LoadMapElementsKind(LoadMap(a()));
+ Node* elements = LoadElements(a());
+ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS), &array_not_smi);
+ TryStoreArrayElement(HOLEY_SMI_ELEMENTS, mode, &transition_pre, elements, k,
+ mapped_value);
+ Goto(&finished);
+
+ BIND(&transition_pre);
+ {
+      // The array has SMI elements; the value is not a smi, so it is either
+      // a heap number or some other tagged object.
+ CSA_ASSERT(this, TaggedIsNotSmi(mapped_value));
+ GotoIf(IsHeapNumberMap(LoadMap(mapped_value)), &transition_smi_double);
+ Goto(&transition_smi_fast);
+ }
- BIND(&fast);
+ BIND(&array_not_smi);
{
- kind = EnsureArrayPushable(a(), &runtime);
- elements = LoadElements(a());
- GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
- &object_push_pre);
- TryStoreArrayElement(FAST_SMI_ELEMENTS, mode, &runtime, elements, k,
- mappedValue);
- Goto(&finished);
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &array_double,
+ &array_fast);
}
- BIND(&object_push_pre);
+ BIND(&transition_smi_fast);
{
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
- &object_push);
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ Node* const native_context = LoadNativeContext(context());
+ Node* const fast_map = LoadContextElement(
+ native_context, Context::JS_ARRAY_HOLEY_ELEMENTS_MAP_INDEX);
+
+      // Since this transition is only a map change, just do it right here.
+      // Because a() doesn't have an allocation site, it's safe to store the
+      // map directly; otherwise we would call TransitionElementsKind().
+ StoreMap(a(), fast_map);
+ Goto(&array_fast);
}
- BIND(&object_push);
+ BIND(&array_fast);
{
- TryStoreArrayElement(FAST_ELEMENTS, mode, &runtime, elements, k,
- mappedValue);
+ TryStoreArrayElement(HOLEY_ELEMENTS, mode, &runtime, elements, k,
+ mapped_value);
Goto(&finished);
}
- BIND(&double_push);
+ BIND(&transition_smi_double);
{
- TryStoreArrayElement(FAST_DOUBLE_ELEMENTS, mode, &runtime, elements, k,
- mappedValue);
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ Node* const native_context = LoadNativeContext(context());
+ Node* const double_map = LoadContextElement(
+ native_context, Context::JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX);
+ CallStub(CodeFactory::TransitionElementsKind(
+ isolate(), HOLEY_SMI_ELEMENTS, HOLEY_DOUBLE_ELEMENTS, true),
+ context(), a(), double_map);
+ Goto(&array_double);
+ }
+
+ BIND(&array_double);
+ {
+ // TODO(mvstanton): If we use a variable for elements and bind it
+ // appropriately, we can avoid an extra load of elements by binding the
+ // value only after a transition from smi to double.
+ elements = LoadElements(a());
+ // If the mapped_value isn't a number, this will bail out to the runtime
+ // to make the transition.
+ TryStoreArrayElement(HOLEY_DOUBLE_ELEMENTS, mode, &runtime, elements, k,
+ mapped_value);
Goto(&finished);
}
BIND(&runtime);
{
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
- CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mappedValue);
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ CallRuntime(Runtime::kCreateDataProperty, context(), a(), k,
+ mapped_value);
Goto(&finished);
}
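A minimal sketch (not part of the patch) of the elements-kind decision the fast path above makes when storing mapped_value into an array that currently has holey SMI elements: a non-smi value forces a transition, either to the double kind (for heap numbers) or to the generic object kind. The enum and function names are hypothetical.

enum class Kind { kHoleySmi, kHoleyDouble, kHoleyObject };

// Mirrors the transition_pre branch: bail out of the SMI store when the value
// is not a smi, then pick HOLEY_DOUBLE_ELEMENTS for heap numbers and
// HOLEY_ELEMENTS for everything else.
inline Kind TargetKind(bool value_is_smi, bool value_is_heap_number) {
  if (value_is_smi) return Kind::kHoleySmi;
  return value_is_heap_number ? Kind::kHoleyDouble : Kind::kHoleyObject;
}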
@@ -254,12 +283,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
Node* TypedArrayMapProcessor(Node* k_value, Node* k) {
- // 8. c. Let mappedValue be ? Call(callbackfn, T, Ā« kValue, k, O Ā»).
- Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
+ // 8. c. Let mapped_value be ? Call(callbackfn, T, Ā« kValue, k, O Ā»).
+ Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
+ callbackfn(), this_arg(), k_value, k, o());
Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
- // 8. d. Perform ? Set(A, Pk, mappedValue, true).
+ // 8. d. Perform ? Set(A, Pk, mapped_value, true).
// Since we know that A is a TypedArray, this always ends up in
// #sec-integer-indexed-exotic-objects-set-p-v-receiver and then
// tc39.github.io/ecma262/#sec-integerindexedelementset .
@@ -267,28 +296,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&fast);
// #sec-integerindexedelementset 3. Let numValue be ? ToNumber(value).
- Node* num_value = ToNumber(context(), mappedValue);
+ Node* num_value = ToNumber(context(), mapped_value);
    // The only way this can bail out is if the buffer is detached.
- EmitElementStore(
- a(), k, num_value, false, source_elements_kind_,
- KeyedAccessStoreMode::STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
- &detached);
+ EmitElementStore(a(), k, num_value, false, source_elements_kind_,
+ KeyedAccessStoreMode::STANDARD_STORE, &detached);
Goto(&done);
BIND(&slow);
- CallRuntime(Runtime::kSetProperty, context(), a(), k, mappedValue,
+ CallRuntime(Runtime::kSetProperty, context(), a(), k, mapped_value,
SmiConstant(STRICT));
Goto(&done);
BIND(&detached);
- {
- // tc39.github.io/ecma262/#sec-integerindexedelementset
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- CallRuntime(Runtime::kThrowTypeError, context_,
- SmiConstant(MessageTemplate::kDetachedOperation),
- name_string_);
- Unreachable();
- }
+ // tc39.github.io/ecma262/#sec-integerindexedelementset
+ // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
BIND(&done);
return a();
@@ -348,7 +370,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O)
- o_ = CallStub(CodeFactory::ToObject(isolate()), context(), receiver());
+ o_ = CallBuiltin(Builtins::kToObject, context(), receiver());
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -372,21 +394,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);
BIND(&throw_null_undefined_exception);
- {
- CallRuntime(
- Runtime::kThrowTypeError, context(),
- SmiConstant(MessageTemplate::kCalledOnNullOrUndefined),
- HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(name)));
- Unreachable();
- }
+ ThrowTypeError(context(), MessageTemplate::kCalledOnNullOrUndefined, name);
BIND(&type_exception);
- {
- CallRuntime(Runtime::kThrowTypeError, context(),
- SmiConstant(MessageTemplate::kCalledNonCallable),
- callbackfn());
- Unreachable();
- }
+ ThrowTypeError(context(), MessageTemplate::kCalledNonCallable,
+ callbackfn());
BIND(&done);
@@ -432,8 +444,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
ForEachDirection direction = ForEachDirection::kForward) {
- name_string_ =
- HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(name));
+ name_ = name;
// ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
@@ -457,27 +468,13 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
&throw_not_callable);
BIND(&throw_not_typed_array);
- {
- CallRuntime(Runtime::kThrowTypeError, context_,
- SmiConstant(MessageTemplate::kNotTypedArray));
- Unreachable();
- }
+ ThrowTypeError(context_, MessageTemplate::kNotTypedArray);
BIND(&throw_detached);
- {
- CallRuntime(Runtime::kThrowTypeError, context_,
- SmiConstant(MessageTemplate::kDetachedOperation),
- name_string_);
- Unreachable();
- }
+ ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
BIND(&throw_not_callable);
- {
- CallRuntime(Runtime::kThrowTypeError, context_,
- SmiConstant(MessageTemplate::kCalledNonCallable),
- callbackfn_);
- Unreachable();
- }
+ ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_);
Label unexpected_instance_type(this);
BIND(&unexpected_instance_type);
@@ -592,7 +589,6 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
default:
UNREACHABLE();
- return static_cast<ElementsKind>(-1);
}
}
@@ -664,13 +660,13 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// Fast case: load the element directly from the elements FixedArray
// and call the callback if the element is not the hole.
- DCHECK(kind == FAST_ELEMENTS || kind == FAST_DOUBLE_ELEMENTS);
- int base_size = kind == FAST_ELEMENTS
+ DCHECK(kind == PACKED_ELEMENTS || kind == PACKED_DOUBLE_ELEMENTS);
+ int base_size = kind == PACKED_ELEMENTS
? FixedArray::kHeaderSize
: (FixedArray::kHeaderSize - kHeapObjectTag);
Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
Node* value = nullptr;
- if (kind == FAST_ELEMENTS) {
+ if (kind == PACKED_ELEMENTS) {
value = LoadObjectField(elements, offset);
GotoIf(WordEqual(value, TheHoleConstant()), &hole_element);
} else {
@@ -712,13 +708,13 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Node* o_map = LoadMap(o());
Node* bit_field2 = LoadMapBitField2(o_map);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
&maybe_double_elements, &fast_elements);
ParameterMode mode = OptimalParameterMode();
BIND(&fast_elements);
{
- VisitAllFastElementsOneKind(FAST_ELEMENTS, processor, slow, mode,
+ VisitAllFastElementsOneKind(PACKED_ELEMENTS, processor, slow, mode,
direction);
action(this);
@@ -728,12 +724,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
BIND(&maybe_double_elements);
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_DOUBLE_ELEMENTS), slow,
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_DOUBLE_ELEMENTS), slow,
&fast_double_elements);
BIND(&fast_double_elements);
{
- VisitAllFastElementsOneKind(FAST_DOUBLE_ELEMENTS, processor, slow, mode,
+ VisitAllFastElementsOneKind(PACKED_DOUBLE_ELEMENTS, processor, slow, mode,
direction);
action(this);
@@ -759,7 +755,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
GotoIf(WordNotEqual(proto, initial_array_prototype), &runtime);
Node* species_protector = SpeciesProtectorConstant();
- Node* value = LoadObjectField(species_protector, Cell::kValueOffset);
+ Node* value =
+ LoadObjectField(species_protector, PropertyCell::kValueOffset);
Node* const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
GotoIf(WordEqual(value, protector_invalid), &runtime);
@@ -767,10 +764,13 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
GotoIf(SmiAbove(len, SmiConstant(JSArray::kInitialMaxFastElementArray)),
&runtime);
+ // We need to be conservative and start with holey because the builtins
+    // that create output arrays aren't guaranteed to be called for every
+ // element in the input array (maybe the callback deletes an element).
const ElementsKind elements_kind =
GetHoleyElementsKind(GetInitialFastElementsKind());
Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
- a_.Bind(AllocateJSArray(FAST_SMI_ELEMENTS, array_map, len, len, nullptr,
+ a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, len, nullptr,
CodeStubAssembler::SMI_PARAMETERS));
Goto(&done);
@@ -797,7 +797,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Node* new_target_ = nullptr;
Node* argc_ = nullptr;
Node* fast_typed_array_target_ = nullptr;
- Node* name_string_ = nullptr;
+ const char* name_ = nullptr;
Variable k_;
Variable a_;
Variable to_;
@@ -868,8 +868,8 @@ TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
&return_undefined);
int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(
- new_length, FAST_HOLEY_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, header_size);
+ Node* offset = ElementOffsetFromIndex(new_length, HOLEY_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, header_size);
if (Is64()) {
Node* double_hole = Int64Constant(kHoleNanInt64);
StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
@@ -935,10 +935,10 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
{
arg_index.Bind(IntPtrConstant(0));
kind = EnsureArrayPushable(receiver, &runtime);
- GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
+ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
- Node* new_length = BuildAppendJSArray(FAST_SMI_ELEMENTS, receiver, args,
+ Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, receiver, args,
arg_index, &smi_transition);
args.PopAndReturn(new_length);
}
@@ -971,21 +971,21 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
BIND(&object_push_pre);
{
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &double_push,
&object_push);
}
BIND(&object_push);
{
- Node* new_length = BuildAppendJSArray(FAST_ELEMENTS, receiver, args,
+ Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, receiver, args,
arg_index, &default_label);
args.PopAndReturn(new_length);
}
BIND(&double_push);
{
- Node* new_length = BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, receiver, args,
- arg_index, &double_transition);
+ Node* new_length = BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, receiver,
+ args, arg_index, &double_transition);
args.PopAndReturn(new_length);
}
@@ -1065,7 +1065,7 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
LoadObjectField(receiver, JSArray::kLengthOffset)));
Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements_tagged(this),
- fast_elements_untagged(this);
+ fast_elements_smi(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
@@ -1098,43 +1098,55 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
SmiTag(new_length));
Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
- GotoIf(Int32LessThanOrEqual(elements_kind,
- Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
- &fast_elements_untagged);
- GotoIf(Int32LessThanOrEqual(elements_kind,
- Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
+ GotoIf(
+ Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)),
+ &fast_elements_smi);
+ GotoIf(Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_ELEMENTS)),
&fast_elements_tagged);
- Node* value = LoadFixedDoubleArrayElement(
- elements, IntPtrConstant(0), MachineType::Float64(), 0,
- INTPTR_PARAMETERS, &return_undefined);
- int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
- Node* memmove =
- ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
- Node* start = IntPtrAdd(
- BitcastTaggedToWord(elements),
- ElementOffsetFromIndex(IntPtrConstant(0), FAST_HOLEY_DOUBLE_ELEMENTS,
- INTPTR_PARAMETERS, header_size));
- CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
- MachineType::Pointer(), MachineType::UintPtr(), memmove,
- start, IntPtrAdd(start, IntPtrConstant(kDoubleSize)),
- IntPtrMul(new_length, IntPtrConstant(kDoubleSize)));
- Node* offset = ElementOffsetFromIndex(
- new_length, FAST_HOLEY_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, header_size);
- if (Is64()) {
- Node* double_hole = Int64Constant(kHoleNanInt64);
- StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
- double_hole);
- } else {
- STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
- Node* double_hole = Int32Constant(kHoleNanLower32);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
- double_hole);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
- double_hole);
+ // Fast double elements kind:
+ {
+ CSA_ASSERT(this,
+ Int32LessThanOrEqual(elements_kind,
+ Int32Constant(HOLEY_DOUBLE_ELEMENTS)));
+
+ VARIABLE(result, MachineRepresentation::kTagged, UndefinedConstant());
+
+ Label move_elements(this);
+ result.Bind(AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement(
+ elements, IntPtrConstant(0), MachineType::Float64(), 0,
+ INTPTR_PARAMETERS, &move_elements)));
+ Goto(&move_elements);
+ BIND(&move_elements);
+
+ int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
+ Node* memmove =
+ ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
+ Node* start = IntPtrAdd(
+ BitcastTaggedToWord(elements),
+ ElementOffsetFromIndex(IntPtrConstant(0), HOLEY_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, header_size));
+ CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::Pointer(), MachineType::UintPtr(), memmove,
+ start, IntPtrAdd(start, IntPtrConstant(kDoubleSize)),
+ IntPtrMul(new_length, IntPtrConstant(kDoubleSize)));
+ Node* offset = ElementOffsetFromIndex(new_length, HOLEY_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, header_size);
+ if (Is64()) {
+ Node* double_hole = Int64Constant(kHoleNanInt64);
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
+ double_hole);
+ } else {
+ STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
+ Node* double_hole = Int32Constant(kHoleNanLower32);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+ double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ double_hole);
+ }
+ args.PopAndReturn(result.value());
}
- args.PopAndReturn(AllocateHeapNumberWithValue(value));
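A minimal sketch (not part of the patch) of the double-elements shift above, in plain C++: the first element is read out, the rest of the backing store is moved down by one slot with memmove, and the vacated last slot is re-marked as a hole. The hole-at-index-0 case (which makes the builtin return undefined) is omitted, and `hole_bits` stands in for V8's hole NaN bit pattern, which is not reproduced here.

#include <cstddef>
#include <cstdint>
#include <cstring>

inline double ShiftDoubleElements(uint64_t* bits, size_t length,
                                  uint64_t hole_bits) {
  double first;
  std::memcpy(&first, &bits[0], sizeof(double));            // element to return
  std::memmove(&bits[0], &bits[1],
               (length - 1) * sizeof(uint64_t));             // shift down by one
  bits[length - 1] = hole_bits;                              // re-mark last slot as a hole
  return first;
}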
BIND(&fast_elements_tagged);
{
@@ -1153,14 +1165,15 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
args.PopAndReturn(value);
}
- BIND(&fast_elements_untagged);
+ BIND(&fast_elements_smi);
{
Node* value = LoadFixedArrayElement(elements, 0);
+ int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
Node* memmove =
ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
Node* start = IntPtrAdd(
BitcastTaggedToWord(elements),
- ElementOffsetFromIndex(IntPtrConstant(0), FAST_HOLEY_SMI_ELEMENTS,
+ ElementOffsetFromIndex(IntPtrConstant(0), HOLEY_SMI_ELEMENTS,
INTPTR_PARAMETERS, header_size));
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
MachineType::Pointer(), MachineType::UintPtr(), memmove,
@@ -1204,6 +1217,38 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
+TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(Builtins::CallableFor(isolate(),
+ Builtins::kArrayForEachLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg,
+ UndefinedConstant(), receiver, initial_k, len,
+ UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(Builtins::CallableFor(isolate(),
+ Builtins::kArrayForEachLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg,
+ UndefinedConstant(), receiver, initial_k, len,
+ UndefinedConstant()));
+}
+
TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
@@ -1211,8 +1256,8 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1226,6 +1271,26 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
Builtins::kArrayForEachLoopContinuation));
}
+TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
+
+ InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
+ new_target, argc);
+
+ GenerateIteratingTypedArrayBuiltinBody(
+ "%TypedArray%.prototype.forEach",
+ &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
+ &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+}
+
TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1253,8 +1318,8 @@ TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1274,8 +1339,8 @@ TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1314,8 +1379,8 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1335,8 +1400,8 @@ TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1375,7 +1440,7 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
@@ -1396,7 +1461,7 @@ TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
@@ -1437,7 +1502,7 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
@@ -1460,7 +1525,7 @@ TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
@@ -1501,8 +1566,8 @@ TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1535,6 +1600,47 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
+TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
+ receiver, initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+ // This custom lazy deopt point is right after the callback. map() needs
+ // to pick up at the next step, which is setting the callback result in
+ // the output array. After incrementing k, we can glide into the loop
+ // continuation builtin.
+
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
+ CallRuntime(Runtime::kCreateDataProperty, context, array, initial_k, result);
+ // Then we have to increment k before going on.
+ initial_k = NumberInc(initial_k);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
+ receiver, initial_k, len, UndefinedConstant()));
+}
+
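The lazy variant differs from the eager one in that the callback for index k has already returned when the deopt hits, so its result still has to be stored before the index advances, exactly as the comment above describes. A hedged plain-C++ sketch of that resume sequence (illustrative types; the real builtin performs Runtime::kCreateDataProperty and then tail-calls the loop continuation):

#include <cstddef>
#include <functional>
#include <vector>

// Illustrative resume point for map: `result` is what the interrupted
// callback produced for index k; store it, advance k, then run the plain
// loop for the remaining indices.
std::vector<int> ResumeMapAfterCallback(
    const std::vector<int>& receiver, std::vector<int> output,
    const std::function<int(int, std::size_t)>& callbackfn, std::size_t k,
    std::size_t len, int result) {
  output[k] = result;  // CreateDataPropertyOrThrow(A, Pk, mappedValue)
  ++k;                 // increment k before re-entering the loop
  for (; k < len; ++k) {
    output[k] = callbackfn(receiver[k], k);
  }
  return output;
}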
TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
@@ -1542,8 +1648,8 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1562,8 +1668,8 @@ TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1620,8 +1726,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
CodeStubArguments args(this, argc);
Node* array = args.GetReceiver();
- Node* search_element =
- args.GetOptionalArgumentValue(kSearchElementArg, UndefinedConstant());
+ Node* search_element = args.GetOptionalArgumentValue(kSearchElementArg);
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* intptr_zero = IntPtrConstant(0);
@@ -1684,16 +1789,15 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
Node* elements_kind = LoadMapElementsKind(LoadMap(array));
Node* elements = LoadElements(array);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- GotoIf(
- Uint32LessThanOrEqual(elements_kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
- &if_smiorobjects);
- GotoIf(Word32Equal(elements_kind, Int32Constant(FAST_DOUBLE_ELEMENTS)),
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ GotoIf(Uint32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_ELEMENTS)),
+ &if_smiorobjects);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
&if_packed_doubles);
- GotoIf(Word32Equal(elements_kind, Int32Constant(FAST_HOLEY_DOUBLE_ELEMENTS)),
+ GotoIf(Word32Equal(elements_kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
&if_holey_doubles);
Goto(&return_not_found);
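The renamed kinds keep the numeric layout the STATIC_ASSERTs above rely on: the four non-double fast kinds occupy 0 through 3, so one unsigned comparison routes them to the Smi/object search, while the two double kinds are picked off by equality checks. A small stand-alone sketch of that dispatch (the enum values mirror the asserts; everything else is illustrative):

#include <cstdint>

// Values mirror the ordering asserted above; treat the rest as illustrative.
enum ElementsKindSketch : std::uint32_t {
  PACKED_SMI_ELEMENTS = 0,
  HOLEY_SMI_ELEMENTS = 1,
  PACKED_ELEMENTS = 2,
  HOLEY_ELEMENTS = 3,
  PACKED_DOUBLE_ELEMENTS = 4,
  HOLEY_DOUBLE_ELEMENTS = 5,
};

enum class SearchPath { kSmiOrObjects, kPackedDoubles, kHoleyDoubles, kNotFound };

constexpr SearchPath ClassifyKind(std::uint32_t kind) {
  // One unsigned comparison covers all four Smi/object kinds (0..3).
  if (kind <= HOLEY_ELEMENTS) return SearchPath::kSmiOrObjects;
  if (kind == PACKED_DOUBLE_ELEMENTS) return SearchPath::kPackedDoubles;
  if (kind == HOLEY_DOUBLE_ELEMENTS) return SearchPath::kHoleyDoubles;
  return SearchPath::kNotFound;
}

static_assert(ClassifyKind(HOLEY_SMI_ELEMENTS) == SearchPath::kSmiOrObjects,
              "kinds 0..3 share the Smi/object path");
static_assert(ClassifyKind(HOLEY_DOUBLE_ELEMENTS) == SearchPath::kHoleyDoubles,
              "double kinds are matched individually");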
@@ -1956,8 +2060,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
BIND(&call_runtime);
{
- Node* start_from =
- args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant());
+ Node* start_from = args.GetOptionalArgumentValue(kFromIndexArg);
Runtime::FunctionId function = variant == kIncludes
? Runtime::kArrayIncludes_Slow
: Runtime::kArrayIndexOf;
@@ -1996,8 +2099,7 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
BIND(&if_isnotobject);
{
- Callable callable = CodeFactory::ToObject(isolate());
- Node* result = CallStub(callable, context, receiver);
+ Node* result = CallBuiltin(Builtins::kToObject, context, receiver);
var_array.Bind(result);
var_map.Bind(LoadMap(result));
var_type.Bind(LoadMapInstanceType(var_map.value()));
@@ -2090,7 +2192,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
GotoIfNot(SmiBelow(index, length), &set_done);
- Node* one = SmiConstant(Smi::FromInt(1));
+ Node* one = SmiConstant(1);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
SmiAdd(index, one));
@@ -2142,7 +2244,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&holey_object_values);
{
// Check the array_protector cell, and take the slow path if it's invalid.
- Node* invalid = SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(Heap::kArrayProtectorRootIndex);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(cell_value, invalid), &generic_values);
@@ -2157,7 +2259,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&holey_double_values);
{
// Check the array_protector cell, and take the slow path if it's invalid.
- Node* invalid = SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(Heap::kArrayProtectorRootIndex);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(cell_value, invalid), &generic_values);
@@ -2215,8 +2317,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
&done);
- Node* invalid =
- SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(Heap::kFastArrayIterationProtectorRootIndex);
StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, invalid);
Goto(&done);
@@ -2397,20 +2498,20 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
&allocate_iterator_result);
- Node* elements = AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ Node* elements = AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(elements, 0, index, SKIP_WRITE_BARRIER);
StoreFixedArrayElement(elements, 1, var_value.value(), SKIP_WRITE_BARRIER);
Node* entry = Allocate(JSArray::kSize);
Node* map = LoadContextElement(LoadNativeContext(context),
- Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
+ Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
StoreMapNoWriteBarrier(entry, map);
- StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
+ StoreObjectFieldRoot(entry, JSArray::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset, elements);
StoreObjectFieldNoWriteBarrier(entry, JSArray::kLengthOffset,
- SmiConstant(Smi::FromInt(2)));
+ SmiConstant(2));
var_value.Bind(entry);
Goto(&allocate_iterator_result);
@@ -2422,7 +2523,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Node* map = LoadContextElement(LoadNativeContext(context),
Context::ITERATOR_RESULT_MAP_INDEX);
StoreMapNoWriteBarrier(result, map);
- StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+ StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
@@ -2442,12 +2543,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
}
BIND(&if_isdetached);
- {
- Node* message = SmiConstant(MessageTemplate::kDetachedOperation);
- CallRuntime(Runtime::kThrowTypeError, context, message,
- HeapConstant(operation));
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation,
+ HeapConstant(operation));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index da1602b963..73c9c7ef89 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -24,7 +24,7 @@ inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) {
// This is an extended version of ECMA-262 7.1.11 handling signed values
// Try to convert object to a number and clamp values to [kMinInt, kMaxInt]
if (object->IsSmi()) {
- *out = Smi::cast(object)->value();
+ *out = Smi::ToInt(object);
return true;
} else if (object->IsHeapNumber()) {
double value = HeapNumber::cast(object)->value();
@@ -60,7 +60,7 @@ inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
if (!len_obj->IsSmi()) return false;
- *out = Max(0, Smi::cast(len_obj)->value());
+ *out = Max(0, Smi::ToInt(len_obj));
FixedArray* parameters = FixedArray::cast(object->elements());
if (object->HasSloppyArgumentsElements()) {
@@ -124,7 +124,7 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
int args_length = args->length();
if (first_added_arg >= args_length) return true;
- if (IsFastObjectElementsKind(origin_kind)) return true;
+ if (IsObjectElementsKind(origin_kind)) return true;
ElementsKind target_kind = origin_kind;
{
DisallowHeapAllocation no_gc;
@@ -132,9 +132,9 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
Object* arg = (*args)[i];
if (arg->IsHeapObject()) {
if (arg->IsHeapNumber()) {
- target_kind = FAST_DOUBLE_ELEMENTS;
+ target_kind = PACKED_DOUBLE_ELEMENTS;
} else {
- target_kind = FAST_ELEMENTS;
+ target_kind = PACKED_ELEMENTS;
break;
}
}
@@ -173,11 +173,11 @@ BUILTIN(ArrayPush) {
// Fast Elements Path
int to_add = args.length() - 1;
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- int len = Smi::cast(array->length())->value();
+ int len = Smi::ToInt(array->length());
if (to_add == 0) return Smi::FromInt(len);
// Currently fixed arrays cannot grow too big, so we should never hit this.
- DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+ DCHECK_LE(to_add, Smi::kMaxValue - Smi::ToInt(array->length()));
if (JSArray::HasReadOnlyLength(array)) {
return CallJsIntrinsic(isolate, isolate->array_push(), args);
@@ -197,7 +197,7 @@ BUILTIN(ArrayPop) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- uint32_t len = static_cast<uint32_t>(Smi::cast(array->length())->value());
+ uint32_t len = static_cast<uint32_t>(Smi::ToInt(array->length()));
if (len == 0) return isolate->heap()->undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
@@ -228,7 +228,7 @@ BUILTIN(ArrayShift) {
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- int len = Smi::cast(array->length())->value();
+ int len = Smi::ToInt(array->length());
if (len == 0) return heap->undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
@@ -250,7 +250,7 @@ BUILTIN(ArrayUnshift) {
if (to_add == 0) return array->length();
// Currently fixed arrays cannot grow too big, so we should never hit this.
- DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+ DCHECK_LE(to_add, Smi::kMaxValue - Smi::ToInt(array->length()));
if (JSArray::HasReadOnlyLength(array)) {
return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
@@ -279,7 +279,7 @@ BUILTIN(ArraySlice) {
AllowHeapAllocation allow_allocation;
return CallJsIntrinsic(isolate, isolate->array_slice(), args);
}
- len = Smi::cast(array->length())->value();
+ len = Smi::ToInt(array->length());
} else if (receiver->IsJSObject() &&
GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
&len)) {
@@ -352,7 +352,7 @@ BUILTIN(ArraySplice) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
}
- int len = Smi::cast(array->length())->value();
+ int len = Smi::ToInt(array->length());
// clip relative start to [0, len]
int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
: Min(relative_start, len);
@@ -461,8 +461,8 @@ class ArrayConcatVisitor {
// The object holding this backing store has just been allocated, so
// it cannot yet be used as a prototype.
Handle<JSObject> not_a_prototype_holder;
- Handle<SeededNumberDictionary> result = SeededNumberDictionary::AtNumberPut(
- dict, index, elm, not_a_prototype_holder);
+ Handle<SeededNumberDictionary> result =
+ SeededNumberDictionary::Set(dict, index, elm, not_a_prototype_holder);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
@@ -497,10 +497,10 @@ class ArrayConcatVisitor {
Handle<Object> length =
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
Handle<Map> map = JSObject::GetElementsTransitionMap(
- array, fast_elements() ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
- array->set_map(*map);
+ array, fast_elements() ? HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
array->set_length(*length);
array->set_elements(*storage_fixed_array());
+ array->synchronized_set_map(*map);
return array;
}
@@ -535,8 +535,8 @@ class ArrayConcatVisitor {
// it cannot yet be used as a prototype.
Handle<JSObject> not_a_prototype_holder;
Handle<SeededNumberDictionary> new_storage =
- SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
- not_a_prototype_holder);
+ SeededNumberDictionary::Set(slow_storage, i, element,
+ not_a_prototype_holder);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
@@ -582,10 +582,10 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
uint32_t length = static_cast<uint32_t>(array->length()->Number());
int element_count = 0;
switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
@@ -597,8 +597,8 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
}
break;
}
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
@@ -639,7 +639,6 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
UNREACHABLE();
- return 0;
}
// As an estimate, we assume that the prototype doesn't contain any
// inherited elements.
@@ -658,10 +657,10 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
Isolate* isolate = object->GetIsolate();
ElementsKind kind = object->GetElementsKind();
switch (kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS: {
DisallowHeapAllocation no_gc;
FixedArray* elements = FixedArray::cast(object->elements());
uint32_t length = static_cast<uint32_t>(elements->length());
@@ -673,8 +672,8 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
if (object->elements()->IsFixedArray()) {
DCHECK(object->elements()->length() == 0);
break;
@@ -823,10 +822,10 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<JSObject> array = Handle<JSObject>::cast(receiver);
switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
Handle<FixedArray> elements(FixedArray::cast(array->elements()));
@@ -851,8 +850,8 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
});
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
// Empty array is FixedArray but not FixedDoubleArray.
if (length == 0) break;
// Run through the elements FixedArray and use HasElement and GetElement
@@ -964,10 +963,10 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
// that mutate other arguments (but will otherwise be precise).
// The number of elements is precise if there are no inherited elements.
- ElementsKind kind = FAST_SMI_ELEMENTS;
+ ElementsKind kind = PACKED_SMI_ELEMENTS;
uint32_t estimate_result_length = 0;
- uint32_t estimate_nof_elements = 0;
+ uint32_t estimate_nof = 0;
FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < argument_count, i++, {
Handle<Object> obj((*args)[i], isolate);
uint32_t length_estimate;
@@ -984,7 +983,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
} else {
if (obj->IsHeapObject()) {
kind = GetMoreGeneralElementsKind(
- kind, obj->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS);
+ kind, obj->IsNumber() ? PACKED_DOUBLE_ELEMENTS : PACKED_ELEMENTS);
}
length_estimate = 1;
element_estimate = 1;
@@ -995,10 +994,10 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
} else {
estimate_result_length += length_estimate;
}
- if (JSObject::kMaxElementCount - estimate_nof_elements < element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
+ if (JSObject::kMaxElementCount - estimate_nof < element_estimate) {
+ estimate_nof = JSObject::kMaxElementCount;
} else {
- estimate_nof_elements += element_estimate;
+ estimate_nof += element_estimate;
}
});
@@ -1006,10 +1005,10 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
// fixed array (fast case) is more time and space-efficient than a
// dictionary.
bool fast_case = is_array_species &&
- (estimate_nof_elements * 2) >= estimate_result_length &&
+ (estimate_nof * 2) >= estimate_result_length &&
isolate->IsIsConcatSpreadableLookupChainIntact();
- if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
+ if (fast_case && kind == PACKED_DOUBLE_ELEMENTS) {
Handle<FixedArrayBase> storage =
isolate->factory()->NewFixedDoubleArray(estimate_result_length);
int j = 0;
@@ -1020,7 +1019,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj((*args)[i], isolate);
if (obj->IsSmi()) {
- double_storage->set(j, Smi::cast(*obj)->value());
+ double_storage->set(j, Smi::ToInt(*obj));
j++;
} else if (obj->IsNumber()) {
double_storage->set(j, obj->Number());
@@ -1030,8 +1029,8 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
JSArray* array = JSArray::cast(*obj);
uint32_t length = static_cast<uint32_t>(array->length()->Number());
switch (array->GetElementsKind()) {
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
// Empty array is FixedArray but not FixedDoubleArray.
if (length == 0) break;
FixedDoubleArray* elements =
@@ -1052,8 +1051,8 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
}
break;
}
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS: {
Object* the_hole = isolate->heap()->the_hole_value();
FixedArray* elements(FixedArray::cast(array->elements()));
for (uint32_t i = 0; i < length; i++) {
@@ -1062,14 +1061,14 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
failure = true;
break;
}
- int32_t int_value = Smi::cast(element)->value();
+ int32_t int_value = Smi::ToInt(element);
double_storage->set(j, int_value);
j++;
}
break;
}
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ case PACKED_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NO_ELEMENTS:
DCHECK_EQ(0u, length);
@@ -1094,10 +1093,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
storage =
isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
} else if (is_array_species) {
- // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
- uint32_t at_least_space_for =
- estimate_nof_elements + (estimate_nof_elements >> 2);
- storage = SeededNumberDictionary::New(isolate, at_least_space_for);
+ storage = SeededNumberDictionary::New(isolate, estimate_nof);
} else {
DCHECK(species->IsConstructor());
Handle<Object> length(Smi::kZero, isolate);
@@ -1184,7 +1180,7 @@ MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate,
}
   // The Array length is guaranteed to be <= kHalfOfMaxInt, thus we won't
// overflow.
- result_len += Smi::cast(array->length())->value();
+ result_len += Smi::ToInt(array->length());
DCHECK(result_len >= 0);
// Throw an Error if we overflow the FixedArray limits
if (FixedDoubleArray::kMaxLength < result_len ||
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index f661f7e82e..5cff179c63 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -61,8 +61,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
// Resume the {receiver} using our trampoline.
Callable callable = CodeFactory::ResumeGenerator(isolate());
- CallStub(callable, context, sent_value, generator, SmiConstant(resume_mode),
- SmiConstant(static_cast<int>(SuspendFlags::kGeneratorAwait)));
+ CallStub(callable, context, sent_value, generator, SmiConstant(resume_mode));
// The resulting Promise is a throwaway, so it doesn't matter what it
// resolves to. What is important is that we don't end up keeping the
@@ -104,12 +103,9 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
- NodeGenerator1 create_closure_context = [&](Node* native_context) -> Node* {
- Node* const context =
- CreatePromiseContext(native_context, AwaitContext::kLength);
+ ContextInitializer init_closure_context = [&](Node* context) {
StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
generator);
- return context;
};
// TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
@@ -119,19 +115,21 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
// TODO(jgruber): Use a faster specialized version of
// InternalPerformPromiseThen.
- Node* const result = Await(
- context, generator, awaited, outer_promise, create_closure_context,
- Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
- Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, is_predicted_as_caught);
+ Await(context, generator, awaited, outer_promise, AwaitContext::kLength,
+ init_closure_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
+ Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN,
+ is_predicted_as_caught);
- Return(result);
+  // Return outer promise to avoid adding a load of the outer promise before
+ // suspending in BytecodeGenerator.
+ Return(outer_promise);
}
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
- Node* const generator = Parameter(Descriptor::kGenerator);
+ CSA_ASSERT_JS_ARGC_EQ(this, 2);
+ Node* const generator = Parameter(Descriptor::kReceiver);
Node* const awaited = Parameter(Descriptor::kAwaited);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
@@ -145,8 +143,8 @@ TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
- Node* const generator = Parameter(Descriptor::kGenerator);
+ CSA_ASSERT_JS_ARGC_EQ(this, 2);
+ Node* const generator = Parameter(Descriptor::kReceiver);
Node* const awaited = Parameter(Descriptor::kAwaited);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index f8974acd98..95192de3eb 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -21,42 +21,116 @@ class ValueUnwrapContext {
Node* AsyncBuiltinsAssembler::Await(
Node* context, Node* generator, Node* value, Node* outer_promise,
- const NodeGenerator1& create_closure_context, int on_resolve_context_index,
- int on_reject_context_index, bool is_predicted_as_caught) {
+ int context_length, const ContextInitializer& init_closure_context,
+ int on_resolve_context_index, int on_reject_context_index,
+ bool is_predicted_as_caught) {
+ DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
+
+ Node* const native_context = LoadNativeContext(context);
+
+#ifdef DEBUG
+ {
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const instance_size = LoadMapInstanceSize(map);
+    // Assert that the strict function map's instance size is
+    // JSFunction::kSize.
+ CSA_ASSERT(this, WordEqual(instance_size, IntPtrConstant(JSFunction::kSize /
+ kPointerSize)));
+ }
+#endif
+
+#ifdef DEBUG
+ {
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const map =
+ LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const instance_size = LoadMapInstanceSize(map);
+    // Assert that the JSPromise map's instance size is
+    // JSPromise::kSizeWithEmbedderFields.
+ CSA_ASSERT(this,
+ WordEqual(instance_size,
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
+ kPointerSize)));
+ }
+#endif
+
+ static const int kWrappedPromiseOffset = FixedArray::SizeFor(context_length);
+ static const int kThrowawayPromiseOffset =
+ kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
+ static const int kResolveClosureOffset =
+ kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
+ static const int kRejectClosureOffset =
+ kResolveClosureOffset + JSFunction::kSize;
+ static const int kTotalSize = kRejectClosureOffset + JSFunction::kSize;
+
+ Node* const base = AllocateInNewSpace(kTotalSize);
+ Node* const closure_context = base;
+ {
+ // Initialize closure context
+ InitializeFunctionContext(native_context, closure_context, context_length);
+ init_closure_context(closure_context);
+ }
+
// Let promiseCapability be ! NewPromiseCapability(%Promise%).
- Node* const wrapped_value = AllocateAndInitJSPromise(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const promise_map =
+ LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
+ {
+ // Initialize Promise
+ StoreMapNoWriteBarrier(wrapped_value, promise_map);
+ InitializeJSObjectFromMap(
+ wrapped_value, promise_map,
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields),
+ EmptyFixedArrayConstant(), EmptyFixedArrayConstant());
+ PromiseInit(wrapped_value);
+ }
-  // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
- CallBuiltin(Builtins::kResolveNativePromise, context, wrapped_value, value);
+ Node* const throwaway = InnerAllocate(base, kThrowawayPromiseOffset);
+ {
+ // Initialize throwawayPromise
+ StoreMapNoWriteBarrier(throwaway, promise_map);
+ InitializeJSObjectFromMap(
+ throwaway, promise_map,
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields),
+ EmptyFixedArrayConstant(), EmptyFixedArrayConstant());
+ PromiseInit(throwaway);
+ }
- Node* const native_context = LoadNativeContext(context);
+ Node* const on_resolve = InnerAllocate(base, kResolveClosureOffset);
+ {
+ // Initialize resolve handler
+ InitializeNativeClosure(closure_context, native_context, on_resolve,
+ on_resolve_context_index);
+ }
- Node* const closure_context = create_closure_context(native_context);
- Node* const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const on_reject = InnerAllocate(base, kRejectClosureOffset);
+ {
+ // Initialize reject handler
+ InitializeNativeClosure(closure_context, native_context, on_reject,
+ on_reject_context_index);
+ }
+
+ {
+ // Add PromiseHooks if needed
+ Label next(this);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &next);
+ CallRuntime(Runtime::kPromiseHookInit, context, wrapped_value,
+ outer_promise);
+ CallRuntime(Runtime::kPromiseHookInit, context, throwaway, wrapped_value);
+ Goto(&next);
+ BIND(&next);
+ }
- // Load and allocate on_resolve closure
- Node* const on_resolve_shared_fun =
- LoadContextElement(native_context, on_resolve_context_index);
- CSA_SLOW_ASSERT(
- this, HasInstanceType(on_resolve_shared_fun, SHARED_FUNCTION_INFO_TYPE));
- Node* const on_resolve = AllocateFunctionWithMapAndContext(
- map, on_resolve_shared_fun, closure_context);
-
- // Load and allocate on_reject closure
- Node* const on_reject_shared_fun =
- LoadContextElement(native_context, on_reject_context_index);
- CSA_SLOW_ASSERT(
- this, HasInstanceType(on_reject_shared_fun, SHARED_FUNCTION_INFO_TYPE));
- Node* const on_reject = AllocateFunctionWithMapAndContext(
- map, on_reject_shared_fun, closure_context);
-
- Node* const throwaway_promise =
- AllocateAndInitJSPromise(context, wrapped_value);
+  // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
+ CallBuiltin(Builtins::kResolveNativePromise, context, wrapped_value, value);
// The Promise will be thrown away and not handled, but it shouldn't trigger
// unhandled reject events as its work is done
- PromiseSetHasHandler(throwaway_promise);
+ PromiseSetHasHandler(throwaway);
Label do_perform_promise_then(this);
GotoIfNot(IsDebugActive(), &do_perform_promise_then);
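The offset constants near the top of Await lay the closure context, the wrapped promise, the throwaway promise and the two handler closures out back to back, so a single AllocateInNewSpace call covers all five objects and InnerAllocate just hands out interior pointers. A rough sketch of that offset arithmetic (plain C++; the three sizes below are placeholders, not the real V8 object sizes):

#include <cstddef>

// Placeholder sizes standing in for FixedArray::SizeFor(context_length),
// JSPromise::kSizeWithEmbedderFields and JSFunction::kSize.
constexpr std::size_t kContextSize = 64;
constexpr std::size_t kPromiseSize = 40;
constexpr std::size_t kFunctionSize = 56;

// One contiguous block: [context | wrapped promise | throwaway | resolve | reject].
constexpr std::size_t kWrappedPromiseOffset = kContextSize;
constexpr std::size_t kThrowawayPromiseOffset = kWrappedPromiseOffset + kPromiseSize;
constexpr std::size_t kResolveClosureOffset = kThrowawayPromiseOffset + kPromiseSize;
constexpr std::size_t kRejectClosureOffset = kResolveClosureOffset + kFunctionSize;
constexpr std::size_t kTotalSize = kRejectClosureOffset + kFunctionSize;

static_assert(kTotalSize == kContextSize + 2 * kPromiseSize + 2 * kFunctionSize,
              "every sub-object is carved out of the same allocation");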
@@ -82,18 +156,52 @@ Node* AsyncBuiltinsAssembler::Await(
CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, throwaway_promise, key,
- outer_promise, SmiConstant(STRICT));
+ CallRuntime(Runtime::kSetProperty, context, throwaway, key, outer_promise,
+ SmiConstant(STRICT));
}
Goto(&do_perform_promise_then);
BIND(&do_perform_promise_then);
+
CallBuiltin(Builtins::kPerformNativePromiseThen, context, wrapped_value,
- on_resolve, on_reject, throwaway_promise);
+ on_resolve, on_reject, throwaway);
return wrapped_value;
}
+void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
+ Node* native_context,
+ Node* function,
+ int context_index) {
+ Node* const function_map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ StoreMapNoWriteBarrier(function, function_map);
+ StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(function, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(function, JSFunction::kFeedbackVectorOffset,
+ Heap::kUndefinedCellRootIndex);
+ StoreObjectFieldRoot(function, JSFunction::kPrototypeOrInitialMapOffset,
+ Heap::kTheHoleValueRootIndex);
+
+ Node* shared_info = LoadContextElement(native_context, context_index);
+ CSA_ASSERT(this, IsSharedFunctionInfo(shared_info));
+ StoreObjectFieldNoWriteBarrier(
+ function, JSFunction::kSharedFunctionInfoOffset, shared_info);
+ StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
+
+ Node* const code = BitcastTaggedToWord(
+ LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset));
+ Node* const code_entry =
+ IntPtrAdd(code, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeEntryOffset,
+ code_entry,
+ MachineType::PointerRepresentation());
+ StoreObjectFieldRoot(function, JSFunction::kNextFunctionLinkOffset,
+ Heap::kUndefinedValueRootIndex);
+}
+
Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
Node* done) {
Node* const map = LoadContextElement(
@@ -127,8 +235,8 @@ TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
Node* const done = LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
CSA_ASSERT(this, IsBoolean(done));
- Node* const unwrapped_value = CallStub(
- CodeFactory::CreateIterResultObject(isolate()), context, value, done);
+ Node* const unwrapped_value =
+ CallBuiltin(Builtins::kCreateIterResultObject, context, value, done);
Return(unwrapped_value);
}
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 26bc3988ed..caba5ebd36 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -16,7 +16,7 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
: PromiseBuiltinsAssembler(state) {}
protected:
- typedef std::function<Node*(Node*)> NodeGenerator1;
+ typedef std::function<void(Node*)> ContextInitializer;
// Perform steps to resume generator after `value` is resolved.
// `on_reject_context_index` is an index into the Native Context, which should
@@ -24,7 +24,8 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
// value following the reject index should be a similar value for the resolve
// closure. Returns the Promise-wrapped `value`.
Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- const NodeGenerator1& create_closure_context,
+ int context_length,
+ const ContextInitializer& init_closure_context,
int on_resolve_context_index, int on_reject_context_index,
bool is_predicted_as_caught);
@@ -33,6 +34,8 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
Node* CreateUnwrapClosure(Node* const native_context, Node* const done);
private:
+ void InitializeNativeClosure(Node* context, Node* native_context,
+ Node* function, int context_index);
Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
Node* done);
};
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index b3cb3d8ebd..72a6a496b7 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -113,7 +113,8 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
return SmiNotEqual(resume_type, SmiConstant(JSGeneratorObject::kNext));
}
- void AsyncGeneratorEnqueue(Node* context, Node* generator, Node* value,
+ void AsyncGeneratorEnqueue(CodeStubArguments* args, Node* context,
+ Node* generator, Node* value,
JSAsyncGeneratorObject::ResumeMode resume_mode,
const char* method_name);
@@ -138,7 +139,7 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
// Shared implementation for the 3 Async Iterator protocol methods of Async
// Generators.
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
- Node* context, Node* generator, Node* value,
+ CodeStubArguments* args, Node* context, Node* generator, Node* value,
JSAsyncGeneratorObject::ResumeMode resume_mode, const char* method_name) {
// AsyncGeneratorEnqueue produces a new Promise, and appends it to the list
// of async generator requests to be executed. If the generator is not
@@ -175,18 +176,18 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
Goto(&done);
BIND(&done);
- Return(promise);
+ args->PopAndReturn(promise);
}
BIND(&if_receiverisincompatible);
{
Node* const error =
MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
- CStringConstant(method_name), generator);
+ StringConstant(method_name), generator);
CallBuiltin(Builtins::kRejectNativePromise, context, promise, error,
TrueConstant());
- Return(promise);
+ args->PopAndReturn(promise);
}
}
@@ -231,18 +232,16 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
CSA_SLOW_ASSERT(this, IsGeneratorSuspended(generator));
CallStub(CodeFactory::ResumeGenerator(isolate()), context, value, generator,
- SmiConstant(resume_mode),
- SmiConstant(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
+ SmiConstant(resume_mode));
- TailCallStub(CodeFactory::AsyncGeneratorResumeNext(isolate()), context,
- generator);
+ TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
template <typename Descriptor>
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
- Node* generator = Parameter(1);
- Node* value = Parameter(2);
- Node* context = Parameter(5);
+ Node* generator = Parameter(Descriptor::kReceiver);
+ Node* value = Parameter(Descriptor::kAwaited);
+ Node* context = Parameter(Descriptor::kContext);
CSA_SLOW_ASSERT(this,
HasInstanceType(generator, JS_ASYNC_GENERATOR_OBJECT_TYPE));
@@ -250,12 +249,9 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
CSA_ASSERT(this, WordNotEqual(request, UndefinedConstant()));
- NodeGenerator1 closure_context = [&](Node* native_context) -> Node* {
- Node* const context =
- CreatePromiseContext(native_context, AwaitContext::kLength);
+ ContextInitializer init_closure_context = [&](Node* context) {
StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
generator);
- return context;
};
Node* outer_promise =
@@ -265,8 +261,8 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
const int reject_index = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
Node* promise =
- Await(context, generator, value, outer_promise, closure_context,
- resolve_index, reject_index, is_catchable);
+ Await(context, generator, value, outer_promise, AwaitContext::kLength,
+ init_closure_context, resolve_index, reject_index, is_catchable);
CSA_SLOW_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
@@ -330,10 +326,17 @@ Node* AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-asyncgenerator-prototype-next
TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
- Node* const generator = Parameter(Descriptor::kReceiver);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
- AsyncGeneratorEnqueue(context, generator, value,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* generator = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kNext,
"[AsyncGenerator].prototype.next");
}
@@ -341,10 +344,17 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-asyncgenerator-prototype-return
TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
- Node* generator = Parameter(Descriptor::kReceiver);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorEnqueue(context, generator, value,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* generator = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kReturn,
"[AsyncGenerator].prototype.return");
}
@@ -352,10 +362,17 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-asyncgenerator-prototype-throw
TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
- Node* generator = Parameter(Descriptor::kReceiver);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorEnqueue(context, generator, value,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* generator = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kThrow,
"[AsyncGenerator].prototype.throw");
}
@@ -461,8 +478,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
BIND(&resume_generator);
{
CallStub(CodeFactory::ResumeGenerator(isolate()), context,
- LoadValueFromAsyncGeneratorRequest(next), generator, resume_type,
- SmiConstant(static_cast<int>(SuspendFlags::kAsyncGeneratorYield)));
+ LoadValueFromAsyncGeneratorRequest(next), generator, resume_type);
var_state.Bind(LoadGeneratorState(generator));
var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator));
Goto(&start);
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 2caa3c9edb..f232b32700 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -66,7 +66,7 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
// Let badIteratorError be a new TypeError exception.
Node* const error =
MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
- CStringConstant(method_name), object);
+ StringConstant(method_name), object);
// Perform ! Call(promiseCapability.[[Reject]], undefined,
   // « badIteratorError »).
@@ -203,7 +203,7 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
BIND(&to_boolean);
{
Node* const result =
- CallStub(CodeFactory::ToBoolean(isolate()), context, var_done.value());
+ CallBuiltin(Builtins::kToBoolean, context, var_done.value());
var_done.Bind(result);
Goto(&done);
}
@@ -237,9 +237,8 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
Node* const promise, Label* if_exception) {
// If return is undefined, then
// Let iterResult be ! CreateIterResultObject(value, true)
- Node* const iter_result =
- CallStub(CodeFactory::CreateIterResultObject(isolate()), context, value,
- TrueConstant());
+ Node* const iter_result = CallBuiltin(Builtins::kCreateIterResultObject,
+ context, value, TrueConstant());
   // Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
// IfAbruptRejectPromise(nextDone, promiseCapability).
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index bd70865399..4f4839b5f6 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-call-gen.h"
+
+#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/globals.h"
#include "src/isolate.h"
@@ -12,82 +15,383 @@ namespace internal {
void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kDisallow);
+ Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsNotNullOrUndefined(
MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kDisallow);
+ Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kAny,
- TailCallMode::kDisallow);
+ Generate_CallFunction(masm, ConvertReceiverMode::kAny);
}
-void Builtins::Generate_TailCallFunction_ReceiverIsNullOrUndefined(
- MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kAllow);
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ Generate_CallBoundFunctionImpl(masm);
}
-void Builtins::Generate_TailCallFunction_ReceiverIsNotNullOrUndefined(
- MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kAllow);
+void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined);
}
-void Builtins::Generate_TailCallFunction_ReceiverIsAny(MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
+void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
- Generate_CallBoundFunctionImpl(masm, TailCallMode::kDisallow);
+void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kAny);
}
-void Builtins::Generate_TailCallBoundFunction(MacroAssembler* masm) {
- Generate_CallBoundFunctionImpl(masm, TailCallMode::kAllow);
+void Builtins::Generate_CallVarargs(MacroAssembler* masm) {
+ Generate_CallOrConstructVarargs(masm, masm->isolate()->builtins()->Call());
}
-void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kDisallow);
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
+ Generate_CallOrConstructForwardVarargs(masm,
+ masm->isolate()->builtins()->Call());
}
-void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined(
- MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kDisallow);
+void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
+ Generate_CallOrConstructForwardVarargs(
+ masm, masm->isolate()->builtins()->CallFunction());
}
-void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow);
-}
+void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
+ Node* target, Node* new_target, Node* arguments_list, Node* context) {
+ VARIABLE(var_elements, MachineRepresentation::kTagged);
+ VARIABLE(var_length, MachineRepresentation::kWord32);
+ Label if_done(this), if_arguments(this), if_array(this),
+ if_holey_array(this, Label::kDeferred),
+ if_runtime(this, Label::kDeferred);
-void Builtins::Generate_TailCall_ReceiverIsNullOrUndefined(
- MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kAllow);
+ // Perform appropriate checks on {target} (and {new_target} first).
+ if (new_target == nullptr) {
+ // Check that {target} is Callable.
+ Label if_target_callable(this),
+ if_target_not_callable(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(target), &if_target_not_callable);
+ Branch(IsCallable(target), &if_target_callable, &if_target_not_callable);
+ BIND(&if_target_not_callable);
+ {
+ CallRuntime(Runtime::kThrowApplyNonFunction, context, target);
+ Unreachable();
+ }
+ BIND(&if_target_callable);
+ } else {
+ // Check that {target} is a Constructor.
+ Label if_target_constructor(this),
+ if_target_not_constructor(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(target), &if_target_not_constructor);
+ Branch(IsConstructor(target), &if_target_constructor,
+ &if_target_not_constructor);
+ BIND(&if_target_not_constructor);
+ {
+ CallRuntime(Runtime::kThrowNotConstructor, context, target);
+ Unreachable();
+ }
+ BIND(&if_target_constructor);
+
+ // Check that {new_target} is a Constructor.
+ Label if_new_target_constructor(this),
+ if_new_target_not_constructor(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(new_target), &if_new_target_not_constructor);
+ Branch(IsConstructor(new_target), &if_new_target_constructor,
+ &if_new_target_not_constructor);
+ BIND(&if_new_target_not_constructor);
+ {
+ CallRuntime(Runtime::kThrowNotConstructor, context, new_target);
+ Unreachable();
+ }
+ BIND(&if_new_target_constructor);
+ }
+
+ GotoIf(TaggedIsSmi(arguments_list), &if_runtime);
+ Node* arguments_list_map = LoadMap(arguments_list);
+ Node* native_context = LoadNativeContext(context);
+
+ // Check if {arguments_list} is an (unmodified) arguments object.
+ Node* sloppy_arguments_map =
+ LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ GotoIf(WordEqual(arguments_list_map, sloppy_arguments_map), &if_arguments);
+ Node* strict_arguments_map =
+ LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
+ GotoIf(WordEqual(arguments_list_map, strict_arguments_map), &if_arguments);
+
+ // Check if {arguments_list} is a fast JSArray.
+ Branch(IsJSArrayMap(arguments_list_map), &if_array, &if_runtime);
+
+ BIND(&if_array);
+ {
+ // Try to extract the elements from a JSArray object.
+ var_elements.Bind(
+ LoadObjectField(arguments_list, JSArray::kElementsOffset));
+ var_length.Bind(LoadAndUntagToWord32ObjectField(arguments_list,
+ JSArray::kLengthOffset));
+
+ // Holey arrays and double backing stores need special treatment.
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(LAST_FAST_ELEMENTS_KIND == HOLEY_DOUBLE_ELEMENTS);
+
+ Node* kind = LoadMapElementsKind(arguments_list_map);
+
+ GotoIf(Int32GreaterThan(kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_runtime);
+ Branch(Word32And(kind, Int32Constant(1)), &if_holey_array, &if_done);
+ }
+
+ BIND(&if_holey_array);
+ {
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that the prototype is the initial Array.prototype.
+ Node* arguments_list_prototype = LoadMapPrototype(arguments_list_map);
+ Node* initial_array_prototype = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(arguments_list_prototype, initial_array_prototype),
+ &if_runtime);
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ Branch(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ &if_done, &if_runtime);
+ }
+
+ BIND(&if_arguments);
+ {
+    // Try to extract the elements from a JSArgumentsObject.
+ Node* length =
+ LoadObjectField(arguments_list, JSArgumentsObject::kLengthOffset);
+ Node* elements =
+ LoadObjectField(arguments_list, JSArgumentsObject::kElementsOffset);
+ Node* elements_length =
+ LoadObjectField(elements, FixedArray::kLengthOffset);
+ GotoIfNot(WordEqual(length, elements_length), &if_runtime);
+ var_elements.Bind(elements);
+ var_length.Bind(SmiToWord32(length));
+ Goto(&if_done);
+ }
+
+ BIND(&if_runtime);
+ {
+ // Ask the runtime to create the list (actually a FixedArray).
+ Node* elements =
+ CallRuntime(Runtime::kCreateListFromArrayLike, context, arguments_list);
+ var_elements.Bind(elements);
+ var_length.Bind(
+ LoadAndUntagToWord32ObjectField(elements, FixedArray::kLengthOffset));
+ Goto(&if_done);
+ }
+
+  // Tail call to the appropriate builtin (depending on whether a
+  // {new_target} was passed).
+ BIND(&if_done);
+ {
+ Label if_not_double(this), if_double(this);
+ Node* elements = var_elements.value();
+ Node* length = var_length.value();
+ Node* args_count = Int32Constant(0); // args already on the stack
+
+ Branch(IsFixedDoubleArray(elements), &if_double, &if_not_double);
+
+ BIND(&if_not_double);
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, elements, length);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count, elements,
+ length);
+ }
+
+ BIND(&if_double);
+ {
+ // Kind is hardcoded here because CreateListFromArrayLike will only
+ // produce holey double arrays.
+ CallOrConstructDoubleVarargs(target, new_target, elements, length,
+ args_count, context,
+ Int32Constant(HOLEY_DOUBLE_ELEMENTS));
+ }
+ }
}
-void Builtins::Generate_TailCall_ReceiverIsNotNullOrUndefined(
- MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kAllow);
+// Takes a FixedArray of doubles and creates a new FixedArray with those doubles
+// boxed as HeapNumbers, then tail calls CallVarargs/ConstructVarargs depending
+// on whether {new_target} was passed.
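+// A typical way to reach this path (illustrative) is Reflect.apply(f,
+// undefined, [1.5, 2.5]), where the argument array has a double elements
+// backing store; the doubles are re-boxed so that CallVarargs and
+// ConstructVarargs only ever see tagged values.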
+void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
+ Node* target, Node* new_target, Node* elements, Node* length,
+ Node* args_count, Node* context, Node* kind) {
+ Label if_holey_double(this), if_packed_double(this), if_done(this);
+
+ const ElementsKind new_kind = PACKED_ELEMENTS;
+ const ParameterMode mode = INTPTR_PARAMETERS;
+ const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
+ Node* intptr_length = ChangeInt32ToIntPtr(length);
+
+ // Allocate a new FixedArray of Objects.
+ Node* new_elements =
+ AllocateFixedArray(new_kind, intptr_length, mode,
+ CodeStubAssembler::kAllowLargeObjectAllocation);
+ Branch(Word32Equal(kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
+ &if_holey_double, &if_packed_double);
+
+ BIND(&if_holey_double);
+ {
+ // Fill the FixedArray with pointers to HeapObjects.
+ CopyFixedArrayElements(HOLEY_DOUBLE_ELEMENTS, elements, new_kind,
+ new_elements, intptr_length, intptr_length,
+ barrier_mode);
+ Goto(&if_done);
+ }
+
+ BIND(&if_packed_double);
+ {
+ CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
+ new_elements, intptr_length, intptr_length,
+ barrier_mode);
+ Goto(&if_done);
+ }
+
+ BIND(&if_done);
+ {
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, new_elements, length);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count,
+ new_elements, length);
+ }
+ }
}
-void Builtins::Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
+void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
+ Node* target, Node* new_target, Node* spread, Node* args_count,
+ Node* context) {
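+  // Fast path for f(...spread) and new C(...spread): if {spread} is a plain
+  // JSArray with the unmodified Array.prototype, intact array and array
+  // iterator protectors, and the original array iterator, its elements can
+  // be used directly; otherwise the spread is materialized via the runtime.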
+ Label if_done(this), if_holey(this), if_runtime(this, Label::kDeferred);
+
+ VARIABLE(spread_result, MachineRepresentation::kTagged, spread);
+
+ GotoIf(TaggedIsSmi(spread), &if_runtime);
+ Node* spread_map = LoadMap(spread);
+ GotoIfNot(IsJSArrayMap(spread_map), &if_runtime);
+
+ Node* native_context = LoadNativeContext(context);
+
+ // Check that we have the original ArrayPrototype.
+ Node* prototype = LoadMapPrototype(spread_map);
+ Node* array_prototype = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(prototype, array_prototype), &if_runtime);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell());
+ GotoIfNot(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ &if_runtime);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ Node* arr_it_proto_map = LoadMap(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ Node* initial_map = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX);
+ GotoIfNot(WordEqual(arr_it_proto_map, initial_map), &if_runtime);
+
+ Node* kind = LoadMapElementsKind(spread_map);
+
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(LAST_FAST_ELEMENTS_KIND == HOLEY_DOUBLE_ELEMENTS);
+
+ GotoIf(Int32GreaterThan(kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_runtime);
+ Branch(Word32And(kind, Int32Constant(1)), &if_holey, &if_done);
+
+ // Check the ArrayProtector cell for holey arrays.
+ BIND(&if_holey);
+ {
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ Branch(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ &if_done, &if_runtime);
+ }
+
+ BIND(&if_runtime);
+ {
+ Node* spread_iterable = LoadContextElement(LoadNativeContext(context),
+ Context::SPREAD_ITERABLE_INDEX);
+ spread_result.Bind(CallJS(CodeFactory::Call(isolate()), context,
+ spread_iterable, UndefinedConstant(), spread));
+ CSA_ASSERT(this, IsJSArray(spread_result.value()));
+ Goto(&if_done);
+ }
+
+ BIND(&if_done);
+ {
+ // The result from if_runtime can be an array of doubles.
+ Label if_not_double(this), if_double(this);
+ Node* elements =
+ LoadObjectField(spread_result.value(), JSArray::kElementsOffset);
+ Node* length = LoadAndUntagToWord32ObjectField(spread_result.value(),
+ JSArray::kLengthOffset);
+
+ Node* kind = LoadMapElementsKind(LoadMap(elements));
+ CSA_ASSERT(this, Int32LessThanOrEqual(
+ kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)));
+
+ Branch(Int32GreaterThan(kind, Int32Constant(HOLEY_ELEMENTS)), &if_double,
+ &if_not_double);
+
+ BIND(&if_not_double);
+ {
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, elements, length);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count,
+ elements, length);
+ }
+ }
+
+ BIND(&if_double);
+ {
+ CallOrConstructDoubleVarargs(target, new_target, elements, length,
+ args_count, context, kind);
+ }
+ }
}
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
- Generate_ForwardVarargs(masm, masm->isolate()->builtins()->Call());
+TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
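+  // Used, for example, by Function.prototype.apply and Reflect.apply to
+  // spread an array-like list of arguments onto the stack.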
+ Node* target = Parameter(CallWithArrayLikeDescriptor::kTarget);
+ Node* new_target = nullptr;
+ Node* arguments_list = Parameter(CallWithArrayLikeDescriptor::kArgumentsList);
+ Node* context = Parameter(CallWithArrayLikeDescriptor::kContext);
+ CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
-void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
- Generate_ForwardVarargs(masm, masm->isolate()->builtins()->CallFunction());
+TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
+ Node* target = Parameter(CallWithSpreadDescriptor::kTarget);
+ Node* new_target = nullptr;
+ Node* spread = Parameter(CallWithSpreadDescriptor::kSpread);
+ Node* args_count = Parameter(CallWithSpreadDescriptor::kArgumentsCount);
+ Node* context = Parameter(CallWithSpreadDescriptor::kContext);
+ CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h
new file mode 100644
index 0000000000..bbbdefc0c5
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-call-gen.h
@@ -0,0 +1,31 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_CALL_GEN_H_
+#define V8_BUILTINS_BUILTINS_CALL_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit CallOrConstructBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ void CallOrConstructWithArrayLike(Node* target, Node* new_target,
+ Node* arguments_list, Node* context);
+ void CallOrConstructDoubleVarargs(Node* target, Node* new_target,
+ Node* elements, Node* length,
+ Node* args_count, Node* context,
+ Node* kind);
+ void CallOrConstructWithSpread(Node* target, Node* new_target, Node* spread,
+ Node* args_count, Node* context);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_CALL_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-call.cc b/deps/v8/src/builtins/builtins-call.cc
index e6598c88a2..e78fb699d0 100644
--- a/deps/v8/src/builtins/builtins-call.cc
+++ b/deps/v8/src/builtins/builtins-call.cc
@@ -11,71 +11,28 @@
namespace v8 {
namespace internal {
-Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return CallFunction_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return CallFunction_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return CallFunction_ReceiverIsAny();
- }
- break;
- case TailCallMode::kAllow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return TailCallFunction_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return TailCallFunction_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return TailCallFunction_ReceiverIsAny();
- }
- break;
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return CallFunction_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return CallFunction_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return CallFunction_ReceiverIsAny();
}
UNREACHABLE();
- return Handle<Code>::null();
}
-Handle<Code> Builtins::Call(ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return Call_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return Call_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return Call_ReceiverIsAny();
- }
- break;
- case TailCallMode::kAllow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return TailCall_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return TailCall_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return TailCall_ReceiverIsAny();
- }
- break;
+Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Call_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Call_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return Call_ReceiverIsAny();
}
UNREACHABLE();
- return Handle<Code>::null();
-}
-
-Handle<Code> Builtins::CallBoundFunction(TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- return CallBoundFunction();
- case TailCallMode::kAllow:
- return TailCallBoundFunction();
- }
- UNREACHABLE();
- return Handle<Code>::null();
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index ebf90990a0..24dc946a24 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -41,7 +41,7 @@ Handle<FrameArray> GetFrameArray(Isolate* isolate, Handle<JSObject> object) {
int GetFrameIndex(Isolate* isolate, Handle<JSObject> object) {
Handle<Object> frame_index_obj = JSObject::GetDataProperty(
object, isolate->factory()->call_site_frame_index_symbol());
- return Smi::cast(*frame_index_obj)->value();
+ return Smi::ToInt(*frame_index_obj);
}
} // namespace
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
new file mode 100644
index 0000000000..9f65065db5
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -0,0 +1,1357 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-constructor-gen.h"
+#include "src/builtins/builtins-iterator-gen.h"
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/code-stub-assembler.h"
+#include "src/objects/hash-table.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+class CollectionsBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit CollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ Node* AllocateJSMap(Node* js_map_function);
+
+ template <typename CollectionType>
+ Node* AllocateOrderedHashTable();
+ Node* AllocateJSCollection(Node* js_map_function);
+ template <typename IteratorType>
+ Node* AllocateJSCollectionIterator(Node* context, int map_index,
+ Node* collection);
+
+ Node* CallGetHashRaw(Node* const key);
+ template <typename CollectionType, int entrysize>
+ Node* CallHasRaw(Node* const table, Node* const key);
+
+  // Transitions the iterator to the non-obsolete backing store.
+  // This is a no-op if the {table} is not obsolete.
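+  // (A Map or Set that grows or shrinks rehashes into a fresh backing table;
+  // the old table then points to the new one via kNextTableOffset and records
+  // which entries were removed, so that live iterators can be healed.)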
+ typedef std::function<void(Node* const table, Node* const index)>
+ UpdateInTransition;
+ template <typename TableType>
+ std::tuple<Node*, Node*> Transition(
+ Node* const table, Node* const index,
+ UpdateInTransition const& update_in_transition);
+ template <typename IteratorType, typename TableType>
+ std::tuple<Node*, Node*> TransitionAndUpdate(Node* const iterator);
+ template <typename TableType>
+ std::tuple<Node*, Node*, Node*> NextSkipHoles(Node* table, Node* index,
+ Label* if_end);
+
+  // Builds code that finds the OrderedHashTable entry for a key with hash
+  // code {hash}, using the comparison code generated by {key_compare}. The
+  // code jumps to {entry_found} if the key is found, or to {not_found} if the
+  // key was not found. In the {entry_found} branch, the variable
+ // was not found. In the {entry_found} branch, the variable
+ // entry_start_position will be bound to the index of the entry (relative to
+ // OrderedHashTable::kHashTableStartIndex).
+ //
+ // The {CollectionType} template parameter stands for the particular instance
+ // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet.
+ template <typename CollectionType>
+ void FindOrderedHashTableEntry(
+ Node* table, Node* hash,
+ std::function<void(Node* other, Label* if_same, Label* if_not_same)>
+ key_compare,
+ Variable* entry_start_position, Label* entry_found, Label* not_found);
+
+ // Specialization for Smi.
+ template <typename CollectionType>
+ void FindOrderedHashTableEntryForSmiKey(Node* table, Node* key_tagged,
+ Variable* entry_start_position,
+ Label* entry_found, Label* not_found);
+ void SameValueZeroSmi(Node* key_smi, Node* candidate_key, Label* if_same,
+ Label* if_not_same);
+
+ // Specialization for heap numbers.
+  void SameValueZeroHeapNumber(Node* key_float, Node* candidate_key,
+ Label* if_same, Label* if_not_same);
+ template <typename CollectionType>
+ void FindOrderedHashTableEntryForHeapNumberKey(Node* context, Node* table,
+ Node* key_heap_number,
+ Variable* entry_start_position,
+ Label* entry_found,
+ Label* not_found);
+
+  // Specialization for strings.
+ template <typename CollectionType>
+ void FindOrderedHashTableEntryForStringKey(Node* context, Node* table,
+ Node* key_tagged,
+ Variable* entry_start_position,
+ Label* entry_found,
+ Label* not_found);
+ Node* ComputeIntegerHashForString(Node* context, Node* string_key);
+ void SameValueZeroString(Node* context, Node* key_string, Node* candidate_key,
+ Label* if_same, Label* if_not_same);
+
+ // Specialization for non-strings, non-numbers. For those we only need
+ // reference equality to compare the keys.
+ template <typename CollectionType>
+ void FindOrderedHashTableEntryForOtherKey(Node* context, Node* table,
+ Node* key,
+ Variable* entry_start_position,
+ Label* entry_found,
+ Label* not_found);
+};
+
+template <typename CollectionType>
+Node* CollectionsBuiltinsAssembler::AllocateOrderedHashTable() {
+ static const int kCapacity = CollectionType::kMinCapacity;
+ static const int kBucketCount = kCapacity / CollectionType::kLoadFactor;
+ static const int kDataTableLength = kCapacity * CollectionType::kEntrySize;
+ static const int kFixedArrayLength =
+ CollectionType::kHashTableStartIndex + kBucketCount + kDataTableLength;
+ static const int kDataTableStartIndex =
+ CollectionType::kHashTableStartIndex + kBucketCount;
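+
+  // Resulting FixedArray layout (illustrative):
+  //   [kNumberOfElementsIndex]         number of elements
+  //   [kNumberOfDeletedElementsIndex]  number of deleted elements
+  //   [kNumberOfBucketsIndex]          number of buckets
+  //   [kHashTableStartIndex..)         kBucketCount buckets, kNotFound-filled
+  //   [kDataTableStartIndex..)         kEntrySize slots per entry, undefined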
+
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kCapacity));
+ STATIC_ASSERT(kCapacity <= CollectionType::kMaxCapacity);
+
+ // Allocate the table and add the proper map.
+ const ElementsKind elements_kind = HOLEY_ELEMENTS;
+ Node* const length_intptr = IntPtrConstant(kFixedArrayLength);
+ Node* const table = AllocateFixedArray(elements_kind, length_intptr);
+ CSA_ASSERT(this,
+ IntPtrLessThanOrEqual(
+ length_intptr, IntPtrConstant(FixedArray::kMaxRegularLength)));
+ Heap::RootListIndex map_index = Heap::kOrderedHashTableMapRootIndex;
+  // TODO(gsathya): Directly store the correct map in AllocateFixedArray,
+  // instead of overwriting it here.
+ StoreMapNoWriteBarrier(table, map_index);
+
+ // Initialize the OrderedHashTable fields.
+ const WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER;
+ StoreFixedArrayElement(table, CollectionType::kNumberOfElementsIndex,
+ SmiConstant(0), barrier_mode);
+ StoreFixedArrayElement(table, CollectionType::kNumberOfDeletedElementsIndex,
+ SmiConstant(0), barrier_mode);
+ StoreFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex,
+ SmiConstant(kBucketCount), barrier_mode);
+
+ // Fill the buckets with kNotFound.
+ Node* const not_found = SmiConstant(CollectionType::kNotFound);
+ STATIC_ASSERT(CollectionType::kHashTableStartIndex ==
+ CollectionType::kNumberOfBucketsIndex + 1);
+ STATIC_ASSERT((CollectionType::kHashTableStartIndex + kBucketCount) ==
+ kDataTableStartIndex);
+ for (int i = 0; i < kBucketCount; i++) {
+ StoreFixedArrayElement(table, CollectionType::kHashTableStartIndex + i,
+ not_found, barrier_mode);
+ }
+
+ // Fill the data table with undefined.
+ STATIC_ASSERT(kDataTableStartIndex + kDataTableLength == kFixedArrayLength);
+ for (int i = 0; i < kDataTableLength; i++) {
+ StoreFixedArrayElement(table, kDataTableStartIndex + i, UndefinedConstant(),
+ barrier_mode);
+ }
+
+ return table;
+}
+
+Node* CollectionsBuiltinsAssembler::AllocateJSCollection(
+ Node* js_map_function) {
+ CSA_ASSERT(this, IsConstructorMap(LoadMap(js_map_function)));
+ Node* const initial_map = LoadObjectField(
+ js_map_function, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const instance = AllocateJSObjectFromMap(initial_map);
+
+ StoreObjectFieldRoot(instance, JSMap::kTableOffset,
+ Heap::kUndefinedValueRootIndex);
+
+ return instance;
+}
+
+template <typename IteratorType>
+Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
+ Node* context, int map_index, Node* collection) {
+ Node* const table = LoadObjectField(collection, JSCollection::kTableOffset);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const iterator_map = LoadContextElement(native_context, map_index);
+ Node* const iterator = AllocateInNewSpace(IteratorType::kSize);
+ StoreMapNoWriteBarrier(iterator, iterator_map);
+ StoreObjectFieldRoot(iterator, IteratorType::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(iterator, IteratorType::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kTableOffset, table);
+ StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kIndexOffset,
+ SmiConstant(0));
+ return iterator;
+}
+
+TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
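+  // ES #sec-map-iterable: allocate the receiver and its OrderedHashMap
+  // backing store, then (illustratively, for new Map([[1, 'a']])) iterate the
+  // iterable and call this.set(key, value) for each entry.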
+ const int kIterableArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const iterable = args.GetOptionalArgumentValue(kIterableArg);
+ Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+
+ Label if_target_is_undefined(this, Label::kDeferred);
+ GotoIf(IsUndefined(new_target), &if_target_is_undefined);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const js_map_fun =
+ LoadContextElement(native_context, Context::JS_MAP_FUN_INDEX);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+
+ Label init(this), exit(this), if_targetisnotmodified(this),
+ if_targetismodified(this);
+ Branch(WordEqual(js_map_fun, new_target), &if_targetisnotmodified,
+ &if_targetismodified);
+
+ BIND(&if_targetisnotmodified);
+ {
+ Node* const instance = AllocateJSCollection(js_map_fun);
+ var_result.Bind(instance);
+ Goto(&init);
+ }
+
+ BIND(&if_targetismodified);
+ {
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ Node* const instance = constructor_assembler.EmitFastNewObject(
+ context, js_map_fun, new_target);
+ var_result.Bind(instance);
+ Goto(&init);
+ }
+
+ BIND(&init);
+ Node* table = AllocateOrderedHashTable<OrderedHashMap>();
+ StoreObjectField(var_result.value(), JSMap::kTableOffset, table);
+
+ GotoIf(Word32Or(IsUndefined(iterable), IsNull(iterable)), &exit);
+
+ Label if_notcallable(this);
+ // TODO(gsathya): Add fast path for unmodified maps.
+ Node* const adder = GetProperty(context, var_result.value(),
+ isolate()->factory()->set_string());
+ GotoIf(TaggedIsSmi(adder), &if_notcallable);
+ GotoIfNot(IsCallable(adder), &if_notcallable);
+
+ IteratorBuiltinsAssembler iterator_assembler(this->state());
+ Node* const iterator = iterator_assembler.GetIterator(context, iterable);
+ GotoIf(IsUndefined(iterator), &exit);
+
+ Node* const fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+
+ Label loop(this), if_notobject(this), if_exception(this);
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ Node* const next = iterator_assembler.IteratorStep(
+ context, iterator, &exit, fast_iterator_result_map);
+
+ Node* const next_value = iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map);
+
+ GotoIf(TaggedIsSmi(next_value), &if_notobject);
+ GotoIfNot(IsJSReceiver(next_value), &if_notobject);
+
+ Node* const k =
+ GetProperty(context, next_value, isolate()->factory()->zero_string());
+ GotoIfException(k, &if_exception, &var_exception);
+
+ Node* const v =
+ GetProperty(context, next_value, isolate()->factory()->one_string());
+ GotoIfException(v, &if_exception, &var_exception);
+
+ Node* add_call = CallJS(CodeFactory::Call(isolate()), context, adder,
+ var_result.value(), k, v);
+ GotoIfException(add_call, &if_exception, &var_exception);
+ Goto(&loop);
+
+ BIND(&if_notobject);
+ {
+ Node* const exception = MakeTypeError(
+ MessageTemplate::kIteratorValueNotAnObject, context, next_value);
+ var_exception.Bind(exception);
+ Goto(&if_exception);
+ }
+ }
+
+ BIND(&if_exception);
+ {
+ iterator_assembler.IteratorCloseOnException(context, iterator,
+ &var_exception);
+ }
+
+ BIND(&if_notcallable);
+ {
+    Node* const receiver_str = HeapConstant(isolate()->factory()->set_string());
+ ThrowTypeError(context, MessageTemplate::kPropertyNotFunction, adder,
+ receiver_str, var_result.value());
+ }
+
+ BIND(&if_target_is_undefined);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction,
+ HeapConstant(isolate()->factory()->Map_string()));
+
+ BIND(&exit);
+ args.PopAndReturn(var_result.value());
+}
+
+TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
+ const int kIterableArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const iterable = args.GetOptionalArgumentValue(kIterableArg);
+ Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+
+ Label if_target_is_undefined(this, Label::kDeferred);
+ GotoIf(IsUndefined(new_target), &if_target_is_undefined);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const js_set_fun =
+ LoadContextElement(native_context, Context::JS_SET_FUN_INDEX);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+
+ Label init(this), exit(this), if_targetisnotmodified(this),
+ if_targetismodified(this);
+ Branch(WordEqual(js_set_fun, new_target), &if_targetisnotmodified,
+ &if_targetismodified);
+
+ BIND(&if_targetisnotmodified);
+ {
+ Node* const instance = AllocateJSCollection(js_set_fun);
+ var_result.Bind(instance);
+ Goto(&init);
+ }
+
+ BIND(&if_targetismodified);
+ {
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ Node* const instance = constructor_assembler.EmitFastNewObject(
+ context, js_set_fun, new_target);
+ var_result.Bind(instance);
+ Goto(&init);
+ }
+
+ BIND(&init);
+ Node* table = AllocateOrderedHashTable<OrderedHashSet>();
+ StoreObjectField(var_result.value(), JSSet::kTableOffset, table);
+
+ GotoIf(Word32Or(IsUndefined(iterable), IsNull(iterable)), &exit);
+
+ Label if_notcallable(this);
+ // TODO(gsathya): Add fast path for unmodified maps.
+ Node* const adder = GetProperty(context, var_result.value(),
+ isolate()->factory()->add_string());
+ GotoIf(TaggedIsSmi(adder), &if_notcallable);
+ GotoIfNot(IsCallable(adder), &if_notcallable);
+
+ IteratorBuiltinsAssembler iterator_assembler(this->state());
+ Node* const iterator = iterator_assembler.GetIterator(context, iterable);
+ GotoIf(IsUndefined(iterator), &exit);
+
+ Node* const fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+
+ Label loop(this), if_notobject(this), if_exception(this);
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ Node* const next = iterator_assembler.IteratorStep(
+ context, iterator, &exit, fast_iterator_result_map);
+
+ Node* const next_value = iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map);
+
+ Node* add_call = CallJS(CodeFactory::Call(isolate()), context, adder,
+ var_result.value(), next_value);
+
+ GotoIfException(add_call, &if_exception, &var_exception);
+ Goto(&loop);
+ }
+
+ BIND(&if_exception);
+ {
+ iterator_assembler.IteratorCloseOnException(context, iterator,
+ &var_exception);
+ }
+
+ BIND(&if_notcallable);
+ ThrowTypeError(context, MessageTemplate::kPropertyNotFunction, adder,
+ HeapConstant(isolate()->factory()->add_string()),
+ var_result.value());
+
+ BIND(&if_target_is_undefined);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction,
+ HeapConstant(isolate()->factory()->Set_string()));
+
+ BIND(&exit);
+ args.PopAndReturn(var_result.value());
+}
+
+Node* CollectionsBuiltinsAssembler::CallGetHashRaw(Node* const key) {
+ Node* const function_addr = ExternalConstant(
+ ExternalReference::orderedhashmap_gethash_raw(isolate()));
+ Node* const isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+
+ MachineType type_ptr = MachineType::Pointer();
+ MachineType type_tagged = MachineType::AnyTagged();
+
+ Node* const result = CallCFunction2(type_tagged, type_ptr, type_tagged,
+ function_addr, isolate_ptr, key);
+
+ return result;
+}
+
+void CollectionsBuiltinsAssembler::SameValueZeroSmi(Node* key_smi,
+ Node* candidate_key,
+ Label* if_same,
+ Label* if_not_same) {
+ // If the key is the same, we are done.
+ GotoIf(WordEqual(candidate_key, key_smi), if_same);
+
+  // If the candidate key is a Smi, then it must be different (because
+  // we already checked for equality above).
+ GotoIf(TaggedIsSmi(candidate_key), if_not_same);
+
+  // If the candidate key is not a Smi, we still have to check whether it is
+  // a heap number with the same value.
+ GotoIfNot(IsHeapNumber(candidate_key), if_not_same);
+
+ Node* const candidate_key_number = LoadHeapNumberValue(candidate_key);
+ Node* const key_number = SmiToFloat64(key_smi);
+
+ GotoIf(Float64Equal(candidate_key_number, key_number), if_same);
+
+ Goto(if_not_same);
+}
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey(
+ Node* table, Node* smi_key, Variable* entry_start_position,
+ Label* entry_found, Label* not_found) {
+ Node* const key_untagged = SmiUntag(smi_key);
+ Node* const hash =
+ ChangeInt32ToIntPtr(ComputeIntegerHash(key_untagged, Int32Constant(0)));
+ FindOrderedHashTableEntry<CollectionType>(
+ table, hash,
+ [&](Node* other_key, Label* if_same, Label* if_not_same) {
+ SameValueZeroSmi(smi_key, other_key, if_same, if_not_same);
+ },
+ entry_start_position, entry_found, not_found);
+}
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey(
+ Node* context, Node* table, Node* key_tagged,
+ Variable* entry_start_position, Label* entry_found, Label* not_found) {
+ Node* const hash = ComputeIntegerHashForString(context, key_tagged);
+ FindOrderedHashTableEntry<CollectionType>(
+ table, hash,
+ [&](Node* other_key, Label* if_same, Label* if_not_same) {
+ SameValueZeroString(context, key_tagged, other_key, if_same,
+ if_not_same);
+ },
+ entry_start_position, entry_found, not_found);
+}
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey(
+ Node* context, Node* table, Node* key_heap_number,
+ Variable* entry_start_position, Label* entry_found, Label* not_found) {
+ Node* tagged_hash = CallGetHashRaw(key_heap_number);
+ CSA_ASSERT(this, TaggedIsSmi(tagged_hash));
+ Node* const key_float = LoadHeapNumberValue(key_heap_number);
+ FindOrderedHashTableEntry<CollectionType>(
+ table, SmiUntag(tagged_hash),
+ [&](Node* other_key, Label* if_same, Label* if_not_same) {
+ SameValueZeroHeapNumber(key_float, other_key, if_same, if_not_same);
+ },
+ entry_start_position, entry_found, not_found);
+}
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey(
+ Node* context, Node* table, Node* key, Variable* entry_start_position,
+ Label* entry_found, Label* not_found) {
+ Node* tagged_hash = CallGetHashRaw(key);
+ CSA_ASSERT(this, TaggedIsSmi(tagged_hash));
+ FindOrderedHashTableEntry<CollectionType>(
+ table, SmiUntag(tagged_hash),
+ [&](Node* other_key, Label* if_same, Label* if_not_same) {
+ Branch(WordEqual(key, other_key), if_same, if_not_same);
+ },
+ entry_start_position, entry_found, not_found);
+}
+
+Node* CollectionsBuiltinsAssembler::ComputeIntegerHashForString(
+ Node* context, Node* string_key) {
+ VARIABLE(var_result, MachineType::PointerRepresentation());
+
+ Label hash_not_computed(this), done(this, &var_result);
+ Node* hash =
+ ChangeInt32ToIntPtr(LoadNameHash(string_key, &hash_not_computed));
+ var_result.Bind(hash);
+ Goto(&done);
+
+ BIND(&hash_not_computed);
+ Node* tagged_hash = CallGetHashRaw(string_key);
+ CSA_ASSERT(this, TaggedIsSmi(tagged_hash));
+ var_result.Bind(SmiUntag(tagged_hash));
+ Goto(&done);
+
+ BIND(&done);
+ return var_result.value();
+}
+
+void CollectionsBuiltinsAssembler::SameValueZeroString(Node* context,
+ Node* key_string,
+ Node* candidate_key,
+ Label* if_same,
+ Label* if_not_same) {
+ // If the candidate is not a string, the keys are not equal.
+ GotoIf(TaggedIsSmi(candidate_key), if_not_same);
+ GotoIfNot(IsString(candidate_key), if_not_same);
+
+ Branch(WordEqual(CallBuiltin(Builtins::kStringEqual, context, key_string,
+ candidate_key),
+ TrueConstant()),
+ if_same, if_not_same);
+}
+
+void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber(Node* key_float,
+ Node* candidate_key,
+ Label* if_same,
+ Label* if_not_same) {
+ Label if_smi(this), if_keyisnan(this);
+
+  // A Smi candidate is compared numerically below; any other candidate that
+  // is not a heap number cannot be equal.
+ GotoIf(TaggedIsSmi(candidate_key), &if_smi);
+ GotoIfNot(IsHeapNumber(candidate_key), if_not_same);
+
+ {
+ // {candidate_key} is a heap number.
+ Node* const candidate_float = LoadHeapNumberValue(candidate_key);
+ GotoIf(Float64Equal(key_float, candidate_float), if_same);
+
+ // SameValueZero needs to treat NaNs as equal. First check if {key_float}
+ // is NaN.
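+    // (e.g. new Set([NaN]).has(NaN) must be true, even though NaN !== NaN.)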
+ BranchIfFloat64IsNaN(key_float, &if_keyisnan, if_not_same);
+
+ BIND(&if_keyisnan);
+ {
+ // Return true iff {candidate_key} is NaN.
+ Branch(Float64Equal(candidate_float, candidate_float), if_not_same,
+ if_same);
+ }
+ }
+
+ BIND(&if_smi);
+ {
+ Node* const candidate_float = SmiToFloat64(candidate_key);
+ Branch(Float64Equal(key_float, candidate_float), if_same, if_not_same);
+ }
+}
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
+ Node* table, Node* hash,
+ std::function<void(Node*, Label*, Label*)> key_compare,
+ Variable* entry_start_position, Label* entry_found, Label* not_found) {
+ // Get the index of the bucket.
+ Node* const number_of_buckets = SmiUntag(
+ LoadFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex));
+ Node* const bucket =
+ WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
+ Node* const first_entry = SmiUntag(LoadFixedArrayElement(
+ table, bucket, CollectionType::kHashTableStartIndex * kPointerSize));
+
+ // Walk the bucket chain.
+ {
+ VARIABLE(var_entry, MachineType::PointerRepresentation(), first_entry);
+ Label loop(this, {&var_entry, entry_start_position}),
+ continue_next_entry(this);
+ Goto(&loop);
+ BIND(&loop);
+
+ // If the entry index is the not-found sentinel, we are done.
+ GotoIf(
+ WordEqual(var_entry.value(), IntPtrConstant(CollectionType::kNotFound)),
+ not_found);
+
+ // Make sure the entry index is within range.
+ CSA_ASSERT(
+ this,
+ UintPtrLessThan(
+ var_entry.value(),
+ SmiUntag(SmiAdd(
+ LoadFixedArrayElement(table,
+ CollectionType::kNumberOfElementsIndex),
+ LoadFixedArrayElement(
+ table, CollectionType::kNumberOfDeletedElementsIndex)))));
+
+ // Compute the index of the entry relative to kHashTableStartIndex.
+ Node* entry_start =
+ IntPtrAdd(IntPtrMul(var_entry.value(),
+ IntPtrConstant(CollectionType::kEntrySize)),
+ number_of_buckets);
+ entry_start_position->Bind(entry_start);
+
+ // Load the key from the entry.
+ Node* const candidate_key = LoadFixedArrayElement(
+ table, entry_start,
+ CollectionType::kHashTableStartIndex * kPointerSize);
+
+ key_compare(candidate_key, entry_found, &continue_next_entry);
+
+ BIND(&continue_next_entry);
+ // Load the index of the next entry in the bucket chain.
+ var_entry.Bind(SmiUntag(LoadFixedArrayElement(
+ table, entry_start,
+ (CollectionType::kHashTableStartIndex + CollectionType::kChainOffset) *
+ kPointerSize)));
+
+ Goto(&loop);
+ }
+}
+
+TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
+ Node* table = Parameter(Descriptor::kTable);
+ Node* index = Parameter(Descriptor::kIndex);
+ CSA_ASSERT(this, TaggedIsNotSmi(table));
+ CSA_ASSERT(this, TaggedIsSmi(index));
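+  // {index} was captured against a now-obsolete table. Walk the list of
+  // removed entries recorded during the last rehash and subtract one for
+  // each removed entry that preceded {index}, so that iteration resumes at
+  // the same logical position.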
+ Label return_index(this), return_zero(this);
+
+ // Check if we need to update the {index}.
+ GotoIfNot(SmiLessThan(SmiConstant(Smi::kZero), index), &return_zero);
+
+ // Check if the {table} was cleared.
+ Node* number_of_deleted_elements = LoadAndUntagObjectField(
+ table, OrderedHashTableBase::kNumberOfDeletedElementsOffset);
+ GotoIf(WordEqual(number_of_deleted_elements,
+ IntPtrConstant(OrderedHashTableBase::kClearedTableSentinel)),
+ &return_zero);
+
+ VARIABLE(var_i, MachineType::PointerRepresentation(), IntPtrConstant(0));
+ VARIABLE(var_index, MachineRepresentation::kTagged, index);
+ Label loop(this, {&var_i, &var_index});
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* i = var_i.value();
+ GotoIfNot(IntPtrLessThan(i, number_of_deleted_elements), &return_index);
+ Node* removed_index = LoadFixedArrayElement(
+ table, i, OrderedHashTableBase::kRemovedHolesIndex * kPointerSize);
+ GotoIf(SmiGreaterThanOrEqual(removed_index, index), &return_index);
+ Decrement(var_index, 1, SMI_PARAMETERS);
+ Increment(var_i);
+ Goto(&loop);
+ }
+
+ BIND(&return_index);
+ Return(var_index.value());
+
+ BIND(&return_zero);
+ Return(SmiConstant(Smi::kZero));
+}
+
+template <typename TableType>
+std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::Transition(
+ Node* const table, Node* const index,
+ UpdateInTransition const& update_in_transition) {
+ VARIABLE(var_index, MachineType::PointerRepresentation(), index);
+ VARIABLE(var_table, MachineRepresentation::kTagged, table);
+ Label if_done(this), if_transition(this, Label::kDeferred);
+ Branch(TaggedIsSmi(
+ LoadObjectField(var_table.value(), TableType::kNextTableOffset)),
+ &if_done, &if_transition);
+
+ BIND(&if_transition);
+ {
+ Label loop(this, {&var_table, &var_index}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* table = var_table.value();
+ Node* index = var_index.value();
+
+ Node* next_table = LoadObjectField(table, TableType::kNextTableOffset);
+ GotoIf(TaggedIsSmi(next_table), &done_loop);
+
+ var_table.Bind(next_table);
+ var_index.Bind(
+ SmiUntag(CallBuiltin(Builtins::kOrderedHashTableHealIndex,
+ NoContextConstant(), table, SmiTag(index))));
+ Goto(&loop);
+ }
+ BIND(&done_loop);
+
+ // Update with the new {table} and {index}.
+ update_in_transition(var_table.value(), var_index.value());
+ Goto(&if_done);
+ }
+
+ BIND(&if_done);
+ return std::tuple<Node*, Node*>(var_table.value(), var_index.value());
+}
+
+template <typename IteratorType, typename TableType>
+std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::TransitionAndUpdate(
+ Node* const iterator) {
+ return Transition<TableType>(
+ LoadObjectField(iterator, IteratorType::kTableOffset),
+ LoadAndUntagObjectField(iterator, IteratorType::kIndexOffset),
+ [this, iterator](Node* const table, Node* const index) {
+ // Update the {iterator} with the new state.
+ StoreObjectField(iterator, IteratorType::kTableOffset, table);
+ StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kIndexOffset,
+ SmiTag(index));
+ });
+}
+
+template <typename TableType>
+std::tuple<Node*, Node*, Node*> CollectionsBuiltinsAssembler::NextSkipHoles(
+ Node* table, Node* index, Label* if_end) {
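+  // Deleted entries leave TheHole as their key; advance {index} past those
+  // until a live entry is found or the used capacity is reached.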
+ // Compute the used capacity for the {table}.
+ Node* number_of_buckets =
+ LoadAndUntagObjectField(table, TableType::kNumberOfBucketsOffset);
+ Node* number_of_elements =
+ LoadAndUntagObjectField(table, TableType::kNumberOfElementsOffset);
+ Node* number_of_deleted_elements =
+ LoadAndUntagObjectField(table, TableType::kNumberOfDeletedElementsOffset);
+ Node* used_capacity =
+ IntPtrAdd(number_of_elements, number_of_deleted_elements);
+
+ Node* entry_key;
+ Node* entry_start_position;
+ VARIABLE(var_index, MachineType::PointerRepresentation(), index);
+ Label loop(this, &var_index), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ GotoIfNot(IntPtrLessThan(var_index.value(), used_capacity), if_end);
+ entry_start_position = IntPtrAdd(
+ IntPtrMul(var_index.value(), IntPtrConstant(TableType::kEntrySize)),
+ number_of_buckets);
+ entry_key =
+ LoadFixedArrayElement(table, entry_start_position,
+ TableType::kHashTableStartIndex * kPointerSize);
+ Increment(var_index);
+ Branch(IsTheHole(entry_key), &loop, &done_loop);
+ }
+
+ BIND(&done_loop);
+ return std::tuple<Node*, Node*, Node*>(entry_key, entry_start_position,
+ var_index.value());
+}
+
+TF_BUILTIN(MapGet, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get");
+
+ Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ Node* index = CallBuiltin(Builtins::kMapLookupHashIndex, context, table, key);
+
+ Label if_found(this), if_not_found(this);
+ Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found,
+ &if_not_found);
+
+ BIND(&if_found);
+ Return(LoadFixedArrayElement(table, SmiUntag(index)));
+
+ BIND(&if_not_found);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(MapHas, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has");
+
+ Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ Node* index = CallBuiltin(Builtins::kMapLookupHashIndex, context, table, key);
+
+ Label if_found(this), if_not_found(this);
+ Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found,
+ &if_not_found);
+
+ BIND(&if_found);
+ Return(TrueConstant());
+
+ BIND(&if_not_found);
+ Return(FalseConstant());
+}
+
+TF_BUILTIN(MapPrototypeEntries, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
+ "Map.prototype.entries");
+ Return(AllocateJSCollectionIterator<JSMapIterator>(
+ context, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
+ "get Map.prototype.size");
+ Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ Return(LoadObjectField(table, OrderedHashMap::kNumberOfElementsOffset));
+}
+
+TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
+ const char* const kMethodName = "Map.prototype.forEach";
+ Node* const argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ Node* const receiver = args.GetReceiver();
+ Node* const callback = args.GetOptionalArgumentValue(0);
+ Node* const this_arg = args.GetOptionalArgumentValue(1);
+
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, kMethodName);
+
+ // Ensure that {callback} is actually callable.
+ Label callback_not_callable(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(callback), &callback_not_callable);
+ GotoIfNot(IsCallable(callback), &callback_not_callable);
+
+ VARIABLE(var_index, MachineType::PointerRepresentation(), IntPtrConstant(0));
+ VARIABLE(var_table, MachineRepresentation::kTagged,
+ LoadObjectField(receiver, JSMap::kTableOffset));
+ Label loop(this, {&var_index, &var_table}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // Transition {table} and {index} if there was any modification to
+ // the {receiver} while we're iterating.
+ Node* index = var_index.value();
+ Node* table = var_table.value();
+ std::tie(table, index) =
+ Transition<OrderedHashMap>(table, index, [](Node*, Node*) {});
+
+ // Read the next entry from the {table}, skipping holes.
+ Node* entry_key;
+ Node* entry_start_position;
+ std::tie(entry_key, entry_start_position, index) =
+ NextSkipHoles<OrderedHashMap>(table, index, &done_loop);
+
+ // Load the entry value as well.
+ Node* entry_value = LoadFixedArrayElement(
+ table, entry_start_position,
+ (OrderedHashMap::kHashTableStartIndex + OrderedHashMap::kValueOffset) *
+ kPointerSize);
+
+ // Invoke the {callback} passing the {entry_key}, {entry_value} and the
+ // {receiver}.
+ CallJS(CodeFactory::Call(isolate()), context, callback, this_arg,
+ entry_value, entry_key, receiver);
+
+ // Continue with the next entry.
+ var_index.Bind(index);
+ var_table.Bind(table);
+ Goto(&loop);
+ }
+
+ BIND(&done_loop);
+ args.PopAndReturn(UndefinedConstant());
+
+ BIND(&callback_not_callable);
+ {
+ CallRuntime(Runtime::kThrowCalledNonCallable, context, callback);
+ Unreachable();
+ }
+}
+
+TF_BUILTIN(MapPrototypeKeys, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.keys");
+ Return(AllocateJSCollectionIterator<JSMapIterator>(
+ context, Context::MAP_KEY_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(MapPrototypeValues, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
+ "Map.prototype.values");
+ Return(AllocateJSCollectionIterator<JSMapIterator>(
+ context, Context::MAP_VALUE_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
+ const char* const kMethodName = "Map Iterator.prototype.next";
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ // Ensure that the {receiver} is actually a JSMapIterator.
+ Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid);
+ Node* const receiver_instance_type = LoadInstanceType(receiver);
+ GotoIf(
+ InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_VALUE_ITERATOR_TYPE),
+ &if_receiver_valid);
+ GotoIf(InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_ITERATOR_TYPE),
+ &if_receiver_valid);
+ Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
+ &if_receiver_valid, &if_receiver_invalid);
+ BIND(&if_receiver_invalid);
+ ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ BIND(&if_receiver_valid);
+
+ // Check if the {receiver} is exhausted.
+ VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant());
+ VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant());
+ Label return_value(this, {&var_done, &var_value}), return_entry(this),
+ return_end(this, Label::kDeferred);
+
+ // Transition the {receiver} table if necessary.
+ Node* table;
+ Node* index;
+ std::tie(table, index) =
+ TransitionAndUpdate<JSMapIterator, OrderedHashMap>(receiver);
+
+ // Read the next entry from the {table}, skipping holes.
+ Node* entry_key;
+ Node* entry_start_position;
+ std::tie(entry_key, entry_start_position, index) =
+ NextSkipHoles<OrderedHashMap>(table, index, &return_end);
+ StoreObjectFieldNoWriteBarrier(receiver, JSMapIterator::kIndexOffset,
+ SmiTag(index));
+ var_value.Bind(entry_key);
+ var_done.Bind(FalseConstant());
+
+ // Check how to return the {key} (depending on {receiver} type).
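+  // (keys() yields just the key, values() just the value, and entries() a
+  // fresh [key, value] JSArray boxed in the iterator result.)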
+ GotoIf(InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_ITERATOR_TYPE),
+ &return_value);
+ var_value.Bind(LoadFixedArrayElement(
+ table, entry_start_position,
+ (OrderedHashMap::kHashTableStartIndex + OrderedHashMap::kValueOffset) *
+ kPointerSize));
+ Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
+ &return_value, &return_entry);
+
+ BIND(&return_entry);
+ {
+ Node* result =
+ AllocateJSIteratorResultForEntry(context, entry_key, var_value.value());
+ Return(result);
+ }
+
+ BIND(&return_value);
+ {
+ Node* result =
+ AllocateJSIteratorResult(context, var_value.value(), var_done.value());
+ Return(result);
+ }
+
+ BIND(&return_end);
+ {
+ StoreObjectFieldRoot(receiver, JSMapIterator::kTableOffset,
+ Heap::kEmptyOrderedHashTableRootIndex);
+ Goto(&return_value);
+ }
+}
+
+TF_BUILTIN(SetHas, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has");
+
+ Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+
+ VARIABLE(entry_start_position, MachineType::PointerRepresentation(),
+ IntPtrConstant(0));
+ VARIABLE(result, MachineRepresentation::kTaggedSigned, IntPtrConstant(0));
+ Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
+ entry_found(this), not_found(this), done(this);
+
+ GotoIf(TaggedIsSmi(key), &if_key_smi);
+ GotoIf(IsString(key), &if_key_string);
+ GotoIf(IsHeapNumber(key), &if_key_heap_number);
+
+ FindOrderedHashTableEntryForOtherKey<OrderedHashSet>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+
+ BIND(&if_key_smi);
+ {
+ FindOrderedHashTableEntryForSmiKey<OrderedHashSet>(
+ table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&if_key_string);
+ {
+ FindOrderedHashTableEntryForStringKey<OrderedHashSet>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&if_key_heap_number);
+ {
+ FindOrderedHashTableEntryForHeapNumberKey<OrderedHashSet>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&entry_found);
+ Return(TrueConstant());
+
+ BIND(&not_found);
+ Return(FalseConstant());
+}
+
+TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
+ "Set.prototype.entries");
+ Return(AllocateJSCollectionIterator<JSSetIterator>(
+ context, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
+ "get Set.prototype.size");
+ Node* const table = LoadObjectField(receiver, JSSet::kTableOffset);
+ Return(LoadObjectField(table, OrderedHashSet::kNumberOfElementsOffset));
+}
+
+TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
+ const char* const kMethodName = "Set.prototype.forEach";
+ Node* const argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ Node* const receiver = args.GetReceiver();
+ Node* const callback = args.GetOptionalArgumentValue(0);
+ Node* const this_arg = args.GetOptionalArgumentValue(1);
+
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, kMethodName);
+
+ // Ensure that {callback} is actually callable.
+ Label callback_not_callable(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(callback), &callback_not_callable);
+ GotoIfNot(IsCallable(callback), &callback_not_callable);
+
+ VARIABLE(var_index, MachineType::PointerRepresentation(), IntPtrConstant(0));
+ VARIABLE(var_table, MachineRepresentation::kTagged,
+ LoadObjectField(receiver, JSSet::kTableOffset));
+ Label loop(this, {&var_index, &var_table}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // Transition {table} and {index} if there was any modification to
+ // the {receiver} while we're iterating.
+ Node* index = var_index.value();
+ Node* table = var_table.value();
+ std::tie(table, index) =
+ Transition<OrderedHashSet>(table, index, [](Node*, Node*) {});
+
+ // Read the next entry from the {table}, skipping holes.
+ Node* entry_key;
+ Node* entry_start_position;
+ std::tie(entry_key, entry_start_position, index) =
+ NextSkipHoles<OrderedHashSet>(table, index, &done_loop);
+
+ // Invoke the {callback} passing the {entry_key} (twice) and the {receiver}.
+ CallJS(CodeFactory::Call(isolate()), context, callback, this_arg, entry_key,
+ entry_key, receiver);
+
+ // Continue with the next entry.
+ var_index.Bind(index);
+ var_table.Bind(table);
+ Goto(&loop);
+ }
+
+ BIND(&done_loop);
+ args.PopAndReturn(UndefinedConstant());
+
+ BIND(&callback_not_callable);
+ {
+ CallRuntime(Runtime::kThrowCalledNonCallable, context, callback);
+ Unreachable();
+ }
+}
+
+TF_BUILTIN(SetPrototypeValues, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
+ "Set.prototype.values");
+ Return(AllocateJSCollectionIterator<JSSetIterator>(
+ context, Context::SET_VALUE_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
+ const char* const kMethodName = "Set Iterator.prototype.next";
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ // Ensure that the {receiver} is actually a JSSetIterator.
+ Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid);
+ Node* const receiver_instance_type = LoadInstanceType(receiver);
+ GotoIf(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE),
+ &if_receiver_valid);
+ Branch(
+ InstanceTypeEqual(receiver_instance_type, JS_SET_KEY_VALUE_ITERATOR_TYPE),
+ &if_receiver_valid, &if_receiver_invalid);
+ BIND(&if_receiver_invalid);
+ ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ BIND(&if_receiver_valid);
+
+ // Check if the {receiver} is exhausted.
+ VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant());
+ VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant());
+ Label return_value(this, {&var_done, &var_value}), return_entry(this),
+ return_end(this, Label::kDeferred);
+
+ // Transition the {receiver} table if necessary.
+ Node* table;
+ Node* index;
+ std::tie(table, index) =
+ TransitionAndUpdate<JSSetIterator, OrderedHashSet>(receiver);
+
+ // Read the next entry from the {table}, skipping holes.
+ Node* entry_key;
+ Node* entry_start_position;
+ std::tie(entry_key, entry_start_position, index) =
+ NextSkipHoles<OrderedHashSet>(table, index, &return_end);
+ StoreObjectFieldNoWriteBarrier(receiver, JSSetIterator::kIndexOffset,
+ SmiTag(index));
+ var_value.Bind(entry_key);
+ var_done.Bind(FalseConstant());
+
+ // Check how to return the {key} (depending on {receiver} type).
+ Branch(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE),
+ &return_value, &return_entry);
+
+ BIND(&return_entry);
+ {
+ Node* result = AllocateJSIteratorResultForEntry(context, var_value.value(),
+ var_value.value());
+ Return(result);
+ }
+
+ BIND(&return_value);
+ {
+ Node* result =
+ AllocateJSIteratorResult(context, var_value.value(), var_done.value());
+ Return(result);
+ }
+
+ BIND(&return_end);
+ {
+ StoreObjectFieldRoot(receiver, JSSetIterator::kTableOffset,
+ Heap::kEmptyOrderedHashTableRootIndex);
+ Goto(&return_value);
+ }
+}
+
+TF_BUILTIN(MapLookupHashIndex, CollectionsBuiltinsAssembler) {
+ Node* const table = Parameter(Descriptor::kTable);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ VARIABLE(entry_start_position, MachineType::PointerRepresentation(),
+ IntPtrConstant(0));
+ VARIABLE(result, MachineRepresentation::kTaggedSigned, IntPtrConstant(0));
+ Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
+ entry_found(this), not_found(this), done(this);
+
+ GotoIf(TaggedIsSmi(key), &if_key_smi);
+ GotoIf(IsString(key), &if_key_string);
+ GotoIf(IsHeapNumber(key), &if_key_heap_number);
+
+ FindOrderedHashTableEntryForOtherKey<OrderedHashMap>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+
+ BIND(&if_key_smi);
+ {
+ FindOrderedHashTableEntryForSmiKey<OrderedHashMap>(
+ table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&if_key_string);
+ {
+ FindOrderedHashTableEntryForStringKey<OrderedHashMap>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&if_key_heap_number);
+ {
+ FindOrderedHashTableEntryForHeapNumberKey<OrderedHashMap>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&entry_found);
+ Node* index = IntPtrAdd(entry_start_position.value(),
+ IntPtrConstant(OrderedHashMap::kHashTableStartIndex +
+ OrderedHashMap::kValueOffset));
+ result.Bind(SmiTag(index));
+ Goto(&done);
+
+ BIND(&not_found);
+ result.Bind(SmiConstant(-1));
+ Goto(&done);
+
+ BIND(&done);
+ Return(result.value());
+}
+
+TF_BUILTIN(WeakMapLookupHashIndex, CollectionsBuiltinsAssembler) {
+ Node* const table = Parameter(Descriptor::kTable);
+ Node* const key = Parameter(Descriptor::kKey);
+
+ Label if_found(this), if_not_found(this);
+
+ Node* const capacity =
+ SmiUntag(LoadFixedArrayElement(table, WeakHashTable::kCapacityIndex));
+ Node* const mask = IntPtrSub(capacity, IntPtrConstant(1));
+
+ Node* const hash = SmiUntag(CallGetHashRaw(key));
+
+ GotoIf(IntPtrLessThan(hash, IntPtrConstant(0)), &if_not_found);
+
+ // See HashTable::FirstProbe().
+ Node* entry = WordAnd(hash, mask);
+
+ VARIABLE(var_count, MachineType::PointerRepresentation(), IntPtrConstant(0));
+ VARIABLE(var_entry, MachineType::PointerRepresentation(), entry);
+ Variable* loop_vars[] = {&var_count, &var_entry};
+ Label loop(this, arraysize(loop_vars), loop_vars);
+ Goto(&loop);
+ BIND(&loop);
+ Node* index;
+ {
+ Node* entry = var_entry.value();
+
+ index = IntPtrMul(entry, IntPtrConstant(WeakHashTable::kEntrySize));
+ index =
+ IntPtrAdd(index, IntPtrConstant(WeakHashTable::kElementsStartIndex));
+
+ Node* current = LoadFixedArrayElement(table, index);
+ GotoIf(WordEqual(current, UndefinedConstant()), &if_not_found);
+ GotoIf(WordEqual(current, key), &if_found);
+
+ // See HashTable::NextProbe().
+ Increment(var_count);
+ entry = WordAnd(IntPtrAdd(entry, var_count.value()), mask);
+
+ var_entry.Bind(entry);
+ Goto(&loop);
+ }
+
+ BIND(&if_not_found);
+ Return(SmiConstant(-1));
+
+ BIND(&if_found);
+ Return(SmiTag(IntPtrAdd(index, IntPtrConstant(1))));
+}
+
+TF_BUILTIN(WeakMapGet, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Label return_undefined(this);
+
+ ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
+ "WeakMap.prototype.get");
+
+ GotoIf(TaggedIsSmi(key), &return_undefined);
+ GotoIfNot(IsJSReceiver(key), &return_undefined);
+
+ Node* const table = LoadObjectField(receiver, JSWeakCollection::kTableOffset);
+
+ Node* const index =
+ CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
+
+ GotoIf(WordEqual(index, SmiConstant(-1)), &return_undefined);
+
+ Return(LoadFixedArrayElement(table, SmiUntag(index)));
+
+ BIND(&return_undefined);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(WeakMapHas, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Label return_false(this);
+
+ ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
+                         "WeakMap.prototype.has");
+
+ GotoIf(TaggedIsSmi(key), &return_false);
+ GotoIfNot(IsJSReceiver(key), &return_false);
+
+ Node* const table = LoadObjectField(receiver, JSWeakCollection::kTableOffset);
+
+ Node* const index =
+ CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
+
+ GotoIf(WordEqual(index, SmiConstant(-1)), &return_false);
+
+ Return(TrueConstant());
+
+ BIND(&return_false);
+ Return(FalseConstant());
+}
+
+TF_BUILTIN(WeakSetHas, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Label return_false(this);
+
+ ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
+                         "WeakSet.prototype.has");
+
+ GotoIf(TaggedIsSmi(key), &return_false);
+ GotoIfNot(IsJSReceiver(key), &return_false);
+
+ Node* const table = LoadObjectField(receiver, JSWeakCollection::kTableOffset);
+
+ Node* const index =
+ CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
+
+ GotoIf(WordEqual(index, SmiConstant(-1)), &return_false);
+
+ Return(TrueConstant());
+
+ BIND(&return_false);
+ Return(FalseConstant());
+}
+
+} // namespace internal
+} // namespace v8
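For reference, a minimal standalone model (not V8 code) of the open-addressing probe
sequence that WeakMapLookupHashIndex implements above, assuming a power-of-two
capacity, two slots per entry ({key, value}) and nullptr as the "undefined" sentinel;
the table header that kElementsStartIndex accounts for is omitted:

#include <cstdint>
#include <vector>

struct WeakTableModel {
  static constexpr int kEntrySize = 2;      // key slot followed by value slot
  static constexpr int kElementsStart = 0;  // real table keeps header fields here
  std::vector<const void*> slots;           // capacity * kEntrySize entries
  uint32_t capacity;                        // assumed to be a power of two

  // Returns (index of the key slot) + 1, i.e. the value slot, or -1 when the
  // key is absent -- mirroring the Smi result of the builtin.
  int LookupHashIndex(const void* key, uint32_t hash) const {
    const uint32_t mask = capacity - 1;
    uint32_t entry = hash & mask;                    // HashTable::FirstProbe()
    for (uint32_t count = 1;; ++count) {
      const int index = kElementsStart + static_cast<int>(entry) * kEntrySize;
      const void* current = slots[index];
      if (current == nullptr) return -1;             // empty slot: not found
      if (current == key) return index + 1;          // value follows the key
      entry = (entry + count) & mask;                // HashTable::NextProbe()
    }
  }
};

The probe stride grows by one on each miss (1, 2, 3, ...), the triangular probing the
builtin inherits from HashTable::NextProbe, which visits every slot of a power-of-two
table before repeating.
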
diff --git a/deps/v8/src/builtins/builtins-collections.cc b/deps/v8/src/builtins/builtins-collections.cc
new file mode 100644
index 0000000000..0497eaaac1
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-collections.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BUILTIN(MapClear) {
+ HandleScope scope(isolate);
+ const char* const kMethodName = "Map.prototype.clear";
+ CHECK_RECEIVER(JSMap, map, kMethodName);
+ JSMap::Clear(map);
+ return isolate->heap()->undefined_value();
+}
+
+BUILTIN(SetClear) {
+ HandleScope scope(isolate);
+ const char* const kMethodName = "Set.prototype.clear";
+ CHECK_RECEIVER(JSSet, set, kMethodName);
+ JSSet::Clear(set);
+ return isolate->heap()->undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index a43fe136d0..c3a7bd6557 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -14,45 +14,114 @@ namespace internal {
// -----------------------------------------------------------------------------
// Console
-#define CONSOLE_METHOD_LIST(V) \
- V(Debug) \
- V(Error) \
- V(Info) \
- V(Log) \
- V(Warn) \
- V(Dir) \
- V(DirXml) \
- V(Table) \
- V(Trace) \
- V(Group) \
- V(GroupCollapsed) \
- V(GroupEnd) \
- V(Clear) \
- V(Count) \
- V(Assert) \
- V(MarkTimeline) \
- V(Profile) \
- V(ProfileEnd) \
- V(Timeline) \
- V(TimelineEnd) \
- V(Time) \
- V(TimeEnd) \
- V(TimeStamp)
-
-#define CONSOLE_BUILTIN_IMPLEMENTATION(name) \
- BUILTIN(Console##name) { \
- HandleScope scope(isolate); \
- if (isolate->console_delegate()) { \
- debug::ConsoleCallArguments wrapper(args); \
- isolate->console_delegate()->name(wrapper); \
- CHECK(!isolate->has_pending_exception()); \
- CHECK(!isolate->has_scheduled_exception()); \
- } \
- return isolate->heap()->undefined_value(); \
+#define CONSOLE_METHOD_LIST(V) \
+ V(Debug, debug) \
+ V(Error, error) \
+ V(Info, info) \
+ V(Log, log) \
+ V(Warn, warn) \
+ V(Dir, dir) \
+ V(DirXml, dirXml) \
+ V(Table, table) \
+ V(Trace, trace) \
+ V(Group, group) \
+ V(GroupCollapsed, groupCollapsed) \
+ V(GroupEnd, groupEnd) \
+ V(Clear, clear) \
+ V(Count, count) \
+ V(Assert, assert) \
+ V(MarkTimeline, markTimeline) \
+ V(Profile, profile) \
+ V(ProfileEnd, profileEnd) \
+ V(Timeline, timeline) \
+ V(TimelineEnd, timelineEnd) \
+ V(Time, time) \
+ V(TimeEnd, timeEnd) \
+ V(TimeStamp, timeStamp)
+
+namespace {
+void ConsoleCall(
+ Isolate* isolate, internal::BuiltinArguments& args,
+ void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext&)) {
+ HandleScope scope(isolate);
+ if (!isolate->console_delegate()) return;
+ debug::ConsoleCallArguments wrapper(args);
+ Handle<Object> context_id_obj = JSObject::GetDataProperty(
+ args.target(), isolate->factory()->console_context_id_symbol());
+ int context_id =
+ context_id_obj->IsSmi() ? Handle<Smi>::cast(context_id_obj)->value() : 0;
+ Handle<Object> context_name_obj = JSObject::GetDataProperty(
+ args.target(), isolate->factory()->console_context_name_symbol());
+ Handle<String> context_name = context_name_obj->IsString()
+ ? Handle<String>::cast(context_name_obj)
+ : isolate->factory()->anonymous_string();
+ (isolate->console_delegate()->*func)(
+ wrapper,
+ v8::debug::ConsoleContext(context_id, Utils::ToLocal(context_name)));
+ CHECK(!isolate->has_pending_exception());
+ CHECK(!isolate->has_scheduled_exception());
+}
+} // namespace
+
+#define CONSOLE_BUILTIN_IMPLEMENTATION(call, name) \
+ BUILTIN(Console##call) { \
+ ConsoleCall(isolate, args, &debug::ConsoleDelegate::call); \
+ return isolate->heap()->undefined_value(); \
}
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
#undef CONSOLE_BUILTIN_IMPLEMENTATION
+namespace {
+void InstallContextFunction(Handle<JSObject> target, const char* name,
+ Builtins::Name call, int context_id,
+ Handle<Object> context_name) {
+ Factory* const factory = target->GetIsolate()->factory();
+
+ Handle<Code> call_code(target->GetIsolate()->builtins()->builtin(call));
+
+ Handle<String> name_string =
+ Name::ToFunctionName(factory->InternalizeUtf8String(name))
+ .ToHandleChecked();
+ Handle<JSFunction> fun =
+ factory->NewFunctionWithoutPrototype(name_string, call_code, SLOPPY);
+ fun->shared()->set_native(true);
+ fun->shared()->DontAdaptArguments();
+ fun->shared()->set_length(1);
+
+ JSObject::AddProperty(fun, factory->console_context_id_symbol(),
+ handle(Smi::FromInt(context_id), target->GetIsolate()),
+ NONE);
+ if (context_name->IsString()) {
+ JSObject::AddProperty(fun, factory->console_context_name_symbol(),
+ context_name, NONE);
+ }
+ JSObject::AddProperty(target, name_string, fun, NONE);
+}
+} // namespace
+
+BUILTIN(ConsoleContext) {
+ HandleScope scope(isolate);
+
+ Factory* const factory = isolate->factory();
+ Handle<String> name = factory->InternalizeUtf8String("Context");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ Handle<JSObject> empty = factory->NewJSObject(isolate->object_function());
+ JSFunction::SetPrototype(cons, empty);
+ Handle<JSObject> context = factory->NewJSObject(cons, TENURED);
+ DCHECK(context->IsJSObject());
+ int id = isolate->last_console_context_id() + 1;
+ isolate->set_last_console_context_id(id);
+
+#define CONSOLE_BUILTIN_SETUP(call, name) \
+ InstallContextFunction(context, #name, Builtins::kConsole##call, id, \
+ args.at(1));
+ CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_SETUP)
+#undef CONSOLE_BUILTIN_SETUP
+
+ return *context;
+}
+
#undef CONSOLE_METHOD_LIST
} // namespace internal
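The generated console builtins above all route through ConsoleCall, which takes a
pointer-to-member of the console delegate so that each macro-generated body stays a
one-liner; the real helper additionally reads the context id/name properties that
ConsoleContext installs on the function. A minimal standalone sketch of just the
dispatch pattern (Delegate and Args are illustrative stand-ins, not the V8
debug::ConsoleDelegate / BuiltinArguments types):

#include <iostream>

struct Args { int n; };

struct Delegate {
  void Log(const Args& a) { std::cout << "log " << a.n << "\n"; }
  void Error(const Args& a) { std::cout << "error " << a.n << "\n"; }
};

// One shared helper receives the member-function pointer, mirroring how
// ConsoleCall keeps the per-method boilerplate out of the macro expansion.
void ConsoleCall(Delegate* delegate, const Args& args,
                 void (Delegate::*func)(const Args&)) {
  if (!delegate) return;  // no delegate installed: the builtin is a no-op
  (delegate->*func)(args);
}

#define CONSOLE_METHOD_LIST(V) V(Log, log) V(Error, error)

#define CONSOLE_BUILTIN_IMPLEMENTATION(call, name) \
  void Console##call(Delegate* d, const Args& a) { \
    ConsoleCall(d, a, &Delegate::call);            \
  }
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
#undef CONSOLE_BUILTIN_IMPLEMENTATION

int main() {
  Delegate d;
  ConsoleLog(&d, {1});       // prints "log 1"
  ConsoleError(&d, {2});     // prints "error 2"
  ConsoleLog(nullptr, {3});  // dropped, like a missing console delegate
}
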
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 1769e65e83..d7a2f8e34e 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-constructor-gen.h"
#include "src/ast/ast.h"
+#include "src/builtins/builtins-call-gen.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
@@ -17,17 +18,73 @@
namespace v8 {
namespace internal {
+void Builtins::Generate_ConstructVarargs(MacroAssembler* masm) {
+ Generate_CallOrConstructVarargs(masm,
+ masm->isolate()->builtins()->Construct());
+}
+
void Builtins::Generate_ConstructForwardVarargs(MacroAssembler* masm) {
- Generate_ForwardVarargs(masm, masm->isolate()->builtins()->Construct());
+ Generate_CallOrConstructForwardVarargs(
+ masm, masm->isolate()->builtins()->Construct());
}
void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
- Generate_ForwardVarargs(masm,
- masm->isolate()->builtins()->ConstructFunction());
+ Generate_CallOrConstructForwardVarargs(
+ masm, masm->isolate()->builtins()->ConstructFunction());
+}
+
+TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
+ Node* target = Parameter(ConstructWithArrayLikeDescriptor::kTarget);
+ Node* new_target = Parameter(ConstructWithArrayLikeDescriptor::kNewTarget);
+ Node* arguments_list =
+ Parameter(ConstructWithArrayLikeDescriptor::kArgumentsList);
+ Node* context = Parameter(ConstructWithArrayLikeDescriptor::kContext);
+ CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
+}
+
+TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
+ Node* target = Parameter(ConstructWithSpreadDescriptor::kTarget);
+ Node* new_target = Parameter(ConstructWithSpreadDescriptor::kNewTarget);
+ Node* spread = Parameter(ConstructWithSpreadDescriptor::kSpread);
+ Node* args_count = Parameter(ConstructWithSpreadDescriptor::kArgumentsCount);
+ Node* context = Parameter(ConstructWithSpreadDescriptor::kContext);
+ CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
typedef compiler::Node Node;
+Node* ConstructorBuiltinsAssembler::CopyFixedArrayBase(Node* fixed_array) {
+ Label if_fixed_array(this), if_fixed_double_array(this), done(this);
+ VARIABLE(result, MachineRepresentation::kTagged);
+ Node* capacity = LoadAndUntagFixedArrayBaseLength(fixed_array);
+ Branch(IsFixedDoubleArrayMap(LoadMap(fixed_array)), &if_fixed_double_array,
+ &if_fixed_array);
+ BIND(&if_fixed_double_array);
+ {
+ ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
+ Node* copy = AllocateFixedArray(kind, capacity);
+ CopyFixedArrayElements(kind, fixed_array, kind, copy, capacity, capacity,
+ SKIP_WRITE_BARRIER);
+ result.Bind(copy);
+ Goto(&done);
+ }
+
+ BIND(&if_fixed_array);
+ {
+ ElementsKind kind = PACKED_ELEMENTS;
+ Node* copy = AllocateFixedArray(kind, capacity);
+ CopyFixedArrayElements(kind, fixed_array, kind, copy, capacity, capacity,
+ UPDATE_WRITE_BARRIER);
+ result.Bind(copy);
+ Goto(&done);
+ }
+ BIND(&done);
+ // Manually copy over the map of the incoming array to preserve the elements
+ // kind.
+ StoreMap(result.value(), LoadMap(fixed_array));
+ return result.value();
+}
+
Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
Node* feedback_vector,
Node* slot,
@@ -36,107 +93,36 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
Factory* factory = isolate->factory();
IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
- // Create a new closure from the given function info in new space
- Node* result = Allocate(JSFunction::kSize);
-
- // Calculate the index of the map we should install on the function based on
- // the FunctionKind and LanguageMode of the function.
- // Note: Must be kept in sync with Context::FunctionMapIndex
Node* compiler_hints =
LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
MachineType::Uint32());
- Node* is_strict = Word32And(
- compiler_hints, Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
-
- Label if_normal(this), if_generator(this), if_async(this),
- if_class_constructor(this), if_function_without_prototype(this),
- load_map(this);
- VARIABLE(map_index, MachineType::PointerRepresentation());
-
- STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
- Node* is_not_normal =
- Word32And(compiler_hints,
- Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
- GotoIfNot(is_not_normal, &if_normal);
-
- Node* is_generator = Word32And(
- compiler_hints, Int32Constant(FunctionKind::kGeneratorFunction
- << SharedFunctionInfo::kFunctionKindShift));
- GotoIf(is_generator, &if_generator);
-
- Node* is_async = Word32And(
- compiler_hints, Int32Constant(FunctionKind::kAsyncFunction
- << SharedFunctionInfo::kFunctionKindShift));
- GotoIf(is_async, &if_async);
-
- Node* is_class_constructor = Word32And(
- compiler_hints, Int32Constant(FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift));
- GotoIf(is_class_constructor, &if_class_constructor);
-
- if (FLAG_debug_code) {
- // Function must be a function without a prototype.
- CSA_ASSERT(
- this,
- Word32And(compiler_hints,
- Int32Constant((FunctionKind::kAccessorFunction |
- FunctionKind::kArrowFunction |
- FunctionKind::kConciseMethod)
- << SharedFunctionInfo::kFunctionKindShift)));
- }
- Goto(&if_function_without_prototype);
-
- BIND(&if_normal);
- {
- map_index.Bind(SelectIntPtrConstant(is_strict,
- Context::STRICT_FUNCTION_MAP_INDEX,
- Context::SLOPPY_FUNCTION_MAP_INDEX));
- Goto(&load_map);
- }
-
- BIND(&if_generator);
- {
- Node* is_async =
- Word32And(compiler_hints,
- Int32Constant(FunctionKind::kAsyncFunction
- << SharedFunctionInfo::kFunctionKindShift));
- map_index.Bind(SelectIntPtrConstant(
- is_async, Context::ASYNC_GENERATOR_FUNCTION_MAP_INDEX,
- Context::GENERATOR_FUNCTION_MAP_INDEX));
- Goto(&load_map);
- }
-
- BIND(&if_async);
- {
- map_index.Bind(IntPtrConstant(Context::ASYNC_FUNCTION_MAP_INDEX));
- Goto(&load_map);
- }
- BIND(&if_class_constructor);
- {
- map_index.Bind(IntPtrConstant(Context::CLASS_FUNCTION_MAP_INDEX));
- Goto(&load_map);
- }
-
- BIND(&if_function_without_prototype);
- {
- map_index.Bind(
- IntPtrConstant(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
- Goto(&load_map);
- }
-
- BIND(&load_map);
+ // The calculation of |function_map_index| must be in sync with
+ // SharedFunctionInfo::function_map_index().
+ Node* function_map_index =
+ IntPtrAdd(DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(
+ compiler_hints),
+ IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
+ CSA_ASSERT(this, UintPtrLessThanOrEqual(
+ function_map_index,
+ IntPtrConstant(Context::LAST_FUNCTION_MAP_INDEX)));
// Get the function map in the current native context and set that
// as the map of the allocated object.
Node* native_context = LoadNativeContext(context);
- Node* map_slot_value =
- LoadFixedArrayElement(native_context, map_index.value());
- StoreMapNoWriteBarrier(result, map_slot_value);
+ Node* function_map = LoadContextElement(native_context, function_map_index);
+
+ // Create a new closure from the given function info in new space
+ Node* instance_size_in_bytes =
+ TimesPointerSize(LoadMapInstanceSize(function_map));
+ Node* result = Allocate(instance_size_in_bytes);
+ StoreMapNoWriteBarrier(result, function_map);
+ InitializeJSObjectBody(result, function_map, instance_size_in_bytes,
+ JSFunction::kSize);
// Initialize the rest of the function.
Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
- StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
+ StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOrHashOffset,
empty_fixed_array);
StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
empty_fixed_array);
@@ -164,23 +150,27 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
}
{
// If the feedback vector has optimized code, check whether it is marked
- // for deopt and, if so, clear it.
- Label optimized_code_ok(this);
+ // for deopt and, if so, clear the slot.
+ Label optimized_code_ok(this), clear_optimized_code(this);
Node* literals = LoadObjectField(literals_cell, Cell::kValueOffset);
GotoIfNot(IsFeedbackVector(literals), &optimized_code_ok);
- Node* optimized_code_cell =
+ Node* optimized_code_cell_slot =
LoadFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex);
+ GotoIf(TaggedIsSmi(optimized_code_cell_slot), &optimized_code_ok);
+
Node* optimized_code =
- LoadWeakCellValue(optimized_code_cell, &optimized_code_ok);
+ LoadWeakCellValue(optimized_code_cell_slot, &clear_optimized_code);
Node* code_flags = LoadObjectField(
optimized_code, Code::kKindSpecificFlags1Offset, MachineType::Uint32());
Node* marked_for_deopt =
DecodeWord32<Code::MarkedForDeoptimizationField>(code_flags);
- GotoIf(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok);
+ Branch(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok,
+ &clear_optimized_code);
- // Code is marked for deopt, clear the optimized code slot.
+ // Cell is empty or code is marked for deopt, clear the optimized code slot.
+ BIND(&clear_optimized_code);
StoreFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex,
- EmptyWeakCellConstant(), SKIP_WRITE_BARRIER);
+ SmiConstant(0), SKIP_WRITE_BARRIER);
Goto(&optimized_code_ok);
BIND(&optimized_code_ok);
@@ -207,6 +197,24 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
return result;
}
+Node* ConstructorBuiltinsAssembler::LoadFeedbackVectorSlot(
+ Node* closure, Node* literal_index) {
+ Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
+ Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
+ return LoadFixedArrayElement(feedback_vector, literal_index, 0,
+ CodeStubAssembler::SMI_PARAMETERS);
+}
+
+Node* ConstructorBuiltinsAssembler::NotHasBoilerplate(Node* literal_site) {
+ return TaggedIsSmi(literal_site);
+}
+
+Node* ConstructorBuiltinsAssembler::LoadAllocationSiteBoilerplate(Node* site) {
+ CSA_ASSERT(this, IsAllocationSite(site));
+ return LoadObjectField(site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset);
+}
+
TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
Node* shared = Parameter(FastNewClosureDescriptor::kSharedFunctionInfo);
Node* context = Parameter(FastNewClosureDescriptor::kContext);
@@ -304,7 +312,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
ParameterMode mode = INTPTR_PARAMETERS;
Node* min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
Node* length = IntPtrAdd(slots, min_context_slots);
- Node* size = GetFixedArrayAllocationSize(length, FAST_ELEMENTS, mode);
+ Node* size = GetFixedArrayAllocationSize(length, PACKED_ELEMENTS, mode);
// Create a new closure from the given function info in new space
Node* function_context = AllocateInNewSpace(size);
@@ -340,7 +348,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
// Initialize the rest of the slots to undefined.
Node* undefined = UndefinedConstant();
BuildFastFixedArrayForEach(
- function_context, FAST_ELEMENTS, min_context_slots, length,
+ function_context, PACKED_ELEMENTS, min_context_slots, length,
[this, undefined](Node* context, Node* offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, context, offset,
undefined);
@@ -374,14 +382,11 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
Label call_runtime(this, Label::kDeferred), end(this);
VARIABLE(result, MachineRepresentation::kTagged);
-
- Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
- Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
- Node* boilerplate = LoadFixedArrayElement(feedback_vector, literal_index, 0,
- CodeStubAssembler::SMI_PARAMETERS);
- GotoIf(IsUndefined(boilerplate), &call_runtime);
-
+ Node* literal_site = LoadFeedbackVectorSlot(closure, literal_index);
+ GotoIf(NotHasBoilerplate(literal_site), &call_runtime);
{
+ Node* boilerplate = literal_site;
+ CSA_ASSERT(this, IsJSRegExp(boilerplate));
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Node* copy = Allocate(size);
for (int offset = 0; offset < size; offset += kPointerSize) {
@@ -452,24 +457,18 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
return_result(this);
VARIABLE(result, MachineRepresentation::kTagged);
- Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
- Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
- Node* allocation_site = LoadFixedArrayElement(
- feedback_vector, literal_index, 0, CodeStubAssembler::SMI_PARAMETERS);
-
- GotoIf(IsUndefined(allocation_site), call_runtime);
- allocation_site = LoadFixedArrayElement(feedback_vector, literal_index, 0,
- CodeStubAssembler::SMI_PARAMETERS);
+ Node* allocation_site = LoadFeedbackVectorSlot(closure, literal_index);
+ GotoIf(NotHasBoilerplate(allocation_site), call_runtime);
- Node* boilerplate =
- LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+ Node* boilerplate = LoadAllocationSiteBoilerplate(allocation_site);
Node* boilerplate_map = LoadMap(boilerplate);
+ CSA_ASSERT(this, IsJSArrayMap(boilerplate_map));
Node* boilerplate_elements = LoadElements(boilerplate);
Node* capacity = LoadFixedArrayBaseLength(boilerplate_elements);
allocation_site =
allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
- Node* zero = SmiConstant(Smi::kZero);
+ Node* zero = SmiConstant(0);
GotoIf(SmiEqual(capacity, zero), &zero_capacity);
Node* elements_map = LoadMap(boilerplate_elements);
@@ -478,25 +477,10 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
GotoIf(IsFixedArrayMap(elements_map), &fast_elements);
{
Comment("fast double elements path");
- if (FLAG_debug_code) {
- Label correct_elements_map(this), abort(this, Label::kDeferred);
- Branch(IsFixedDoubleArrayMap(elements_map), &correct_elements_map,
- &abort);
-
- BIND(&abort);
- {
- Node* abort_id = SmiConstant(
- Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
- CallRuntime(Runtime::kAbort, context, abort_id);
- result.Bind(UndefinedConstant());
- Goto(&return_result);
- }
- BIND(&correct_elements_map);
- }
-
+ if (FLAG_debug_code) CSA_CHECK(this, IsFixedDoubleArrayMap(elements_map));
Node* array =
NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
- allocation_site, capacity, FAST_DOUBLE_ELEMENTS);
+ allocation_site, capacity, PACKED_DOUBLE_ELEMENTS);
result.Bind(array);
Goto(&return_result);
}
@@ -506,7 +490,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
Comment("fast elements path");
Node* array =
NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
- allocation_site, capacity, FAST_ELEMENTS);
+ allocation_site, capacity, PACKED_ELEMENTS);
result.Bind(array);
Goto(&return_result);
}
@@ -536,7 +520,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
BIND(&allocate_without_elements);
{
Node* array = AllocateUninitializedJSArrayWithoutElements(
- FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
+ PACKED_ELEMENTS, boilerplate_map, length.value(), allocation_site);
StoreObjectField(array, JSObject::kElementsOffset, elements.value());
result.Bind(array);
Goto(&return_result);
@@ -561,13 +545,15 @@ void ConstructorBuiltinsAssembler::CreateFastCloneShallowArrayBuiltin(
BIND(&call_runtime);
{
Comment("call runtime");
- Node* flags =
- SmiConstant(Smi::FromInt(ArrayLiteral::kShallowElements |
- (allocation_site_mode == TRACK_ALLOCATION_SITE
- ? 0
- : ArrayLiteral::kDisableMementos)));
+ int flags = AggregateLiteral::kIsShallow;
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ // Force initial allocation sites on the initial literal setup step.
+ flags |= AggregateLiteral::kNeedsInitialAllocationSite;
+ } else {
+ flags |= AggregateLiteral::kDisableMementos;
+ }
Return(CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
- literal_index, constant_elements, flags));
+ literal_index, constant_elements, SmiConstant(flags)));
}
}
@@ -581,31 +567,28 @@ TF_BUILTIN(FastCloneShallowArrayDontTrack, ConstructorBuiltinsAssembler) {
Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Label* call_runtime, Node* closure, Node* literals_index) {
- Node* allocation_site;
- {
- // Load the alloation site.
- Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
- Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
- allocation_site = LoadFixedArrayElement(feedback_vector, literals_index, 0,
- CodeStubAssembler::SMI_PARAMETERS);
- GotoIf(IsUndefined(allocation_site), call_runtime);
- }
+ Node* allocation_site = LoadFeedbackVectorSlot(closure, literals_index);
+ GotoIf(NotHasBoilerplate(allocation_site), call_runtime);
- Node* boilerplate =
- LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+ Node* boilerplate = LoadAllocationSiteBoilerplate(allocation_site);
Node* boilerplate_map = LoadMap(boilerplate);
+ CSA_ASSERT(this, IsJSObjectMap(boilerplate_map));
VARIABLE(var_properties, MachineRepresentation::kTagged);
{
+ Node* bit_field_3 = LoadMapBitField3(boilerplate_map);
+ GotoIf(IsSetWord32<Map::Deprecated>(bit_field_3), call_runtime);
// Directly copy over the property store for dict-mode boilerplates.
- Label if_dictionary(this), if_fast(this), allocate_object(this);
- Branch(IsDictionaryMap(boilerplate_map), &if_dictionary, &if_fast);
+ Label if_dictionary(this), if_fast(this), done(this);
+ Branch(IsSetWord32<Map::DictionaryMap>(bit_field_3), &if_dictionary,
+ &if_fast);
BIND(&if_dictionary);
{
+ Comment("Copy dictionary properties");
var_properties.Bind(
CopyNameDictionary(LoadProperties(boilerplate), call_runtime));
// Slow objects have no in-object properties.
- Goto(&allocate_object);
+ Goto(&done);
}
BIND(&if_fast);
{
@@ -613,14 +596,38 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Node* boilerplate_properties = LoadProperties(boilerplate);
GotoIfNot(IsEmptyFixedArray(boilerplate_properties), call_runtime);
var_properties.Bind(EmptyFixedArrayConstant());
- Goto(&allocate_object);
+ Goto(&done);
}
- BIND(&allocate_object);
+ BIND(&done);
}
+ VARIABLE(var_elements, MachineRepresentation::kTagged);
+ {
+ // Copy the elements backing store, assuming that it's flat.
+ Label if_empty_fixed_array(this), if_copy_elements(this), done(this);
+ Node* boilerplate_elements = LoadElements(boilerplate);
+ Branch(IsEmptyFixedArray(boilerplate_elements), &if_empty_fixed_array,
+ &if_copy_elements);
+
+ BIND(&if_empty_fixed_array);
+ var_elements.Bind(boilerplate_elements);
+ Goto(&done);
+
+ BIND(&if_copy_elements);
+ CSA_ASSERT(this, Word32BinaryNot(
+ IsFixedCOWArrayMap(LoadMap(boilerplate_elements))));
+ var_elements.Bind(CopyFixedArrayBase(boilerplate_elements));
+ Goto(&done);
+ BIND(&done);
+ }
+
+ // Ensure new-space allocation for a fresh JSObject so we can skip write
+ // barriers when copying all object fields.
+ STATIC_ASSERT(JSObject::kMaxInstanceSize < kMaxRegularHeapObjectSize);
Node* instance_size = TimesPointerSize(LoadMapInstanceSize(boilerplate_map));
Node* allocation_size = instance_size;
- if (FLAG_allocation_site_pretenuring) {
+ bool needs_allocation_memento = FLAG_allocation_site_pretenuring;
+ if (needs_allocation_memento) {
// Prepare for inner-allocating the AllocationMemento.
allocation_size =
IntPtrAdd(instance_size, IntPtrConstant(AllocationMemento::kSize));
@@ -628,43 +635,90 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Node* copy = AllocateInNewSpace(allocation_size);
{
+ Comment("Initialize Literal Copy");
// Initialize Object fields.
StoreMapNoWriteBarrier(copy, boilerplate_map);
- StoreObjectFieldNoWriteBarrier(copy, JSObject::kPropertiesOffset,
+ StoreObjectFieldNoWriteBarrier(copy, JSObject::kPropertiesOrHashOffset,
var_properties.value());
- // TODO(cbruni): support elements cloning for object literals.
- CSA_ASSERT(this, IsEmptyFixedArray(LoadElements(boilerplate)));
StoreObjectFieldNoWriteBarrier(copy, JSObject::kElementsOffset,
- EmptyFixedArrayConstant());
+ var_elements.value());
}
- // Copy over in-object properties.
- Node* start_offset = IntPtrConstant(JSObject::kHeaderSize);
- BuildFastLoop(start_offset, instance_size,
- [=](Node* offset) {
- // The Allocate above guarantees that the copy lies in new
- // space. This allows us to skip write barriers. This is
- // necessary since we may also be copying unboxed doubles.
- // TODO(verwaest): Allocate and fill in double boxes.
- // TODO(cbruni): decode map information and support mutable
- // heap numbers.
- Node* field = LoadObjectField(boilerplate, offset);
- StoreObjectFieldNoWriteBarrier(copy, offset, field);
- },
- kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
-
- if (FLAG_allocation_site_pretenuring) {
- Node* memento = InnerAllocate(copy, instance_size);
- StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex);
- StoreObjectFieldNoWriteBarrier(
- memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
- Node* memento_create_count = LoadObjectField(
- allocation_site, AllocationSite::kPretenureCreateCountOffset);
- memento_create_count =
- SmiAdd(memento_create_count, SmiConstant(Smi::FromInt(1)));
- StoreObjectFieldNoWriteBarrier(allocation_site,
- AllocationSite::kPretenureCreateCountOffset,
- memento_create_count);
+ // Initialize the AllocationMemento before potential GCs due to heap number
+ // allocation when copying the in-object properties.
+ if (needs_allocation_memento) {
+ InitializeAllocationMemento(copy, instance_size, allocation_site);
+ }
+
+ {
+ // Copy over in-object properties.
+ Label continue_with_write_barrier(this), done_init(this);
+ VARIABLE(offset, MachineType::PointerRepresentation(),
+ IntPtrConstant(JSObject::kHeaderSize));
+ // Mutable heap numbers only occur on 32-bit platforms.
+ bool may_use_mutable_heap_numbers =
+ FLAG_track_double_fields && !FLAG_unbox_double_fields;
+ {
+ Comment("Copy in-object properties fast");
+ Label continue_fast(this, &offset);
+ Branch(WordEqual(offset.value(), instance_size), &done_init,
+ &continue_fast);
+ BIND(&continue_fast);
+ Node* field = LoadObjectField(boilerplate, offset.value());
+ if (may_use_mutable_heap_numbers) {
+ Label store_field(this);
+ GotoIf(TaggedIsSmi(field), &store_field);
+ GotoIf(IsMutableHeapNumber(field), &continue_with_write_barrier);
+ Goto(&store_field);
+ BIND(&store_field);
+ }
+ StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
+ offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
+ Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
+ &done_init);
+ }
+
+ if (!may_use_mutable_heap_numbers) {
+ BIND(&done_init);
+ return copy;
+ }
+ // Continue initializing the literal after seeing the first sub-object
+ // potentially causing allocation. In this case we prepare the new literal
+ // by copying all pending fields over from the boilerplate and emit full
+ // write barriers from here on.
+ BIND(&continue_with_write_barrier);
+ {
+ Comment("Copy in-object properties slow");
+ BuildFastLoop(offset.value(), instance_size,
+ [=](Node* offset) {
+ Node* field = LoadObjectField(boilerplate, offset);
+ StoreObjectFieldNoWriteBarrier(copy, offset, field);
+ },
+ kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Comment("Copy mutable HeapNumber values");
+ BuildFastLoop(offset.value(), instance_size,
+ [=](Node* offset) {
+ Node* field = LoadObjectField(copy, offset);
+ Label copy_mutable_heap_number(this, Label::kDeferred),
+ continue_loop(this);
+ // We only have to clone complex field values.
+ GotoIf(TaggedIsSmi(field), &continue_loop);
+ Branch(IsMutableHeapNumber(field),
+ &copy_mutable_heap_number, &continue_loop);
+ BIND(&copy_mutable_heap_number);
+ {
+ Node* double_value = LoadHeapNumberValue(field);
+ Node* mutable_heap_number =
+ AllocateHeapNumberWithValue(double_value, MUTABLE);
+ StoreObjectField(copy, offset, mutable_heap_number);
+ Goto(&continue_loop);
+ }
+ BIND(&continue_loop);
+ },
+ kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Goto(&done_init);
+ }
+ BIND(&done_init);
}
return copy;
}
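The in-object property copy in EmitFastCloneShallowObject above is two-phase: fields
are copied without write barriers while nothing can allocate, and the loop bails to a
slower path the moment it sees a mutable HeapNumber, which must be cloned (its
allocation may trigger GC, hence the full write barriers from that point on). A
standalone model of just that two-pass structure, with "MutableBox" standing in for a
mutable HeapNumber and plain ints for fields that can be shared verbatim (not V8 code,
write barriers abstracted away):

#include <memory>
#include <variant>
#include <vector>

struct MutableBox { double value; };  // stand-in for a mutable HeapNumber
using Field = std::variant<int, std::shared_ptr<MutableBox>>;

std::vector<Field> CloneFields(const std::vector<Field>& boilerplate) {
  std::vector<Field> copy(boilerplate.size());
  size_t i = 0;
  // Fast path: fields that need no deep clone are copied verbatim.
  for (; i < boilerplate.size(); ++i) {
    if (std::holds_alternative<std::shared_ptr<MutableBox>>(boilerplate[i])) {
      break;  // first field needing a clone: switch to the slow path
    }
    copy[i] = boilerplate[i];
  }
  // Slow path, pass 1: copy every remaining field so the clone is fully
  // initialized before anything else can allocate.
  for (size_t j = i; j < boilerplate.size(); ++j) copy[j] = boilerplate[j];
  // Slow path, pass 2: replace shared mutable boxes with fresh copies.
  for (size_t j = i; j < boilerplate.size(); ++j) {
    if (auto* box = std::get_if<std::shared_ptr<MutableBox>>(&copy[j])) {
      copy[j] = std::make_shared<MutableBox>(MutableBox{(*box)->value});
    }
  }
  return copy;
}
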
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 9b04eb378e..fe049893eb 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -41,6 +41,11 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
Node* NonEmptyShallowClone(Node* boilerplate, Node* boilerplate_map,
Node* boilerplate_elements, Node* allocation_site,
Node* capacity, ElementsKind kind);
+ Node* CopyFixedArrayBase(Node* elements);
+
+ Node* LoadFeedbackVectorSlot(Node* closure, Node* literal_index);
+ Node* NotHasBoilerplate(Node* literal_site);
+ Node* LoadAllocationSiteBoilerplate(Node* allocation_site);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 5fe2cb03bd..9edeb56e1e 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -2,28 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-conversion-gen.h"
+
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-class ConversionBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit ConversionBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- void Generate_NonPrimitiveToPrimitive(Node* context, Node* input,
- ToPrimitiveHint hint);
-
- void Generate_OrdinaryToPrimitive(Node* context, Node* input,
- OrdinaryToPrimitiveHint hint);
-};
-
// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
Node* context, Node* input, ToPrimitiveHint hint) {
@@ -136,6 +124,56 @@ TF_BUILTIN(ToString, CodeStubAssembler) {
Return(ToString(context, input));
}
+// ES6 section 7.1.1 ToPrimitive( argument, "default" ) followed by
+// ES6 section 7.1.12 ToString ( argument )
+compiler::Node* ConversionBuiltinsAssembler::ToPrimitiveToString(
+ Node* context, Node* input, Variable* feedback) {
+ Label is_string(this), to_primitive(this, Label::kDeferred),
+ to_string(this, Label::kDeferred), done(this);
+ VARIABLE(result, MachineRepresentation::kTagged, input);
+
+ GotoIf(TaggedIsSmi(input), &to_string);
+ GotoIf(IsString(input), &is_string);
+ BranchIfJSReceiver(input, &to_primitive, &to_string);
+
+ BIND(&to_primitive);
+ {
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ result.Bind(CallStub(callable, context, input));
+ Goto(&to_string);
+ }
+
+ BIND(&to_string);
+ {
+ if (feedback) {
+ feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ }
+ result.Bind(CallBuiltin(Builtins::kToString, context, result.value()));
+ Goto(&done);
+ }
+
+ BIND(&is_string);
+ {
+ if (feedback) {
+ feedback->Bind(
+ SelectSmiConstant(WordEqual(input, EmptyStringConstant()),
+ BinaryOperationFeedback::kString,
+ BinaryOperationFeedback::kNonEmptyString));
+ }
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return result.value();
+}
+
+TF_BUILTIN(ToPrimitiveToString, ConversionBuiltinsAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* input = Parameter(Descriptor::kArgument);
+
+ Return(ToPrimitiveToString(context, input));
+}
+
// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
Node* context, Node* input, OrdinaryToPrimitiveHint hint) {
@@ -221,6 +259,22 @@ TF_BUILTIN(ToBoolean, CodeStubAssembler) {
Return(BooleanConstant(false));
}
+// ES6 section 7.1.2 ToBoolean ( argument )
+// Requires parameter on stack so that it can be used as a continuation from a
+// LAZY deopt.
+TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
+ Node* value = Parameter(Descriptor::kArgument);
+
+ Label return_true(this), return_false(this);
+ BranchIfToBooleanIsTrue(value, &return_true, &return_false);
+
+ BIND(&return_true);
+ Return(BooleanConstant(true));
+
+ BIND(&return_false);
+ Return(BooleanConstant(false));
+}
+
TF_BUILTIN(ToLength, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -247,8 +301,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
// Check if {len} is a HeapNumber.
Label if_lenisheapnumber(this),
if_lenisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(len)), &if_lenisheapnumber,
- &if_lenisnotheapnumber);
+ Branch(IsHeapNumber(len), &if_lenisheapnumber, &if_lenisnotheapnumber);
BIND(&if_lenisheapnumber);
{
@@ -273,8 +326,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
BIND(&if_lenisnotheapnumber);
{
// Need to convert {len} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_len.Bind(CallStub(callable, context, len));
+ var_len.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, len));
Goto(&loop);
}
@@ -285,7 +337,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
Return(NumberConstant(kMaxSafeInteger));
BIND(&return_zero);
- Return(SmiConstant(Smi::kZero));
+ Return(SmiConstant(0));
}
}
@@ -337,7 +389,7 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
Node* js_value = Allocate(JSValue::kSize);
StoreMapNoWriteBarrier(js_value, initial_map);
- StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
+ StoreObjectFieldRoot(js_value, JSValue::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
@@ -345,9 +397,8 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
Return(js_value);
BIND(&if_noconstructor);
- TailCallRuntime(
- Runtime::kThrowUndefinedOrNullToObject, context,
- HeapConstant(factory()->NewStringFromAsciiChecked("ToObject", TENURED)));
+ TailCallRuntime(Runtime::kThrowUndefinedOrNullToObject, context,
+ StringConstant("ToObject"));
BIND(&if_jsreceiver);
Return(object);
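The ToLength changes above keep the same spec behavior while routing the non-number
case through Builtins::kNonNumberToNumber; the numeric clamping they implement can be
summarized by this standalone model (ES ToLength semantics, not the builtin's exact
control flow):

#include <cmath>

double ToLengthClamp(double len) {
  constexpr double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
  if (std::isnan(len) || len <= 0.0) return 0.0;   // the SmiConstant(0) path
  if (len >= kMaxSafeInteger) return kMaxSafeInteger;
  return std::trunc(len);  // integral part; positive, so identical to floor
}
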
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.h b/deps/v8/src/builtins/builtins-conversion-gen.h
new file mode 100644
index 0000000000..fedbc54d2e
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-conversion-gen.h
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_CONVERSION_GEN_H_
+#define V8_BUILTINS_BUILTINS_CONVERSION_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class ConversionBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ConversionBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ Node* ToPrimitiveToString(Node* context, Node* input,
+ Variable* feedback = nullptr);
+
+ protected:
+ void Generate_NonPrimitiveToPrimitive(Node* context, Node* input,
+ ToPrimitiveHint hint);
+
+ void Generate_OrdinaryToPrimitive(Node* context, Node* input,
+ OrdinaryToPrimitiveHint hint);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_CONVERSION_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index 10bb39f861..579d537b73 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -50,7 +50,7 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
BIND(&stamp_mismatch);
}
- Node* field_index_smi = SmiConstant(Smi::FromInt(field_index));
+ Node* field_index_smi = SmiConstant(field_index);
Node* function =
ExternalConstant(ExternalReference::get_date_field_function(isolate()));
Node* result = CallCFunction2(
@@ -204,17 +204,19 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
GotoIf(WordEqual(hint, string_string), &hint_is_string);
// Slow-case with actual string comparisons.
- Callable string_equal = CodeFactory::StringEqual(isolate());
GotoIf(TaggedIsSmi(hint), &hint_is_invalid);
GotoIfNot(IsString(hint), &hint_is_invalid);
- GotoIf(WordEqual(CallStub(string_equal, context, hint, number_string),
- TrueConstant()),
+ GotoIf(WordEqual(
+ CallBuiltin(Builtins::kStringEqual, context, hint, number_string),
+ TrueConstant()),
&hint_is_number);
- GotoIf(WordEqual(CallStub(string_equal, context, hint, default_string),
- TrueConstant()),
+ GotoIf(WordEqual(
+ CallBuiltin(Builtins::kStringEqual, context, hint, default_string),
+ TrueConstant()),
&hint_is_string);
- GotoIf(WordEqual(CallStub(string_equal, context, hint, string_string),
- TrueConstant()),
+ GotoIf(WordEqual(
+ CallBuiltin(Builtins::kStringEqual, context, hint, string_string),
+ TrueConstant()),
&hint_is_string);
Goto(&hint_is_invalid);
@@ -247,9 +249,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
BIND(&receiver_is_invalid);
{
CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(
- "Date.prototype [ @@toPrimitive ]", TENURED)),
- receiver);
+ StringConstant("Date.prototype [ @@toPrimitive ]"), receiver);
Unreachable();
}
}
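The hint handling in DatePrototypeToPrimitive above first tries identity checks
against the canonical hint strings (the WordEqual fast path) and only then falls back
to full string comparisons via Builtins::kStringEqual; "default" is treated like
"string", and any non-string hint is invalid. A compact standalone sketch of that
classification, with std::string pointers standing in for interned V8 strings (not V8
code):

#include <string>

enum class Hint { kNumber, kString, kInvalid };

Hint ClassifyHint(const std::string* hint, const std::string* number_string,
                  const std::string* string_string,
                  const std::string* default_string) {
  // Fast path: pointer identity against the interned constants.
  if (hint == number_string) return Hint::kNumber;
  if (hint == default_string || hint == string_string) return Hint::kString;
  // Anything that is not a string at all is an invalid hint.
  if (hint == nullptr) return Hint::kInvalid;
  // Slow path: content comparison, as with the kStringEqual calls.
  if (*hint == *number_string) return Hint::kNumber;
  if (*hint == *default_string || *hint == *string_string) return Hint::kString;
  return Hint::kInvalid;
}
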
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 9985bbe4b7..c46a44d0d3 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -162,7 +162,7 @@ void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
const char* local_timezone = date_cache->LocalTimezone(time_ms);
switch (mode) {
case kDateOnly:
- SNPrintF(str, "%s %s %02d %4d", kShortWeekDays[weekday],
+ SNPrintF(str, "%s %s %02d %04d", kShortWeekDays[weekday],
kShortMonths[month], day, year);
return;
case kTimeOnly:
@@ -171,7 +171,7 @@ void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
local_timezone);
return;
case kDateAndTime:
- SNPrintF(str, "%s %s %02d %4d %02d:%02d:%02d GMT%c%02d%02d (%s)",
+ SNPrintF(str, "%s %s %02d %04d %02d:%02d:%02d GMT%c%02d%02d (%s)",
kShortWeekDays[weekday], kShortMonths[month], day, year, hour,
min, sec, (timezone_offset < 0) ? '-' : '+', timezone_hour,
timezone_min, local_timezone);
@@ -822,7 +822,7 @@ BUILTIN(DatePrototypeToUTCString) {
int year, month, day, weekday, hour, min, sec, ms;
isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
&hour, &min, &sec, &ms);
- SNPrintF(ArrayVector(buffer), "%s, %02d %s %4d %02d:%02d:%02d GMT",
+ SNPrintF(ArrayVector(buffer), "%s, %02d %s %04d %02d:%02d:%02d GMT",
kShortWeekDays[weekday], day, kShortMonths[month], year, hour, min,
sec);
return *isolate->factory()->NewStringFromAsciiChecked(buffer);
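The printf-format changes above ("%4d" -> "%04d") make years below 1000 zero-padded
instead of space-padded in the generated date strings; a tiny standalone illustration:

#include <cstdio>

int main() {
  char space_padded[8], zero_padded[8];
  std::snprintf(space_padded, sizeof space_padded, "%4d", 737);  // " 737"
  std::snprintf(zero_padded, sizeof zero_padded, "%04d", 737);   // "0737"
  std::printf("[%s] vs [%s]\n", space_padded, zero_padded);
}
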
diff --git a/deps/v8/src/builtins/builtins-debug.cc b/deps/v8/src/builtins/builtins-debug-gen.cc
index de603287f2..de603287f2 100644
--- a/deps/v8/src/builtins/builtins-debug.cc
+++ b/deps/v8/src/builtins/builtins-debug-gen.cc
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index bce8eebb0f..8a87008def 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -62,20 +62,18 @@ namespace internal {
ASM(CallFunction_ReceiverIsNullOrUndefined) \
ASM(CallFunction_ReceiverIsNotNullOrUndefined) \
ASM(CallFunction_ReceiverIsAny) \
- ASM(TailCallFunction_ReceiverIsNullOrUndefined) \
- ASM(TailCallFunction_ReceiverIsNotNullOrUndefined) \
- ASM(TailCallFunction_ReceiverIsAny) \
/* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
ASM(CallBoundFunction) \
- ASM(TailCallBoundFunction) \
/* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
ASM(Call_ReceiverIsNullOrUndefined) \
ASM(Call_ReceiverIsNotNullOrUndefined) \
ASM(Call_ReceiverIsAny) \
- ASM(TailCall_ReceiverIsNullOrUndefined) \
- ASM(TailCall_ReceiverIsNotNullOrUndefined) \
- ASM(TailCall_ReceiverIsAny) \
- ASM(CallWithSpread) \
+ \
+  /* ES6 section 9.5.12 [[Call]] ( thisArgument, argumentsList ) */         \
+ TFC(CallProxy, CallTrampoline, 1) \
+ ASM(CallVarargs) \
+ TFC(CallWithSpread, CallWithSpread, 1) \
+ TFC(CallWithArrayLike, CallWithArrayLike, 1) \
ASM(CallForwardVarargs) \
ASM(CallFunctionForwardVarargs) \
\
@@ -89,7 +87,9 @@ namespace internal {
ASM(ConstructProxy) \
/* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
ASM(Construct) \
- ASM(ConstructWithSpread) \
+ ASM(ConstructVarargs) \
+ TFC(ConstructWithSpread, ConstructWithSpread, 1) \
+ TFC(ConstructWithArrayLike, ConstructWithArrayLike, 1) \
ASM(ConstructForwardVarargs) \
ASM(ConstructFunctionForwardVarargs) \
ASM(JSConstructStubApi) \
@@ -108,7 +108,6 @@ namespace internal {
TFC(FastCloneShallowObject, FastCloneShallowObject, 1) \
\
/* Apply and entries */ \
- ASM(Apply) \
ASM(JSEntryTrampoline) \
ASM(JSConstructEntryTrampoline) \
ASM(ResumeGeneratorTrampoline) \
@@ -126,6 +125,10 @@ namespace internal {
TFS(StringIndexOf, kReceiver, kSearchString, kPosition) \
TFC(StringLessThan, Compare, 1) \
TFC(StringLessThanOrEqual, Compare, 1) \
+ TFC(StringConcat, StringConcat, 1) \
+ \
+ /* OrderedHashTable helpers */ \
+ TFS(OrderedHashTableHealIndex, kTable, kIndex) \
\
/* Interpreter */ \
ASM(InterpreterEntryTrampoline) \
@@ -134,8 +137,6 @@ namespace internal {
ASM(InterpreterPushArgsThenCallFunction) \
ASM(InterpreterPushUndefinedAndArgsThenCallFunction) \
ASM(InterpreterPushArgsThenCallWithFinalSpread) \
- ASM(InterpreterPushArgsThenTailCall) \
- ASM(InterpreterPushArgsThenTailCallFunction) \
ASM(InterpreterPushArgsThenConstruct) \
ASM(InterpreterPushArgsThenConstructFunction) \
ASM(InterpreterPushArgsThenConstructArray) \
@@ -145,9 +146,7 @@ namespace internal {
ASM(InterpreterOnStackReplacement) \
\
/* Code life-cycle */ \
- ASM(CompileOptimized) \
- ASM(CompileOptimizedConcurrent) \
- ASM(InOptimizationQueue) \
+ ASM(CheckOptimizationMarker) \
ASM(InstantiateAsmJs) \
ASM(MarkCodeAsToBeExecutedOnce) \
ASM(MarkCodeAsExecutedOnce) \
@@ -155,8 +154,33 @@ namespace internal {
ASM(NotifyDeoptimized) \
ASM(NotifySoftDeoptimized) \
ASM(NotifyLazyDeoptimized) \
- ASM(NotifyStubFailure) \
- ASM(NotifyStubFailureSaveDoubles) \
+ ASM(NotifyBuiltinContinuation) \
+ \
+ /* Trampolines called when returning from a deoptimization that expects */ \
+  /* to continue in a JavaScript builtin to finish the functionality of */   \
+  /* a TF-inlined version of a builtin that has side-effects. */             \
+ /* */ \
+ /* The trampolines work as follows: */ \
+ /* 1. Trampoline restores input register values that */ \
+ /* the builtin expects from a BuiltinContinuationFrame. */ \
+ /* 2. Trampoline tears down BuiltinContinuationFrame. */ \
+ /* 3. Trampoline jumps to the builtin's address. */ \
+ /* 4. Builtin executes as if invoked by the frame above it. */ \
+ /* 5. When the builtin returns, execution resumes normally in the */ \
+ /* calling frame, processing any return result from the JavaScript */ \
+ /* builtin as if it had called the builtin directly. */ \
+ /* */ \
+ /* There are two variants of the stub that differ in their handling of a */ \
+ /* value returned by the next frame deeper on the stack. For LAZY deopts, */ \
+ /* the return value (e.g. rax on x64) is explicitly passed as an extra */ \
+ /* stack parameter to the JavaScript builtin by the "WithResult" */ \
+ /* trampoline variant. The plain variant is used in EAGER deopt contexts */ \
+ /* and has no such special handling. */ \
+ ASM(ContinueToCodeStubBuiltin) \
+ ASM(ContinueToCodeStubBuiltinWithResult) \
+ ASM(ContinueToJavaScriptBuiltin) \
+ ASM(ContinueToJavaScriptBuiltinWithResult) \
+ \
ASM(OnStackReplacement) \
\
/* API callback handling */ \
@@ -192,12 +216,16 @@ namespace internal {
TFC(NonNumberToNumber, TypeConversion, 1) \
TFC(ToNumber, TypeConversion, 1) \
TFC(ToString, TypeConversion, 1) \
+ TFC(ToPrimitiveToString, TypeConversion, 1) \
TFC(ToInteger, TypeConversion, 1) \
TFC(ToLength, TypeConversion, 1) \
TFC(ClassOf, Typeof, 1) \
TFC(Typeof, Typeof, 1) \
TFC(GetSuperConstructor, Typeof, 1) \
\
+ /* Type conversions continuations */ \
+ TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \
+ \
/* Handlers */ \
TFH(LoadICProtoArray, BUILTIN, kNoExtraICState, LoadICProtoArray) \
TFH(LoadICProtoArrayThrowIfNonexistent, BUILTIN, kNoExtraICState, \
@@ -233,8 +261,7 @@ namespace internal {
/* Special internal builtins */ \
CPP(EmptyFunction) \
CPP(Illegal) \
- CPP(RestrictedFunctionPropertiesThrower) \
- CPP(RestrictedStrictArgumentsPropertiesThrower) \
+ CPP(StrictPoisonPillThrower) \
CPP(UnsupportedThrower) \
TFJ(ReturnReceiver, 0) \
\
@@ -268,6 +295,10 @@ namespace internal {
/* ES6 #sec-array.prototype.foreach */ \
TFS(ArrayForEachLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayForEachLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArrayForEachLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \
+ kInitialK, kLength, kResult) \
TFJ(ArrayForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.every */ \
TFS(ArrayEveryLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
@@ -284,6 +315,10 @@ namespace internal {
/* ES6 #sec-array.prototype.foreach */ \
TFS(ArrayMapLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayMapLoopEagerDeoptContinuation, 5, kCallbackFn, kThisArg, kArray, \
+ kInitialK, kLength) \
+ TFJ(ArrayMapLoopLazyDeoptContinuation, 6, kCallbackFn, kThisArg, kArray, \
+ kInitialK, kLength, kResult) \
TFJ(ArrayMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.reduce */ \
TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
@@ -311,8 +346,8 @@ namespace internal {
CPP(ArrayBufferPrototypeSlice) \
\
/* AsyncFunction */ \
- TFJ(AsyncFunctionAwaitCaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitUncaught, 3, kGenerator, kAwaited, kOuterPromise) \
+ TFJ(AsyncFunctionAwaitCaught, 2, kAwaited, kOuterPromise) \
+ TFJ(AsyncFunctionAwaitUncaught, 2, kAwaited, kOuterPromise) \
TFJ(AsyncFunctionAwaitRejectClosure, 1, kSentError) \
TFJ(AsyncFunctionAwaitResolveClosure, 1, kSentValue) \
TFJ(AsyncFunctionPromiseCreate, 0) \
@@ -369,6 +404,7 @@ namespace internal {
CPP(ConsoleTime) \
CPP(ConsoleTimeEnd) \
CPP(ConsoleTimeStamp) \
+ CPP(ConsoleContext) \
\
/* DataView */ \
CPP(DataViewConstructor) \
@@ -493,11 +529,13 @@ namespace internal {
TFS(CreateGeneratorObject, kClosure, kReceiver) \
CPP(GeneratorFunctionConstructor) \
/* ES6 #sec-generator.prototype.next */ \
- TFJ(GeneratorPrototypeNext, 1, kValue) \
+ TFJ(GeneratorPrototypeNext, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-generator.prototype.return */ \
- TFJ(GeneratorPrototypeReturn, 1, kValue) \
+ TFJ(GeneratorPrototypeReturn, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-generator.prototype.throw */ \
- TFJ(GeneratorPrototypeThrow, 1, kException) \
+ TFJ(GeneratorPrototypeThrow, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(AsyncFunctionConstructor) \
\
/* Global object */ \
@@ -538,6 +576,25 @@ namespace internal {
TFH(LoadGlobalICInsideTypeofTrampoline, LOAD_GLOBAL_IC, kNoExtraICState, \
LoadGlobal) \
\
+ /* Map */ \
+ TFS(MapLookupHashIndex, kTable, kKey) \
+ TFJ(MapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(MapGet, 1, kKey) \
+ TFJ(MapHas, 1, kKey) \
+ CPP(MapClear) \
+ /* ES #sec-map.prototype.entries */ \
+ TFJ(MapPrototypeEntries, 0) \
+ /* ES #sec-get-map.prototype.size */ \
+ TFJ(MapPrototypeGetSize, 0) \
+ /* ES #sec-map.prototype.forEach */ \
+ TFJ(MapPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES #sec-map.prototype.keys */ \
+ TFJ(MapPrototypeKeys, 0) \
+ /* ES #sec-map.prototype.values */ \
+ TFJ(MapPrototypeValues, 0) \
+ /* ES #sec-%mapiteratorprototype%.next */ \
+ TFJ(MapIteratorPrototypeNext, 0) \
+ \
/* Math */ \
/* ES6 #sec-math.abs */ \
TFJ(MathAbs, 1, kX) \
@@ -651,16 +708,11 @@ namespace internal {
TFC(GreaterThanOrEqual, Compare, 1) \
TFC(Equal, Compare, 1) \
TFC(StrictEqual, Compare, 1) \
- TFC(AddWithFeedback, BinaryOpWithVector, 1) \
- TFC(SubtractWithFeedback, BinaryOpWithVector, 1) \
- TFC(MultiplyWithFeedback, BinaryOpWithVector, 1) \
- TFC(DivideWithFeedback, BinaryOpWithVector, 1) \
- TFC(ModulusWithFeedback, BinaryOpWithVector, 1) \
\
/* Object */ \
CPP(ObjectAssign) \
/* ES #sec-object.create */ \
- TFJ(ObjectCreate, 2, kPrototype, kProperties) \
+ TFJ(ObjectCreate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(ObjectDefineGetter) \
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
@@ -687,6 +739,7 @@ namespace internal {
TFJ(ObjectProtoToString, 0) \
/* ES6 #sec-object.prototype.valueof */ \
TFJ(ObjectPrototypeValueOf, 0) \
+ TFJ(ObjectPrototypeIsPrototypeOf, 1, kValue) \
CPP(ObjectPrototypePropertyIsEnumerable) \
CPP(ObjectPrototypeGetProto) \
CPP(ObjectPrototypeSetProto) \
@@ -710,11 +763,12 @@ namespace internal {
/* ES6 #sec-promise-executor */ \
TFJ(PromiseConstructor, 1, kExecutor) \
TFJ(PromiseInternalConstructor, 1, kParent) \
- TFJ(IsPromise, 1, kObject) \
+ CPP(IsPromise) \
/* ES #sec-promise-resolve-functions */ \
TFJ(PromiseResolveClosure, 1, kValue) \
/* ES #sec-promise-reject-functions */ \
TFJ(PromiseRejectClosure, 1, kValue) \
+ TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.prototype.then */ \
TFJ(PromiseThen, 2, kOnFullfilled, kOnRejected) \
/* ES #sec-promise.prototype.catch */ \
@@ -734,10 +788,15 @@ namespace internal {
TFJ(PromiseCatchFinally, 1, kReason) \
TFJ(PromiseValueThunkFinally, 0) \
TFJ(PromiseThrowerFinally, 0) \
+ /* ES #sec-promise.all */ \
+ TFJ(PromiseAll, 1, kIterable) \
+ /* ES #sec-promise.race */ \
+ TFJ(PromiseRace, 1, kIterable) \
\
/* Proxy */ \
- CPP(ProxyConstructor) \
- CPP(ProxyConstructor_ConstructStub) \
+ TFJ(ProxyConstructor, 0) \
+ TFJ(ProxyConstructor_ConstructStub, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Reflect */ \
ASM(ReflectApply) \
@@ -755,7 +814,9 @@ namespace internal {
CPP(ReflectSetPrototypeOf) \
\
/* RegExp */ \
+ TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo) \
TFS(RegExpPrototypeExecSlow, kReceiver, kString) \
+ \
CPP(RegExpCapture1Getter) \
CPP(RegExpCapture2Getter) \
CPP(RegExpCapture3Getter) \
@@ -810,6 +871,21 @@ namespace internal {
/* ES #sec-regexp.prototype-@@split */ \
TFJ(RegExpPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
+ /* Set */ \
+ TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(SetHas, 1, kKey) \
+ CPP(SetClear) \
+ /* ES #sec-set.prototype.entries */ \
+ TFJ(SetPrototypeEntries, 0) \
+ /* ES #sec-get-set.prototype.size */ \
+ TFJ(SetPrototypeGetSize, 0) \
+ /* ES #sec-set.prototype.foreach */ \
+ TFJ(SetPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES #sec-set.prototype.values */ \
+ TFJ(SetPrototypeValues, 0) \
+ /* ES #sec-%setiteratorprototype%.next */ \
+ TFJ(SetIteratorPrototypeNext, 0) \
+ \
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
CPP(SharedArrayBufferPrototypeSlice) \
@@ -853,23 +929,16 @@ namespace internal {
/* ES6 #sec-string.prototype.slice */ \
TFJ(StringPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.split */ \
- TFJ(StringPrototypeSplit, 2, kSeparator, kLimit) \
+ TFJ(StringPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.substr */ \
- TFJ(StringPrototypeSubstr, 2, kStart, kLength) \
+ TFJ(StringPrototypeSubstr, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.substring */ \
- TFJ(StringPrototypeSubstring, 2, kStart, kEnd) \
+ TFJ(StringPrototypeSubstring, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.startswith */ \
CPP(StringPrototypeStartsWith) \
/* ES6 #sec-string.prototype.tostring */ \
TFJ(StringPrototypeToString, 0) \
- /* ES #sec-string.prototype.tolocalelowercase */ \
- CPP(StringPrototypeToLocaleLowerCase) \
- /* ES #sec-string.prototype.tolocaleuppercase */ \
- CPP(StringPrototypeToLocaleUpperCase) \
- /* (obsolete) Unibrow version */ \
- CPP(StringPrototypeToLowerCase) \
- /* (obsolete) Unibrow version */ \
- CPP(StringPrototypeToUpperCase) \
CPP(StringPrototypeTrim) \
CPP(StringPrototypeTrimLeft) \
CPP(StringPrototypeTrimRight) \
@@ -948,6 +1017,9 @@ namespace internal {
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.map */ \
TFJ(TypedArrayPrototypeMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.prototype.forEach */ \
+ TFJ(TypedArrayPrototypeForEach, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
ASM(WasmCompileLazy) \
@@ -961,6 +1033,14 @@ namespace internal {
TFC(ThrowWasmTrapFuncInvalid, WasmRuntimeCall, 1) \
TFC(ThrowWasmTrapFuncSigMismatch, WasmRuntimeCall, 1) \
\
+ /* WeakMap */ \
+ TFS(WeakMapLookupHashIndex, kTable, kKey) \
+ TFJ(WeakMapGet, 1, kKey) \
+ TFJ(WeakMapHas, 1, kKey) \
+ \
+ /* WeakSet */ \
+ TFJ(WeakSetHas, 1, kKey) \
+ \
/* AsyncGenerator */ \
\
TFS(AsyncGeneratorResolve, kGenerator, kValue, kDone) \
@@ -972,18 +1052,21 @@ namespace internal {
CPP(AsyncGeneratorFunctionConstructor) \
/* AsyncGenerator.prototype.next ( value ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-next */ \
- TFJ(AsyncGeneratorPrototypeNext, 1, kValue) \
+ TFJ(AsyncGeneratorPrototypeNext, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* AsyncGenerator.prototype.return ( value ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-return */ \
- TFJ(AsyncGeneratorPrototypeReturn, 1, kValue) \
+ TFJ(AsyncGeneratorPrototypeReturn, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* AsyncGenerator.prototype.throw ( exception ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-throw */ \
- TFJ(AsyncGeneratorPrototypeThrow, 1, kValue) \
+ TFJ(AsyncGeneratorPrototypeThrow, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Await (proposal-async-iteration/#await), with resume behaviour */ \
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
- TFJ(AsyncGeneratorAwaitCaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitUncaught, 2, kGenerator, kAwaited) \
+ TFJ(AsyncGeneratorAwaitCaught, 1, kAwaited) \
+ TFJ(AsyncGeneratorAwaitUncaught, 1, kAwaited) \
TFJ(AsyncGeneratorAwaitResolveClosure, 1, kValue) \
TFJ(AsyncGeneratorAwaitRejectClosure, 1, kValue) \
\
@@ -1001,23 +1084,36 @@ namespace internal {
TFJ(AsyncIteratorValueUnwrap, 1, kValue)
#ifdef V8_INTL_SUPPORT
-#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
- BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
- \
- /* ES #sec-string.prototype.tolowercase */ \
- TFJ(StringPrototypeToLowerCaseIntl, 0) \
- /* ES #sec-string.prototype.touppercase */ \
- CPP(StringPrototypeToUpperCaseIntl) \
- /* ES #sec-string.prototype.normalize */ \
- CPP(StringPrototypeNormalizeIntl)
+#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
+ BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
+ \
+ TFS(StringToLowerCaseIntl, kString) \
+ /* ES #sec-string.prototype.tolowercase */ \
+ TFJ(StringPrototypeToLowerCaseIntl, 0) \
+ /* ES #sec-string.prototype.touppercase */ \
+ CPP(StringPrototypeToUpperCaseIntl) \
+ /* ES #sec-string.prototype.normalize */ \
+ CPP(StringPrototypeNormalizeIntl) \
+ /* ecma402 #sec-intl.numberformat.prototype.formattoparts */ \
+ CPP(NumberFormatPrototypeFormatToParts)
#else
#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
\
/* no-op fallback version */ \
- CPP(StringPrototypeNormalize)
+ CPP(StringPrototypeNormalize) \
+ /* same as toLowercase; fallback version */ \
+ CPP(StringPrototypeToLocaleLowerCase) \
+ /* same as toUppercase; fallback version */ \
+ CPP(StringPrototypeToLocaleUpperCase) \
+ /* (obsolete) Unibrow version */ \
+ CPP(StringPrototypeToLowerCase) \
+ /* (obsolete) Unibrow version */ \
+ CPP(StringPrototypeToUpperCase)
#endif // V8_INTL_SUPPORT
+// The exceptions thrown in the following builtins are caught
+// internally and result in a promise rejection.
#define BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(V) \
V(AsyncFromSyncIteratorPrototypeNext) \
V(AsyncFromSyncIteratorPrototypeReturn) \
@@ -1028,14 +1124,18 @@ namespace internal {
V(AsyncGeneratorAwaitCaught) \
V(AsyncGeneratorAwaitUncaught) \
V(PerformNativePromiseThen) \
+ V(PromiseAll) \
V(PromiseConstructor) \
V(PromiseHandle) \
+ V(PromiseRace) \
V(PromiseResolve) \
V(PromiseResolveClosure) \
V(RejectNativePromise) \
V(ResolveNativePromise) \
V(ResolvePromise)
+// The exception thrown in the following builtins are caught internally and will
+// not be propagated further or re-thrown
#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseHandleReject)
#define IGNORE_BUILTIN(...)
@@ -1058,6 +1158,14 @@ namespace internal {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#define BUILTIN_LIST_TFJ(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+
+#define BUILTIN_LIST_TFC(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+
#define BUILTINS_WITH_UNTAGGED_PARAMS(V) V(WasmCompileLazy)
} // namespace internal
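
The BUILTIN_LIST_TFJ and BUILTIN_LIST_TFC macros added above reuse the IGNORE_BUILTIN trick visible in the neighbouring list: the master BUILTIN_LIST takes one macro parameter per builtin kind, and a per-kind list is derived by passing IGNORE_BUILTIN for every slot except the one of interest. A minimal standalone sketch of that X-macro pattern, using made-up DEMO_* names rather than V8's real lists:

    // Hypothetical illustration of the X-macro filtering; not V8 code.
    #include <cstdio>

    // Master list: one macro parameter per builtin kind (just CPP and TFJ here).
    #define DEMO_BUILTIN_LIST(CPP, TFJ) \
      CPP(ErrorConstructor)             \
      TFJ(MapGet, 1)                    \
      TFJ(SetHas, 1)

    // Swallows entries of kinds we are not interested in.
    #define DEMO_IGNORE_BUILTIN(...)

    // Derived list containing only the TFJ entries.
    #define DEMO_BUILTIN_LIST_TFJ(V) DEMO_BUILTIN_LIST(DEMO_IGNORE_BUILTIN, V)

    // One possible expansion: print each TFJ builtin and its parameter count.
    #define DEMO_PRINT_TFJ(Name, Argc) std::printf("%s/%d\n", #Name, Argc);

    int main() {
      DEMO_BUILTIN_LIST_TFJ(DEMO_PRINT_TFJ)  // prints "MapGet/1" and "SetHas/1"
      return 0;
    }

Passing a different macro into the TFJ slot (one that declares a function, or emits a table entry) yields the various declarations V8 derives from the same master list.
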
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 5b28863364..6d33d88f3f 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/accessors.h"
#include "src/counters.h"
#include "src/messages.h"
#include "src/objects-inl.h"
@@ -40,10 +41,12 @@ BUILTIN(ErrorConstructor) {
BUILTIN(ErrorCaptureStackTrace) {
HandleScope scope(isolate);
Handle<Object> object_obj = args.atOrUndefined(isolate, 1);
+
if (!object_obj->IsJSObject()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument, object_obj));
}
+
Handle<JSObject> object = Handle<JSObject>::cast(object_obj);
Handle<Object> caller = args.atOrUndefined(isolate, 2);
FrameSkipMode mode = caller->IsJSFunction() ? SKIP_UNTIL_SEEN : SKIP_FIRST;
@@ -52,27 +55,24 @@ BUILTIN(ErrorCaptureStackTrace) {
RETURN_FAILURE_ON_EXCEPTION(isolate,
isolate->CaptureAndSetDetailedStackTrace(object));
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, isolate->CaptureAndSetSimpleStackTrace(object, mode, caller));
+
+ // Add the stack accessors.
+
+ Handle<AccessorInfo> error_stack =
+ Accessors::ErrorStackInfo(isolate, DONT_ENUM);
- // Eagerly format the stack trace and set the stack property.
-
- Handle<Object> stack_trace =
- isolate->CaptureSimpleStackTrace(object, mode, caller);
- if (!stack_trace->IsJSArray()) return isolate->heap()->undefined_value();
-
- Handle<Object> formatted_stack_trace;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, formatted_stack_trace,
- ErrorUtils::FormatStackTrace(isolate, object, stack_trace));
-
- PropertyDescriptor desc;
- desc.set_configurable(true);
- desc.set_writable(true);
- desc.set_value(formatted_stack_trace);
- Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate, object, isolate->factory()->stack_string(), &desc,
- Object::THROW_ON_ERROR);
- if (!status.IsJust()) return isolate->heap()->exception();
- CHECK(status.FromJust());
+ // Explicitly check for frozen objects. Other access checks are performed by
+ // the LookupIterator in SetAccessor below.
+ if (!JSObject::IsExtensible(object)) {
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kDefineDisallowed,
+ handle(error_stack->name(), isolate)));
+ }
+
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetAccessor(object, error_stack));
return isolate->heap()->undefined_value();
}
@@ -96,8 +96,8 @@ Object* MakeGenericError(Isolate* isolate, BuiltinArguments args,
RETURN_RESULT_OR_FAILURE(
isolate, ErrorUtils::MakeGenericError(isolate, constructor,
- Smi::cast(*template_index)->value(),
- arg0, arg1, arg2, SKIP_NONE));
+ Smi::ToInt(*template_index), arg0,
+ arg1, arg2, SKIP_NONE));
}
} // namespace
diff --git a/deps/v8/src/builtins/builtins-forin-gen.cc b/deps/v8/src/builtins/builtins-forin-gen.cc
index 476d3766dc..3547bda52d 100644
--- a/deps/v8/src/builtins/builtins-forin-gen.cc
+++ b/deps/v8/src/builtins/builtins-forin-gen.cc
@@ -111,7 +111,7 @@ void ForInBuiltinsAssembler::CheckPrototypeEnumCache(Node* receiver, Node* map,
// For all objects but the receiver, check that the cache is empty.
current_map.Bind(LoadMap(current_js_object.value()));
Node* enum_length = EnumLength(current_map.value());
- Node* zero_constant = SmiConstant(Smi::kZero);
+ Node* zero_constant = SmiConstant(0);
Branch(WordEqual(enum_length, zero_constant), &loop, use_runtime);
}
}
@@ -127,8 +127,7 @@ void ForInBuiltinsAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
{
- Node* invalid_enum_cache_sentinel =
- SmiConstant(Smi::FromInt(kInvalidEnumCacheSentinel));
+ Node* invalid_enum_cache_sentinel = SmiConstant(kInvalidEnumCacheSentinel);
Node* enum_length = EnumLength(map);
Branch(WordEqual(enum_length, invalid_enum_cache_sentinel),
&check_dict_receiver, &check_empty_prototype);
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 6144c8828d..529e752f27 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -78,9 +78,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Node* native_context = LoadNativeContext(context);
Label map_done(this, vars);
- Node* bit_field = LoadMapBitField(receiver_map);
- int mask = static_cast<int>(1 << Map::kIsConstructor);
- GotoIf(IsSetWord32(bit_field, mask), &with_constructor);
+ GotoIf(IsConstructorMap(receiver_map), &with_constructor);
bound_function_map.Bind(LoadContextElement(
native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
@@ -106,7 +104,9 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Label arguments_done(this, &argument_array);
GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments);
Node* elements_length = ChangeUint32ToWord(Int32Sub(argc, Int32Constant(1)));
- Node* elements = AllocateFixedArray(FAST_ELEMENTS, elements_length);
+ Node* elements =
+ AllocateFixedArray(PACKED_ELEMENTS, elements_length, INTPTR_PARAMETERS,
+ kAllowLargeObjectAllocation);
VARIABLE(index, MachineType::PointerRepresentation());
index.Bind(IntPtrConstant(0));
VariableList foreach_vars({&index}, zone());
@@ -153,8 +153,8 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
JSBoundFunction::kBoundArgumentsOffset,
argument_array.value());
Node* empty_fixed_array = EmptyFixedArrayConstant();
- StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kPropertiesOffset,
- empty_fixed_array);
+ StoreObjectFieldNoWriteBarrier(
+ bound_function, JSObject::kPropertiesOrHashOffset, empty_fixed_array);
StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kElementsOffset,
empty_fixed_array);
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 7db1899b64..4f5a82cf97 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -134,8 +134,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
JSFunction::GetDerivedMap(isolate, target, new_target), Object);
Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
- Handle<Map> map = Map::AsLanguageMode(
- initial_map, shared_info->language_mode(), shared_info->kind());
+ Handle<Map> map = Map::AsLanguageMode(initial_map, shared_info);
Handle<Context> context(function->context(), isolate);
function = isolate->factory()->NewFunctionFromSharedFunctionInfo(
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index b011f1e5cd..2dbf34fcff 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -18,16 +18,15 @@ class GeneratorBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- void GeneratorPrototypeResume(Node* receiver, Node* value, Node* context,
+ void GeneratorPrototypeResume(CodeStubArguments* args, Node* receiver,
+ Node* value, Node* context,
JSGeneratorObject::ResumeMode resume_mode,
char const* const method_name);
};
void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
- Node* receiver, Node* value, Node* context,
+ CodeStubArguments* args, Node* receiver, Node* value, Node* context,
JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) {
- Node* closed = SmiConstant(JSGeneratorObject::kGeneratorClosed);
-
// Check if the {receiver} is actually a JSGeneratorObject.
Label if_receiverisincompatible(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &if_receiverisincompatible);
@@ -41,49 +40,70 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
LoadObjectField(receiver, JSGeneratorObject::kContinuationOffset);
Label if_receiverisclosed(this, Label::kDeferred),
if_receiverisrunning(this, Label::kDeferred);
+ Node* closed = SmiConstant(JSGeneratorObject::kGeneratorClosed);
GotoIf(SmiEqual(receiver_continuation, closed), &if_receiverisclosed);
DCHECK_LT(JSGeneratorObject::kGeneratorExecuting,
JSGeneratorObject::kGeneratorClosed);
GotoIf(SmiLessThan(receiver_continuation, closed), &if_receiverisrunning);
// Resume the {receiver} using our trampoline.
- Node* result =
- CallStub(CodeFactory::ResumeGenerator(isolate()), context, value,
- receiver, SmiConstant(resume_mode),
- SmiConstant(static_cast<int>(SuspendFlags::kGeneratorYield)));
- Return(result);
+ VARIABLE(var_exception, MachineRepresentation::kTagged, UndefinedConstant());
+ Label if_exception(this, Label::kDeferred), if_final_return(this);
+ Node* result = CallStub(CodeFactory::ResumeGenerator(isolate()), context,
+ value, receiver, SmiConstant(resume_mode));
+ // Make sure we close the generator if there was an exception.
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // If the generator is not suspended (i.e., its state is 'executing'),
+ // close it and wrap the return value in IteratorResult.
+ Node* result_continuation =
+ LoadObjectField(receiver, JSGeneratorObject::kContinuationOffset);
+
+ // The generator function should not close the generator by itself, let's
+ // check it is indeed not closed yet.
+ CSA_ASSERT(this, SmiNotEqual(result_continuation, closed));
+
+ Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
+ GotoIf(SmiEqual(result_continuation, executing), &if_final_return);
+
+ args->PopAndReturn(result);
+
+ BIND(&if_final_return);
+ {
+ // Close the generator.
+ StoreObjectFieldNoWriteBarrier(
+ receiver, JSGeneratorObject::kContinuationOffset, closed);
+ // Return the wrapped result.
+ args->PopAndReturn(CallBuiltin(Builtins::kCreateIterResultObject, context,
+ result, TrueConstant()));
+ }
BIND(&if_receiverisincompatible);
{
// The {receiver} is not a valid JSGeneratorObject.
CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(
- factory()->NewStringFromAsciiChecked(method_name, TENURED)),
- receiver);
+ StringConstant(method_name), receiver);
Unreachable();
}
BIND(&if_receiverisclosed);
{
- Callable create_iter_result_object =
- CodeFactory::CreateIterResultObject(isolate());
-
// The {receiver} is closed already.
Node* result = nullptr;
switch (resume_mode) {
case JSGeneratorObject::kNext:
- result = CallStub(create_iter_result_object, context,
- UndefinedConstant(), TrueConstant());
+ result = CallBuiltin(Builtins::kCreateIterResultObject, context,
+ UndefinedConstant(), TrueConstant());
break;
case JSGeneratorObject::kReturn:
- result =
- CallStub(create_iter_result_object, context, value, TrueConstant());
+ result = CallBuiltin(Builtins::kCreateIterResultObject, context, value,
+ TrueConstant());
break;
case JSGeneratorObject::kThrow:
result = CallRuntime(Runtime::kThrow, context, value);
break;
}
- Return(result);
+ args->PopAndReturn(result);
}
BIND(&if_receiverisrunning);
@@ -91,32 +111,63 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
CallRuntime(Runtime::kThrowGeneratorRunning, context);
Unreachable();
}
+
+ BIND(&if_exception);
+ {
+ StoreObjectFieldNoWriteBarrier(
+ receiver, JSGeneratorObject::kContinuationOffset, closed);
+ CallRuntime(Runtime::kReThrow, context, var_exception.value());
+ Unreachable();
+ }
}
// ES6 #sec-generator.prototype.next
TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- GeneratorPrototypeResume(receiver, value, context, JSGeneratorObject::kNext,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* receiver = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ GeneratorPrototypeResume(&args, receiver, value, context,
+ JSGeneratorObject::kNext,
"[Generator].prototype.next");
}
// ES6 #sec-generator.prototype.return
TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- GeneratorPrototypeResume(receiver, value, context, JSGeneratorObject::kReturn,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* receiver = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ GeneratorPrototypeResume(&args, receiver, value, context,
+ JSGeneratorObject::kReturn,
"[Generator].prototype.return");
}
// ES6 #sec-generator.prototype.throw
TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* exception = Parameter(Descriptor::kException);
- Node* context = Parameter(Descriptor::kContext);
- GeneratorPrototypeResume(receiver, exception, context,
+ const int kExceptionArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* receiver = args.GetReceiver();
+ Node* exception = args.GetOptionalArgumentValue(kExceptionArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ GeneratorPrototypeResume(&args, receiver, exception, context,
JSGeneratorObject::kThrow,
"[Generator].prototype.throw");
}
diff --git a/deps/v8/src/builtins/builtins-global-gen.cc b/deps/v8/src/builtins/builtins-global-gen.cc
index fc0f580796..5708fe67fb 100644
--- a/deps/v8/src/builtins/builtins-global-gen.cc
+++ b/deps/v8/src/builtins/builtins-global-gen.cc
@@ -30,8 +30,7 @@ TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
// Check if {num} is a HeapNumber.
Label if_numisheapnumber(this),
if_numisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(num)), &if_numisheapnumber,
- &if_numisnotheapnumber);
+ Branch(IsHeapNumber(num), &if_numisheapnumber, &if_numisnotheapnumber);
BIND(&if_numisheapnumber);
{
@@ -44,17 +43,16 @@ TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
BIND(&if_numisnotheapnumber);
{
// Need to convert {num} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_num.Bind(CallStub(callable, context, num));
+ var_num.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, num));
Goto(&loop);
}
}
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
// ES6 #sec-isnan-number
@@ -78,8 +76,7 @@ TF_BUILTIN(GlobalIsNaN, CodeStubAssembler) {
// Check if {num} is a HeapNumber.
Label if_numisheapnumber(this),
if_numisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(num)), &if_numisheapnumber,
- &if_numisnotheapnumber);
+ Branch(IsHeapNumber(num), &if_numisheapnumber, &if_numisnotheapnumber);
BIND(&if_numisheapnumber);
{
@@ -91,17 +88,16 @@ TF_BUILTIN(GlobalIsNaN, CodeStubAssembler) {
BIND(&if_numisnotheapnumber);
{
// Need to convert {num} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_num.Bind(CallStub(callable, context, num));
+ var_num.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, num));
Goto(&loop);
}
}
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index abd961998c..1426d987fc 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -35,12 +35,12 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
Node* length = TaggedToParameter(LoadFixedArrayBaseLength(source), mode);
// Check if we can allocate in new space.
- ElementsKind kind = FAST_ELEMENTS;
+ ElementsKind kind = PACKED_ELEMENTS;
int max_elements = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
- Label if_newspace(this), if_oldspace(this);
+ Label if_newspace(this), if_lospace(this, Label::kDeferred);
Branch(UintPtrOrSmiLessThan(length, IntPtrOrSmiConstant(max_elements, mode),
mode),
- &if_newspace, &if_oldspace);
+ &if_newspace, &if_lospace);
BIND(&if_newspace);
{
@@ -51,9 +51,10 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
Return(target);
}
- BIND(&if_oldspace);
+ BIND(&if_lospace);
{
- Node* target = AllocateFixedArray(kind, length, mode, kPretenured);
+ Node* target =
+ AllocateFixedArray(kind, length, mode, kAllowLargeObjectAllocation);
CopyFixedArrayElements(kind, source, target, length, UPDATE_WRITE_BARRIER,
mode);
StoreObjectField(object, JSObject::kElementsOffset, target);
@@ -68,7 +69,7 @@ TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
Label runtime(this, Label::kDeferred);
Node* elements = LoadElements(object);
- elements = TryGrowElementsCapacity(object, elements, FAST_DOUBLE_ELEMENTS,
+ elements = TryGrowElementsCapacity(object, elements, PACKED_DOUBLE_ELEMENTS,
key, &runtime);
Return(elements);
@@ -84,7 +85,7 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
Label runtime(this, Label::kDeferred);
Node* elements = LoadElements(object);
elements =
- TryGrowElementsCapacity(object, elements, FAST_ELEMENTS, key, &runtime);
+ TryGrowElementsCapacity(object, elements, PACKED_ELEMENTS, key, &runtime);
Return(elements);
BIND(&runtime);
@@ -96,7 +97,7 @@ TF_BUILTIN(NewUnmappedArgumentsElements, CodeStubAssembler) {
Node* length = SmiToWord(Parameter(Descriptor::kLength));
// Check if we can allocate in new space.
- ElementsKind kind = FAST_ELEMENTS;
+ ElementsKind kind = PACKED_ELEMENTS;
int max_elements = FixedArray::GetMaxLengthForNewSpaceAllocation(kind);
Label if_newspace(this), if_oldspace(this, Label::kDeferred);
Branch(IntPtrLessThan(length, IntPtrConstant(max_elements)), &if_newspace,
@@ -189,7 +190,7 @@ class DeletePropertyBaseAssembler : public CodeStubAssembler {
StoreValueByKeyIndex<NameDictionary>(properties, key_index, filler,
SKIP_WRITE_BARRIER);
StoreDetailsByKeyIndex<NameDictionary>(properties, key_index,
- SmiConstant(Smi::kZero));
+ SmiConstant(0));
// Update bookkeeping information (see NameDictionary::ElementRemoved).
Node* nof = GetNumberOfElements<NameDictionary>(properties);
@@ -204,7 +205,7 @@ class DeletePropertyBaseAssembler : public CodeStubAssembler {
Node* capacity = GetCapacity<NameDictionary>(properties);
GotoIf(SmiGreaterThan(new_nof, SmiShr(capacity, 2)), &shrinking_done);
GotoIf(SmiLessThan(new_nof, SmiConstant(16)), &shrinking_done);
- CallRuntime(Runtime::kShrinkPropertyDictionary, context, receiver, name);
+ CallRuntime(Runtime::kShrinkPropertyDictionary, context, receiver);
Goto(&shrinking_done);
BIND(&shrinking_done);
diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc
index 22d20031ea..810d6e930d 100644
--- a/deps/v8/src/builtins/builtins-internal.cc
+++ b/deps/v8/src/builtins/builtins-internal.cc
@@ -13,7 +13,6 @@ namespace internal {
BUILTIN(Illegal) {
UNREACHABLE();
- return isolate->heap()->undefined_value(); // Make compiler happy.
}
BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
@@ -24,17 +23,7 @@ BUILTIN(UnsupportedThrower) {
NewError(MessageTemplate::kUnsupported));
}
-// -----------------------------------------------------------------------------
-// Throwers for restricted function properties and strict arguments object
-// properties
-
-BUILTIN(RestrictedFunctionPropertiesThrower) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kRestrictedFunctionProperties));
-}
-
-BUILTIN(RestrictedStrictArgumentsPropertiesThrower) {
+BUILTIN(StrictPoisonPillThrower) {
HandleScope scope(isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
diff --git a/deps/v8/src/builtins/builtins-interpreter-gen.cc b/deps/v8/src/builtins/builtins-interpreter-gen.cc
index d11aa64af0..a8552338c8 100644
--- a/deps/v8/src/builtins/builtins-interpreter-gen.cc
+++ b/deps/v8/src/builtins/builtins-interpreter-gen.cc
@@ -11,51 +11,36 @@ namespace internal {
void Builtins::Generate_InterpreterPushArgsThenCall(MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow,
- InterpreterPushArgsMode::kOther);
+ masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushArgsThenCallFunction(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow,
- InterpreterPushArgsMode::kJSFunction);
+ masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kJSFunction);
}
void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kNullOrUndefined, TailCallMode::kDisallow,
+ masm, ConvertReceiverMode::kNullOrUndefined,
InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCallFunction(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kNullOrUndefined, TailCallMode::kDisallow,
+ masm, ConvertReceiverMode::kNullOrUndefined,
InterpreterPushArgsMode::kJSFunction);
}
void Builtins::Generate_InterpreterPushArgsThenCallWithFinalSpread(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow,
+ masm, ConvertReceiverMode::kAny,
InterpreterPushArgsMode::kWithFinalSpread);
}
-void Builtins::Generate_InterpreterPushArgsThenTailCall(MacroAssembler* masm) {
- return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kAllow,
- InterpreterPushArgsMode::kOther);
-}
-
-void Builtins::Generate_InterpreterPushArgsThenTailCallFunction(
- MacroAssembler* masm) {
- return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kAllow,
- InterpreterPushArgsMode::kJSFunction);
-}
-
void Builtins::Generate_InterpreterPushArgsThenConstruct(MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kOther);
diff --git a/deps/v8/src/builtins/builtins-interpreter.cc b/deps/v8/src/builtins/builtins-interpreter.cc
index dd6ef0d0d0..0e50ce2c59 100644
--- a/deps/v8/src/builtins/builtins-interpreter.cc
+++ b/deps/v8/src/builtins/builtins-interpreter.cc
@@ -12,41 +12,28 @@ namespace v8 {
namespace internal {
Handle<Code> Builtins::InterpreterPushArgsThenCall(
- ConvertReceiverMode receiver_mode, TailCallMode tail_call_mode,
- InterpreterPushArgsMode mode) {
+ ConvertReceiverMode receiver_mode, InterpreterPushArgsMode mode) {
switch (mode) {
case InterpreterPushArgsMode::kJSFunction:
- if (tail_call_mode == TailCallMode::kDisallow) {
- switch (receiver_mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return InterpreterPushUndefinedAndArgsThenCallFunction();
- case ConvertReceiverMode::kNotNullOrUndefined:
- case ConvertReceiverMode::kAny:
- return InterpreterPushArgsThenCallFunction();
- }
- } else {
- CHECK_EQ(receiver_mode, ConvertReceiverMode::kAny);
- return InterpreterPushArgsThenTailCallFunction();
+ switch (receiver_mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return InterpreterPushUndefinedAndArgsThenCallFunction();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ case ConvertReceiverMode::kAny:
+ return InterpreterPushArgsThenCallFunction();
}
case InterpreterPushArgsMode::kWithFinalSpread:
- CHECK(tail_call_mode == TailCallMode::kDisallow);
return InterpreterPushArgsThenCallWithFinalSpread();
case InterpreterPushArgsMode::kOther:
- if (tail_call_mode == TailCallMode::kDisallow) {
- switch (receiver_mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return InterpreterPushUndefinedAndArgsThenCall();
- case ConvertReceiverMode::kNotNullOrUndefined:
- case ConvertReceiverMode::kAny:
- return InterpreterPushArgsThenCall();
- }
- } else {
- CHECK_EQ(receiver_mode, ConvertReceiverMode::kAny);
- return InterpreterPushArgsThenTailCall();
+ switch (receiver_mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return InterpreterPushUndefinedAndArgsThenCall();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ case ConvertReceiverMode::kAny:
+ return InterpreterPushArgsThenCall();
}
}
UNREACHABLE();
- return Handle<Code>::null();
}
Handle<Code> Builtins::InterpreterPushArgsThenConstruct(
@@ -60,7 +47,6 @@ Handle<Code> Builtins::InterpreterPushArgsThenConstruct(
return InterpreterPushArgsThenConstruct();
}
UNREACHABLE();
- return Handle<Code>::null();
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 3782d43a9a..cb7de423d3 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -18,12 +18,11 @@ class IntlBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
};
-TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
- Node* const maybe_string = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
+ Node* const string = Parameter(Descriptor::kString);
Node* const context = Parameter(Descriptor::kContext);
- Node* const string =
- ToThisString(context, maybe_string, "String.prototype.toLowerCase");
+ CSA_ASSERT(this, IsString(string));
Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
@@ -64,21 +63,21 @@ TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0));
VariableList push_vars({&var_cursor, &var_did_change}, zone());
- BuildFastLoop(
- push_vars, start_address, end_address,
- [=, &var_cursor, &var_did_change](Node* current) {
- Node* c = Load(MachineType::Uint8(), current);
- Node* lower = Load(MachineType::Uint8(), to_lower_table_addr,
+ BuildFastLoop(push_vars, start_address, end_address,
+ [=, &var_cursor, &var_did_change](Node* current) {
+ Node* c = Load(MachineType::Uint8(), current);
+ Node* lower =
+ Load(MachineType::Uint8(), to_lower_table_addr,
ChangeInt32ToIntPtr(c));
- StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr,
- var_cursor.value(), lower);
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr,
+ var_cursor.value(), lower);
- var_did_change.Bind(
- Word32Or(Word32NotEqual(c, lower), var_did_change.value()));
+ var_did_change.Bind(Word32Or(Word32NotEqual(c, lower),
+ var_did_change.value()));
- Increment(var_cursor);
- },
- kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Increment(var_cursor);
+ },
+ kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
// Return the original string if it remained unchanged in order to preserve
// e.g. internalization and private symbols (such as the preserved object
@@ -114,11 +113,21 @@ TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
BIND(&runtime);
{
- Node* const result =
- CallRuntime(Runtime::kStringToLowerCaseIntl, context, string);
+ Node* const result = CallRuntime(Runtime::kStringToLowerCaseIntl,
+ NoContextConstant(), string);
Return(result);
}
}
+TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
+ Node* const maybe_string = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Node* const string =
+ ToThisString(context, maybe_string, "String.prototype.toLowerCase");
+
+ Return(CallBuiltin(Builtins::kStringToLowerCaseIntl, context, string));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index c14d73b3b6..b3ad156158 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -6,12 +6,21 @@
#error Internationalization is expected to be enabled.
#endif // V8_INTL_SUPPORT
+#include "src/builtins/builtins-intl.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/intl.h"
#include "src/objects-inl.h"
+#include "src/objects/intl-objects.h"
+#include "unicode/decimfmt.h"
+#include "unicode/fieldpos.h"
+#include "unicode/fpositer.h"
#include "unicode/normalizer2.h"
+#include "unicode/numfmt.h"
+#include "unicode/ufieldpositer.h"
+#include "unicode/unistr.h"
+#include "unicode/ustring.h"
namespace v8 {
namespace internal {
@@ -97,5 +106,265 @@ BUILTIN(StringPrototypeNormalizeIntl) {
result.length())));
}
+namespace {
+
+// The list comes from third_party/icu/source/i18n/unicode/unum.h.
+// They're mapped to NumberFormat part types mentioned throughout
+// https://tc39.github.io/ecma402/#sec-partitionnumberpattern .
+Handle<String> IcuNumberFieldIdToNumberType(int32_t field_id, double number,
+ Isolate* isolate) {
+ switch (static_cast<UNumberFormatFields>(field_id)) {
+ case UNUM_INTEGER_FIELD:
+ if (std::isfinite(number)) return isolate->factory()->integer_string();
+ if (std::isnan(number)) return isolate->factory()->nan_string();
+ return isolate->factory()->infinity_string();
+ case UNUM_FRACTION_FIELD:
+ return isolate->factory()->fraction_string();
+ case UNUM_DECIMAL_SEPARATOR_FIELD:
+ return isolate->factory()->decimal_string();
+ case UNUM_GROUPING_SEPARATOR_FIELD:
+ return isolate->factory()->group_string();
+ case UNUM_CURRENCY_FIELD:
+ return isolate->factory()->currency_string();
+ case UNUM_PERCENT_FIELD:
+ return isolate->factory()->percentSign_string();
+ case UNUM_SIGN_FIELD:
+ return number < 0 ? isolate->factory()->minusSign_string()
+ : isolate->factory()->plusSign_string();
+
+ case UNUM_EXPONENT_SYMBOL_FIELD:
+ case UNUM_EXPONENT_SIGN_FIELD:
+ case UNUM_EXPONENT_FIELD:
+ // We should never get these because we're not using any scientific
+ // formatter.
+ UNREACHABLE();
+ return Handle<String>();
+
+ case UNUM_PERMILL_FIELD:
+ // We're not creating any permill formatter, and it's not even clear how
+ // that would be possible with the ICU API.
+ UNREACHABLE();
+ return Handle<String>();
+
+ default:
+ UNREACHABLE();
+ return Handle<String>();
+ }
+}
+
+bool AddElement(Handle<JSArray> array, int index,
+ Handle<String> field_type_string,
+ const icu::UnicodeString& formatted, int32_t begin, int32_t end,
+ Isolate* isolate) {
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
+ Handle<JSObject> element = factory->NewJSObject(isolate->object_function());
+ Handle<String> value;
+ JSObject::AddProperty(element, factory->type_string(), field_type_string,
+ NONE);
+
+ icu::UnicodeString field(formatted.tempSubStringBetween(begin, end));
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ factory->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(field.getBuffer()),
+ field.length())),
+ false);
+
+ JSObject::AddProperty(element, factory->value_string(), value, NONE);
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, JSObject::AddDataElement(array, index, element, NONE), false);
+ return true;
+}
+
+bool cmp_NumberFormatSpan(const NumberFormatSpan& a,
+ const NumberFormatSpan& b) {
+ // Regions that start earlier should be encountered earlier.
+ if (a.begin_pos < b.begin_pos) return true;
+ if (a.begin_pos > b.begin_pos) return false;
+ // For regions that start in the same place, regions that last longer should
+ // be encountered earlier.
+ if (a.end_pos < b.end_pos) return false;
+ if (a.end_pos > b.end_pos) return true;
+ // For regions that are exactly the same, one of them must be the "literal"
+ // backdrop we added, which has a field_id of -1, so consider higher field_ids
+ // to be later.
+ return a.field_id < b.field_id;
+}
+
+Object* FormatNumberToParts(Isolate* isolate, icu::NumberFormat* fmt,
+ double number) {
+ Factory* factory = isolate->factory();
+
+ icu::UnicodeString formatted;
+ icu::FieldPositionIterator fp_iter;
+ UErrorCode status = U_ZERO_ERROR;
+ fmt->format(number, formatted, &fp_iter, status);
+ if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+ Handle<JSArray> result = factory->NewJSArray(0);
+ int32_t length = formatted.length();
+ if (length == 0) return *result;
+
+ std::vector<NumberFormatSpan> regions;
+ // Add a "literal" backdrop for the entire string. This will be used if no
+ // other region covers some part of the formatted string. It's possible
+ // there's another field with exactly the same begin and end as this backdrop,
+ // in which case the backdrop's field_id of -1 will give it lower priority.
+ regions.push_back(NumberFormatSpan(-1, 0, formatted.length()));
+
+ {
+ icu::FieldPosition fp;
+ while (fp_iter.next(fp)) {
+ regions.push_back(NumberFormatSpan(fp.getField(), fp.getBeginIndex(),
+ fp.getEndIndex()));
+ }
+ }
+
+ std::vector<NumberFormatSpan> parts = FlattenRegionsToParts(&regions);
+
+ int index = 0;
+ for (auto it = parts.begin(); it < parts.end(); it++) {
+ NumberFormatSpan part = *it;
+ Handle<String> field_type_string =
+ part.field_id == -1
+ ? isolate->factory()->literal_string()
+ : IcuNumberFieldIdToNumberType(part.field_id, number, isolate);
+ if (!AddElement(result, index, field_type_string, formatted, part.begin_pos,
+ part.end_pos, isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+ ++index;
+ }
+ JSObject::ValidateElements(*result);
+
+ return *result;
+}
+} // namespace
+
+// Flattens a list of possibly-overlapping "regions" to a list of
+// non-overlapping "parts". At least one of the input regions must span the
+// entire space of possible indexes. The regions parameter will sorted in-place
+// according to some criteria; this is done for performance to avoid copying the
+// input.
+std::vector<NumberFormatSpan> FlattenRegionsToParts(
+ std::vector<NumberFormatSpan>* regions) {
+ // The intention of this algorithm is that it's used to translate ICU "fields"
+ // to JavaScript "parts" of a formatted string. Each ICU field and JavaScript
+ // part has an integer field_id, which corresponds to something like "grouping
+ // separator", "fraction", or "percent sign", and has a begin and end
+ // position. Here's a diagram of:
+
+ // var nf = new Intl.NumberFormat(['de'], {style:'currency',currency:'EUR'});
+ // nf.formatToParts(123456.78);
+
+ // : 6
+ // input regions: 0000000211 7
+ // ('-' means -1): ------------
+  // formatted string:       "123.456,78 €"
+ // output parts: 0006000211-7
+
+ // To illustrate the requirements of this algorithm, here's a contrived and
+ // convoluted example of inputs and expected outputs:
+
+ // : 4
+ // : 22 33 3
+ // : 11111 22
+ // input regions: 0000000 111
+ // : ------------
+ // formatted string: "abcdefghijkl"
+ // output parts: 0221340--231
+ // (The characters in the formatted string are irrelevant to this function.)
+
+ // We arrange the overlapping input regions like a mountain range where
+ // smaller regions are "on top" of larger regions, and we output a birds-eye
+ // view of the mountains, so that smaller regions take priority over larger
+ // regions.
+ std::sort(regions->begin(), regions->end(), cmp_NumberFormatSpan);
+ std::vector<size_t> overlapping_region_index_stack;
+ // At least one item in regions must be a region spanning the entire string.
+ // Due to the sorting above, the first item in the vector will be one of them.
+ overlapping_region_index_stack.push_back(0);
+ NumberFormatSpan top_region = regions->at(0);
+ size_t region_iterator = 1;
+ int32_t entire_size = top_region.end_pos;
+
+ std::vector<NumberFormatSpan> out_parts;
+
+ // The "climber" is a cursor that advances from left to right climbing "up"
+ // and "down" the mountains. Whenever the climber moves to the right, that
+ // represents an item of output.
+ int32_t climber = 0;
+ while (climber < entire_size) {
+ int32_t next_region_begin_pos;
+ if (region_iterator < regions->size()) {
+ next_region_begin_pos = regions->at(region_iterator).begin_pos;
+ } else {
+ // finish off the rest of the input by proceeding to the end.
+ next_region_begin_pos = entire_size;
+ }
+
+ if (climber < next_region_begin_pos) {
+ while (top_region.end_pos < next_region_begin_pos) {
+ if (climber < top_region.end_pos) {
+ // step down
+ out_parts.push_back(NumberFormatSpan(top_region.field_id, climber,
+ top_region.end_pos));
+ climber = top_region.end_pos;
+ } else {
+ // drop down
+ }
+ overlapping_region_index_stack.pop_back();
+ top_region = regions->at(overlapping_region_index_stack.back());
+ }
+ if (climber < next_region_begin_pos) {
+ // cross a plateau/mesa/valley
+ out_parts.push_back(NumberFormatSpan(top_region.field_id, climber,
+ next_region_begin_pos));
+ climber = next_region_begin_pos;
+ }
+ }
+ if (region_iterator < regions->size()) {
+ overlapping_region_index_stack.push_back(region_iterator++);
+ top_region = regions->at(overlapping_region_index_stack.back());
+ }
+ }
+ return out_parts;
+}
+
+BUILTIN(NumberFormatPrototypeFormatToParts) {
+ const char* const method = "Intl.NumberFormat.prototype.formatToParts";
+ HandleScope handle_scope(isolate);
+ CHECK_RECEIVER(JSObject, number_format_holder, method);
+
+ Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
+ Handle<Object> tag =
+ JSReceiver::GetDataProperty(number_format_holder, marker);
+ Handle<String> expected_tag =
+ isolate->factory()->NewStringFromStaticChars("numberformat");
+ if (!(tag->IsString() && String::cast(*tag)->Equals(*expected_tag))) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(method),
+ number_format_holder));
+ }
+
+ Handle<Object> x;
+ if (args.length() >= 1) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
+ Object::ToNumber(args.at(1)));
+ } else {
+ x = isolate->factory()->nan_value();
+ }
+
+ icu::DecimalFormat* number_format =
+ NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+ CHECK_NOT_NULL(number_format);
+
+ Object* result = FormatNumberToParts(isolate, number_format, x->Number());
+ return result;
+}
+
} // namespace internal
} // namespace v8
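
The worked currency example in the FlattenRegionsToParts comments above can also be read as plain data. The sketch below transcribes that diagram into a vector of spans, using a local Span struct with the same shape as NumberFormatSpan and the field ids shown in the diagram; it is illustrative only and does not call into ICU or the function itself:

    #include <cstdint>
    #include <vector>

    // Same layout as the NumberFormatSpan declared in builtins-intl.h.
    struct Span {
      int32_t field_id;  // -1 marks the "literal" backdrop
      int32_t begin_pos;
      int32_t end_pos;   // exclusive
    };

    int main() {
      // "123.456,78 €" as produced by a German currency NumberFormat
      // (12 code units, counting the no-break space before the currency sign).
      std::vector<Span> regions = {
          {-1, 0, 12},  // literal backdrop over the whole string
          {0, 0, 7},    // integer   "123.456"
          {6, 3, 4},    // group     "."
          {2, 7, 8},    // decimal   ","
          {1, 8, 10},   // fraction  "78"
          {7, 11, 12},  // currency  "€"
      };
      // Flattening these overlapping regions yields non-overlapping parts:
      //   integer "123", group ".", integer "456", decimal ",",
      //   fraction "78", literal " ", currency "€"
      // which matches the "output parts" row of the diagram above.
      (void)regions;
      return 0;
    }
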
diff --git a/deps/v8/src/builtins/builtins-intl.h b/deps/v8/src/builtins/builtins-intl.h
new file mode 100644
index 0000000000..8dda0c0898
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-intl.h
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_INTL_H_
+#define V8_BUILTINS_BUILTINS_INTL_H_
+
+#include <stdint.h>
+#include <vector>
+
+namespace v8 {
+namespace internal {
+
+struct NumberFormatSpan {
+ int32_t field_id;
+ int32_t begin_pos;
+ int32_t end_pos;
+
+ NumberFormatSpan() {}
+ NumberFormatSpan(int32_t field_id, int32_t begin_pos, int32_t end_pos)
+ : field_id(field_id), begin_pos(begin_pos), end_pos(end_pos) {}
+};
+
+std::vector<NumberFormatSpan> FlattenRegionsToParts(
+ std::vector<NumberFormatSpan>* regions);
+
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_BUILTINS_BUILTINS_INTL_H_
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
new file mode 100644
index 0000000000..d60cfb7128
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -0,0 +1,184 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-iterator-gen.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object,
+ Label* if_exception,
+ Variable* exception) {
+ Node* method = GetProperty(context, object, factory()->iterator_symbol());
+ GotoIfException(method, if_exception, exception);
+
+ Callable callable = CodeFactory::Call(isolate());
+ Node* iterator = CallJS(callable, context, method, object);
+ GotoIfException(iterator, if_exception, exception);
+
+ Label done(this), if_notobject(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(iterator), &if_notobject);
+ Branch(IsJSReceiver(iterator), &done, &if_notobject);
+
+ BIND(&if_notobject);
+ {
+ Node* ret =
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kNotAnIterator), iterator);
+ GotoIfException(ret, if_exception, exception);
+ Unreachable();
+ }
+
+ BIND(&done);
+ return iterator;
+}
+
+Node* IteratorBuiltinsAssembler::IteratorStep(Node* context, Node* iterator,
+ Label* if_done,
+ Node* fast_iterator_result_map,
+ Label* if_exception,
+ Variable* exception) {
+ DCHECK_NOT_NULL(if_done);
+
+ // IteratorNext
+ Node* next_method = GetProperty(context, iterator, factory()->next_string());
+ GotoIfException(next_method, if_exception, exception);
+
+  // 1. a. Let result be ? Invoke(iterator, "next", « »).
+ Callable callable = CodeFactory::Call(isolate());
+ Node* result = CallJS(callable, context, next_method, iterator);
+ GotoIfException(result, if_exception, exception);
+
+ // 3. If Type(result) is not Object, throw a TypeError exception.
+ Label if_notobject(this, Label::kDeferred), return_result(this);
+ GotoIf(TaggedIsSmi(result), &if_notobject);
+ GotoIfNot(IsJSReceiver(result), &if_notobject);
+
+ VARIABLE(var_done, MachineRepresentation::kTagged);
+
+ if (fast_iterator_result_map != nullptr) {
+ // Fast iterator result case:
+ Label if_generic(this);
+
+ // 4. Return result.
+ Node* map = LoadMap(result);
+ GotoIfNot(WordEqual(map, fast_iterator_result_map), &if_generic);
+
+ // IteratorComplete
+ // 2. Return ToBoolean(? Get(iterResult, "done")).
+ Node* done = LoadObjectField(result, JSIteratorResult::kDoneOffset);
+ CSA_ASSERT(this, IsBoolean(done));
+ var_done.Bind(done);
+ Goto(&return_result);
+
+ BIND(&if_generic);
+ }
+
+ // Generic iterator result case:
+ {
+ // IteratorComplete
+ // 2. Return ToBoolean(? Get(iterResult, "done")).
+ Node* done = GetProperty(context, result, factory()->done_string());
+ GotoIfException(done, if_exception, exception);
+ var_done.Bind(done);
+
+ Label to_boolean(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(done), &to_boolean);
+ Branch(IsBoolean(done), &return_result, &to_boolean);
+
+ BIND(&to_boolean);
+ var_done.Bind(CallBuiltin(Builtins::kToBoolean, context, done));
+ Goto(&return_result);
+ }
+
+ BIND(&if_notobject);
+ {
+ Node* ret =
+ CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
+ GotoIfException(ret, if_exception, exception);
+ Unreachable();
+ }
+
+ BIND(&return_result);
+ GotoIf(IsTrue(var_done.value()), if_done);
+ return result;
+}
+
+Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result,
+ Node* fast_iterator_result_map,
+ Label* if_exception,
+ Variable* exception) {
+ CSA_ASSERT(this, IsJSReceiver(result));
+
+ Label exit(this);
+ VARIABLE(var_value, MachineRepresentation::kTagged);
+ if (fast_iterator_result_map != nullptr) {
+ // Fast iterator result case:
+ Label if_generic(this);
+ Node* map = LoadMap(result);
+ GotoIfNot(WordEqual(map, fast_iterator_result_map), &if_generic);
+ var_value.Bind(LoadObjectField(result, JSIteratorResult::kValueOffset));
+ Goto(&exit);
+
+ BIND(&if_generic);
+ }
+
+ // Generic iterator result case:
+ {
+ Node* value = GetProperty(context, result, factory()->value_string());
+ GotoIfException(value, if_exception, exception);
+ var_value.Bind(value);
+ Goto(&exit);
+ }
+
+ BIND(&exit);
+ return var_value.value();
+}
+
+void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
+ Node* iterator,
+ Label* if_exception,
+ Variable* exception) {
+ // Perform ES #sec-iteratorclose when an exception occurs. This simpler
+ // algorithm does not include redundant steps which are never reachable from
+ // the spec IteratorClose algorithm.
+ DCHECK_NOT_NULL(if_exception);
+ DCHECK_NOT_NULL(exception);
+ CSA_ASSERT(this, IsNotTheHole(exception->value()));
+ CSA_ASSERT(this, IsJSReceiver(iterator));
+
+ // Let return be ? GetMethod(iterator, "return").
+ Node* method = GetProperty(context, iterator, factory()->return_string());
+ GotoIfException(method, if_exception, exception);
+
+ // If return is undefined, return Completion(completion).
+ GotoIf(Word32Or(IsUndefined(method), IsNull(method)), if_exception);
+
+ {
+    // Let innerResult be Call(return, iterator, « »).
+ // If an exception occurs, the original exception remains bound
+ Node* inner_result =
+ CallJS(CodeFactory::Call(isolate()), context, method, iterator);
+ GotoIfException(inner_result, if_exception, nullptr);
+
+ // (If completion.[[Type]] is throw) return Completion(completion).
+ Goto(if_exception);
+ }
+}
+
+void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
+ Node* iterator,
+ Variable* exception) {
+ Label rethrow(this, Label::kDeferred);
+ IteratorCloseOnException(context, iterator, &rethrow, exception);
+
+ BIND(&rethrow);
+ CallRuntime(Runtime::kReThrow, context, exception->value());
+ Unreachable();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
new file mode 100644
index 0000000000..0ed6077024
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -0,0 +1,49 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+class IteratorBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit IteratorBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ // https://tc39.github.io/ecma262/#sec-getiterator --- never used for
+ // @@asyncIterator.
+ Node* GetIterator(Node* context, Node* object, Label* if_exception = nullptr,
+ Variable* exception = nullptr);
+
+ // https://tc39.github.io/ecma262/#sec-iteratorstep
+ // Returns `false` if the iterator is done, otherwise returns an
+ // iterator result.
+ // `fast_iterator_result_map` refers to the map for the JSIteratorResult
+ // object, loaded from the native context.
+ Node* IteratorStep(Node* context, Node* iterator, Label* if_done,
+ Node* fast_iterator_result_map = nullptr,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
+
+ // https://tc39.github.io/ecma262/#sec-iteratorvalue
+ // Return the `value` field from an iterator.
+ // `fast_iterator_result_map` refers to the map for the JSIteratorResult
+ // object, loaded from the native context.
+ Node* IteratorValue(Node* context, Node* result,
+ Node* fast_iterator_result_map = nullptr,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
+
+ // https://tc39.github.io/ecma262/#sec-iteratorclose
+ void IteratorCloseOnException(Node* context, Node* iterator,
+ Label* if_exception, Variable* exception);
+ void IteratorCloseOnException(Node* context, Node* iterator,
+ Variable* exception);
+};
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index e5c8489301..b8d7b44e0b 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -59,8 +59,8 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
} else {
// Check if {x} is already positive.
Label if_xispositive(this), if_xisnotpositive(this);
- BranchIfSmiLessThanOrEqual(SmiConstant(Smi::FromInt(0)), x,
- &if_xispositive, &if_xisnotpositive);
+ BranchIfSmiLessThanOrEqual(SmiConstant(0), x, &if_xispositive,
+ &if_xisnotpositive);
BIND(&if_xispositive);
{
@@ -93,8 +93,7 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
{
// Check if {x} is a HeapNumber.
Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
- &if_xisnotheapnumber);
+ Branch(IsHeapNumber(x), &if_xisheapnumber, &if_xisnotheapnumber);
BIND(&if_xisheapnumber);
{
@@ -107,8 +106,7 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
BIND(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_x.Bind(CallStub(callable, context, x));
+ var_x.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, x));
Goto(&loop);
}
}
@@ -140,8 +138,7 @@ void MathBuiltinsAssembler::MathRoundingOperation(
{
// Check if {x} is a HeapNumber.
Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
- &if_xisnotheapnumber);
+ Branch(IsHeapNumber(x), &if_xisheapnumber, &if_xisnotheapnumber);
BIND(&if_xisheapnumber);
{
@@ -154,8 +151,7 @@ void MathBuiltinsAssembler::MathRoundingOperation(
BIND(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_x.Bind(CallStub(callable, context, x));
+ var_x.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, x));
Goto(&loop);
}
}
@@ -289,8 +285,7 @@ TF_BUILTIN(MathClz32, CodeStubAssembler) {
{
// Check if {x} is a HeapNumber.
Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
- &if_xisnotheapnumber);
+ Branch(IsHeapNumber(x), &if_xisheapnumber, &if_xisnotheapnumber);
BIND(&if_xisheapnumber);
{
@@ -301,8 +296,7 @@ TF_BUILTIN(MathClz32, CodeStubAssembler) {
BIND(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_x.Bind(CallStub(callable, context, x));
+ var_x.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, x));
Goto(&loop);
}
}
@@ -427,7 +421,7 @@ TF_BUILTIN(MathRandom, CodeStubAssembler) {
// Cached random numbers are exhausted if index is 0. Go to slow path.
Label if_cached(this);
- GotoIf(SmiAbove(smi_index.value(), SmiConstant(Smi::kZero)), &if_cached);
+ GotoIf(SmiAbove(smi_index.value(), SmiConstant(0)), &if_cached);
// Cache exhausted, populate the cache. Return value is the new index.
smi_index.Bind(CallRuntime(Runtime::kGenerateRandomNumbers, context));
@@ -435,7 +429,7 @@ TF_BUILTIN(MathRandom, CodeStubAssembler) {
// Compute next index by decrement.
BIND(&if_cached);
- Node* new_smi_index = SmiSub(smi_index.value(), SmiConstant(Smi::FromInt(1)));
+ Node* new_smi_index = SmiSub(smi_index.value(), SmiConstant(1));
StoreContextElement(native_context, Context::MATH_RANDOM_INDEX_INDEX,
new_smi_index);
@@ -468,10 +462,10 @@ TF_BUILTIN(MathSign, CodeStubAssembler) {
Return(ChangeFloat64ToTagged(x_value));
BIND(&if_xisnegative);
- Return(SmiConstant(Smi::FromInt(-1)));
+ Return(SmiConstant(-1));
BIND(&if_xispositive);
- Return(SmiConstant(Smi::FromInt(1)));
+ Return(SmiConstant(1));
}
// ES6 #sec-math.sin
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 56f988a1ca..9a1484708f 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -53,6 +53,11 @@ class NumberBuiltinsAssembler : public CodeStubAssembler {
Return(RelationalComparison(mode, lhs, rhs, context));
}
+
+ template <typename Descriptor>
+ void BinaryOp(Label* smis, Variable* var_left, Variable* var_right,
+ Label* doubles, Variable* var_left_double,
+ Variable* var_right_double);
};
// ES6 #sec-number.isfinite
@@ -65,7 +70,7 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumber(number), &return_false);
// Check if {number} contains a finite, non-NaN value.
Node* number_value = LoadHeapNumberValue(number);
@@ -89,7 +94,7 @@ TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumber(number), &return_false);
// Load the actual value of {number}.
Node* number_value = LoadHeapNumberValue(number);
@@ -118,7 +123,7 @@ TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_false);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumber(number), &return_false);
// Check if {number} contains a NaN value.
Node* number_value = LoadHeapNumberValue(number);
@@ -141,7 +146,7 @@ TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumber(number), &return_false);
// Load the actual value of {number}.
Node* number_value = LoadHeapNumberValue(number);
@@ -205,10 +210,9 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
// a cached array index.
Label if_inputcached(this), if_inputnotcached(this);
Node* input_hash = LoadNameHashField(input);
- Node* input_bit = Word32And(
- input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
- Branch(Word32Equal(input_bit, Int32Constant(0)), &if_inputcached,
- &if_inputnotcached);
+ Branch(IsClearWord32(input_hash,
+ Name::kDoesNotContainCachedArrayIndexMask),
+ &if_inputcached, &if_inputnotcached);
BIND(&if_inputcached);
{
@@ -252,8 +256,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
{
// Need to convert the {input} to String first.
// TODO(bmeurer): This could be more efficient if necessary.
- Callable callable = CodeFactory::ToString(isolate());
- var_input.Bind(CallStub(callable, context, input));
+ var_input.Bind(CallBuiltin(Builtins::kToString, context, input));
Goto(&loop);
}
}
@@ -270,8 +273,8 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
// Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
Label if_radix10(this), if_generic(this, Label::kDeferred);
GotoIf(WordEqual(radix, UndefinedConstant()), &if_radix10);
- GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(10))), &if_radix10);
- GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(0))), &if_radix10);
+ GotoIf(WordEqual(radix, SmiConstant(10)), &if_radix10);
+ GotoIf(WordEqual(radix, SmiConstant(0)), &if_radix10);
Goto(&if_generic);
BIND(&if_radix10);
@@ -319,9 +322,8 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
{
// Check if the String {input} has a cached array index.
Node* input_hash = LoadNameHashField(input);
- Node* input_bit = Word32And(
- input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
- GotoIf(Word32NotEqual(input_bit, Int32Constant(0)), &if_generic);
+ GotoIf(IsSetWord32(input_hash, Name::kDoesNotContainCachedArrayIndexMask),
+ &if_generic);
// Return the cached array index as result.
Node* input_index =
@@ -348,985 +350,428 @@ TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) {
Return(result);
}
-TF_BUILTIN(Add, CodeStubAssembler) {
+class AddStubAssembler : public CodeStubAssembler {
+ public:
+ explicit AddStubAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ void ConvertReceiverAndLoop(Variable* var_value, Label* loop, Node* context) {
+ // Call ToPrimitive explicitly without a hint (whereas ToNumber
+ // would pass a "number" hint).
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_value->Bind(CallStub(callable, context, var_value->value()));
+ Goto(loop);
+ }
+
+ void ConvertNonReceiverAndLoop(Variable* var_value, Label* loop,
+ Node* context) {
+ var_value->Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, var_value->value()));
+ Goto(loop);
+ }
+
+ void ConvertAndLoop(Variable* var_value, Node* instance_type, Label* loop,
+ Node* context) {
+ Label is_not_receiver(this, Label::kDeferred);
+ GotoIfNot(IsJSReceiverInstanceType(instance_type), &is_not_receiver);
+
+ ConvertReceiverAndLoop(var_value, loop, context);
+
+ BIND(&is_not_receiver);
+ ConvertNonReceiverAndLoop(var_value, loop, context);
+ }
+};
+
+TF_BUILTIN(Add, AddStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
+ VARIABLE(var_left, MachineRepresentation::kTagged,
+ Parameter(Descriptor::kLeft));
+ VARIABLE(var_right, MachineRepresentation::kTagged,
+ Parameter(Descriptor::kRight));
// Shared entry for floating point addition.
- Label do_fadd(this);
- VARIABLE(var_fadd_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fadd_rhs, MachineRepresentation::kFloat64);
+ Label do_double_add(this);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
// We might need to loop several times due to ToPrimitive, ToString and/or
// ToNumber conversions.
- VARIABLE(var_lhs, MachineRepresentation::kTagged);
- VARIABLE(var_rhs, MachineRepresentation::kTagged);
VARIABLE(var_result, MachineRepresentation::kTagged);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(this, 2, loop_vars), end(this),
+ Variable* loop_vars[2] = {&var_left, &var_right};
+ Label loop(this, 2, loop_vars),
string_add_convert_left(this, Label::kDeferred),
string_add_convert_right(this, Label::kDeferred);
- var_lhs.Bind(left);
- var_rhs.Bind(right);
Goto(&loop);
BIND(&loop);
{
- // Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
+ Node* left = var_left.value();
+ Node* right = var_right.value();
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_left_smi(this), if_left_heapobject(this);
+ Branch(TaggedIsSmi(left), &if_left_smi, &if_left_heapobject);
- BIND(&if_lhsissmi);
+ BIND(&if_left_smi);
{
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_right_smi(this), if_right_heapobject(this);
+ Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject);
- BIND(&if_rhsissmi);
+ BIND(&if_right_smi);
{
- // Try fast Smi addition first.
- Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(lhs),
- BitcastTaggedToWord(rhs));
+ // Try fast Smi addition first, bail out if it overflows.
+ Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(left),
+ BitcastTaggedToWord(right));
Node* overflow = Projection(1, pair);
-
- // Check if the Smi additon overflowed.
- Label if_overflow(this), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
+ Label if_overflow(this);
+ GotoIf(overflow, &if_overflow);
+ Return(BitcastWordToTaggedSigned(Projection(0, pair)));
BIND(&if_overflow);
{
- var_fadd_lhs.Bind(SmiToFloat64(lhs));
- var_fadd_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fadd);
+ var_left_double.Bind(SmiToFloat64(left));
+ var_right_double.Bind(SmiToFloat64(right));
+ Goto(&do_double_add);
}
+ } // if_right_smi
- BIND(&if_notoverflow);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
- }
-
- BIND(&if_rhsisnotsmi);
+ BIND(&if_right_heapobject);
{
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ Node* right_map = LoadMap(right);
- // Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+ Label if_right_not_number(this, Label::kDeferred);
+ GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number);
- BIND(&if_rhsisnumber);
- {
- var_fadd_lhs.Bind(SmiToFloat64(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fadd);
- }
+ // {right} is a HeapNumber.
+ var_left_double.Bind(SmiToFloat64(left));
+ var_right_double.Bind(LoadHeapNumberValue(right));
+ Goto(&do_double_add);
- BIND(&if_rhsisnotnumber);
+ BIND(&if_right_not_number);
{
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
-
- // Check if the {rhs} is a String.
- Label if_rhsisstring(this, Label::kDeferred),
- if_rhsisnotstring(this, Label::kDeferred);
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
-
- BIND(&if_rhsisstring);
- {
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- Goto(&string_add_convert_left);
- }
-
- BIND(&if_rhsisnotstring);
- {
- // Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // Convert {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
- }
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_instance_type),
+ &string_add_convert_left);
+ ConvertAndLoop(&var_right, right_instance_type, &loop, context);
}
- }
- }
+ } // if_right_heapobject
+ } // if_left_smi
- BIND(&if_lhsisnotsmi);
+ BIND(&if_left_heapobject);
{
- // Load the map and instance type of {lhs}.
- Node* lhs_instance_type = LoadInstanceType(lhs);
+ Node* left_map = LoadMap(left);
+ Label if_right_smi(this), if_right_heapobject(this);
+ Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject);
- // Check if {lhs} is a String.
- Label if_lhsisstring(this), if_lhsisnotstring(this);
- Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
- &if_lhsisnotstring);
-
- BIND(&if_lhsisstring);
+ BIND(&if_right_smi);
{
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- Goto(&string_add_convert_right);
- }
+ Label if_left_not_number(this, Label::kDeferred);
+ GotoIfNot(IsHeapNumberMap(left_map), &if_left_not_number);
- BIND(&if_lhsisnotstring);
- {
- // Check if {rhs} is a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ // {left} is a HeapNumber, {right} is a Smi.
+ var_left_double.Bind(LoadHeapNumberValue(left));
+ var_right_double.Bind(SmiToFloat64(right));
+ Goto(&do_double_add);
- BIND(&if_rhsissmi);
+ BIND(&if_left_not_number);
{
- // Check if {lhs} is a Number.
- Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
- Branch(
- Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
+ Node* left_instance_type = LoadMapInstanceType(left_map);
+ GotoIf(IsStringInstanceType(left_instance_type),
+ &string_add_convert_right);
+ // {left} is neither a Number nor a String, and {right} is a Smi.
+ ConvertAndLoop(&var_left, left_instance_type, &loop, context);
+ }
+ } // if_right_smi
- BIND(&if_lhsisnumber);
- {
- // The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
- var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fadd);
- }
+ BIND(&if_right_heapobject);
+ {
+ Node* right_map = LoadMap(right);
- BIND(&if_lhsisnotnumber);
- {
- // The {lhs} is neither a Number nor a String, and the {rhs} is a
- // Smi.
- Label if_lhsisreceiver(this, Label::kDeferred),
- if_lhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
-
- BIND(&if_lhsisreceiver);
- {
- // Convert {lhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
-
- BIND(&if_lhsisnotreceiver);
- {
- // Convert {lhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
- }
- }
+ Label if_left_number(this), if_left_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(left_map), &if_left_number, &if_left_not_number);
- BIND(&if_rhsisnotsmi);
+ BIND(&if_left_number);
{
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
+ Label if_right_not_number(this, Label::kDeferred);
+ GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number);
- // Check if {rhs} is a String.
- Label if_rhsisstring(this), if_rhsisnotstring(this);
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
+ // Both {left} and {right} are HeapNumbers.
+ var_left_double.Bind(LoadHeapNumberValue(left));
+ var_right_double.Bind(LoadHeapNumberValue(right));
+ Goto(&do_double_add);
- BIND(&if_rhsisstring);
+ BIND(&if_right_not_number);
{
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- Goto(&string_add_convert_left);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_instance_type),
+ &string_add_convert_left);
+ // {left} is a HeapNumber, {right} is neither Number nor String.
+ ConvertAndLoop(&var_right, right_instance_type, &loop, context);
}
+ } // if_left_number
- BIND(&if_rhsisnotstring);
- {
- // Check if {lhs} is a HeapNumber.
- Label if_lhsisnumber(this), if_lhsisnotnumber(this);
- Branch(
- Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- BIND(&if_lhsisnumber);
- {
- // Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(this),
- if_rhsisnotnumber(this, Label::kDeferred);
- Branch(Word32Equal(rhs_instance_type,
- Int32Constant(HEAP_NUMBER_TYPE)),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- BIND(&if_rhsisnumber);
- {
- // Perform a floating point addition.
- var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fadd);
- }
-
- BIND(&if_rhsisnotnumber);
- {
- // Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // Convert {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
- }
- }
-
- BIND(&if_lhsisnotnumber);
- {
- // Check if {lhs} is a JSReceiver.
- Label if_lhsisreceiver(this, Label::kDeferred),
- if_lhsisnotreceiver(this);
- Branch(IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
-
- BIND(&if_lhsisreceiver);
- {
- // Convert {lhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
-
- BIND(&if_lhsisnotreceiver);
- {
- // Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // Convert {lhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
- }
- }
- }
+ BIND(&if_left_not_number);
+ {
+ Node* left_instance_type = LoadMapInstanceType(left_map);
+ GotoIf(IsStringInstanceType(left_instance_type),
+ &string_add_convert_right);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_instance_type),
+ &string_add_convert_left);
+ Label if_left_not_receiver(this, Label::kDeferred);
+ Label if_right_not_receiver(this, Label::kDeferred);
+ GotoIfNot(IsJSReceiverInstanceType(left_instance_type),
+ &if_left_not_receiver);
+ // {left} is a JSReceiver, convert it first.
+ ConvertReceiverAndLoop(&var_left, &loop, context);
+
+ BIND(&if_left_not_receiver);
+ GotoIfNot(IsJSReceiverInstanceType(right_instance_type),
+ &if_right_not_receiver);
+ // {left} is a Primitive, but {right} is a JSReceiver, so convert
+ // {right} first.
+ ConvertReceiverAndLoop(&var_right, &loop, context);
+
+ BIND(&if_right_not_receiver);
+ // Neither {left} nor {right} are JSReceivers.
+ ConvertNonReceiverAndLoop(&var_left, &loop, context);
}
- }
- }
+ } // if_right_heapobject
+ } // if_left_heapobject
}
BIND(&string_add_convert_left);
{
- // Convert {lhs}, which is a Smi, to a String and concatenate the
- // resulting string with the String {rhs}.
+ // Convert {left} to a String and concatenate it with the String {right}.
Callable callable =
CodeFactory::StringAdd(isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- var_result.Bind(
- CallStub(callable, context, var_lhs.value(), var_rhs.value()));
- Goto(&end);
+ Return(CallStub(callable, context, var_left.value(), var_right.value()));
}
BIND(&string_add_convert_right);
{
- // Convert {lhs}, which is a Smi, to a String and concatenate the
- // resulting string with the String {rhs}.
+ // Convert {right} to a String and concatenate it with the String {left}.
Callable callable = CodeFactory::StringAdd(
isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
- var_result.Bind(
- CallStub(callable, context, var_lhs.value(), var_rhs.value()));
- Goto(&end);
+ Return(CallStub(callable, context, var_left.value(), var_right.value()));
}
- BIND(&do_fadd);
+ BIND(&do_double_add);
{
- Node* lhs_value = var_fadd_lhs.value();
- Node* rhs_value = var_fadd_rhs.value();
- Node* value = Float64Add(lhs_value, rhs_value);
- Node* result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
- Goto(&end);
+ Node* value = Float64Add(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
}
- BIND(&end);
- Return(var_result.value());
}
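
The Smi fast path in the Add builtin above is plain overflow-checked word addition with a fall-back to a heap-allocated double when the sum does not fit. A minimal stand-alone sketch of the idea (plain C++, not CSA; AddTagged and the use of the GCC/Clang __builtin_add_overflow intrinsic are illustrative assumptions):

    #include <cstdint>
    #include <variant>

    // Stand-in for a tagged value: either a small integer (Smi) or a boxed double.
    using Tagged = std::variant<intptr_t, double>;

    Tagged AddTagged(intptr_t left, intptr_t right) {
      intptr_t sum;
      if (!__builtin_add_overflow(left, right, &sum)) {
        return sum;                               // fast path: result stays a Smi
      }
      // do_double_add: redo the addition in floating point and box the result.
      return static_cast<double>(left) + static_cast<double>(right);
    }
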
-TF_BUILTIN(Subtract, CodeStubAssembler) {
+template <typename Descriptor>
+void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
+ Variable* var_right, Label* doubles,
+ Variable* var_left_double,
+ Variable* var_right_double) {
+ DCHECK(var_left->rep() == MachineRepresentation::kTagged);
+ DCHECK(var_right->rep() == MachineRepresentation::kTagged);
+
Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
-
- // Shared entry for floating point subtraction.
- Label do_fsub(this), end(this);
- VARIABLE(var_fsub_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fsub_rhs, MachineRepresentation::kFloat64);
-
- // We might need to loop several times due to ToPrimitive and/or ToNumber
- // conversions.
- VARIABLE(var_lhs, MachineRepresentation::kTagged);
- VARIABLE(var_rhs, MachineRepresentation::kTagged);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(this, 2, loop_vars);
- var_lhs.Bind(left);
- var_rhs.Bind(right);
+ var_left->Bind(Parameter(Descriptor::kLeft));
+ var_right->Bind(Parameter(Descriptor::kRight));
+
+ // We might need to loop for ToNumber conversions.
+ Label loop(this, {var_left, var_right});
Goto(&loop);
BIND(&loop);
- {
- // Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
-
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
- BIND(&if_lhsissmi);
- {
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
- {
- // Try a fast Smi subtraction first.
- Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(lhs),
- BitcastTaggedToWord(rhs));
- Node* overflow = Projection(1, pair);
-
- // Check if the Smi subtraction overflowed.
- Label if_overflow(this), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
-
- BIND(&if_overflow);
- {
- // The result doesn't fit into Smi range.
- var_fsub_lhs.Bind(SmiToFloat64(lhs));
- var_fsub_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fsub);
- }
-
- BIND(&if_notoverflow);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
- }
-
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+ Label left_not_smi(this), right_not_smi(this);
+ Label left_not_number(this), right_not_number(this);
+ GotoIfNot(TaggedIsSmi(var_left->value()), &left_not_smi);
+ GotoIf(TaggedIsSmi(var_right->value()), smis);
- BIND(&if_rhsisnumber);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(SmiToFloat64(lhs));
- var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fsub);
- }
+ // At this point, var_left is a Smi but var_right is not.
+ GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number);
+ var_left_double->Bind(SmiToFloat64(var_left->value()));
+ var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ Goto(doubles);
- BIND(&if_rhsisnotnumber);
- {
- // Convert the {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
- }
- }
-
- BIND(&if_lhsisnotsmi);
- {
- // Load the map of the {lhs}.
- Node* lhs_map = LoadMap(lhs);
-
- // Check if the {lhs} is a HeapNumber.
- Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
-
- BIND(&if_lhsisnumber);
- {
- // Check if the {rhs} is a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fsub);
- }
-
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+ BIND(&left_not_smi);
+ {
+ GotoIfNot(IsHeapNumber(var_left->value()), &left_not_number);
+ GotoIfNot(TaggedIsSmi(var_right->value()), &right_not_smi);
- BIND(&if_rhsisnumber);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fsub);
- }
+ // At this point, var_left is a HeapNumber and var_right is a Smi.
+ var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
+ var_right_double->Bind(SmiToFloat64(var_right->value()));
+ Goto(doubles);
+ }
- BIND(&if_rhsisnotnumber);
- {
- // Convert the {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
- }
- }
+ BIND(&right_not_smi);
+ {
+ GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number);
+ var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
+ var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ Goto(doubles);
+ }
- BIND(&if_lhsisnotnumber);
- {
- // Convert the {lhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
- }
+ BIND(&left_not_number);
+ {
+ var_left->Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, var_left->value()));
+ Goto(&loop);
}
- BIND(&do_fsub);
+ BIND(&right_not_number);
{
- Node* lhs_value = var_fsub_lhs.value();
- Node* rhs_value = var_fsub_rhs.value();
- Node* value = Float64Sub(lhs_value, rhs_value);
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&end);
+ var_right->Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, var_right->value()));
+ Goto(&loop);
}
- BIND(&end);
- Return(var_result.value());
}
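
The shared BinaryOp helper above only classifies the two operands: both Smis go to the smis label, any Smi/HeapNumber mix goes to the doubles label with both sides loaded as float64, and anything else is converted via NonNumberToNumber before looping again. A rough stand-alone analogue of that dispatch (plain C++; the Tagged/Outcome/Classify names are invented for illustration):

    #include <variant>

    // Stand-ins for the three outcomes: both operands are small integers, both can
    // be treated as doubles, or one side still needs a ToNumber-style conversion.
    struct SmiPair         { long left, right; };
    struct DoublePair      { double left, right; };
    struct NeedsConversion {};

    using Tagged  = std::variant<long, double, const char*>;  // Smi, HeapNumber, other
    using Outcome = std::variant<SmiPair, DoublePair, NeedsConversion>;

    Outcome Classify(const Tagged& left, const Tagged& right) {
      if (std::holds_alternative<const char*>(left) ||
          std::holds_alternative<const char*>(right)) {
        return NeedsConversion{};                  // caller converts, then loops
      }
      if (std::holds_alternative<long>(left) && std::holds_alternative<long>(right)) {
        return SmiPair{std::get<long>(left), std::get<long>(right)};   // "smis"
      }
      auto as_double = [](const Tagged& v) {
        return std::holds_alternative<long>(v)
                   ? static_cast<double>(std::get<long>(v))
                   : std::get<double>(v);
      };
      return DoublePair{as_double(left), as_double(right)};            // "doubles"
    }
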
-TF_BUILTIN(Multiply, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
+TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
+ VARIABLE(var_left, MachineRepresentation::kTagged);
+ VARIABLE(var_right, MachineRepresentation::kTagged);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ Label do_smi_sub(this), do_double_sub(this);
- // Shared entry point for floating point multiplication.
- Label do_fmul(this), return_result(this);
- VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64);
+ BinaryOp<Descriptor>(&do_smi_sub, &var_left, &var_right, &do_double_sub,
+ &var_left_double, &var_right_double);
- // We might need to loop one or two times due to ToNumber conversions.
- VARIABLE(var_lhs, MachineRepresentation::kTagged);
- VARIABLE(var_rhs, MachineRepresentation::kTagged);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Variable* loop_variables[] = {&var_lhs, &var_rhs};
- Label loop(this, 2, loop_variables);
- var_lhs.Bind(left);
- var_rhs.Bind(right);
- Goto(&loop);
- BIND(&loop);
+ BIND(&do_smi_sub);
{
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
-
- Label lhs_is_smi(this), lhs_is_not_smi(this);
- Branch(TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
-
- BIND(&lhs_is_smi);
+ // Try a fast Smi subtraction first, bail out if it overflows.
+ Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(var_left.value()),
+ BitcastTaggedToWord(var_right.value()));
+ Node* overflow = Projection(1, pair);
+ Label if_overflow(this), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+
+ BIND(&if_overflow);
{
- Label rhs_is_smi(this), rhs_is_not_smi(this);
- Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
-
- BIND(&rhs_is_smi);
- {
- // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
- // in case of overflow.
- var_result.Bind(SmiMul(lhs, rhs));
- Goto(&return_result);
- }
-
- BIND(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
-
- BIND(&rhs_is_number);
- {
- // Convert {lhs} to a double and multiply it with the value of {rhs}.
- var_lhs_float64.Bind(SmiToFloat64(lhs));
- var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fmul);
- }
-
- BIND(&rhs_is_not_number);
- {
- // Multiplication is commutative, swap {lhs} with {rhs} and loop.
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- Goto(&loop);
- }
- }
+ var_left_double.Bind(SmiToFloat64(var_left.value()));
+ var_right_double.Bind(SmiToFloat64(var_right.value()));
+ Goto(&do_double_sub);
}
- BIND(&lhs_is_not_smi);
- {
- Node* lhs_map = LoadMap(lhs);
-
- // Check if {lhs} is a HeapNumber.
- Label lhs_is_number(this), lhs_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(lhs_map), &lhs_is_number, &lhs_is_not_number);
-
- BIND(&lhs_is_number);
- {
- // Check if {rhs} is a Smi.
- Label rhs_is_smi(this), rhs_is_not_smi(this);
- Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
-
- BIND(&rhs_is_smi);
- {
- // Convert {rhs} to a double and multiply it with the value of {lhs}.
- var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(SmiToFloat64(rhs));
- Goto(&do_fmul);
- }
-
- BIND(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
-
- BIND(&rhs_is_number);
- {
- // Both {lhs} and {rhs} are HeapNumbers. Load their values and
- // multiply them.
- var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fmul);
- }
-
- BIND(&rhs_is_not_number);
- {
- // Multiplication is commutative, swap {lhs} with {rhs} and loop.
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- Goto(&loop);
- }
- }
- }
-
- BIND(&lhs_is_not_number);
- {
- // Convert {lhs} to a Number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
- }
+ BIND(&if_notoverflow);
+ Return(BitcastWordToTaggedSigned(Projection(0, pair)));
}
- BIND(&do_fmul);
+ BIND(&do_double_sub);
{
- Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
- Goto(&return_result);
+ Node* value = Float64Sub(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
}
-
- BIND(&return_result);
- Return(var_result.value());
}
-TF_BUILTIN(Divide, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
-
- // Shared entry point for floating point division.
- Label do_fdiv(this), end(this);
- VARIABLE(var_dividend_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_divisor_float64, MachineRepresentation::kFloat64);
-
- // We might need to loop one or two times due to ToNumber conversions.
- VARIABLE(var_dividend, MachineRepresentation::kTagged);
- VARIABLE(var_divisor, MachineRepresentation::kTagged);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(this, 2, loop_variables);
- var_dividend.Bind(left);
- var_divisor.Bind(right);
- Goto(&loop);
- BIND(&loop);
- {
- Node* dividend = var_dividend.value();
- Node* divisor = var_divisor.value();
-
- Label dividend_is_smi(this), dividend_is_not_smi(this);
- Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
+TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
+ VARIABLE(var_left, MachineRepresentation::kTagged);
+ VARIABLE(var_right, MachineRepresentation::kTagged);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ Label do_smi_mul(this), do_double_mul(this);
- BIND(&dividend_is_smi);
- {
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
+ BinaryOp<Descriptor>(&do_smi_mul, &var_left, &var_right, &do_double_mul,
+ &var_left_double, &var_right_double);
- BIND(&divisor_is_smi);
- {
- Label bailout(this);
+ BIND(&do_smi_mul);
+ // The result is not necessarily a Smi, in case of overflow.
+ Return(SmiMul(var_left.value(), var_right.value()));
- // Do floating point division if {divisor} is zero.
- GotoIf(SmiEqual(divisor, SmiConstant(0)), &bailout);
+ BIND(&do_double_mul);
+ Node* value = Float64Mul(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
+}
- // Do floating point division {dividend} is zero and {divisor} is
- // negative.
- Label dividend_is_zero(this), dividend_is_not_zero(this);
- Branch(SmiEqual(dividend, SmiConstant(0)), &dividend_is_zero,
- &dividend_is_not_zero);
+TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
+ VARIABLE(var_left, MachineRepresentation::kTagged);
+ VARIABLE(var_right, MachineRepresentation::kTagged);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ Label do_smi_div(this), do_double_div(this);
- BIND(&dividend_is_zero);
- {
- GotoIf(SmiLessThan(divisor, SmiConstant(0)), &bailout);
- Goto(&dividend_is_not_zero);
- }
- BIND(&dividend_is_not_zero);
+ BinaryOp<Descriptor>(&do_smi_div, &var_left, &var_right, &do_double_div,
+ &var_left_double, &var_right_double);
- Node* untagged_divisor = SmiToWord32(divisor);
- Node* untagged_dividend = SmiToWord32(dividend);
+ BIND(&do_smi_div);
+ {
+ // TODO(jkummerow): Consider just always doing a double division.
+ Label bailout(this);
+ Node* dividend = var_left.value();
+ Node* divisor = var_right.value();
- // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
- // if the Smi size is 31) and {divisor} is -1.
- Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
- Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
- &divisor_is_minus_one, &divisor_is_not_minus_one);
+ // Do floating point division if {divisor} is zero.
+ GotoIf(SmiEqual(divisor, SmiConstant(0)), &bailout);
- BIND(&divisor_is_minus_one);
- {
- GotoIf(
- Word32Equal(untagged_dividend,
- Int32Constant(kSmiValueSize == 32 ? kMinInt
- : (kMinInt >> 1))),
- &bailout);
- Goto(&divisor_is_not_minus_one);
- }
- BIND(&divisor_is_not_minus_one);
-
- // TODO(epertoso): consider adding a machine instruction that returns
- // both the result and the remainder.
- Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
- Node* truncated = Int32Mul(untagged_result, untagged_divisor);
- // Do floating point division if the remainder is not 0.
- GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
- var_result.Bind(SmiFromWord32(untagged_result));
- Goto(&end);
-
- // Bailout: convert {dividend} and {divisor} to double and do double
- // division.
- BIND(&bailout);
- {
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fdiv);
- }
- }
+ // Do floating point division if {dividend} is zero and {divisor} is
+ // negative.
+ Label dividend_is_zero(this), dividend_is_not_zero(this);
+ Branch(SmiEqual(dividend, SmiConstant(0)), &dividend_is_zero,
+ &dividend_is_not_zero);
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
+ BIND(&dividend_is_zero);
+ {
+ GotoIf(SmiLessThan(divisor, SmiConstant(0)), &bailout);
+ Goto(&dividend_is_not_zero);
+ }
+ BIND(&dividend_is_not_zero);
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(this),
- divisor_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
- &divisor_is_not_number);
+ Node* untagged_divisor = SmiToWord32(divisor);
+ Node* untagged_dividend = SmiToWord32(dividend);
- BIND(&divisor_is_number);
- {
- // Convert {dividend} to a double and divide it with the value of
- // {divisor}.
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fdiv);
- }
+ // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
+ // if the Smi size is 31) and {divisor} is -1.
+ Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
+ Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
+ &divisor_is_minus_one, &divisor_is_not_minus_one);
- BIND(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_divisor.Bind(CallStub(callable, context, divisor));
- Goto(&loop);
- }
- }
+ BIND(&divisor_is_minus_one);
+ {
+ GotoIf(Word32Equal(
+ untagged_dividend,
+ Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
+ &bailout);
+ Goto(&divisor_is_not_minus_one);
}
-
- BIND(&dividend_is_not_smi);
+ BIND(&divisor_is_not_minus_one);
+
+ // TODO(epertoso): consider adding a machine instruction that returns
+ // both the result and the remainder.
+ Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
+ Node* truncated = Int32Mul(untagged_result, untagged_divisor);
+ // Do floating point division if the remainder is not 0.
+ GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
+ Return(SmiFromWord32(untagged_result));
+
+ // Bailout: convert {dividend} and {divisor} to double and do double
+ // division.
+ BIND(&bailout);
{
- Node* dividend_map = LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- Label dividend_is_number(this),
- dividend_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
- &dividend_is_not_number);
-
- BIND(&dividend_is_number);
- {
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Convert {divisor} to a double and use it for a floating point
- // division.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fdiv);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(this),
- divisor_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
- &divisor_is_not_number);
-
- BIND(&divisor_is_number);
- {
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and divide them.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fdiv);
- }
-
- BIND(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_divisor.Bind(CallStub(callable, context, divisor));
- Goto(&loop);
- }
- }
- }
-
- BIND(&dividend_is_not_number);
- {
- // Convert {dividend} to a Number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_dividend.Bind(CallStub(callable, context, dividend));
- Goto(&loop);
- }
+ var_left_double.Bind(SmiToFloat64(dividend));
+ var_right_double.Bind(SmiToFloat64(divisor));
+ Goto(&do_double_div);
}
}
- BIND(&do_fdiv);
+ BIND(&do_double_div);
{
- Node* value =
- Float64Div(var_dividend_float64.value(), var_divisor_float64.value());
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&end);
+ Node* value = Float64Div(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
}
- BIND(&end);
- Return(var_result.value());
}
-TF_BUILTIN(Modulus, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
-
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Label return_result(this, &var_result);
-
- // Shared entry point for floating point modulus.
- Label do_fmod(this);
- VARIABLE(var_dividend_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_divisor_float64, MachineRepresentation::kFloat64);
-
- // We might need to loop one or two times due to ToNumber conversions.
- VARIABLE(var_dividend, MachineRepresentation::kTagged);
- VARIABLE(var_divisor, MachineRepresentation::kTagged);
- Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(this, 2, loop_variables);
- var_dividend.Bind(left);
- var_divisor.Bind(right);
- Goto(&loop);
- BIND(&loop);
- {
- Node* dividend = var_dividend.value();
- Node* divisor = var_divisor.value();
-
- Label dividend_is_smi(this), dividend_is_not_smi(this);
- Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
-
- BIND(&dividend_is_smi);
- {
- Label dividend_is_not_zero(this);
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Compute the modulus of two Smis.
- var_result.Bind(SmiMod(dividend, divisor));
- Goto(&return_result);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(this),
- divisor_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
- &divisor_is_not_number);
-
- BIND(&divisor_is_number);
- {
- // Convert {dividend} to a double and compute its modulus with the
- // value of {dividend}.
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fmod);
- }
-
- BIND(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_divisor.Bind(CallStub(callable, context, divisor));
- Goto(&loop);
- }
- }
- }
-
- BIND(&dividend_is_not_smi);
- {
- Node* dividend_map = LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- Label dividend_is_number(this),
- dividend_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
- &dividend_is_not_number);
-
- BIND(&dividend_is_number);
- {
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Convert {divisor} to a double and compute {dividend}'s modulus with
- // it.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fmod);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(this),
- divisor_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
- &divisor_is_not_number);
-
- BIND(&divisor_is_number);
- {
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and compute their modulus.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fmod);
- }
-
- BIND(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_divisor.Bind(CallStub(callable, context, divisor));
- Goto(&loop);
- }
- }
- }
+TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
+ VARIABLE(var_left, MachineRepresentation::kTagged);
+ VARIABLE(var_right, MachineRepresentation::kTagged);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ Label do_smi_mod(this), do_double_mod(this);
- BIND(&dividend_is_not_number);
- {
- // Convert {dividend} to a Number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_dividend.Bind(CallStub(callable, context, dividend));
- Goto(&loop);
- }
- }
- }
+ BinaryOp<Descriptor>(&do_smi_mod, &var_left, &var_right, &do_double_mod,
+ &var_left_double, &var_right_double);
- BIND(&do_fmod);
- {
- Node* value =
- Float64Mod(var_dividend_float64.value(), var_divisor_float64.value());
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&return_result);
- }
+ BIND(&do_smi_mod);
+ Return(SmiMod(var_left.value(), var_right.value()));
- BIND(&return_result);
- Return(var_result.value());
+ BIND(&do_double_mod);
+ Node* value = Float64Mod(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
}
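
As with Divide, only the tagging differs from ordinary arithmetic here: the do_double_mod path is a plain floating-point remainder, and like JS's % operator it takes the sign of the dividend, which is also how std::fmod behaves. For instance (plain C++, illustrative only):

    #include <cmath>
    #include <cstdio>

    int main() {
      // JS: -5 % 3 === -2 and 5 % -3 === 2; the sign follows the dividend,
      // exactly as std::fmod does on the double path.
      std::printf("%g %g\n", std::fmod(-5.0, 3.0), std::fmod(5.0, -3.0));  // -2 2
    }
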
TF_BUILTIN(ShiftLeft, NumberBuiltinsAssembler) {
@@ -1393,60 +838,5 @@ TF_BUILTIN(StrictEqual, CodeStubAssembler) {
Return(StrictEqual(lhs, rhs));
}
-TF_BUILTIN(AddWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_AddWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
-TF_BUILTIN(SubtractWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_SubtractWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
-TF_BUILTIN(MultiplyWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_MultiplyWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
-TF_BUILTIN(DivideWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_DivideWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
-TF_BUILTIN(ModulusWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_ModulusWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 346bafa1ae..2622daba49 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -39,10 +39,10 @@ BUILTIN(NumberPrototypeToExponential) {
isolate, fraction_digits, Object::ToInteger(isolate, fraction_digits));
double const fraction_digits_number = fraction_digits->Number();
- if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isnan(value_number)) return isolate->heap()->NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
- : isolate->heap()->infinity_string();
+ return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
+ : isolate->heap()->Infinity_string();
}
if (fraction_digits_number < 0.0 || fraction_digits_number > 20.0) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -91,10 +91,10 @@ BUILTIN(NumberPrototypeToFixed) {
"toFixed() digits")));
}
- if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isnan(value_number)) return isolate->heap()->NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
- : isolate->heap()->infinity_string();
+ return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
+ : isolate->heap()->Infinity_string();
}
char* const str = DoubleToFixedCString(
value_number, static_cast<int>(fraction_digits_number));
@@ -153,10 +153,10 @@ BUILTIN(NumberPrototypeToPrecision) {
Object::ToInteger(isolate, precision));
double const precision_number = precision->Number();
- if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isnan(value_number)) return isolate->heap()->NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
- : isolate->heap()->infinity_string();
+ return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
+ : isolate->heap()->Infinity_string();
}
if (precision_number < 1.0 || precision_number > 21.0) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -217,10 +217,10 @@ BUILTIN(NumberPrototypeToString) {
}
// Slow case.
- if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isnan(value_number)) return isolate->heap()->NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
- : isolate->heap()->infinity_string();
+ return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
+ : isolate->heap()->Infinity_string();
}
char* const str =
DoubleToRadixCString(value_number, static_cast<int>(radix_number));
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 6173bb79ab..6db05d9f1f 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -39,8 +39,8 @@ void ObjectBuiltinsAssembler::IsString(Node* object, Label* if_string,
void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
Node* string) {
- Node* lhs = HeapConstant(factory()->NewStringFromStaticChars("[object "));
- Node* rhs = HeapConstant(factory()->NewStringFromStaticChars("]"));
+ Node* lhs = StringConstant("[object ");
+ Node* rhs = StringConstant("]");
Callable callable =
CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
@@ -157,15 +157,15 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
Node* array = nullptr;
Node* elements = nullptr;
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
Node* array_length = SmiTag(object_enum_length);
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- FAST_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
+ PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
INTPTR_PARAMETERS);
StoreMapNoWriteBarrier(elements, Heap::kFixedArrayMapRootIndex);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
array_length);
- CopyFixedArrayElements(FAST_ELEMENTS, object_enum_cache, elements,
+ CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_cache, elements,
object_enum_length, SKIP_WRITE_BARRIER);
Return(array);
}
@@ -191,15 +191,61 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
{
// Wrap the elements into a proper JSArray and return that.
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
Node* array = AllocateUninitializedJSArrayWithoutElements(
- FAST_ELEMENTS, array_map, var_length.value(), nullptr);
+ PACKED_ELEMENTS, array_map, var_length.value(), nullptr);
StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset,
var_elements.value());
Return(array);
}
}
+// ES #sec-object.prototype.isprototypeof
+TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* context = Parameter(Descriptor::kContext);
+ Label if_receiverisnullorundefined(this, Label::kDeferred),
+ if_valueisnotreceiver(this, Label::kDeferred);
+
+ // We only check whether {value} is a Smi here, so that the
+ // prototype chain walk below can safely access the {value}'s
+ // map. We don't rule out Primitive {value}s, since all of
+ // them have null as their prototype, so the chain walk below
+ // immediately aborts and returns false anyway.
+ GotoIf(TaggedIsSmi(value), &if_valueisnotreceiver);
+
+ // Check if {receiver} is either null or undefined and in that case,
+ // invoke the ToObject builtin, which raises the appropriate error.
+ // Otherwise we don't need to invoke ToObject, since {receiver} is
+ // either already a JSReceiver, in which case ToObject is a no-op,
+ // or it's a Primitive and ToObject would allocate a fresh JSValue
+ // wrapper, which wouldn't be identical to any existing JSReceiver
+ // found in the prototype chain of {value}, hence it will return
+ // false no matter if we search for the Primitive {receiver} or
+ // a newly allocated JSValue wrapper for {receiver}.
+ GotoIf(IsNull(receiver), &if_receiverisnullorundefined);
+ GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined);
+
+ // Loop through the prototype chain looking for the {receiver}.
+ Return(HasInPrototypeChain(context, value, receiver));
+
+ BIND(&if_receiverisnullorundefined);
+ {
+ // If {value} is a primitive HeapObject, we need to return
+ // false instead of throwing an exception per order of the
+ // steps in the specification, so check that first here.
+ GotoIfNot(IsJSReceiver(value), &if_valueisnotreceiver);
+
+ // Simulate the ToObject invocation on {receiver}.
+ CallBuiltin(Builtins::kToObject, context, receiver);
+ Unreachable();
+ }
+
+ BIND(&if_valueisnotreceiver);
+ Return(FalseConstant());
+}
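
Once the edge cases are out of the way, the builtin above reduces Object.prototype.isPrototypeOf to a plain walk up the prototype chain, returning false as soon as the chain ends at null. A rough stand-alone analogue of the HasInPrototypeChain step (plain C++; the Object struct here is invented for illustration):

    // Minimal stand-in for a heap object with a prototype link; a null prototype
    // terminates the chain, just as null terminates every JS prototype chain.
    struct Object {
      const Object* prototype = nullptr;
    };

    bool HasInPrototypeChain(const Object* value, const Object* target) {
      for (const Object* p = value->prototype; p != nullptr; p = p->prototype) {
        if (p == target) return true;   // {target} found somewhere up the chain
      }
      return false;                     // reached null without finding {target}
    }
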
+
// ES6 #sec-object.prototype.tostring
TF_BUILTIN(ObjectProtoToString, ObjectBuiltinsAssembler) {
Label return_undefined(this, Label::kDeferred),
@@ -222,8 +268,7 @@ TF_BUILTIN(ObjectProtoToString, ObjectBuiltinsAssembler) {
GotoIf(WordEqual(receiver, NullConstant()), &return_null);
- Callable to_object = CodeFactory::ToObject(isolate());
- receiver = CallStub(to_object, context, receiver);
+ receiver = CallBuiltin(Builtins::kToObject, context, receiver);
Node* receiver_instance_type = LoadInstanceType(receiver);
@@ -368,17 +413,21 @@ TF_BUILTIN(ObjectPrototypeValueOf, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* context = Parameter(Descriptor::kContext);
- Callable to_object = CodeFactory::ToObject(isolate());
- receiver = CallStub(to_object, context, receiver);
-
- Return(receiver);
+ Return(CallBuiltin(Builtins::kToObject, context, receiver));
}
// ES #sec-object.create
TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
- Node* prototype = Parameter(Descriptor::kPrototype);
- Node* properties = Parameter(Descriptor::kProperties);
- Node* context = Parameter(Descriptor::kContext);
+ int const kPrototypeArg = 0;
+ int const kPropertiesArg = 1;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* prototype = args.GetOptionalArgumentValue(kPrototypeArg);
+ Node* properties = args.GetOptionalArgumentValue(kPropertiesArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
Label call_runtime(this, Label::kDeferred), prototype_valid(this),
no_properties(this);
@@ -449,13 +498,15 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
BIND(&instantiate_map);
{
Node* instance = AllocateJSObjectFromMap(map.value(), properties.value());
- Return(instance);
+ args.PopAndReturn(instance);
}
}
BIND(&call_runtime);
{
- Return(CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
+ Node* result =
+ CallRuntime(Runtime::kObjectCreate, context, prototype, properties);
+ args.PopAndReturn(result);
}
}
@@ -527,8 +578,8 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
Node* frame_size = ChangeInt32ToIntPtr(LoadObjectField(
bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
Node* size = WordSar(frame_size, IntPtrConstant(kPointerSizeLog2));
- Node* register_file = AllocateFixedArray(FAST_HOLEY_ELEMENTS, size);
- FillFixedArrayWithValue(FAST_HOLEY_ELEMENTS, register_file, IntPtrConstant(0),
+ Node* register_file = AllocateFixedArray(HOLEY_ELEMENTS, size);
+ FillFixedArrayWithValue(HOLEY_ELEMENTS, register_file, IntPtrConstant(0),
size, Heap::kUndefinedValueRootIndex);
Node* const result = AllocateJSObjectFromMap(maybe_map);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 1b236ec97c..d6d4772fd3 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-constructor-gen.h"
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
@@ -33,7 +34,7 @@ void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
SmiConstant(0));
for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
int offset = JSPromise::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(Smi::kZero));
+ StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(0));
}
}
@@ -68,7 +69,7 @@ Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(Node* context,
SmiConstant(0));
for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
int offset = JSPromise::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(instance, offset, SmiConstant(Smi::kZero));
+ StoreObjectFieldNoWriteBarrier(instance, offset, SmiConstant(0));
}
Label out(this);
@@ -182,33 +183,26 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Goto(&out);
BIND(&if_notcallable);
- Node* message = SmiConstant(MessageTemplate::kPromiseNonCallable);
StoreObjectField(capability, JSPromiseCapability::kPromiseOffset,
UndefinedConstant());
StoreObjectField(capability, JSPromiseCapability::kResolveOffset,
UndefinedConstant());
StoreObjectField(capability, JSPromiseCapability::kRejectOffset,
UndefinedConstant());
- CallRuntime(Runtime::kThrowTypeError, context, message);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
}
BIND(&if_not_constructor);
- {
- Node* const message_id = SmiConstant(MessageTemplate::kNotConstructor);
- CallRuntime(Runtime::kThrowTypeError, context, message_id, constructor);
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kNotConstructor, constructor);
BIND(&out);
return var_result.value();
}
-Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
- int slots) {
+void PromiseBuiltinsAssembler::InitializeFunctionContext(Node* native_context,
+ Node* context,
+ int slots) {
DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
-
- Node* const context = Allocate(FixedArray::SizeFor(slots));
StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
SmiConstant(slots));
@@ -222,6 +216,14 @@ Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
TheHoleConstant());
StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX,
native_context);
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
+ int slots) {
+ DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
+
+ Node* const context = AllocateInNewSpace(FixedArray::SizeFor(slots));
+ InitializeFunctionContext(native_context, context, slots);
return context;
}
@@ -261,16 +263,7 @@ Node* PromiseBuiltinsAssembler::ThrowIfNotJSReceiver(
// The {value} is not a compatible receiver for this method.
BIND(&throw_exception);
- {
- Node* const method =
- method_name == nullptr
- ? UndefinedConstant()
- : HeapConstant(
- isolate()->factory()->NewStringFromAsciiChecked(method_name));
- Node* const message_id = SmiConstant(msg_template);
- CallRuntime(Runtime::kThrowTypeError, context, message_id, method);
- Unreachable();
- }
+ ThrowTypeError(context, msg_template, method_name);
BIND(&out);
return var_value_map.value();
@@ -324,22 +317,13 @@ Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
// 7. If IsConstructor(S) is true, return S.
Label throw_error(this);
GotoIf(TaggedIsSmi(species), &throw_error);
- Node* species_bitfield = LoadMapBitField(LoadMap(species));
- GotoIfNot(Word32Equal(Word32And(species_bitfield,
- Int32Constant((1 << Map::kIsConstructor))),
- Int32Constant(1 << Map::kIsConstructor)),
- &throw_error);
+ GotoIfNot(IsConstructorMap(LoadMap(species)), &throw_error);
var_result.Bind(species);
Goto(&out);
// 8. Throw a TypeError exception.
BIND(&throw_error);
- {
- Node* const message_id =
- SmiConstant(MessageTemplate::kSpeciesNotConstructor);
- CallRuntime(Runtime::kThrowTypeError, context, message_id);
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kSpeciesNotConstructor);
BIND(&out);
return var_result.value();
@@ -355,7 +339,7 @@ void PromiseBuiltinsAssembler::AppendPromiseCallback(int offset, Node* promise,
Node* delta = IntPtrOrSmiConstant(1, mode);
Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
const CodeStubAssembler::AllocationFlags flags =
CodeStubAssembler::kAllowLargeObjectAllocation;
@@ -515,34 +499,34 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
// Create new FixedArrays to store callbacks, and migrate
// existing callbacks.
Node* const deferred_promise_arr =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(deferred_promise_arr, 0,
existing_deferred_promise);
StoreFixedArrayElement(deferred_promise_arr, 1, deferred_promise);
Node* const deferred_on_resolve_arr =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(
deferred_on_resolve_arr, 0,
LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset));
StoreFixedArrayElement(deferred_on_resolve_arr, 1, deferred_on_resolve);
Node* const deferred_on_reject_arr =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(
deferred_on_reject_arr, 0,
LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset));
StoreFixedArrayElement(deferred_on_reject_arr, 1, deferred_on_reject);
Node* const fulfill_reactions =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(
fulfill_reactions, 0,
LoadObjectField(promise, JSPromise::kFulfillReactionsOffset));
StoreFixedArrayElement(fulfill_reactions, 1, var_on_resolve.value());
Node* const reject_reactions =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(
reject_reactions, 0,
LoadObjectField(promise, JSPromise::kRejectReactionsOffset));
@@ -992,6 +976,31 @@ void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
PromiseFulfill(context, promise, value, v8::Promise::kRejected);
}
+void PromiseBuiltinsAssembler::SetForwardingHandlerIfTrue(
+ Node* context, Node* condition, const NodeGenerator& object) {
+ Label done(this);
+ GotoIfNot(condition, &done);
+ CallRuntime(Runtime::kSetProperty, context, object(),
+ HeapConstant(factory()->promise_forwarding_handler_symbol()),
+ TrueConstant(), SmiConstant(STRICT));
+ Goto(&done);
+ BIND(&done);
+}
+
+void PromiseBuiltinsAssembler::SetPromiseHandledByIfTrue(
+ Node* context, Node* condition, Node* promise,
+ const NodeGenerator& handled_by) {
+ Label done(this);
+ GotoIfNot(condition, &done);
+ GotoIf(TaggedIsSmi(promise), &done);
+ GotoIfNot(HasInstanceType(promise, JS_PROMISE_TYPE), &done);
+ CallRuntime(Runtime::kSetProperty, context, promise,
+ HeapConstant(factory()->promise_handled_by_symbol()),
+ handled_by(), SmiConstant(STRICT));
+ Goto(&done);
+ BIND(&done);
+}
+
// ES#sec-promise-reject-functions
// Promise Reject Functions
TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
@@ -1124,20 +1133,11 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
// 1. If NewTarget is undefined, throw a TypeError exception.
BIND(&if_targetisundefined);
- {
- Node* const message_id = SmiConstant(MessageTemplate::kNotAPromise);
- CallRuntime(Runtime::kThrowTypeError, context, message_id, new_target);
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kNotAPromise, new_target);
// 2. If IsCallable(executor) is false, throw a TypeError exception.
BIND(&if_notcallable);
- {
- Node* const message_id =
- SmiConstant(MessageTemplate::kResolverNotAFunction);
- CallRuntime(Runtime::kThrowTypeError, context, message_id, executor);
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kResolverNotAFunction, executor);
// Silently fail if the stack looks fishy.
BIND(&if_noaccess);
@@ -1155,20 +1155,6 @@ TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
Return(AllocateAndInitJSPromise(context, parent));
}
-TF_BUILTIN(IsPromise, PromiseBuiltinsAssembler) {
- Node* const maybe_promise = Parameter(Descriptor::kObject);
- Label if_notpromise(this, Label::kDeferred);
-
- GotoIf(TaggedIsSmi(maybe_promise), &if_notpromise);
-
- Node* const result =
- SelectBooleanConstant(HasInstanceType(maybe_promise, JS_PROMISE_TYPE));
- Return(result);
-
- BIND(&if_notpromise);
- Return(FalseConstant());
-}
-
// ES#sec-promise.prototype.then
// Promise.prototype.catch ( onFulfilled, onRejected )
TF_BUILTIN(PromiseThen, PromiseBuiltinsAssembler) {
@@ -1334,9 +1320,8 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
BIND(&if_rejectpromise);
{
- Callable promise_handle_reject = CodeFactory::PromiseHandleReject(isolate);
- CallStub(promise_handle_reject, context, deferred_promise,
- deferred_on_reject, var_reason.value());
+ CallBuiltin(Builtins::kPromiseHandleReject, context, deferred_promise,
+ deferred_on_reject, var_reason.value());
Goto(&promisehook_after);
}
@@ -1499,9 +1484,7 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
Return(UndefinedConstant());
BIND(&if_alreadyinvoked);
- Node* message = SmiConstant(MessageTemplate::kPromiseExecutorAlreadyInvoked);
- CallRuntime(Runtime::kThrowTypeError, context, message);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kPromiseExecutorAlreadyInvoked);
}
// ES6 #sec-newpromisecapability
@@ -1819,5 +1802,430 @@ TF_BUILTIN(PerformNativePromiseThen, PromiseBuiltinsAssembler) {
Return(result_promise);
}
+Node* PromiseBuiltinsAssembler::PerformPromiseAll(
+ Node* context, Node* constructor, Node* capability, Node* iterator,
+ Label* if_exception, Variable* var_exception) {
+ IteratorBuiltinsAssembler iter_assembler(state());
+ Label close_iterator(this);
+
+ Node* const instrumenting = IsDebugActive();
+
+ // For catch prediction, don't treat the .then calls as handling it;
+ // instead, recurse outwards.
+ SetForwardingHandlerIfTrue(
+ context, instrumenting,
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const array_map = LoadContextElement(
+ native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
+ Node* const values_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+ Node* const remaining_elements = AllocateSmiCell(1);
+
+ VARIABLE(var_index, MachineRepresentation::kTagged, SmiConstant(0));
+
+ Label loop(this, &var_index), break_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // Let next be IteratorStep(iteratorRecord.[[Iterator]]).
+ // If next is an abrupt completion, set iteratorRecord.[[Done]] to true.
+ // ReturnIfAbrupt(next).
+ Node* const fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ Node* const next = iter_assembler.IteratorStep(
+ context, iterator, &break_loop, fast_iterator_result_map, if_exception,
+ var_exception);
+
+ // Let nextValue be IteratorValue(next).
+ // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
+ // true.
+ // ReturnIfAbrupt(nextValue).
+ Node* const next_value = iter_assembler.IteratorValue(
+ context, next, fast_iterator_result_map, if_exception, var_exception);
+
+    // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
+ Node* const promise_resolve =
+ GetProperty(context, constructor, factory()->resolve_string());
+ GotoIfException(promise_resolve, &close_iterator, var_exception);
+
+ Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context,
+ promise_resolve, constructor, next_value);
+ GotoIfException(next_promise, &close_iterator, var_exception);
+
+ // Let resolveElement be a new built-in function object as defined in
+ // Promise.all Resolve Element Functions.
+ Node* const resolve_context =
+ CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
+ StoreContextElementNoWriteBarrier(
+ resolve_context, kPromiseAllResolveElementAlreadyVisitedSlot,
+ SmiConstant(0));
+ StoreContextElementNoWriteBarrier(
+ resolve_context, kPromiseAllResolveElementIndexSlot, var_index.value());
+ StoreContextElementNoWriteBarrier(
+ resolve_context, kPromiseAllResolveElementRemainingElementsSlot,
+ remaining_elements);
+ StoreContextElementNoWriteBarrier(
+ resolve_context, kPromiseAllResolveElementCapabilitySlot, capability);
+ StoreContextElementNoWriteBarrier(resolve_context,
+ kPromiseAllResolveElementValuesArraySlot,
+ values_array);
+
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const resolve_info = LoadContextElement(
+ native_context, Context::PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN);
+ Node* const resolve =
+ AllocateFunctionWithMapAndContext(map, resolve_info, resolve_context);
+
+ // Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] + 1.
+ {
+ Label if_outofrange(this, Label::kDeferred), done(this);
+ IncrementSmiCell(remaining_elements, &if_outofrange);
+ Goto(&done);
+
+ BIND(&if_outofrange);
+ {
+ // If the incremented value is out of Smi range, crash.
+ Abort(kOffsetOutOfRange);
+ }
+
+ BIND(&done);
+ }
+
+    // Perform ? Invoke(nextPromise, "then", « resolveElement,
+    // resultCapability.[[Reject]] »).
+ Node* const then =
+ GetProperty(context, next_promise, factory()->then_string());
+ GotoIfException(then, &close_iterator, var_exception);
+
+ Node* const then_call = CallJS(
+ CodeFactory::Call(isolate()), context, then, next_promise, resolve,
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+ GotoIfException(then_call, &close_iterator, var_exception);
+
+ // For catch prediction, mark that rejections here are semantically
+ // handled by the combined Promise.
+ SetPromiseHandledByIfTrue(context, instrumenting, then_call, [=]() {
+ // Load promiseCapability.[[Promise]]
+ return LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ });
+
+ // Set index to index + 1
+ var_index.Bind(NumberInc(var_index.value()));
+ Goto(&loop);
+ }
+
+ BIND(&close_iterator);
+ {
+ // Exception must be bound to a JS value.
+ CSA_ASSERT(this, IsNotTheHole(var_exception->value()));
+ iter_assembler.IteratorCloseOnException(context, iterator, if_exception,
+ var_exception);
+ }
+
+ BIND(&break_loop);
+ {
+ Label resolve_promise(this), return_promise(this);
+ // Set iteratorRecord.[[Done]] to true.
+ // Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] - 1.
+ Node* const remaining = DecrementSmiCell(remaining_elements);
+ Branch(SmiEqual(remaining, SmiConstant(0)), &resolve_promise,
+ &return_promise);
+
+ // If remainingElementsCount.[[Value]] is 0, then
+ // Let valuesArray be CreateArrayFromList(values).
+ // Perform ? Call(resultCapability.[[Resolve]], undefined,
+    // « valuesArray »).
+ BIND(&resolve_promise);
+
+ Node* const resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ Node* const resolve_call =
+ CallJS(CodeFactory::Call(isolate()), context, resolve,
+ UndefinedConstant(), values_array);
+ GotoIfException(resolve_call, if_exception, var_exception);
+ Goto(&return_promise);
+
+ // Return resultCapability.[[Promise]].
+ BIND(&return_promise);
+ }
+
+ Node* const promise =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ return promise;
+}
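
The remaining-elements Cell above starts at 1, is incremented once per iterated element, and is decremented once more after the loop, so the combined promise cannot resolve until the iterator is exhausted and every element promise has settled. A minimal TypeScript sketch of that bookkeeping, with plain closures standing in for the per-element contexts; all names are ours:

```typescript
// Sketch of the Promise.all counter trick: the initial 1 guards the
// iteration itself and is only released after the loop finishes.
function performPromiseAll<T>(
  values: Iterable<T | PromiseLike<T>>,
  resolve: (v: T[]) => void,
  reject: (e: unknown) => void
): void {
  const results: T[] = [];
  let remaining = 1;                 // AllocateSmiCell(1)
  let index = 0;

  const elementDone = () => {
    remaining -= 1;                  // DecrementSmiCell
    if (remaining === 0) resolve(results);
  };

  for (const value of values) {
    const i = index++;
    remaining += 1;                  // IncrementSmiCell, one per element
    Promise.resolve(value).then(v => {
      results[i] = v;                // store at the captured index
      elementDone();
    }, reject);
  }

  elementDone();                     // release the initial 1 after the loop
}
```
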
+
+Node* PromiseBuiltinsAssembler::IncrementSmiCell(Node* cell,
+ Label* if_overflow) {
+ CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
+ Node* value = LoadCellValue(cell);
+ CSA_SLOW_ASSERT(this, TaggedIsSmi(value));
+
+ if (if_overflow != nullptr) {
+ GotoIf(SmiEqual(value, SmiConstant(Smi::kMaxValue)), if_overflow);
+ }
+
+ Node* result = SmiAdd(value, SmiConstant(1));
+ StoreCellValue(cell, result, SKIP_WRITE_BARRIER);
+ return result;
+}
+
+Node* PromiseBuiltinsAssembler::DecrementSmiCell(Node* cell) {
+ CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
+ Node* value = LoadCellValue(cell);
+ CSA_SLOW_ASSERT(this, TaggedIsSmi(value));
+
+ Node* result = SmiSub(value, SmiConstant(1));
+ StoreCellValue(cell, result, SKIP_WRITE_BARRIER);
+ return result;
+}
+
+// ES#sec-promise.all
+// Promise.all ( iterable )
+TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
+ IteratorBuiltinsAssembler iter_assembler(state());
+
+ // Let C be the this value.
+ // If Type(C) is not Object, throw a TypeError exception.
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+ "Promise.all");
+
+ // Let promiseCapability be ? NewPromiseCapability(C).
+  // Don't fire debugEvent so that forwarding the rejection through Promise.all
+  // does not trigger redundant ExceptionEvents.
+ Node* const debug_event = FalseConstant();
+ Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+ Label reject_promise(this, &var_exception, Label::kDeferred);
+
+ // Let iterator be GetIterator(iterable).
+ // IfAbruptRejectPromise(iterator, promiseCapability).
+ Node* const iterable = Parameter(Descriptor::kIterable);
+ Node* const iterator = iter_assembler.GetIterator(
+ context, iterable, &reject_promise, &var_exception);
+
+ // Let result be PerformPromiseAll(iteratorRecord, C, promiseCapability).
+ // If result is an abrupt completion, then
+ // If iteratorRecord.[[Done]] is false, let result be
+ // IteratorClose(iterator, result).
+ // IfAbruptRejectPromise(result, promiseCapability).
+ Node* const result = PerformPromiseAll(
+ context, receiver, capability, iterator, &reject_promise, &var_exception);
+
+ Return(result);
+
+ BIND(&reject_promise);
+ {
+ // Exception must be bound to a JS value.
+ CSA_SLOW_ASSERT(this, IsNotTheHole(var_exception.value()));
+ Node* const reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ Callable callable = CodeFactory::Call(isolate());
+ CallJS(callable, context, reject, UndefinedConstant(),
+ var_exception.value());
+
+ Node* const promise =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ Return(promise);
+ }
+}
+
+TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, SmiEqual(LoadFixedArrayBaseLength(context),
+ SmiConstant(kPromiseAllResolveElementLength)));
+
+ Label already_called(this), resolve_promise(this);
+ GotoIf(SmiEqual(LoadContextElement(
+ context, kPromiseAllResolveElementAlreadyVisitedSlot),
+ SmiConstant(1)),
+ &already_called);
+ StoreContextElementNoWriteBarrier(
+ context, kPromiseAllResolveElementAlreadyVisitedSlot, SmiConstant(1));
+
+ Node* const index =
+ LoadContextElement(context, kPromiseAllResolveElementIndexSlot);
+ Node* const values_array =
+ LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot);
+
+ // Set element in FixedArray
+ Label runtime_set_element(this), did_set_element(this);
+ GotoIfNot(TaggedIsPositiveSmi(index), &runtime_set_element);
+ {
+ VARIABLE(var_elements, MachineRepresentation::kTagged,
+ LoadElements(values_array));
+ PossiblyGrowElementsCapacity(SMI_PARAMETERS, PACKED_ELEMENTS, values_array,
+ index, &var_elements, SmiConstant(1),
+ &runtime_set_element);
+ StoreFixedArrayElement(var_elements.value(), index, value,
+ UPDATE_WRITE_BARRIER, 0, SMI_PARAMETERS);
+
+ // Update array length
+ Label did_set_length(this);
+ Node* const length = LoadJSArrayLength(values_array);
+ GotoIfNot(TaggedIsPositiveSmi(length), &did_set_length);
+ Node* const new_length = SmiAdd(index, SmiConstant(1));
+ GotoIfNot(SmiLessThan(length, new_length), &did_set_length);
+ StoreObjectFieldNoWriteBarrier(values_array, JSArray::kLengthOffset,
+ new_length);
+ // Assert that valuesArray.[[Length]] is less than or equal to the
+    // elements backing-store length.
+ CSA_SLOW_ASSERT(
+ this, SmiAboveOrEqual(LoadFixedArrayBaseLength(var_elements.value()),
+ new_length));
+ Goto(&did_set_length);
+ BIND(&did_set_length);
+ }
+ Goto(&did_set_element);
+ BIND(&runtime_set_element);
+  // New-space filled up or index too large; set element via runtime
+ CallRuntime(Runtime::kCreateDataProperty, context, values_array, index,
+ value);
+ Goto(&did_set_element);
+ BIND(&did_set_element);
+
+ Node* const remaining_elements = LoadContextElement(
+ context, kPromiseAllResolveElementRemainingElementsSlot);
+ Node* const result = DecrementSmiCell(remaining_elements);
+ GotoIf(SmiEqual(result, SmiConstant(0)), &resolve_promise);
+ Return(UndefinedConstant());
+
+ BIND(&resolve_promise);
+ Node* const capability =
+ LoadContextElement(context, kPromiseAllResolveElementCapabilitySlot);
+ Node* const resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ CallJS(CodeFactory::Call(isolate()), context, resolve, UndefinedConstant(),
+ values_array);
+ Return(UndefinedConstant());
+
+ BIND(&already_called);
+ Return(UndefinedConstant());
+}
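
Each resolve-element closure above is a one-shot: the already-visited slot turns duplicate calls from a misbehaving thenable into no-ops, the value lands at the captured index in the shared values array, and the outer capability is resolved when the shared counter reaches zero. A hedged TypeScript sketch of one such closure; names are ours:

```typescript
// One Promise.all resolve-element function; the builtin keeps these fields
// in context slots, here they are captured variables.
function makeResolveElement<T>(
  index: number,
  values: T[],
  state: { remaining: number },      // shared with the other closures
  resolveAll: (v: T[]) => void
): (value: T) => void {
  let alreadyCalled = false;         // kPromiseAllResolveElementAlreadyVisitedSlot
  return (value: T) => {
    if (alreadyCalled) return;       // a second call is silently ignored
    alreadyCalled = true;
    values[index] = value;           // may grow the array, as the builtin does
    state.remaining -= 1;
    if (state.remaining === 0) resolveAll(values);
  };
}
```
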
+
+// ES#sec-promise.race
+// Promise.race ( iterable )
+TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
+ IteratorBuiltinsAssembler iter_assembler(state());
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+ "Promise.race");
+
+ // Let promiseCapability be ? NewPromiseCapability(C).
+  // Don't fire debugEvent so that forwarding the rejection through Promise.race
+  // does not trigger redundant ExceptionEvents.
+ Node* const debug_event = FalseConstant();
+ Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+
+ Node* const resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ Node* const reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+
+ Node* const instrumenting = IsDebugActive();
+
+ Label close_iterator(this, Label::kDeferred);
+ Label reject_promise(this, Label::kDeferred);
+
+ // For catch prediction, don't treat the .then calls as handling it;
+ // instead, recurse outwards.
+ SetForwardingHandlerIfTrue(context, instrumenting, reject);
+
+ // Let iterator be GetIterator(iterable).
+ // IfAbruptRejectPromise(iterator, promiseCapability).
+ Node* const iterable = Parameter(Descriptor::kIterable);
+ Node* const iterator = iter_assembler.GetIterator(
+ context, iterable, &reject_promise, &var_exception);
+
+ // Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability).
+ {
+ Label loop(this), break_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const fast_iterator_result_map = LoadContextElement(
+ native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ // Let next be IteratorStep(iteratorRecord.[[Iterator]]).
+ // If next is an abrupt completion, set iteratorRecord.[[Done]] to true.
+ // ReturnIfAbrupt(next).
+ Node* const next = iter_assembler.IteratorStep(
+ context, iterator, &break_loop, fast_iterator_result_map,
+ &reject_promise, &var_exception);
+
+ // Let nextValue be IteratorValue(next).
+ // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
+ // true.
+ // ReturnIfAbrupt(nextValue).
+ Node* const next_value =
+ iter_assembler.IteratorValue(context, next, fast_iterator_result_map,
+ &reject_promise, &var_exception);
+
+      // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
+ Node* const promise_resolve =
+ GetProperty(context, receiver, factory()->resolve_string());
+ GotoIfException(promise_resolve, &close_iterator, &var_exception);
+
+ Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context,
+ promise_resolve, receiver, next_value);
+ GotoIfException(next_promise, &close_iterator, &var_exception);
+
+      // Perform ? Invoke(nextPromise, "then",
+      // « resultCapability.[[Resolve]], resultCapability.[[Reject]] »).
+ Node* const then =
+ GetProperty(context, next_promise, factory()->then_string());
+ GotoIfException(then, &close_iterator, &var_exception);
+
+ Node* const then_call = CallJS(CodeFactory::Call(isolate()), context,
+ then, next_promise, resolve, reject);
+ GotoIfException(then_call, &close_iterator, &var_exception);
+
+ // For catch prediction, mark that rejections here are semantically
+ // handled by the combined Promise.
+ SetPromiseHandledByIfTrue(context, instrumenting, then_call, [=]() {
+ // Load promiseCapability.[[Promise]]
+ return LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ });
+ Goto(&loop);
+ }
+
+ BIND(&break_loop);
+ Return(LoadObjectField(capability, JSPromiseCapability::kPromiseOffset));
+ }
+
+ BIND(&close_iterator);
+ {
+ CSA_ASSERT(this, IsNotTheHole(var_exception.value()));
+ iter_assembler.IteratorCloseOnException(context, iterator, &reject_promise,
+ &var_exception);
+ }
+
+ BIND(&reject_promise);
+ {
+ Node* const reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ Callable callable = CodeFactory::Call(isolate());
+ CallJS(callable, context, reject, UndefinedConstant(),
+ var_exception.value());
+
+ Node* const promise =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ Return(promise);
+ }
+}
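
The loop above is essentially all of PerformPromiseRace: each iterated value is passed through the constructor's resolve and its then is wired directly to the combined capability, so whichever element settles first decides the result. A spec-level TypeScript sketch, assuming the built-in Promise constructor; names are ours:

```typescript
// Sketch of the Promise.race loop: no counters or per-element closures,
// just fan-out of the capability's resolve/reject to every element.
function performPromiseRace<T>(
  values: Iterable<T | PromiseLike<T>>,
  resolve: (v: T) => void,
  reject: (e: unknown) => void
): void {
  for (const value of values) {
    // Invoke(C, "resolve", « nextValue ») followed by
    // Invoke(nextPromise, "then", « resolve, reject »).
    Promise.resolve(value).then(resolve, reject);
  }
}
```
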
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index a03132d6a6..997933e10b 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -28,6 +28,27 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
kPromiseContextLength,
};
+ protected:
+ enum PromiseAllResolveElementContextSlots {
+ // Whether the resolve callback was already called.
+ kPromiseAllResolveElementAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
+
+ // Index into the values array
+ kPromiseAllResolveElementIndexSlot,
+
+    // Remaining elements count (a Cell holding a Smi)
+ kPromiseAllResolveElementRemainingElementsSlot,
+
+ // Promise capability from Promise.all
+ kPromiseAllResolveElementCapabilitySlot,
+
+ // Values array from Promise.all
+ kPromiseAllResolveElementValuesArraySlot,
+
+ kPromiseAllResolveElementLength
+ };
+
+ public:
enum FunctionContextSlot {
kCapabilitySlot = Context::MIN_CONTEXT_SLOTS,
@@ -113,6 +134,7 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
Label* if_isunmodified, Label* if_ismodified);
+ void InitializeFunctionContext(Node* native_context, Node* context, int len);
Node* CreatePromiseContext(Node* native_context, int slots);
void PromiseFulfill(Node* context, Node* promise, Node* result,
v8::Promise::PromiseState status);
@@ -135,6 +157,23 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreateThrowerFunctionContext(Node* reason, Node* native_context);
Node* CreateThrowerFunction(Node* reason, Node* native_context);
+ Node* PerformPromiseAll(Node* context, Node* constructor, Node* capability,
+ Node* iterator, Label* if_exception,
+ Variable* var_exception);
+
+ Node* IncrementSmiCell(Node* cell, Label* if_overflow = nullptr);
+ Node* DecrementSmiCell(Node* cell);
+
+ void SetForwardingHandlerIfTrue(Node* context, Node* condition,
+ const NodeGenerator& object);
+ inline void SetForwardingHandlerIfTrue(Node* context, Node* condition,
+ Node* object) {
+ return SetForwardingHandlerIfTrue(context, condition,
+ [object]() -> Node* { return object; });
+ }
+ void SetPromiseHandledByIfTrue(Node* context, Node* condition, Node* promise,
+ const NodeGenerator& handled_by);
+
private:
Node* AllocateJSPromise(Node* context);
};
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
new file mode 100644
index 0000000000..671bfa21fb
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BUILTIN(IsPromise) {
+ SealHandleScope scope(isolate);
+
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ return isolate->heap()->ToBoolean(object->IsJSPromise());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
new file mode 100644
index 0000000000..30b0f08ec0
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -0,0 +1,215 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+
+#include "src/counters.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+using compiler::Node;
+using compiler::CodeAssembler;
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
+TF_BUILTIN(ProxyConstructor, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, "Proxy");
+}
+
+class ProxiesCodeStubAssembler : public CodeStubAssembler {
+ public:
+ explicit ProxiesCodeStubAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ Node* IsProxyRevoked(Node* proxy) {
+ CSA_ASSERT(this, IsJSProxy(proxy));
+
+ Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
+ CSA_ASSERT(this, Word32Or(IsJSReceiver(handler), IsNull(handler)));
+
+ return IsNull(handler);
+ }
+
+ void GotoIfProxyRevoked(Node* object, Label* if_proxy_revoked) {
+ Label continue_checks(this);
+ GotoIfNot(IsJSProxy(object), &continue_checks);
+ GotoIf(IsProxyRevoked(object), if_proxy_revoked);
+ Goto(&continue_checks);
+ BIND(&continue_checks);
+ }
+
+ Node* AllocateProxy(Node* target, Node* handler, Node* context) {
+ VARIABLE(map, MachineRepresentation::kTagged);
+
+ Label callable_target(this), constructor_target(this), none_target(this),
+ create_proxy(this);
+
+ Node* nativeContext = LoadNativeContext(context);
+
+ Branch(IsCallable(target), &callable_target, &none_target);
+
+ BIND(&callable_target);
+ {
+ // Every object that is a constructor is implicitly callable
+ // so it's okay to nest this check here
+ GotoIf(IsConstructor(target), &constructor_target);
+ map.Bind(
+ LoadContextElement(nativeContext, Context::PROXY_CALLABLE_MAP_INDEX));
+ Goto(&create_proxy);
+ }
+ BIND(&constructor_target);
+ {
+ map.Bind(LoadContextElement(nativeContext,
+ Context::PROXY_CONSTRUCTOR_MAP_INDEX));
+ Goto(&create_proxy);
+ }
+ BIND(&none_target);
+ {
+ map.Bind(LoadContextElement(nativeContext, Context::PROXY_MAP_INDEX));
+ Goto(&create_proxy);
+ }
+
+ BIND(&create_proxy);
+ Node* proxy = Allocate(JSProxy::kSize);
+ StoreMapNoWriteBarrier(proxy, map.value());
+ StoreObjectFieldRoot(proxy, JSProxy::kPropertiesOrHashOffset,
+ Heap::kEmptyPropertiesDictionaryRootIndex);
+ StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target);
+ StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler);
+ StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHashOffset,
+ UndefinedConstant());
+
+ return proxy;
+ }
+
+ Node* AllocateJSArrayForCodeStubArguments(Node* context,
+ CodeStubArguments& args, Node* argc,
+ ParameterMode mode) {
+ Node* array = nullptr;
+ Node* elements = nullptr;
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* argc_smi = ParameterToTagged(argc, mode);
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ PACKED_ELEMENTS, array_map, argc_smi, nullptr, argc, INTPTR_PARAMETERS);
+
+ StoreMapNoWriteBarrier(elements, Heap::kFixedArrayMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(elements, FixedArrayBase::kLengthOffset,
+ argc_smi);
+
+ VARIABLE(index, MachineType::PointerRepresentation());
+ index.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+ VariableList list({&index}, zone());
+ args.ForEach(list, [this, elements, &index](Node* arg) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, elements,
+ index.value(), arg);
+ Increment(index, kPointerSize);
+ });
+ return array;
+ }
+};
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
+TF_BUILTIN(ProxyConstructor_ConstructStub, ProxiesCodeStubAssembler) {
+ int const kTargetArg = 0;
+ int const kHandlerArg = 1;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* target = args.GetOptionalArgumentValue(kTargetArg);
+ Node* handler = args.GetOptionalArgumentValue(kHandlerArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ Label throw_proxy_non_object(this, Label::kDeferred),
+ throw_proxy_handler_or_target_revoked(this, Label::kDeferred),
+ return_create_proxy(this);
+
+ GotoIf(TaggedIsSmi(target), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(target), &throw_proxy_non_object);
+ GotoIfProxyRevoked(target, &throw_proxy_handler_or_target_revoked);
+
+ GotoIf(TaggedIsSmi(handler), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(handler), &throw_proxy_non_object);
+ GotoIfProxyRevoked(handler, &throw_proxy_handler_or_target_revoked);
+
+ args.PopAndReturn(AllocateProxy(target, handler, context));
+
+ BIND(&throw_proxy_non_object);
+ ThrowTypeError(context, MessageTemplate::kProxyNonObject);
+
+ BIND(&throw_proxy_handler_or_target_revoked);
+ ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
+}
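
The construct stub above only validates its two arguments before allocating the proxy: target and handler must both be JS receivers, and neither may be a revoked proxy. A rough TypeScript sketch of those checks; revocation is not observable from script, so it is modelled here with a hypothetical WeakSet, purely for illustration:

```typescript
// Assumption: `revoked` stands in for V8's internal "handler is null" check.
const revoked = new WeakSet<object>();

function checkProxyArguments(target: unknown, handler: unknown): void {
  const isObject = (x: unknown): x is object =>
    x !== null && (typeof x === "object" || typeof x === "function");

  // Smis and other primitives fail here (throw_proxy_non_object).
  if (!isObject(target) || !isObject(handler)) {
    throw new TypeError("Cannot create proxy with a non-object as target or handler");
  }
  // Revoked proxies fail here (throw_proxy_handler_or_target_revoked).
  if (revoked.has(target) || revoked.has(handler)) {
    throw new TypeError("Cannot create proxy with a revoked proxy as target or handler");
  }
}
```
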
+
+TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
+ Node* argc = Parameter(Descriptor::kActualArgumentsCount);
+ Node* argc_ptr = ChangeInt32ToIntPtr(argc);
+ Node* proxy = Parameter(Descriptor::kFunction);
+ Node* context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsJSProxy(proxy));
+ CSA_ASSERT(this, IsCallable(proxy));
+
+ Label throw_proxy_handler_revoked(this, Label::kDeferred),
+ trap_undefined(this), trap_defined(this, Label::kDeferred);
+
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
+
+ // 2. If handler is null, throw a TypeError exception.
+ CSA_ASSERT(this, Word32Or(IsJSReceiver(handler), IsNull(handler)));
+ GotoIf(IsNull(handler), &throw_proxy_handler_revoked);
+
+ // 3. Assert: Type(handler) is Object.
+ CSA_ASSERT(this, IsJSReceiver(handler));
+
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
+
+ // 5. Let trap be ? GetMethod(handler, "apply").
+ Handle<Name> trap_name = factory()->apply_string();
+ Node* trap = GetProperty(context, handler, trap_name);
+
+ // 6. If trap is undefined, then
+ GotoIf(IsUndefined(trap), &trap_undefined);
+ Branch(IsNull(trap), &trap_undefined, &trap_defined);
+
+ BIND(&trap_defined);
+ {
+ CodeStubArguments args(this, argc_ptr);
+ Node* receiver = args.GetReceiver();
+
+ // 7. Let argArray be CreateArrayFromList(argumentsList).
+ Node* array = AllocateJSArrayForCodeStubArguments(context, args, argc_ptr,
+ INTPTR_PARAMETERS);
+
+    // 8. Return Call(trap, handler, «target, thisArgument, argArray»).
+ Node* result = CallJS(CodeFactory::Call(isolate()), context, trap, handler,
+ target, receiver, array);
+ args.PopAndReturn(result);
+ }
+
+ BIND(&trap_undefined);
+ {
+ // 6.a. Return Call(target, thisArgument, argumentsList).
+ TailCallStub(CodeFactory::Call(isolate()), context, target, argc);
+ }
+
+ BIND(&throw_proxy_handler_revoked);
+ {
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kProxyRevoked),
+ StringConstant("apply"));
+ Unreachable();
+ }
+}
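
CallProxy follows the [[Call]] steps from the spec: a null handler means the proxy has been revoked, an undefined or null "apply" trap forwards the call to the target, and otherwise the trap is invoked with (target, thisArgument, argumentsList). A spec-level TypeScript sketch, not the CSA code; names are ours:

```typescript
// Sketch of Proxy [[Call]] dispatch.
function proxyCall(
  handler: Record<string, unknown> | null,   // [[ProxyHandler]]
  target: (...args: unknown[]) => unknown,   // [[ProxyTarget]]
  thisArg: unknown,
  args: unknown[]
): unknown {
  if (handler === null) {
    // Step 2: revoked proxy.
    throw new TypeError("Cannot perform 'apply' on a proxy that has been revoked");
  }
  const trap = handler["apply"];                 // Step 5: GetMethod(handler, "apply")
  if (trap === undefined || trap === null) {
    return Reflect.apply(target, thisArg, args); // Step 6.a: call the target directly
  }
  if (typeof trap !== "function") {
    throw new TypeError("'apply' trap is not a function"); // GetMethod would throw
  }
  // Step 8: Call(trap, handler, « target, thisArgument, argArray »)
  return Reflect.apply(trap, handler, [target, thisArg, args]);
}
```
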
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-proxy.cc b/deps/v8/src/builtins/builtins-proxy.cc
deleted file mode 100644
index db6f7b57c9..0000000000
--- a/deps/v8/src/builtins/builtins-proxy.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins.h"
-#include "src/builtins/builtins-utils.h"
-
-#include "src/counters.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
-BUILTIN(ProxyConstructor) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked("Proxy")));
-}
-
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
-BUILTIN(ProxyConstructor_ConstructStub) {
- HandleScope scope(isolate);
- DCHECK(isolate->proxy_function()->IsConstructor());
- Handle<Object> target = args.atOrUndefined(isolate, 1);
- Handle<Object> handler = args.atOrUndefined(isolate, 2);
- RETURN_RESULT_OR_FAILURE(isolate, JSProxy::New(isolate, target, handler));
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 04a35bd000..e32ff69c95 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -52,7 +52,7 @@ void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp,
// Store through runtime.
// TODO(ishell): Use SetPropertyStub here once available.
Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
- Node* const language_mode = SmiConstant(Smi::FromInt(STRICT));
+ Node* const language_mode = SmiConstant(STRICT);
CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
language_mode);
}
@@ -257,7 +257,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
ToDirectStringAssembler to_direct(state(), string);
VARIABLE(var_result, MachineRepresentation::kTagged);
- Label out(this), runtime(this, Label::kDeferred);
+ Label out(this), atom(this), runtime(this, Label::kDeferred);
// External constants.
Node* const isolate_address =
@@ -269,11 +269,20 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
Node* const static_offsets_vector_address = ExternalConstant(
ExternalReference::address_of_static_offsets_vector(isolate()));
- // Ensure that a RegExp stack is allocated.
+ // At this point, last_index is definitely a canonicalized non-negative
+ // number, which implies that any non-Smi last_index is greater than
+ // the maximal string length. If lastIndex > string.length then the matcher
+ // must fail.
+
+ Label if_failure(this);
+ Node* const smi_string_length = LoadStringLength(string);
{
- Node* const stack_size =
- Load(MachineType::IntPtr(), regexp_stack_memory_size_address);
- GotoIf(IntPtrEqual(stack_size, int_zero), &runtime);
+ CSA_ASSERT(this, IsNumberNormalized(last_index));
+ CSA_ASSERT(this, IsNumberPositive(last_index));
+ Node* const last_index_is_not_smi = TaggedIsNotSmi(last_index);
+ Node* const last_index_is_oob =
+ SmiGreaterThan(last_index, smi_string_length);
+ GotoIf(Word32Or(last_index_is_not_smi, last_index_is_oob), &if_failure);
}
Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
@@ -282,10 +291,25 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
CSA_ASSERT(this, TaggedIsNotSmi(data));
CSA_ASSERT(this, HasInstanceType(data, FIXED_ARRAY_TYPE));
- // Check the type of the RegExp. Only continue if type is
- // JSRegExp::IRREGEXP.
- Node* const tag = LoadFixedArrayElement(data, JSRegExp::kTagIndex);
- GotoIfNot(SmiEqual(tag, SmiConstant(JSRegExp::IRREGEXP)), &runtime);
+ // Dispatch on the type of the RegExp.
+ {
+ Label next(this), unreachable(this, Label::kDeferred);
+ Node* const tag = LoadAndUntagToWord32FixedArrayElement(
+ data, IntPtrConstant(JSRegExp::kTagIndex));
+
+ int32_t values[] = {
+ JSRegExp::IRREGEXP, JSRegExp::ATOM, JSRegExp::NOT_COMPILED,
+ };
+ Label* labels[] = {&next, &atom, &runtime};
+
+ STATIC_ASSERT(arraysize(values) == arraysize(labels));
+ Switch(tag, &unreachable, values, labels, arraysize(values));
+
+ BIND(&unreachable);
+ Unreachable();
+
+ BIND(&next);
+ }
// Check (number_of_captures + 1) * 2 <= offsets vector size
// Or number_of_captures <= offsets vector size / 2 - 1
@@ -300,23 +324,18 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
&runtime);
}
+ // Ensure that a RegExp stack is allocated. This check is after branching off
+ // for ATOM regexps to avoid unnecessary trips to runtime.
+ {
+ Node* const stack_size =
+ Load(MachineType::IntPtr(), regexp_stack_memory_size_address);
+ GotoIf(IntPtrEqual(stack_size, int_zero), &runtime);
+ }
+
// Unpack the string if possible.
to_direct.TryToDirect(&runtime);
- Node* const smi_string_length = LoadStringLength(string);
-
- // At this point, last_index is definitely a canonicalized non-negative
- // number, which implies that any non-Smi last_index is greater than
- // the maximal string length. If lastIndex > string.length then the matcher
- // must fail.
-
- Label if_failure(this);
- CSA_ASSERT(this, IsNumberNormalized(last_index));
- CSA_ASSERT(this, IsNumberPositive(last_index));
- GotoIfNot(TaggedIsSmi(last_index), &if_failure); // Outside Smi range.
- GotoIf(SmiGreaterThan(last_index, smi_string_length), &if_failure);
-
// Load the irregexp code object and offsets into the subject string. Both
// depend on whether the string is one- or two-byte.
@@ -358,10 +377,22 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
}
// Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // smi (code flushing support).
+  // encoding. If it has, the field contains a code object; otherwise it
+ // contains the uninitialized sentinel as a smi.
Node* const code = var_code.value();
+#ifdef DEBUG
+ {
+ Label next(this);
+ GotoIfNot(TaggedIsSmi(code), &next);
+
+ CSA_ASSERT(this,
+ SmiEqual(code, SmiConstant(JSRegExp::kUninitializedValue)));
+ Goto(&next);
+
+ BIND(&next);
+ }
+#endif
GotoIf(TaggedIsSmi(code), &runtime);
CSA_ASSERT(this, HasInstanceType(code, CODE_TYPE));
@@ -481,7 +512,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
register_count, INT32_ELEMENTS, SMI_PARAMETERS, 0);
Node* const to_offset = ElementOffsetFromIndex(
- IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), FAST_ELEMENTS,
+ IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), PACKED_ELEMENTS,
INTPTR_PARAMETERS, RegExpMatchInfo::kHeaderSize - kHeapObjectTag);
VARIABLE(var_to_offset, MachineType::PointerRepresentation(), to_offset);
@@ -513,8 +544,8 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
{
// A stack overflow was detected in RegExp code.
#ifdef DEBUG
- Node* const pending_exception_address = ExternalConstant(
- ExternalReference(Isolate::kPendingExceptionAddress, isolate()));
+ Node* const pending_exception_address = ExternalConstant(ExternalReference(
+ IsolateAddressId::kPendingExceptionAddress, isolate()));
CSA_ASSERT(this, IsTheHole(Load(MachineType::AnyTagged(),
pending_exception_address)));
#endif // DEBUG
@@ -530,6 +561,16 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
Goto(&out);
}
+ BIND(&atom);
+ {
+    // TODO(jgruber): A call with 4 args stresses register allocation; this
+ // should probably just be inlined.
+ Node* const result = CallBuiltin(Builtins::kRegExpExecAtom, context, regexp,
+ string, last_index, match_info);
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
BIND(&out);
return var_result.value();
#endif // V8_INTERPRETED_REGEXP
@@ -546,7 +587,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
Label* if_didnotmatch, const bool is_fastpath) {
Node* const null = NullConstant();
Node* const int_zero = IntPtrConstant(0);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
if (is_fastpath) {
CSA_ASSERT(this, IsFastRegExpNoPrototype(context, regexp));
@@ -714,16 +755,10 @@ Node* RegExpBuiltinsAssembler::ThrowIfNotJSReceiver(
// The {value} is not a compatible receiver for this method.
BIND(&throw_exception);
{
- Node* const message_id = SmiConstant(Smi::FromInt(msg_template));
- Node* const method_name_str = HeapConstant(
- isolate()->factory()->NewStringFromAsciiChecked(method_name, TENURED));
-
Node* const value_str =
CallBuiltin(Builtins::kToString, context, maybe_receiver);
-
- CallRuntime(Runtime::kThrowTypeError, context, message_id, method_name_str,
- value_str);
- Unreachable();
+ ThrowTypeError(context, msg_template, StringConstant(method_name),
+ value_str);
}
BIND(&out);
@@ -851,6 +886,70 @@ TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
Return(RegExpPrototypeExecBody(context, regexp, string, false));
}
+// Fast path stub for ATOM regexps. String matching is done by StringIndexOf,
+// and {match_info} is updated on success.
+// The slow path is implemented in RegExpImpl::AtomExec.
+TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
+ Node* const regexp = Parameter(Descriptor::kRegExp);
+ Node* const subject_string = Parameter(Descriptor::kString);
+ Node* const last_index = Parameter(Descriptor::kLastIndex);
+ Node* const match_info = Parameter(Descriptor::kMatchInfo);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsJSRegExp(regexp));
+ CSA_ASSERT(this, IsString(subject_string));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
+ CSA_ASSERT(this, IsFixedArray(match_info));
+
+ Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
+ CSA_ASSERT(this, IsFixedArray(data));
+ CSA_ASSERT(this, SmiEqual(LoadFixedArrayElement(data, JSRegExp::kTagIndex),
+ SmiConstant(JSRegExp::ATOM)));
+
+ // Callers ensure that last_index is in-bounds.
+ CSA_ASSERT(this,
+ SmiLessThanOrEqual(last_index, LoadStringLength(subject_string)));
+
+ Node* const needle_string =
+ LoadFixedArrayElement(data, JSRegExp::kAtomPatternIndex);
+ CSA_ASSERT(this, IsString(needle_string));
+
+ Node* const match_from =
+ CallBuiltin(Builtins::kStringIndexOf, context, subject_string,
+ needle_string, last_index);
+ CSA_ASSERT(this, TaggedIsSmi(match_from));
+
+ Label if_failure(this), if_success(this);
+ Branch(SmiEqual(match_from, SmiConstant(-1)), &if_failure, &if_success);
+
+ BIND(&if_success);
+ {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(match_from));
+ CSA_ASSERT(this, SmiLessThan(match_from, LoadStringLength(subject_string)));
+
+ const int kNumRegisters = 2;
+ STATIC_ASSERT(RegExpMatchInfo::kInitialCaptureIndices >= kNumRegisters);
+
+ Node* const match_to = SmiAdd(match_from, LoadStringLength(needle_string));
+
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kNumberOfCapturesIndex,
+ SmiConstant(kNumRegisters), SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kLastSubjectIndex,
+ subject_string);
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kLastInputIndex,
+ subject_string);
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex,
+ match_from, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex + 1,
+ match_to, SKIP_WRITE_BARRIER);
+
+ Return(match_info);
+ }
+
+ BIND(&if_failure);
+ Return(NullConstant());
+}
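
The ATOM fast path reduces exec on a regexp whose pattern V8 has classified as a plain literal string to a single StringIndexOf from lastIndex; on success only the [start, end) pair is recorded in the match info. A minimal TypeScript sketch of that behaviour; names are ours:

```typescript
// Sketch of RegExpExecAtom: indexOf plus a two-register match record.
function atomExec(
  needle: string,        // the ATOM pattern stored in the regexp's data array
  subject: string,
  lastIndex: number      // callers ensure lastIndex <= subject.length
): { start: number; end: number } | null {
  const start = subject.indexOf(needle, lastIndex); // StringIndexOf
  if (start === -1) return null;                    // failure path returns null
  return { start, end: start + needle.length };     // kFirstCaptureIndex, +1
}

// Example: atomExec("ab", "xxabxx", 0) returns { start: 2, end: 4 }.
```
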
+
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
@@ -864,7 +963,7 @@ TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label if_isfastpath(this), if_isslowpath(this);
Branch(IsFastRegExpNoPrototype(context, receiver), &if_isfastpath,
@@ -1050,13 +1149,13 @@ Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
// Normalize pattern.
Node* const pattern =
Select(IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); },
- [=] { return ToString(context, maybe_pattern); },
+ [=] { return ToString_Inline(context, maybe_pattern); },
MachineRepresentation::kTagged);
// Normalize flags.
Node* const flags =
Select(IsUndefined(maybe_flags), [=] { return EmptyStringConstant(); },
- [=] { return ToString(context, maybe_flags); },
+ [=] { return ToString_Inline(context, maybe_flags); },
MachineRepresentation::kTagged);
// Initialize.
@@ -1308,8 +1407,7 @@ TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
BIND(&if_isnotprototype);
{
- Node* const message_id =
- SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+ Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
Node* const method_name_str =
HeapConstant(isolate->factory()->NewStringFromAsciiChecked(
"RegExp.prototype.source"));
@@ -1322,9 +1420,9 @@ TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
// Fast-path implementation for flag checks on an unmodified JSRegExp instance.
Node* RegExpBuiltinsAssembler::FastFlagGetter(Node* const regexp,
JSRegExp::Flag flag) {
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
- Node* const mask = SmiConstant(Smi::FromInt(flag));
+ Node* const mask = SmiConstant(flag);
Node* const is_flag_set = WordNotEqual(SmiAnd(flags, mask), smi_zero);
return is_flag_set;
@@ -1428,7 +1526,7 @@ void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
BIND(&if_isprototype);
{
if (counter != -1) {
- Node* const counter_smi = SmiConstant(Smi::FromInt(counter));
+ Node* const counter_smi = SmiConstant(counter);
CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
}
Return(UndefinedConstant());
@@ -1436,8 +1534,7 @@ void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
BIND(&if_isnotprototype);
{
- Node* const message_id =
- SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+ Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
Node* const method_name_str = HeapConstant(
isolate->factory()->NewStringFromAsciiChecked(method_name));
CallRuntime(Runtime::kThrowTypeError, context, message_id,
@@ -1578,7 +1675,7 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -1720,7 +1817,7 @@ class GrowableFixedArray {
Node* ToJSArray(Node* const context) {
CodeStubAssembler* a = assembler_;
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
Node* const native_context = a->LoadNativeContext(context);
Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
@@ -1757,7 +1854,7 @@ class GrowableFixedArray {
void Initialize() {
CodeStubAssembler* a = assembler_;
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
static const int kInitialArraySize = 8;
Node* const capacity = a->IntPtrConstant(kInitialArraySize);
@@ -1793,7 +1890,7 @@ class GrowableFixedArray {
CSA_ASSERT(a, a->IntPtrGreaterThan(new_capacity, a->IntPtrConstant(0)));
CSA_ASSERT(a, a->IntPtrGreaterThanOrEqual(new_capacity, element_count));
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
const CodeStubAssembler::ParameterMode mode =
CodeStubAssembler::INTPTR_PARAMETERS;
@@ -1827,7 +1924,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const null = NullConstant();
Node* const int_zero = IntPtrConstant(0);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
Node* const is_global =
FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
@@ -1910,7 +2007,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
{
// TODO(ishell): Use GetElement stub once it's available.
Node* const match = GetProperty(context, result, smi_zero);
- var_match.Bind(ToString(context, match));
+ var_match.Bind(ToString_Inline(context, match));
Goto(&if_didmatch);
}
}
@@ -1984,7 +2081,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2005,7 +2102,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
Node* const previous_last_index = FastLoadLastIndex(regexp);
// Ensure last index is 0.
- FastStoreLastIndex(regexp, SmiConstant(Smi::kZero));
+ FastStoreLastIndex(regexp, SmiConstant(0));
// Call exec.
Label if_didnotmatch(this);
@@ -2038,7 +2135,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
Isolate* const isolate = this->isolate();
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
// Grab the initial value of last index.
Node* const previous_last_index = SlowLoadLastIndex(context, regexp);
@@ -2111,7 +2208,7 @@ TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2138,7 +2235,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const int_zero = IntPtrConstant(0);
Node* const int_limit = SmiUntag(limit);
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
Node* const allocation_site = nullptr;
@@ -2444,10 +2541,8 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
CodeStubArguments args(this, argc);
Node* const maybe_receiver = args.GetReceiver();
- Node* const maybe_string =
- args.GetOptionalArgumentValue(kStringArg, UndefinedConstant());
- Node* const maybe_limit =
- args.GetOptionalArgumentValue(kLimitArg, UndefinedConstant());
+ Node* const maybe_string = args.GetOptionalArgumentValue(kStringArg);
+ Node* const maybe_limit = args.GetOptionalArgumentValue(kLimitArg);
Node* const context = Parameter(BuiltinDescriptor::kContext);
// Ensure {maybe_receiver} is a JSReceiver.
@@ -2457,7 +2552,7 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label stub(this), runtime(this, Label::kDeferred);
BranchIfFastRegExp(context, receiver, &stub, &runtime);
@@ -2486,7 +2581,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Node* const undefined = UndefinedConstant();
Node* const int_zero = IntPtrConstant(0);
Node* const int_one = IntPtrConstant(1);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
Node* const native_context = LoadNativeContext(context);
@@ -2499,7 +2594,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// Allocate {result_array}.
Node* result_array;
{
- ElementsKind kind = FAST_ELEMENTS;
+ ElementsKind kind = PACKED_ELEMENTS;
Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
Node* const capacity = IntPtrConstant(16);
Node* const length = smi_zero;
@@ -2536,7 +2631,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Label if_hasexplicitcaptures(this), if_noexplicitcaptures(this),
create_result(this);
- Branch(SmiEqual(num_capture_registers, SmiConstant(Smi::FromInt(2))),
+ Branch(SmiEqual(num_capture_registers, SmiConstant(2)),
&if_noexplicitcaptures, &if_hasexplicitcaptures);
BIND(&if_noexplicitcaptures);
@@ -2600,14 +2695,14 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
BIND(&if_isstring);
{
- CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(elem)));
+ CSA_ASSERT(this, IsString(elem));
Callable call_callable = CodeFactory::Call(isolate);
Node* const replacement_obj =
CallJS(call_callable, context, replace_callable, undefined, elem,
var_match_start.value(), string);
- Node* const replacement_str = ToString(context, replacement_obj);
+ Node* const replacement_str = ToString_Inline(context, replacement_obj);
StoreFixedArrayElement(res_elems, i, replacement_str);
Node* const elem_length = LoadStringLength(elem);
@@ -2660,7 +2755,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// we got back from the callback function.
Node* const replacement_str =
- ToString(context, replacement_obj);
+ ToString_Inline(context, replacement_obj);
StoreFixedArrayElement(res_elems, index, replacement_str);
Goto(&do_continue);
@@ -2692,7 +2787,7 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
// string replacement.
Node* const int_zero = IntPtrConstant(0);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
CSA_ASSERT(this, IsFastRegExp(context, regexp));
CSA_ASSERT(this, IsString(replace_string));
@@ -2865,10 +2960,8 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
CodeStubArguments args(this, argc);
Node* const maybe_receiver = args.GetReceiver();
- Node* const maybe_string =
- args.GetOptionalArgumentValue(kStringArg, UndefinedConstant());
- Node* const replace_value =
- args.GetOptionalArgumentValue(kReplaceValueArg, UndefinedConstant());
+ Node* const maybe_string = args.GetOptionalArgumentValue(kStringArg);
+ Node* const replace_value = args.GetOptionalArgumentValue(kReplaceValueArg);
Node* const context = Parameter(BuiltinDescriptor::kContext);
// RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index d7a81a2ffe..8edb3574cd 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -70,8 +70,9 @@ MUST_USE_RESULT Maybe<size_t> ValidateAtomicAccess(
MessageTemplate::kInvalidAtomicAccessIndex),
Nothing<size_t>());
- size_t access_index = NumberToSize(*access_index_obj);
- if (access_index >= typed_array->length_value()) {
+ size_t access_index;
+ if (!TryNumberToSize(*access_index_obj, &access_index) ||
+ access_index >= typed_array->length_value()) {
isolate->Throw(*isolate->factory()->NewRangeError(
MessageTemplate::kInvalidAtomicAccessIndex));
return Nothing<size_t>();
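
The hunk above folds the index conversion and the bounds check into one guarded step, so an index that cannot be represented as a size_t at all (negative, NaN, or too large) is rejected with the same RangeError as an index past the end of the typed array. A minimal sketch of that combined check in plain C++ (not V8 code; names are stand-ins):

#include <cmath>
#include <cstdint>
#include <optional>

// Combined "convert, then bounds-check" validation, mirroring the switch from
// NumberToSize to TryNumberToSize above.
std::optional<size_t> ValidateAtomicAccessIndex(double index, size_t length) {
  // Reject anything that is not representable as size_t (NaN, negative, huge).
  if (std::isnan(index) || index < 0.0 ||
      index >= static_cast<double>(SIZE_MAX)) {
    return std::nullopt;  // caller throws kInvalidAtomicAccessIndex
  }
  size_t access_index = static_cast<size_t>(index);
  // Reject indices past the end of the typed array.
  if (access_index >= length) {
    return std::nullopt;
  }
  return access_index;
}
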
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index ee85476401..7dd7eaef76 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -126,7 +126,7 @@ void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
Node* context, Variable* var_start, Node* start, Node* string_length) {
Node* const start_int =
ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
- Node* const zero = SmiConstant(Smi::kZero);
+ Node* const zero = SmiConstant(0);
Label done(this);
Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
@@ -997,16 +997,16 @@ void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
Branch(IsNullOrUndefined(value), &throw_exception, &out);
BIND(&throw_exception);
- TailCallRuntime(
- Runtime::kThrowCalledOnNullOrUndefined, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(method_name, TENURED)));
+ TailCallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
+ StringConstant(method_name));
BIND(&out);
}
void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
Node* const context, Node* const object, Handle<Symbol> symbol,
- const NodeFunction0& regexp_call, const NodeFunction1& generic_call) {
+ const NodeFunction0& regexp_call, const NodeFunction1& generic_call,
+ CodeStubArguments* args) {
Label out(this);
// Smis definitely don't have an attached symbol.
@@ -1044,7 +1044,12 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
&slow_lookup);
BIND(&stub_call);
- Return(regexp_call());
+ Node* const result = regexp_call();
+ if (args == nullptr) {
+ Return(result);
+ } else {
+ args->PopAndReturn(result);
+ }
BIND(&slow_lookup);
}
@@ -1065,7 +1070,11 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
// Attempt to call the function.
Node* const result = generic_call(maybe_func);
- Return(result);
+ if (args == nullptr) {
+ Return(result);
+ } else {
+ args->PopAndReturn(result);
+ }
BIND(&out);
}
@@ -1144,9 +1153,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
[=]() {
Node* const subject_string = ToString_Inline(context, receiver);
- Callable replace_callable = CodeFactory::RegExpReplace(isolate());
- return CallStub(replace_callable, context, search, subject_string,
- replace);
+ return CallBuiltin(Builtins::kRegExpReplace, context, search,
+ subject_string, replace);
},
[=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
@@ -1155,8 +1163,6 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
// Convert {receiver} and {search} to strings.
- Callable indexof_callable = CodeFactory::StringIndexOf(isolate());
-
Node* const subject_string = ToString_Inline(context, receiver);
Node* const search_string = ToString_Inline(context, search);
@@ -1193,8 +1199,9 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
// longer substrings - we can handle up to 8 chars (one-byte) / 4 chars
// (2-byte).
- Node* const match_start_index = CallStub(
- indexof_callable, context, subject_string, search_string, smi_zero);
+ Node* const match_start_index =
+ CallBuiltin(Builtins::kStringIndexOf, context, subject_string,
+ search_string, smi_zero);
CSA_ASSERT(this, TaggedIsSmi(match_start_index));
// Early exit if no match found.
@@ -1294,9 +1301,8 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
- Node* const start =
- args.GetOptionalArgumentValue(kStart, UndefinedConstant());
- Node* const end = args.GetOptionalArgumentValue(kEnd, UndefinedConstant());
+ Node* const start = args.GetOptionalArgumentValue(kStart);
+ Node* const end = args.GetOptionalArgumentValue(kEnd);
Node* const context = Parameter(BuiltinDescriptor::kContext);
Node* const smi_zero = SmiConstant(0);
@@ -1305,8 +1311,8 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
RequireObjectCoercible(context, receiver, "String.prototype.slice");
// 2. Let S be ? ToString(O).
- Callable tostring_callable = CodeFactory::ToString(isolate());
- Node* const subject_string = CallStub(tostring_callable, context, receiver);
+ Node* const subject_string =
+ CallBuiltin(Builtins::kToString, context, receiver);
// 3. Let len be the number of elements in S.
Node* const length = LoadStringLength(subject_string);
@@ -1367,12 +1373,17 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
// ES6 section 21.1.3.19 String.prototype.split ( separator, limit )
TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
- Label out(this);
+ const int kSeparatorArg = 0;
+ const int kLimitArg = 1;
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const separator = Parameter(Descriptor::kSeparator);
- Node* const limit = Parameter(Descriptor::kLimit);
- Node* const context = Parameter(Descriptor::kContext);
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const receiver = args.GetReceiver();
+ Node* const separator = args.GetOptionalArgumentValue(kSeparatorArg);
+ Node* const limit = args.GetOptionalArgumentValue(kLimitArg);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
Node* const smi_zero = SmiConstant(0);
@@ -1385,14 +1396,14 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
[=]() {
Node* const subject_string = ToString_Inline(context, receiver);
- Callable split_callable = CodeFactory::RegExpSplit(isolate());
- return CallStub(split_callable, context, separator, subject_string,
- limit);
+ return CallBuiltin(Builtins::kRegExpSplit, context, separator,
+ subject_string, limit);
},
[=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
return CallJS(call_callable, context, fn, separator, receiver, limit);
- });
+ },
+ &args);
// String and integer conversions.
@@ -1408,7 +1419,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Label next(this);
GotoIfNot(SmiEqual(limit_number, smi_zero), &next);
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
Node* const native_context = LoadNativeContext(context);
Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
@@ -1416,7 +1427,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Node* const capacity = IntPtrConstant(0);
Node* const result = AllocateJSArray(kind, array_map, capacity, length);
- Return(result);
+ args.PopAndReturn(result);
BIND(&next);
}
@@ -1427,7 +1438,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Label next(this);
GotoIfNot(IsUndefined(separator), &next);
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
Node* const native_context = LoadNativeContext(context);
Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
@@ -1438,7 +1449,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Node* const fixed_array = LoadElements(result);
StoreFixedArrayElement(fixed_array, 0, subject_string);
- Return(result);
+ args.PopAndReturn(result);
BIND(&next);
}
@@ -1450,7 +1461,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Node* const result = CallRuntime(Runtime::kStringToArray, context,
subject_string, limit_number);
- Return(result);
+ args.PopAndReturn(result);
BIND(&next);
}
@@ -1458,22 +1469,29 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Node* const result =
CallRuntime(Runtime::kStringSplit, context, subject_string,
separator_string, limit_number);
- Return(result);
+ args.PopAndReturn(result);
}
// ES6 #sec-string.prototype.substr
TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
+ const int kStartArg = 0;
+ const int kLengthArg = 1;
+
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const receiver = args.GetReceiver();
+ Node* const start = args.GetOptionalArgumentValue(kStartArg);
+ Node* const length = args.GetOptionalArgumentValue(kLengthArg);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+
Label out(this);
VARIABLE(var_start, MachineRepresentation::kTagged);
VARIABLE(var_length, MachineRepresentation::kTagged);
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const start = Parameter(Descriptor::kStart);
- Node* const length = Parameter(Descriptor::kLength);
- Node* const context = Parameter(Descriptor::kContext);
-
- Node* const zero = SmiConstant(Smi::kZero);
+ Node* const zero = SmiConstant(0);
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
@@ -1513,7 +1531,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
var_length.Bind(SmiMin(positive_length, minimal_length));
GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
- Return(EmptyStringConstant());
+ args.PopAndReturn(EmptyStringConstant());
}
BIND(&if_isheapnumber);
@@ -1522,7 +1540,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
// two cases according to the spec: if it is negative, "" is returned; if
// it is positive, then length is set to {string_length} - {start}.
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(var_length.value())));
+ CSA_ASSERT(this, IsHeapNumber(var_length.value()));
Label if_isnegative(this), if_ispositive(this);
Node* const float_zero = Float64Constant(0.);
@@ -1531,13 +1549,13 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
&if_ispositive);
BIND(&if_isnegative);
- Return(EmptyStringConstant());
+ args.PopAndReturn(EmptyStringConstant());
BIND(&if_ispositive);
{
var_length.Bind(SmiSub(string_length, var_start.value()));
GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
- Return(EmptyStringConstant());
+ args.PopAndReturn(EmptyStringConstant());
}
}
@@ -1545,7 +1563,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
{
Node* const end = SmiAdd(var_start.value(), var_length.value());
Node* const result = SubString(context, string, var_start.value(), end);
- Return(result);
+ args.PopAndReturn(result);
}
}
@@ -1574,7 +1592,7 @@ compiler::Node* StringBuiltinsAssembler::ToSmiBetweenZeroAnd(Node* context,
BIND(&if_isoutofbounds);
{
- Node* const zero = SmiConstant(Smi::kZero);
+ Node* const zero = SmiConstant(0);
var_result.Bind(
SelectTaggedConstant(SmiLessThan(value_int, zero), zero, limit));
Goto(&out);
@@ -1584,10 +1602,10 @@ compiler::Node* StringBuiltinsAssembler::ToSmiBetweenZeroAnd(Node* context,
BIND(&if_isnotsmi);
{
// {value} is a heap number - in this case, it is definitely out of bounds.
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value_int)));
+ CSA_ASSERT(this, IsHeapNumber(value_int));
Node* const float_zero = Float64Constant(0.);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
Node* const value_float = LoadHeapNumberValue(value_int);
var_result.Bind(SelectTaggedConstant(
Float64LessThan(value_float, float_zero), smi_zero, limit));
@@ -1600,16 +1618,23 @@ compiler::Node* StringBuiltinsAssembler::ToSmiBetweenZeroAnd(Node* context,
// ES6 #sec-string.prototype.substring
TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
+ const int kStartArg = 0;
+ const int kEndArg = 1;
+
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const receiver = args.GetReceiver();
+ Node* const start = args.GetOptionalArgumentValue(kStartArg);
+ Node* const end = args.GetOptionalArgumentValue(kEndArg);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+
Label out(this);
VARIABLE(var_start, MachineRepresentation::kTagged);
VARIABLE(var_end, MachineRepresentation::kTagged);
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const start = Parameter(Descriptor::kStart);
- Node* const end = Parameter(Descriptor::kEnd);
- Node* const context = Parameter(Descriptor::kContext);
-
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
ToThisString(context, receiver, "String.prototype.substring");
@@ -1643,7 +1668,7 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
{
Node* result =
SubString(context, string, var_start.value(), var_end.value());
- Return(result);
+ args.PopAndReturn(result);
}
}
@@ -1679,13 +1704,13 @@ TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
LoadContextElement(native_context, Context::STRING_ITERATOR_MAP_INDEX);
Node* iterator = Allocate(JSStringIterator::kSize);
StoreMapNoWriteBarrier(iterator, map);
- StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
+ StoreObjectFieldRoot(iterator, JSValue::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kStringOffset,
string);
- Node* index = SmiConstant(Smi::kZero);
+ Node* index = SmiConstant(0);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
index);
Return(iterator);
@@ -1705,7 +1730,7 @@ compiler::Node* StringBuiltinsAssembler::LoadSurrogatePairAt(
GotoIf(Word32NotEqual(Word32And(var_result.value(), Int32Constant(0xFC00)),
Int32Constant(0xD800)),
&return_result);
- Node* next_index = SmiAdd(index, SmiConstant(Smi::FromInt(1)));
+ Node* next_index = SmiAdd(index, SmiConstant(1));
GotoIfNot(SmiLessThan(next_index, length), &return_result);
var_trail.Bind(StringCharCodeAt(string, next_index));
@@ -1796,19 +1821,8 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
BIND(&return_result);
{
- Node* native_context = LoadNativeContext(context);
- Node* map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
- Node* result = Allocate(JSIteratorResult::kSize);
- StoreMapNoWriteBarrier(result, map);
- StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset,
- var_value.value());
- StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset,
- var_done.value());
+ Node* result =
+ AllocateJSIteratorResult(context, var_value.value(), var_done.value());
Return(result);
}
@@ -1816,12 +1830,272 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
{
// The {receiver} is not a valid JSGeneratorObject.
CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(
- "String Iterator.prototype.next", TENURED)),
- iterator);
+ StringConstant("String Iterator.prototype.next"), iterator);
Unreachable();
}
}
+Node* StringBuiltinsAssembler::ConcatenateSequentialStrings(
+ Node* context, Node* first_arg_ptr, Node* last_arg_ptr, Node* total_length,
+ String::Encoding encoding) {
+ Node* result;
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ result = AllocateSeqOneByteString(context, total_length, SMI_PARAMETERS);
+ } else {
+ DCHECK_EQ(String::TWO_BYTE_ENCODING, encoding);
+ result = AllocateSeqTwoByteString(context, total_length, SMI_PARAMETERS);
+ }
+
+ VARIABLE(current_arg, MachineType::PointerRepresentation(), first_arg_ptr);
+ VARIABLE(str_index, MachineRepresentation::kTaggedSigned, SmiConstant(0));
+
+ Label loop(this, {&current_arg, &str_index}), done(this);
+
+ Goto(&loop);
+ BIND(&loop);
+ {
+ VARIABLE(current_string, MachineRepresentation::kTagged,
+ Load(MachineType::AnyTagged(), current_arg.value()));
+
+ Label deref_indirect(this, Label::kDeferred),
+ is_sequential(this, &current_string);
+
+ // Check if we need to dereference an indirect string.
+ Node* instance_type = LoadInstanceType(current_string.value());
+ Branch(IsSequentialStringInstanceType(instance_type), &is_sequential,
+ &deref_indirect);
+
+ BIND(&is_sequential);
+ {
+ CSA_ASSERT(this, IsSequentialStringInstanceType(
+ LoadInstanceType(current_string.value())));
+ Node* current_length = LoadStringLength(current_string.value());
+ CopyStringCharacters(current_string.value(), result, SmiConstant(0),
+ str_index.value(), current_length, encoding,
+ encoding, SMI_PARAMETERS);
+ str_index.Bind(SmiAdd(str_index.value(), current_length));
+ current_arg.Bind(
+ IntPtrSub(current_arg.value(), IntPtrConstant(kPointerSize)));
+ Branch(IntPtrGreaterThanOrEqual(current_arg.value(), last_arg_ptr), &loop,
+ &done);
+ }
+
+ BIND(&deref_indirect);
+ {
+ DerefIndirectString(&current_string, instance_type);
+ Goto(&is_sequential);
+ }
+ }
+ BIND(&done);
+ CSA_ASSERT(this, SmiEqual(str_index.value(), total_length));
+ return result;
+}
+
+Node* StringBuiltinsAssembler::ConcatenateStrings(Node* context,
+ Node* first_arg_ptr,
+ Node* arg_count,
+ Label* bailout_to_runtime) {
+ Label do_flat_string(this), do_cons_string(this), done(this);
+ // There must be at least two strings being concatenated.
+ CSA_ASSERT(this, Uint32GreaterThanOrEqual(arg_count, Int32Constant(2)));
+ // Arguments grow up on the stack, so subtract arg_count - 1 from first_arg to
+ // get the last argument to be concatenated.
+ Node* last_arg_ptr = IntPtrSub(
+ first_arg_ptr, TimesPointerSize(IntPtrSub(ChangeUint32ToWord(arg_count),
+ IntPtrConstant(1))));
+
+ VARIABLE(current_arg, MachineType::PointerRepresentation(), first_arg_ptr);
+ VARIABLE(current_string, MachineRepresentation::kTagged,
+ Load(MachineType::AnyTagged(), current_arg.value()));
+ VARIABLE(total_length, MachineRepresentation::kTaggedSigned, SmiConstant(0));
+ VARIABLE(result, MachineRepresentation::kTagged);
+
+ Node* string_encoding = Word32And(LoadInstanceType(current_string.value()),
+ Int32Constant(kStringEncodingMask));
+
+ Label flat_length_loop(this, {&current_arg, &current_string, &total_length}),
+ done_flat_length_loop(this);
+ Goto(&flat_length_loop);
+ BIND(&flat_length_loop);
+ {
+ Comment("Loop to find length and type of initial flat-string");
+ Label is_sequential_or_can_deref(this), check_deref_instance_type(this);
+
+ // Increment total_length by the current string's length.
+ Node* string_length = LoadStringLength(current_string.value());
+ CSA_ASSERT(this, TaggedIsSmi(string_length));
+ // No need to check for Smi overflow since String::kMaxLength is 2^28 - 16.
+ total_length.Bind(SmiAdd(total_length.value(), string_length));
+
+ // If we are above the min cons string length, bail out.
+ GotoIf(SmiAboveOrEqual(total_length.value(),
+ SmiConstant(ConsString::kMinLength)),
+ &done_flat_length_loop);
+
+ VARIABLE(instance_type, MachineRepresentation::kWord32,
+ LoadInstanceType(current_string.value()));
+
+ // Check if the new string is sequential or can be dereferenced as a
+ // sequential string. If it can't and we've reached here, we are still under
+ // ConsString::kMinLength, so we need to bail out to the runtime.
+ GotoIf(IsSequentialStringInstanceType(instance_type.value()),
+ &is_sequential_or_can_deref);
+ MaybeDerefIndirectString(&current_string, instance_type.value(),
+ &check_deref_instance_type, bailout_to_runtime);
+
+ BIND(&check_deref_instance_type);
+ {
+ instance_type.Bind(LoadInstanceType(current_string.value()));
+ Branch(IsSequentialStringInstanceType(instance_type.value()),
+ &is_sequential_or_can_deref, bailout_to_runtime);
+ }
+
+ BIND(&is_sequential_or_can_deref);
+
+ // Check that all the strings have the same encoding. If we got here we are
+ // still under ConsString::kMinLength, so we need to bail out to the runtime
+ // if the strings have different encodings.
+ GotoIf(Word32NotEqual(string_encoding,
+ Word32And(instance_type.value(),
+ Int32Constant(kStringEncodingMask))),
+ bailout_to_runtime);
+
+ current_arg.Bind(
+ IntPtrSub(current_arg.value(), IntPtrConstant(kPointerSize)));
+ GotoIf(IntPtrLessThan(current_arg.value(), last_arg_ptr),
+ &done_flat_length_loop);
+ current_string.Bind(Load(MachineType::AnyTagged(), current_arg.value()));
+ Goto(&flat_length_loop);
+ }
+ BIND(&done_flat_length_loop);
+
+ // If the new length is greater than String::kMaxLength, go to the runtime to throw.
+ GotoIf(SmiAboveOrEqual(total_length.value(), SmiConstant(String::kMaxLength)),
+ bailout_to_runtime);
+
+ // If new length is less than ConsString::kMinLength, concatenate all operands
+ // as a flat string.
+ GotoIf(SmiLessThan(total_length.value(), SmiConstant(ConsString::kMinLength)),
+ &do_flat_string);
+
+ // If the new length is greater than ConsString::kMinLength, create a flat
+ // string for first_arg to current_arg if there are at least two strings
+ // between them.
+ {
+ Comment("New length is greater than ConsString::kMinLength");
+
+ // Subtract length of the last string that pushed us over the edge.
+ Node* string_length = LoadStringLength(current_string.value());
+ total_length.Bind(SmiSub(total_length.value(), string_length));
+
+ // If we have 2 or more operands under ConsString::kMinLength, concatenate
+ // them as a flat string before concatenating the rest as a cons string. We
+ // concatenate the initial string as a flat string even though we will end
+ // up with a cons string since the time and memory overheads of that initial
+ // flat string will be less than they would be for concatenating the whole
+ // string as cons strings.
+ GotoIf(
+ IntPtrGreaterThanOrEqual(IntPtrSub(first_arg_ptr, current_arg.value()),
+ IntPtrConstant(2 * kPointerSize)),
+ &do_flat_string);
+
+ // Otherwise the whole concatenation should be cons strings.
+ result.Bind(Load(MachineType::AnyTagged(), first_arg_ptr));
+ total_length.Bind(LoadStringLength(result.value()));
+ current_arg.Bind(IntPtrSub(first_arg_ptr, IntPtrConstant(kPointerSize)));
+ Goto(&do_cons_string);
+ }
+
+ BIND(&do_flat_string);
+ {
+ Comment("Flat string concatenation");
+ Node* last_flat_arg_ptr =
+ IntPtrAdd(current_arg.value(), IntPtrConstant(kPointerSize));
+ Label two_byte(this);
+ GotoIf(Word32Equal(string_encoding, Int32Constant(kTwoByteStringTag)),
+ &two_byte);
+
+ {
+ Comment("One-byte sequential string case");
+ result.Bind(ConcatenateSequentialStrings(
+ context, first_arg_ptr, last_flat_arg_ptr, total_length.value(),
+ String::ONE_BYTE_ENCODING));
+ // If there are still more arguments to concatenate, jump to the cons
+ // string case; otherwise we are done.
+ Branch(IntPtrLessThan(current_arg.value(), last_arg_ptr), &done,
+ &do_cons_string);
+ }
+
+ BIND(&two_byte);
+ {
+ Comment("Two-byte sequential string case");
+ result.Bind(ConcatenateSequentialStrings(
+ context, first_arg_ptr, last_flat_arg_ptr, total_length.value(),
+ String::TWO_BYTE_ENCODING));
+ // If there are still more arguments to concatenate, jump to the cons
+ // string case; otherwise we are done.
+ Branch(IntPtrLessThan(current_arg.value(), last_arg_ptr), &done,
+ &do_cons_string);
+ }
+ }
+
+ BIND(&do_cons_string);
+ {
+ Comment("Create cons string");
+ Label loop(this, {&current_arg, &total_length, &result}), done_cons(this);
+
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* current_string =
+ Load(MachineType::AnyTagged(), current_arg.value());
+ Node* string_length = LoadStringLength(current_string);
+
+ // Skip concatenating empty string.
+ GotoIf(SmiEqual(string_length, SmiConstant(0)), &done_cons);
+
+ total_length.Bind(SmiAdd(total_length.value(), string_length));
+
+ // If the new length is greater than String::kMaxLength, go to the runtime
+ // to throw. Note: we also need to invalidate the string length protector,
+ // so we can't just throw here directly.
+ GotoIf(SmiAboveOrEqual(total_length.value(),
+ SmiConstant(String::kMaxLength)),
+ bailout_to_runtime);
+
+ result.Bind(NewConsString(context, total_length.value(), result.value(),
+ current_string, CodeStubAssembler::kNone));
+ Goto(&done_cons);
+
+ BIND(&done_cons);
+ current_arg.Bind(
+ IntPtrSub(current_arg.value(), IntPtrConstant(kPointerSize)));
+ Branch(IntPtrLessThan(current_arg.value(), last_arg_ptr), &done, &loop);
+ }
+ }
+
+ BIND(&done);
+ IncrementCounter(isolate()->counters()->string_add_native(), 1);
+ return result.value();
+}
+
+TF_BUILTIN(StringConcat, StringBuiltinsAssembler) {
+ Node* argc = Parameter(Descriptor::kArgumentsCount);
+ Node* context = Parameter(Descriptor::kContext);
+
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc),
+ CodeStubArguments::ReceiverMode::kNoReceiver);
+ Node* first_arg_ptr =
+ args.AtIndexPtr(IntPtrConstant(0), ParameterMode::INTPTR_PARAMETERS);
+
+ Label call_runtime(this, Label::kDeferred);
+ Node* result =
+ ConcatenateStrings(context, first_arg_ptr, argc, &call_runtime);
+ args.PopAndReturn(result);
+
+ BIND(&call_runtime);
+ TailCallRuntimeN(Runtime::kStringConcat, context, argc);
+}
+
} // namespace internal
} // namespace v8
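
As a rough guide to the new StringConcat fast path added above: operands are copied into a single sequential string while the running length stays below ConsString::kMinLength, and the remaining (longer) operands are chained as cons strings instead of being copied. A minimal sketch of that decision in plain C++, with kMinConsLength standing in for ConsString::kMinLength and none of the encoding or bailout handling:

#include <memory>
#include <string>
#include <variant>
#include <vector>

struct ConsNode;
// A piece is either a flat (sequential) string or a lazily-flattened cons node.
using Piece = std::variant<std::string, std::shared_ptr<ConsNode>>;
struct ConsNode {
  Piece left;
  Piece right;
  size_t length;
};

constexpr size_t kMinConsLength = 13;  // stand-in for ConsString::kMinLength

Piece Concatenate(const std::vector<std::string>& parts) {
  // Phase 1: copy a short prefix into one flat buffer while the running
  // length stays under the cons threshold.
  size_t i = 0;
  std::string flat;
  while (i < parts.size() && flat.size() + parts[i].size() < kMinConsLength) {
    flat += parts[i];
    ++i;
  }
  size_t total = flat.size();
  Piece result = std::move(flat);
  // Phase 2: chain the remaining pieces as cons nodes instead of copying.
  for (; i < parts.size(); ++i) {
    if (parts[i].empty()) continue;  // empty strings are skipped, as above
    total += parts[i].size();
    auto node = std::make_shared<ConsNode>();
    node->left = std::move(result);
    node->right = parts[i];
    node->length = total;
    result = node;
  }
  return result;
}
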
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 399f565e55..ed1225328a 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -24,6 +24,10 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Label* if_equal, Label* if_not_equal,
Label* if_notbothdirectonebyte);
+ // String concatenation.
+ Node* ConcatenateStrings(Node* context, Node* first_arg_ptr, Node* arg_count,
+ Label* bailout_to_runtime);
+
protected:
Node* DirectStringData(Node* string, Node* string_instance_type);
@@ -54,6 +58,10 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Node* LoadSurrogatePairAt(Node* string, Node* length, Node* index,
UnicodeEncoding encoding);
+ Node* ConcatenateSequentialStrings(Node* context, Node* first_arg_ptr,
+ Node* arg_count, Node* total_length,
+ String::Encoding encoding);
+
void StringIndexOf(Node* const subject_string,
Node* const subject_instance_type,
Node* const search_string,
@@ -86,7 +94,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
Handle<Symbol> symbol,
const NodeFunction0& regexp_call,
- const NodeFunction1& generic_call);
+ const NodeFunction1& generic_call,
+ CodeStubArguments* args = nullptr);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index a6b1d02fa9..ba87d755f6 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -368,6 +368,7 @@ BUILTIN(StringPrototypeTrimRight) {
return *String::Trim(string, String::kTrimRight);
}
+#ifndef V8_INTL_SUPPORT
namespace {
inline bool ToUpperOverflows(uc32 character) {
@@ -518,7 +519,7 @@ MUST_USE_RESULT static Object* ConvertCase(
if (answer->IsException(isolate) || answer->IsString()) return answer;
DCHECK(answer->IsSmi());
- length = Smi::cast(answer)->value();
+ length = Smi::ToInt(answer);
if (s->IsOneByteRepresentation() && length > 0) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, isolate->factory()->NewRawOneByteString(length));
@@ -559,6 +560,7 @@ BUILTIN(StringPrototypeToUpperCase) {
return ConvertCase(string, isolate,
isolate->runtime_state()->to_upper_mapping());
}
+#endif // !V8_INTL_SUPPORT
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index 870be3b216..9505c4034f 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -112,7 +112,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(Node* holder, Node* length,
StoreObjectField(holder, JSArrayBufferView::kByteLengthOffset, byte_length);
for (int offset = JSTypedArray::kSize;
offset < JSTypedArray::kSizeWithEmbedderFields; offset += kPointerSize) {
- StoreObjectField(holder, offset, SmiConstant(Smi::kZero));
+ StoreObjectField(holder, offset, SmiConstant(0));
}
}
@@ -216,7 +216,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Node* buffer = Allocate(JSArrayBuffer::kSizeWithEmbedderFields);
StoreMapNoWriteBarrier(buffer, map);
- StoreObjectFieldNoWriteBarrier(buffer, JSArray::kPropertiesOffset,
+ StoreObjectFieldNoWriteBarrier(buffer, JSArray::kPropertiesOrHashOffset,
empty_fixed_array);
StoreObjectFieldNoWriteBarrier(buffer, JSArray::kElementsOffset,
empty_fixed_array);
@@ -227,7 +227,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// - Set backing_store to null/Smi(0).
// - Set all embedder fields to Smi(0).
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldSlot,
- SmiConstant(Smi::kZero));
+ SmiConstant(0));
int32_t bitfield_value = (1 << JSArrayBuffer::IsExternal::kShift) |
(1 << JSArrayBuffer::IsNeuterable::kShift);
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldOffset,
@@ -237,10 +237,10 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
byte_length);
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
- SmiConstant(Smi::kZero));
+ SmiConstant(0));
for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
int offset = JSArrayBuffer::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(Smi::kZero));
+ StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
}
StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
@@ -397,14 +397,6 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
check_length(this), call_init(this), invalid_length(this),
length_undefined(this), length_defined(this);
- Callable add = CodeFactory::Add(isolate());
- Callable div = CodeFactory::Divide(isolate());
- Callable equal = CodeFactory::Equal(isolate());
- Callable greater_than = CodeFactory::GreaterThan(isolate());
- Callable less_than = CodeFactory::LessThan(isolate());
- Callable mod = CodeFactory::Modulus(isolate());
- Callable sub = CodeFactory::Subtract(isolate());
-
GotoIf(IsUndefined(byte_offset), &check_length);
offset.Bind(
@@ -422,11 +414,14 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
}
BIND(&offset_not_smi);
{
- GotoIf(IsTrue(CallStub(less_than, context, offset.value(), SmiConstant(0))),
+ GotoIf(IsTrue(CallBuiltin(Builtins::kLessThan, context, offset.value(),
+ SmiConstant(0))),
&invalid_length);
- Node* remainder = CallStub(mod, context, offset.value(), element_size);
+ Node* remainder =
+ CallBuiltin(Builtins::kModulus, context, offset.value(), element_size);
// Remainder can be a heap number.
- Branch(IsTrue(CallStub(equal, context, remainder, SmiConstant(0))),
+ Branch(IsTrue(CallBuiltin(Builtins::kEqual, context, remainder,
+ SmiConstant(0))),
&check_length, &start_offset_error);
}
@@ -439,16 +434,18 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
Node* buffer_byte_length =
LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset);
- Node* remainder = CallStub(mod, context, buffer_byte_length, element_size);
+ Node* remainder = CallBuiltin(Builtins::kModulus, context,
+ buffer_byte_length, element_size);
// Remainder can be a heap number.
- GotoIf(IsFalse(CallStub(equal, context, remainder, SmiConstant(0))),
+ GotoIf(IsFalse(CallBuiltin(Builtins::kEqual, context, remainder,
+ SmiConstant(0))),
&byte_length_error);
- new_byte_length.Bind(
- CallStub(sub, context, buffer_byte_length, offset.value()));
+ new_byte_length.Bind(CallBuiltin(Builtins::kSubtract, context,
+ buffer_byte_length, offset.value()));
- Branch(IsTrue(CallStub(less_than, context, new_byte_length.value(),
- SmiConstant(0))),
+ Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context,
+ new_byte_length.value(), SmiConstant(0))),
&invalid_offset_error, &call_init);
}
@@ -461,16 +458,18 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
Node* buffer_byte_length =
LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset);
- Node* end = CallStub(add, context, offset.value(), new_byte_length.value());
+ Node* end = CallBuiltin(Builtins::kAdd, context, offset.value(),
+ new_byte_length.value());
- Branch(IsTrue(CallStub(greater_than, context, end, buffer_byte_length)),
+ Branch(IsTrue(CallBuiltin(Builtins::kGreaterThan, context, end,
+ buffer_byte_length)),
&invalid_length, &call_init);
}
BIND(&call_init);
{
- Node* new_length =
- CallStub(div, context, new_byte_length.value(), element_size);
+ Node* new_length = CallBuiltin(Builtins::kDivide, context,
+ new_byte_length.value(), element_size);
// Force the result into a Smi, or throw a range error if it doesn't fit.
new_length = ToSmiIndex(new_length, context, &invalid_length);
@@ -489,8 +488,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&start_offset_error);
{
Node* holder_map = LoadMap(holder);
- Node* problem_string = HeapConstant(
- factory()->NewStringFromAsciiChecked("start offset", TENURED));
+ Node* problem_string = StringConstant("start offset");
CallRuntime(Runtime::kThrowInvalidTypedArrayAlignment, context, holder_map,
problem_string);
@@ -500,8 +498,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&byte_length_error);
{
Node* holder_map = LoadMap(holder);
- Node* problem_string = HeapConstant(
- factory()->NewStringFromAsciiChecked("byte length", TENURED));
+ Node* problem_string = StringConstant("byte length");
CallRuntime(Runtime::kThrowInvalidTypedArrayAlignment, context, holder_map,
problem_string);
@@ -640,9 +637,7 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
{
// The {receiver} is not a valid JSTypedArray.
CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(
- factory()->NewStringFromAsciiChecked(method_name, TENURED)),
- receiver);
+ StringConstant(method_name), receiver);
Unreachable();
}
}
@@ -702,14 +697,12 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Goto(&throw_typeerror);
BIND(&if_receiverisneutered);
- var_message.Bind(
- SmiConstant(Smi::FromInt(MessageTemplate::kDetachedOperation)));
+ var_message.Bind(SmiConstant(MessageTemplate::kDetachedOperation));
Goto(&throw_typeerror);
BIND(&throw_typeerror);
{
- Node* method_arg = HeapConstant(
- isolate()->factory()->NewStringFromAsciiChecked(method_name, TENURED));
+ Node* method_arg = StringConstant(method_name);
Node* result = CallRuntime(Runtime::kThrowTypeError, context,
var_message.value(), method_arg);
Return(result);
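
For reference, the alignment and bounds arithmetic that TypedArrayConstructByArrayBuffer performs above (now routed through CallBuiltin instead of CallStub) boils down to the following, shown here as a hedged plain-C++ sketch with stand-in names and none of the heap-number handling:

#include <cstddef>
#include <optional>

// Returns the element count of the new view, or nullopt if construction
// should throw (misaligned "start offset"/"byte length", or out of bounds).
std::optional<size_t> ComputeViewLength(size_t buffer_byte_length,
                                        size_t byte_offset,
                                        size_t element_size,
                                        std::optional<size_t> requested_length) {
  if (byte_offset % element_size != 0) return std::nullopt;  // start offset error
  if (!requested_length) {
    // Length omitted: the rest of the buffer becomes the view.
    if (buffer_byte_length % element_size != 0) return std::nullopt;  // byte length error
    if (byte_offset > buffer_byte_length) return std::nullopt;
    return (buffer_byte_length - byte_offset) / element_size;
  }
  size_t new_byte_length = *requested_length * element_size;
  if (byte_offset + new_byte_length > buffer_byte_length) return std::nullopt;
  return *requested_length;
}
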
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index 773e5480ac..176a79965b 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -27,7 +27,7 @@ namespace {
int64_t CapRelativeIndex(Handle<Object> num, int64_t minimum, int64_t maximum) {
int64_t relative;
if (V8_LIKELY(num->IsSmi())) {
- relative = Smi::cast(*num)->value();
+ relative = Smi::ToInt(*num);
} else {
DCHECK(num->IsHeapNumber());
double fp = HeapNumber::cast(*num)->value();
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 88bbe8cd32..cb110bea95 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -13,15 +13,14 @@ namespace internal {
typedef compiler::Node Node;
TF_BUILTIN(WasmStackGuard, CodeStubAssembler) {
- Node* context = SmiConstant(Smi::kZero);
- TailCallRuntime(Runtime::kWasmStackGuard, context);
+ TailCallRuntime(Runtime::kWasmStackGuard, NoContextConstant());
}
#define DECLARE_ENUM(name) \
TF_BUILTIN(ThrowWasm##name, CodeStubAssembler) { \
int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
- TailCallRuntime(Runtime::kThrowWasmErrorFromTrapIf, \
- SmiConstant(Smi::kZero), SmiConstant(message_id)); \
+ TailCallRuntime(Runtime::kThrowWasmErrorFromTrapIf, NoContextConstant(), \
+ SmiConstant(message_id)); \
}
FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
#undef DECLARE_ENUM
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 4d5e83a9e0..3f98d4fb13 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -26,6 +26,32 @@ Builtins::Builtins() : initialized_(false) {
Builtins::~Builtins() {}
+BailoutId Builtins::GetContinuationBailoutId(Name name) {
+ switch (name) {
+#define BAILOUT_ID(NAME, ...) \
+ case k##NAME: \
+ return BailoutId(BailoutId::kFirstBuiltinContinuationId + name);
+ BUILTIN_LIST_TFJ(BAILOUT_ID);
+ BUILTIN_LIST_TFC(BAILOUT_ID);
+#undef BAILOUT_ID
+ default:
+ UNREACHABLE();
+ }
+}
+
+Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) {
+ switch (id.ToInt()) {
+#define BAILOUT_ID(NAME, ...) \
+ case BailoutId::kFirstBuiltinContinuationId + k##NAME: \
+ return k##NAME;
+ BUILTIN_LIST_TFJ(BAILOUT_ID)
+ BUILTIN_LIST_TFC(BAILOUT_ID)
+#undef BAILOUT_ID
+ default:
+ UNREACHABLE();
+ }
+}
+
void Builtins::TearDown() { initialized_ = false; }
void Builtins::IterateBuiltins(RootVisitor* v) {
@@ -79,7 +105,6 @@ Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
return NonPrimitiveToPrimitive_String();
}
UNREACHABLE();
- return Handle<Code>::null();
}
Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
@@ -90,7 +115,10 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
return OrdinaryToPrimitive_String();
}
UNREACHABLE();
- return Handle<Code>::null();
+}
+
+Handle<Code> Builtins::builtin_handle(Name name) {
+ return Handle<Code>(reinterpret_cast<Code**>(builtin_address(name)));
}
// static
@@ -105,7 +133,6 @@ int Builtins::GetBuiltinParameterCount(Name name) {
#undef TFJ_CASE
default:
UNREACHABLE();
- return 0;
}
}
@@ -117,26 +144,64 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
switch (name) {
// This macro is deliberately crafted so as to emit very little code,
// in order to keep binary size of this function under control.
-#define CASE(Name, ...) \
+#define CASE_OTHER(Name, ...) \
case k##Name: { \
key = Builtin_##Name##_InterfaceDescriptor::key(); \
break; \
}
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE, CASE,
- CASE, IGNORE_BUILTIN, IGNORE_BUILTIN)
-#undef CASE
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
+ CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#undef CASE_OTHER
case kConsoleAssert: {
return Callable(code, BuiltinDescriptor(isolate));
}
+ case kArrayForEach: {
+ Handle<Code> code = isolate->builtins()->ArrayForEach();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
+ case kArrayForEachLoopEagerDeoptContinuation: {
+ Handle<Code> code =
+ isolate->builtins()->ArrayForEachLoopEagerDeoptContinuation();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
+ case kArrayForEachLoopLazyDeoptContinuation: {
+ Handle<Code> code =
+ isolate->builtins()->ArrayForEachLoopLazyDeoptContinuation();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
+ case kArrayMapLoopEagerDeoptContinuation: {
+ Handle<Code> code =
+ isolate->builtins()->ArrayMapLoopEagerDeoptContinuation();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
+ case kArrayMapLoopLazyDeoptContinuation: {
+ Handle<Code> code =
+ isolate->builtins()->ArrayMapLoopLazyDeoptContinuation();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
default:
UNREACHABLE();
- return Callable(Handle<Code>::null(), VoidDescriptor(isolate));
}
CallInterfaceDescriptor descriptor(isolate, key);
return Callable(code, descriptor);
}
// static
+int Builtins::GetStackParameterCount(Isolate* isolate, Name name) {
+ switch (name) {
+#define CASE(Name, Count, ...) \
+ case k##Name: { \
+ return Count; \
+ }
+ BUILTIN_LIST_TFJ(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+// static
const char* Builtins::name(int index) {
switch (index) {
#define CASE(Name, ...) \
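
The new GetContinuationBailoutId / GetBuiltinFromBailoutId pair above is a reversible offset mapping: each TFJ/TFC builtin's enum value is shifted into a dedicated bailout-id range by a fixed base, so the builtin can be recovered from the id. A hedged stand-alone illustration (the enum and base constant are stand-ins, not V8's):

#include <cassert>

enum Builtin { kArrayForEach, kArrayMap, kBuiltinCount };
// Stand-in for BailoutId::kFirstBuiltinContinuationId.
constexpr int kFirstContinuationId = 1000;

int ToContinuationBailoutId(Builtin builtin) {
  // Shift the builtin into the continuation bailout-id range.
  return kFirstContinuationId + static_cast<int>(builtin);
}

Builtin FromContinuationBailoutId(int id) {
  // Invert the shift; ids outside the range are a programming error.
  int index = id - kFirstContinuationId;
  assert(index >= 0 && index < kBuiltinCount);
  return static_cast<Builtin>(index);
}
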
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index b5eebff73b..7ef7f257b8 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -18,6 +18,7 @@ class Handle;
class Isolate;
// Forward declarations.
+class BailoutId;
class RootVisitor;
enum class InterpreterPushArgsMode : unsigned;
namespace compiler {
@@ -43,23 +44,21 @@ class Builtins {
builtin_count
};
+ static BailoutId GetContinuationBailoutId(Name name);
+ static Name GetBuiltinFromBailoutId(BailoutId);
+
#define DECLARE_BUILTIN_ACCESSOR(Name, ...) \
V8_EXPORT_PRIVATE Handle<Code> Name();
BUILTIN_LIST_ALL(DECLARE_BUILTIN_ACCESSOR)
#undef DECLARE_BUILTIN_ACCESSOR
// Convenience wrappers.
- Handle<Code> CallFunction(
- ConvertReceiverMode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
- Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
- Handle<Code> CallBoundFunction(TailCallMode tail_call_mode);
+ Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
+ Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
Handle<Code> NonPrimitiveToPrimitive(
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
Handle<Code> InterpreterPushArgsThenCall(ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode,
InterpreterPushArgsMode mode);
Handle<Code> InterpreterPushArgsThenConstruct(InterpreterPushArgsMode mode);
Handle<Code> NewFunctionContext(ScopeType scope_type);
@@ -76,9 +75,13 @@ class Builtins {
return reinterpret_cast<Address>(&builtins_[name]);
}
+ Handle<Code> builtin_handle(Name name);
+
static int GetBuiltinParameterCount(Name name);
- static Callable CallableFor(Isolate* isolate, Name name);
+ V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate, Name name);
+
+ static int GetStackParameterCount(Isolate* isolate, Name name);
static const char* name(int index);
@@ -115,20 +118,20 @@ class Builtins {
Builtins();
static void Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode);
+ ConvertReceiverMode mode);
- static void Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode);
+ static void Generate_CallBoundFunctionImpl(MacroAssembler* masm);
- static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode);
+ static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode);
- static void Generate_ForwardVarargs(MacroAssembler* masm, Handle<Code> code);
+ static void Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code);
+ static void Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code);
static void Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode);
+ InterpreterPushArgsMode mode);
static void Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode);
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index bcffedfef2..86e5ad509a 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -92,24 +92,6 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ jmp(ebx);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -211,13 +193,13 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kDerivedConstructorMask));
__ j(not_zero, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ jmp(&post_instantiation_deopt_entry, Label::kNear);
@@ -325,16 +307,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &leave_frame, Label::kNear);
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+ __ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
- __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ j(Condition::zero, &use_receiver, Label::kNear);
} else {
+ __ j(not_zero, &use_receiver, Label::kNear);
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ jmp(&use_receiver, Label::kNear);
}
@@ -423,7 +409,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ mov(esi, Operand::StaticVariable(context_address));
@@ -488,33 +474,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- eax : the value to pass to the generator
// -- ebx : the JSGeneratorObject to resume
// -- edx : the resume mode (tagged)
- // -- ecx : the SuspendFlags of the earlier suspend call (tagged)
// -- esp[0] : return address
// -----------------------------------
- __ SmiUntag(ecx);
- __ AssertGeneratorObject(ebx, ecx);
+ __ AssertGeneratorObject(ebx);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ and_(ecx, Immediate(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ cmpb(ecx, Immediate(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ j(equal, &async_await, Label::kNear);
-
__ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
__ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
kDontSaveFPRegs);
- __ jmp(&done_store_input, Label::kNear);
-
- __ bind(&async_await);
- __ mov(FieldOperand(ebx, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset),
- eax);
- __ RecordWriteField(ebx, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- eax, ecx, kDontSaveFPRegs);
- __ jmp(&done_store_input, Label::kNear);
-
- __ bind(&done_store_input);
- // `ecx` no longer holds SuspendFlags
// Store resume mode into generator object.
__ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
@@ -563,7 +530,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label done_loop, loop;
__ bind(&loop);
- __ sub(ecx, Immediate(Smi::FromInt(1)));
+ __ sub(ecx, Immediate(1));
__ j(carry, &done_loop, Label::kNear);
__ PushRoot(Heap::kTheHoleValueRootIndex);
__ jmp(&loop);
@@ -668,6 +635,121 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ push(return_pc);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ cmp(smi_entry, Immediate(Smi::FromEnum(marker)));
+ __ j(not_equal, &no_match, Label::kNear);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch) {
+ // ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee if needed, and caller)
+ // -- edx : new target (preserved for callee if needed, and caller)
+ // -- edi : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, eax, edx, edi, scratch));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = edi;
+ Register optimized_code_entry = scratch;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ mov(optimized_code_entry,
+ FieldOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimization marker. Otherwise, we interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is an optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ cmp(optimized_code_entry,
+ Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ j(equal, &fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ cmp(
+ optimized_code_entry,
+ Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ Assert(equal, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ mov(optimized_code_entry,
+ FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, bail out to
+ // the eviction path below.
+ Label found_deoptimized_code;
+ __ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ __ push(eax);
+ __ push(edx);
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ edx, eax, feedback_vector);
+ __ pop(edx);
+ __ pop(eax);
+ __ jmp(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
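
For readers skimming the assembly, the decision tree that MaybeTailCallOptimizedCodeSlot implements can be summarized in a minimal standalone C++ sketch. Everything below (the enum values, the Slot struct, the Action names) is an illustrative stand-in invented for this note, not V8's real types; only the branching mirrors the code above.

    #include <iostream>

    // Stand-in for V8's OptimizationMarker values.
    enum class Marker {
      kNone,
      kCompileOptimized,
      kCompileOptimizedConcurrent,
      kInOptimizationQueue
    };

    // Stand-in for the possible outcomes of the builtin.
    enum class Action {
      kFallthrough,           // no marker and no code: continue the caller's path
      kCompileNotConcurrent,  // tail-call Runtime::kCompileOptimized_NotConcurrent
      kCompileConcurrent,     // tail-call Runtime::kCompileOptimized_Concurrent
      kTryInstall,            // tail-call Runtime::kTryInstallOptimizedCode
      kTailCallOptimized,     // link the code into the closure and jump to it
      kEvictOptimizedCode     // tail-call Runtime::kEvictOptimizedCodeSlot
    };

    // Models the optimized-code cell of the feedback vector.
    struct Slot {
      bool is_smi_marker;     // Smi => optimization marker, otherwise a WeakCell
      Marker marker;          // only meaningful when is_smi_marker is true
      bool cell_cleared;      // the WeakCell's value is a Smi (code was collected)
      bool marked_for_deopt;  // Code::kMarkedForDeoptimizationBit is set
    };

    Action Dispatch(const Slot& slot, bool interrupt_pending) {
      if (slot.is_smi_marker) {
        switch (slot.marker) {
          case Marker::kNone:                       return Action::kFallthrough;
          case Marker::kCompileOptimized:           return Action::kCompileNotConcurrent;
          case Marker::kCompileOptimizedConcurrent: return Action::kCompileConcurrent;
          case Marker::kInOptimizationQueue:
            // Only poll for a finished concurrent job when the stack-limit check
            // says an interrupt is due anyway; otherwise keep going.
            return interrupt_pending ? Action::kTryInstall : Action::kFallthrough;
        }
      }
      if (slot.cell_cleared)     return Action::kFallthrough;
      if (slot.marked_for_deopt) return Action::kEvictOptimizedCode;
      return Action::kTailCallOptimized;
    }

    int main() {
      Slot cleared_cell{false, Marker::kNone, true, false};
      std::cout << (Dispatch(cleared_cell, false) == Action::kFallthrough) << "\n";  // 1
    }

The MIPS version of the same helper later in this patch follows the identical structure, only with different scratch registers.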
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -685,9 +767,20 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = edi;
+ Register feedback_vector = ebx;
+
+ // Load the feedback vector from the closure.
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
+
// Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
+ // MANUAL indicates that the scope shouldn't actually generate code to set
+ // up the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
@@ -695,27 +788,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = ecx;
- __ mov(ebx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
- __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
- __ mov(optimized_code_entry,
- FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ mov(optimized_code_entry,
- FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
-
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
- &load_debug_bytecode_array);
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+ __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+ &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -727,11 +807,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ EmitLoadFeedbackVector(ecx);
- __ add(
- FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
- Immediate(Smi::FromInt(1)));
+ __ add(FieldOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize),
+ Immediate(Smi::FromInt(1)));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -802,12 +881,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, ebx, ecx);
__ ret(0);
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
- Register debug_info = kInterpreterBytecodeArrayRegister;
- __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+  // Load the debug copy of the bytecode array if one exists.
+  // kInterpreterBytecodeArrayRegister already holds the bytecode array loaded
+  // from SharedFunctionInfo::kFunctionDataOffset.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ push(ebx); // feedback_vector == ebx, so save it.
+ __ mov(ecx, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+ __ mov(ebx, FieldOperand(ecx, DebugInfo::kFlagsOffset));
+ __ SmiUntag(ebx);
+ __ test(ebx, Immediate(DebugInfo::kHasBreakInfo));
+ __ pop(ebx);
+ __ j(zero, &bytecode_array_loaded);
__ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldOperand(ecx, DebugInfo::kDebugBytecodeArrayOffset));
__ jmp(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -824,31 +910,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
__ RecordWriteCodeEntryField(edi, ecx, ebx);
__ jmp(ecx);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- __ push(edx);
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, edi, edx,
- eax, ebx);
- __ pop(edx);
- __ leave();
- __ jmp(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- __ leave();
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
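
The debug-bytecode hunk above no longer swaps in the debug copy merely because a DebugInfo object exists; it first checks DebugInfo's flags for kHasBreakInfo. A small standalone C++ sketch of that selection, with illustrative stand-in types and an assumed bit value for kHasBreakInfo:

    #include <cstdint>
    #include <iostream>

    // Illustrative stand-ins; the kHasBreakInfo bit value is an assumption.
    struct DebugInfoStub {
      static constexpr uint32_t kHasBreakInfo = 1 << 0;
      uint32_t flags;
      int debug_bytecode_array;  // the instrumented copy
    };

    struct SharedFunctionInfoStub {
      int bytecode_array;               // what kFunctionDataOffset holds
      const DebugInfoStub* debug_info;  // null models the Smi (no DebugInfo) case
    };

    int SelectBytecodeArray(const SharedFunctionInfoStub& sfi) {
      if (sfi.debug_info != nullptr &&
          (sfi.debug_info->flags & DebugInfoStub::kHasBreakInfo)) {
        return sfi.debug_info->debug_bytecode_array;  // break points patched in
      }
      return sfi.bytecode_array;  // regular bytecode, already loaded above
    }

    int main() {
      DebugInfoStub di{DebugInfoStub::kHasBreakInfo, 42};
      SharedFunctionInfoStub sfi{7, &di};
      std::cout << SelectBytecodeArray(sfi) << "\n";  // prints 42
    }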
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -898,7 +959,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -933,19 +994,23 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ add(ecx, ebx);
Generate_InterpreterPushArgs(masm, ecx, ebx);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(ebx); // Pass the spread in a register
+ __ sub(eax, Immediate(1)); // Subtract one for spread
+ }
+
// Call the target.
__ Push(edx); // Re-push return address.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1076,7 +1141,15 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Pop(edx);
__ Pop(edi);
- __ AssertUndefinedOrAllocationSite(ebx);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ PopReturnAddressTo(ecx);
+ __ Pop(ebx); // Pass the spread in a register
+ __ PushReturnAddressFrom(ecx);
+ __ sub(eax, Immediate(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(ebx);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -1158,8 +1231,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
- __ LoadHeapObject(ebx,
- masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ Move(ebx, masm->isolate()->builtins()->InterpreterEntryTrampoline());
__ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
__ push(ebx);
@@ -1219,6 +1291,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+  // -- eax : argument count (preserved for callee)
+  // -- edx : new target (preserved for callee)
+  // -- edi : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = edi;
+
+ // Get the feedback vector.
+ Register feedback_vector = ebx;
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1227,46 +1326,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = edi;
- Register new_target = edx;
- Register argument_count = eax;
+ Register feedback_vector = ebx;
// Do we have a valid feedback vector?
- __ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
- __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = ecx;
- __ mov(entry,
- FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ test(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- __ push(argument_count);
- __ push(new_target);
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, edx, eax, ebx);
- __ pop(new_target);
- __ pop(argument_count);
- __ jmp(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = ecx;
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
- Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ j(not_zero, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1281,19 +1357,9 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ jmp(entry);
__ bind(&gotta_call_runtime);
-
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
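
With the optimized-code handling factored out into MaybeTailCallOptimizedCodeSlot, the remaining CompileLazy logic is a short ordered fallback. A compact C++ sketch of that ordering, using plain booleans as stand-ins for the individual checks:

    #include <iostream>

    // Stand-in outcomes for the fallback chain in Generate_CompileLazy.
    enum class LazyPath {
      kOptimizedSlot,       // handled by MaybeTailCallOptimizedCodeSlot
      kInstallSharedCode,   // SFI already has real code: install it and jump
      kRuntimeCompileLazy   // Runtime::kCompileLazy
    };

    LazyPath DispatchCompileLazy(bool has_feedback_vector,
                                 bool optimized_slot_has_marker_or_code,
                                 bool sfi_code_is_compile_lazy) {
      if (!has_feedback_vector) return LazyPath::kRuntimeCompileLazy;
      if (optimized_slot_has_marker_or_code) return LazyPath::kOptimizedSlot;
      if (!sfi_code_is_compile_lazy) return LazyPath::kInstallSharedCode;
      return LazyPath::kRuntimeCompileLazy;
    }

    int main() {
      bool hit = DispatchCompileLazy(true, false, false) == LazyPath::kInstallSharedCode;
      std::cout << hit << "\n";  // prints 1
    }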
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1435,31 +1501,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ popad();
+ // Preserve possible return result from lazy deopt.
+ __ push(eax);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(eax);
// Tear down internal frame.
}
__ pop(MemOperand(esp, 0)); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
+ __ ret(0); // Return to ContinueToBuiltin stub still on stack.
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ mov(Operand(esp,
+ config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize),
+ eax);
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+ __ mov(
+ ebp,
+ Operand(esp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ const int offsetToPC =
+ BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp - kPointerSize;
+ __ pop(Operand(esp, offsetToPC));
+ __ Drop(offsetToPC / kPointerSize);
+ __ add(Operand(esp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ ret(0);
+}
+} // namespace
+
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
}
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
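
Generate_ContinueToBuiltinHelper unwinds the frame the deoptimizer materialized for a builtin continuation: it optionally overwrites the hole reserved for the lazy-deopt result with eax, pops the saved allocatable registers (Smi-untagging the argument-count register for JavaScript builtins), and converts the saved Code object into a jump target. A rough standalone C++ model of that unwinding follows; the frame layout, the constants, and the skipped ebp/Smi-untag details are deliberate simplifications, not the real layout.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Simplified picture of the continuation frame, deepest slot first:
    //   [ result hole ][ fixed frame ..., target Code "object" ][ saved registers ]
    // back() of the vector models the top of the machine stack.
    struct ContinuationFrame {
      std::vector<uint64_t> stack;
      size_t register_count;     // allocatable registers pushed by the deoptimizer
      size_t fixed_frame_slots;  // stands in for the BuiltinContinuationFrameConstants
    };

    // Returns the address to jump to; out_registers receives the popped values.
    uint64_t ContinueToBuiltin(ContinuationFrame& f, bool with_result,
                               uint64_t result, std::vector<uint64_t>& out_registers) {
      if (with_result) {
        // Overwrite the hole the deoptimizer inserted with the lazy-deopt result.
        size_t hole = f.stack.size() - f.register_count - f.fixed_frame_slots - 1;
        f.stack[hole] = result;
      }
      for (size_t i = 0; i < f.register_count; ++i) {  // pop the saved registers
        out_registers.push_back(f.stack.back());
        f.stack.pop_back();
      }
      uint64_t code_object = f.stack.back();  // shallowest fixed-frame slot
      f.stack.resize(f.stack.size() - f.fixed_frame_slots);  // drop the fixed frame
      const uint64_t kHeaderSize = 0x40, kHeapObjectTag = 1;  // illustrative values
      return code_object + kHeaderSize - kHeapObjectTag;      // entry point to ret to
    }

    int main() {
      ContinuationFrame f{{0 /*hole*/, 11, 22, 0x1001 /*code*/, 5, 6 /*regs*/}, 2, 3};
      std::vector<uint64_t> regs;
      uint64_t entry = ContinueToBuiltin(f, /*with_result=*/true, 99, regs);
      std::cout << std::hex << entry << " " << std::dec << f.stack[0] << "\n";  // 1040 99
    }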
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1517,7 +1622,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- esp[12] : receiver
// -----------------------------------
- // 1. Load receiver into edi, argArray into eax (if present), remove all
+ // 1. Load receiver into edi, argArray into ebx (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
@@ -1539,34 +1644,28 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ Push(edx);
__ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
}
// ----------- S t a t e -------------
- // -- eax : argArray
+ // -- ebx : argArray
// -- edi : receiver
// -- esp[0] : return address
// -- esp[4] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &receiver_not_callable, Label::kNear);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
- __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ __ JumpIfRoot(ebx, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex, &no_arguments,
Label::kNear);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1575,13 +1674,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Set(eax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1640,7 +1732,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- esp[16] : receiver
// -----------------------------------
- // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // 1. Load target into edi (if present), argumentsList into ebx (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
@@ -1661,35 +1753,22 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ Push(edx);
__ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
}
// ----------- S t a t e -------------
- // -- eax : argumentsList
+ // -- ebx : argumentsList
// -- edi : target
// -- esp[0] : return address
// -- esp[4] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &target_not_callable, Label::kNear);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1702,7 +1781,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- esp[16] : receiver
// -----------------------------------
- // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // 1. Load target into edi (if present), argumentsList into ebx (if present),
// new.target into edx (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -1725,49 +1804,27 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
}
// ----------- S t a t e -------------
- // -- eax : argumentsList
+ // -- ebx : argumentsList
// -- edx : new.target
// -- edi : target
// -- esp[0] : return address
// -- esp[4] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &target_not_constructor, Label::kNear);
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &new_target_not_constructor, Label::kNear);
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
-
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ mov(Operand(esp, kPointerSize), edx);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
@@ -1939,7 +1996,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(esi, edi, ecx);
__ Push(ebx); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ecx);
@@ -2102,7 +2159,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
__ Push(eax); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ebx);
@@ -2152,97 +2209,22 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- eax : argumentsList
// -- edi : target
+ // -- eax : number of parameters on the stack (not including the receiver)
+ // -- ebx : arguments list (a FixedArray)
+  // -- ecx : len (number of elements to push from args)
// -- edx : new.target (checked to be constructor or undefined)
// -- esp[0] : return address.
- // -- esp[4] : thisArgument
// -----------------------------------
+ __ AssertFixedArray(ebx);
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(eax, &create_runtime);
-
- // Load the map of argumentsList into ecx.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-
- // Load native context into ebx.
- __ mov(ebx, NativeContextOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
- __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
- __ j(equal, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ Push(edx);
- __ Push(eax);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(edx);
- __ Pop(edi);
- __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
- }
- __ jmp(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
- __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
- __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(not_equal, &create_runtime);
- __ SmiUntag(ebx);
- __ mov(eax, ecx);
- __ jmp(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
- __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &create_runtime);
- __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &create_runtime);
- __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
- __ jmp(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(ecx);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_holey_array, Label::kNear);
- __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(equal, &create_holey_array, Label::kNear);
- __ j(above, &create_runtime);
- __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
-
- __ bind(&done_create);
- }
+  // We need to preserve edx, edi and eax across the push loop below.
+ __ movd(xmm0, edx);
+ __ movd(xmm1, edi);
+ __ movd(xmm2, eax);
// Check for stack overflow.
{
@@ -2251,66 +2233,56 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
Label done;
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(ecx, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ neg(ecx);
- __ add(ecx, esp);
- __ sar(ecx, kPointerSizeLog2);
+ __ mov(edx, Operand::StaticVariable(real_stack_limit));
+ // Make edx the space we have left. The stack might already be overflowed
+ // here which will cause edx to become negative.
+ __ neg(edx);
+ __ add(edx, esp);
+ __ sar(edx, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmp(ecx, ebx);
+ __ cmp(edx, ecx);
__ j(greater, &done, Label::kNear); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- edi : target
- // -- eax : args (a FixedArray built from argumentsList)
- // -- ebx : len (number of elements to push from args)
- // -- edx : new.target (checked to be constructor or undefined)
- // -- esp[0] : return address.
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // Push arguments onto the stack (thisArgument is already on the stack).
+ // Push additional arguments onto the stack.
{
- __ movd(xmm0, edx);
- __ movd(xmm1, edi);
__ PopReturnAddressTo(edx);
- __ Move(ecx, Immediate(0));
+ __ Move(eax, Immediate(0));
Label done, push, loop;
__ bind(&loop);
- __ cmp(ecx, ebx);
+ __ cmp(eax, ecx);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
__ mov(edi,
- FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ FieldOperand(ebx, eax, times_pointer_size, FixedArray::kHeaderSize));
__ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &push, Label::kNear);
__ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(edi);
- __ inc(ecx);
+ __ inc(eax);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(edx);
- __ movd(edi, xmm1);
- __ movd(edx, xmm0);
- __ Move(eax, ebx);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
- __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Restore eax, edi and edx.
+ __ movd(eax, xmm2);
+ __ movd(edi, xmm1);
+ __ movd(edx, xmm0);
+
+ // Compute the actual parameter count.
+ __ add(eax, ecx);
+
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
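
The loop above copies len elements out of the FixedArray in ebx, turning each hole into undefined before it lands on the stack, and then folds len into the argument count before tail-calling Call or Construct. A tiny standalone C++ model of that step, with placeholder sentinels instead of the real hole/undefined values:

    #include <iostream>
    #include <vector>

    const int kTheHoleSentinel = -1;   // placeholder for the hole
    const int kUndefinedSentinel = 0;  // placeholder for undefined

    struct PendingCall {
      int argc;                // arguments already on the stack (without receiver)
      std::vector<int> stack;  // push_back() models pushing on the machine stack
    };

    void PushVarargs(PendingCall& call, const std::vector<int>& fixed_array) {
      for (int element : fixed_array) {
        // Turn the hole into undefined as we go, like the CompareRoot/LoadRoot
        // pair inside the assembly loop.
        call.stack.push_back(element == kTheHoleSentinel ? kUndefinedSentinel
                                                         : element);
      }
      call.argc += static_cast<int>(fixed_array.size());  // actual parameter count
    }

    int main() {
      PendingCall call{1, {10}};
      PushVarargs(call, {20, kTheHoleSentinel, 40});
      std::cout << call.argc << " " << call.stack[2] << "\n";  // prints: 4 0
    }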
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object)
@@ -2339,11 +2311,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Just load the length from the ArgumentsAdaptorFrame.
__ mov(edx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(edx);
}
__ bind(&arguments_done);
Label stack_done;
- __ SmiUntag(edx);
__ sub(edx, ecx);
__ j(less_equal, &stack_done);
{
@@ -2389,100 +2361,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg
-// | f()'s caller pc <- sp
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ movzx_b(scratch1,
- Operand::StaticVariable(is_tail_call_elimination_enabled));
- __ cmp(scratch1, Immediate(0));
- __ j(equal, &done, Label::kNear);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ j(not_equal, &no_interpreter_frame, Label::kNear);
- __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(ebp, scratch2);
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(scratch1,
- FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ mov(
- caller_args_count_reg,
- FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack, 0);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
@@ -2493,21 +2374,19 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorMask));
__ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
- Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
@@ -2573,15 +2452,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- esi : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- // Reload shared function info.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- }
-
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(ebx);
ParameterCount actual(eax);
ParameterCount expected(ebx);
__ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2682,18 +2554,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(edi);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- }
-
// Patch the receiver to [[BoundThis]].
__ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
@@ -2710,8 +2577,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
@@ -2721,35 +2587,25 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET);
- // Check if target has a [[Call]] internal method.
+ // Check if target is a proxy and call CallProxy external builtin
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsCallable));
__ j(zero, &non_callable);
+ // Call CallProxy external builtin
__ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ PopReturnAddressTo(ecx);
- __ Push(edi);
- __ PushReturnAddressFrom(ecx);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ add(eax, Immediate(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ mov(ecx, Operand::StaticVariable(
+ ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2759,7 +2615,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2771,178 +2627,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
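
After this hunk, Generate_Call dispatches purely on the target's type: JSFunctions go to CallFunction, bound functions to CallBoundFunction, proxies jump directly to the CallProxy builtin instead of the old Runtime::kJSProxyCall fallback, other callables take the call-as-function delegate, and anything else throws. A compact C++ sketch of that dispatch, with stand-in enum names rather than the real instance types:

    #include <iostream>

    // Stand-ins for the handful of cases Generate_Call distinguishes.
    enum class TargetKind {
      kSmi, kJSFunction, kBoundFunction, kProxy, kOtherCallable, kNotCallable
    };

    enum class CallPath {
      kCallFunction,            // Builtins::CallFunction(mode)
      kCallBoundFunction,       // Builtins::CallBoundFunction()
      kCallProxy,               // the CallProxy builtin this hunk switches to
      kCallAsFunctionDelegate,  // Context::CALL_AS_FUNCTION_DELEGATE_INDEX
      kThrowCalledNonCallable   // the non_callable bailout
    };

    CallPath DispatchCall(TargetKind kind) {
      switch (kind) {
        case TargetKind::kJSFunction:    return CallPath::kCallFunction;
        case TargetKind::kBoundFunction: return CallPath::kCallBoundFunction;
        case TargetKind::kProxy:         return CallPath::kCallProxy;
        case TargetKind::kOtherCallable: return CallPath::kCallAsFunctionDelegate;
        case TargetKind::kSmi:
        case TargetKind::kNotCallable:   return CallPath::kThrowCalledNonCallable;
      }
      return CallPath::kThrowCalledNonCallable;  // unreachable; silences -Wreturn-type
    }

    int main() {
      std::cout << (DispatchCall(TargetKind::kProxy) == CallPath::kCallProxy) << "\n";  // 1
    }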
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- // Free up some registers.
- __ movd(xmm0, edx);
- __ movd(xmm1, edi);
-
- Register argc = eax;
-
- Register scratch = ecx;
- Register scratch2 = edi;
-
- Register spread = ebx;
- Register spread_map = edx;
-
- Register spread_len = edx;
-
- Label runtime_call, push_args;
- __ mov(spread, Operand(esp, kPointerSize));
- __ JumpIfSmi(spread, &runtime_call);
- __ mov(spread_map, FieldOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CmpInstanceType(spread_map, JS_ARRAY_TYPE);
- __ j(not_equal, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ mov(scratch, FieldOperand(spread_map, Map::kPrototypeOffset));
- __ mov(scratch2, NativeContextOperand());
- __ cmp(scratch,
- ContextOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ mov(scratch2, NativeContextOperand());
- __ mov(scratch,
- ContextOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ cmp(scratch,
- ContextOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ mov(scratch, FieldOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ cmp(scratch, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(above, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmp(scratch, Immediate(FAST_SMI_ELEMENTS));
- __ j(equal, &no_protector_check);
- __ cmp(scratch, Immediate(FAST_ELEMENTS));
- __ j(equal, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ mov(spread_len, FieldOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ mov(spread, FieldOperand(spread, JSArray::kElementsOffset));
- __ jmp(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Need to save these on the stack.
- __ movd(edi, xmm1);
- __ movd(edx, xmm0);
- __ Push(edi);
- __ Push(edx);
- __ SmiTag(argc);
- __ Push(argc);
- __ Push(spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, eax);
- __ Pop(argc);
- __ SmiUntag(argc);
- __ Pop(edx);
- __ Pop(edi);
- // Free up some registers.
- __ movd(xmm0, edx);
- __ movd(xmm1, edi);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ mov(spread_len, FieldOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ lea(argc, Operand(argc, spread_len, times_1, -1));
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ neg(scratch);
- __ add(scratch, esp);
- __ sar(scratch, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, spread_len);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- Register return_address = edi;
- // Pop the return address and spread argument.
- __ PopReturnAddressTo(return_address);
- __ Pop(scratch);
-
- Register scratch2 = esi;
- __ movd(xmm2, esi);
-
- __ mov(scratch, Immediate(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(scratch, spread_len);
- __ j(equal, &done, Label::kNear);
- __ mov(scratch2, FieldOperand(spread, scratch, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ inc(scratch);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(return_address);
- __ movd(esi, xmm2);
- __ movd(edi, xmm1);
- __ movd(edx, xmm0);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the target to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push edx to save it.
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3066,19 +2750,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (can be any Object)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (untagged)
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 24fe271cb3..4134d137a4 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -227,7 +227,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -378,7 +378,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -423,22 +423,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -537,16 +521,14 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(t2,
- FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(t2, t2,
- Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
t2, t3);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Branch(&post_instantiation_deopt_entry);
@@ -653,18 +635,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(t2,
- FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(t2, t2,
- Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ Branch(&use_receiver, eq, t2, Operand(zero_reg));
} else {
+ __ Branch(&use_receiver, ne, t2, Operand(zero_reg));
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ Branch(&use_receiver);
}
@@ -758,7 +742,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ li(cp, Operand(context_address));
__ lw(cp, MemOperand(cp));
@@ -830,33 +814,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- v0 : the value to pass to the generator
// -- a1 : the JSGeneratorObject to resume
// -- a2 : the resume mode (tagged)
- // -- a3 : the SuspendFlags of the earlier suspend call (tagged)
// -- ra : return address
// -----------------------------------
- __ SmiUntag(a3);
- __ AssertGeneratorObject(a1, a3);
+ __ AssertGeneratorObject(a1);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ And(t8, a3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ Branch(&async_await, equal, t8,
- Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
-
__ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ jmp(&done_store_input);
-
- __ bind(&async_await);
- __ sw(v0, FieldMemOperand(
- a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
- __ RecordWriteField(a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- v0, a3, kRAHasNotBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done_store_input);
- // `a3` no longer holds SuspendFlags
// Store resume mode into generator object.
__ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
@@ -905,7 +871,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label done_loop, loop;
__ bind(&loop);
- __ Subu(a3, a3, Operand(Smi::FromInt(1)));
+ __ Subu(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg));
__ PushRoot(Heap::kTheHoleValueRootIndex);
__ Branch(&loop);
@@ -925,7 +891,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0,
FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(a0);
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -1004,6 +969,115 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Addu(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee if needed, and caller)
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = a1;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ lw(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+  // Check if the code entry is a Smi. If yes, we interpret it as an
+  // optimization marker. Otherwise, interpret it as a weak cell to a code
+  // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ Branch(&fallthrough, eq, optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ Assert(
+ eq, kExpectedOptimizationSentinel, optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&fallthrough, hs, sp, Operand(at));
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ lw(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ lw(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ And(scratch2, scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&found_deoptimized_code, ne, scratch2, Operand(zero_reg));
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1022,35 +1096,31 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = a1;
+ Register feedback_vector = a2;
+
+ // Load the feedback vector from the closure.
+ __ lw(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(a1);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = t0;
- __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ lw(optimized_code_entry,
- FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ lw(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- Register debug_info = kInterpreterBytecodeArrayRegister;
- DCHECK(!debug_info.is(a0));
- __ lw(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
- __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ __ lw(t0, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+ __ JumpIfNotSmi(t0, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -1062,15 +1132,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
- __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ lw(t0, FieldMemOperand(
- a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ lw(t0,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(
- a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ sw(t0,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1142,10 +1212,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister already holds the function data loaded
+ // from SharedFunctionInfo::kFunctionDataOffset above.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ lw(t1, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
+ __ SmiUntag(t1);
+ __ And(t1, t1, Operand(DebugInfo::kHasBreakInfo));
+ __ Branch(&bytecode_array_loaded, eq, t1, Operand(zero_reg));
__ lw(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldMemOperand(t0, DebugInfo::kDebugBytecodeArrayOffset));
__ Branch(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -1153,35 +1229,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kCodeOffset));
__ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(a1, t0, t1);
+ __ sw(t0, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, t0, t1);
__ Jump(t0);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ lw(t1,
- FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
- __ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, t1,
- t2);
- __ Jump(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1222,7 +1275,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1245,17 +1298,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// This function modifies a2, t4 and t1.
Generate_InterpreterPushArgs(masm, t0, a2, t4, t1);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(a2); // Pass the spread in a register
+ __ Subu(a0, a0, Operand(1)); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1287,7 +1344,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// This function modified t4, t1 and t0.
Generate_InterpreterPushArgs(masm, a0, t4, t1, t0);
- __ AssertUndefinedOrAllocationSite(a2, t0);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(a2); // Pass the spread in a register
+ __ Subu(a0, a0, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(a1);
@@ -1415,6 +1478,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = a1;
+
+ // Get the feedback vector.
+ Register feedback_vector = a2;
+ __ lw(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+ Operand(at));
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1423,43 +1514,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = a1;
- Register index = a2;
+ Register feedback_vector = a2;
// Do we have a valid feedback vector?
- __ lw(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ lw(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ lw(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = t0;
- __ lw(entry, FieldMemOperand(
- index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ lw(t1, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, t1, t2);
- __ Jump(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = t0;
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ lbu(t1, FieldMemOperand(entry,
- SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ And(t1, t1,
- Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1476,15 +1547,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1620,30 +1682,68 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Preserve possible return result from lazy deopt.
+ __ Push(v0);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ Pop(v0);
}
__ Addu(sp, sp, Operand(kPointerSize)); // Ignore state
- __ Jump(ra); // Jump to miss handler
+ __ Jump(ra); // Jump to the ContinueToBuiltin stub
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ sw(v0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
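+ // Tear down the builtin continuation frame: restore fp and ra, then
+ // tail-call the Code object that was stored in the frame.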
+ __ lw(fp, MemOperand(
+ sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(t0);
+ __ Addu(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ra);
+ __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t0);
}
+} // namespace
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1777,32 +1877,27 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg);
__ Addu(sp, sp, Operand(scratch));
__ sw(a2, MemOperand(sp));
- __ mov(a0, a3);
+ __ mov(a2, a3);
}
// ----------- S t a t e -------------
- // -- a0 : argArray
+ // -- a2 : argArray
// -- a1 : receiver
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(a1, &receiver_not_callable);
- __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsCallable));
- __ Branch(&receiver_not_callable, eq, t0, Operand(zero_reg));
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(a2, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(a2, Heap::kUndefinedValueRootIndex, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1811,13 +1906,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ mov(a0, zero_reg);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ sw(a1, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1895,34 +1983,22 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ bind(&no_arg);
__ Addu(sp, sp, Operand(scratch));
__ sw(a2, MemOperand(sp));
- __ mov(a0, a3);
+ __ mov(a2, a3);
}
// ----------- S t a t e -------------
- // -- a0 : argumentsList
+ // -- a2 : argumentsList
// -- a1 : target
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(a1, &target_not_callable);
- __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsCallable));
- __ Branch(&target_not_callable, eq, t0, Operand(zero_reg));
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ sw(a1, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1959,48 +2035,26 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ lw(a3, MemOperand(a0)); // new.target
__ bind(&no_arg);
__ Addu(sp, sp, Operand(scratch));
- __ mov(a0, a2);
}
// ----------- S t a t e -------------
- // -- a0 : argumentsList
+ // -- a2 : argumentsList
// -- a3 : new.target
// -- a1 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(a1, &target_not_constructor);
- __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsConstructor));
- __ Branch(&target_not_constructor, eq, t0, Operand(zero_reg));
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(a3, &new_target_not_constructor);
- __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsConstructor));
- __ Branch(&new_target_not_constructor, eq, t0, Operand(zero_reg));
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ sw(a1, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ sw(a3, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2027,149 +2081,59 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- a0 : argumentsList
- // -- a1 : target
- // -- a3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- a1 : target
+ // -- a0 : number of parameters on the stack (not including the receiver)
+ // -- a2 : arguments list (a FixedArray)
+ // -- t0 : len (number of elements to push from args)
+ // -- a3 : new.target (for [[Construct]])
// -----------------------------------
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(a0, &create_runtime);
-
- // Load the map of argumentsList into a2.
- __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
-
- // Load native context into t0.
- __ lw(t0, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ lw(at, ContextMemOperand(t0, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, a2, Operand(at));
- __ lw(at, ContextMemOperand(t0, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, a2, Operand(at));
-
- // Check if argumentsList is a fast JSArray.
- __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a3, a0);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ mov(a0, v0);
- __ Pop(a1, a3);
- __ lw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ SmiUntag(a2);
- }
- __ Branch(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ lw(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
- __ lw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
- __ lw(at, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ Branch(&create_runtime, ne, a2, Operand(at));
- __ SmiUntag(a2);
- __ mov(a0, t0);
- __ Branch(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ lw(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
- __ lw(at, ContextMemOperand(t0, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Branch(&create_runtime, ne, a2, Operand(at));
- __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
- __ lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
- __ Branch(&create_runtime, ne, a2,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
- __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
- __ SmiUntag(a2);
- __ Branch(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(t1);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
- __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
- __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
- __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
- __ SmiUntag(a2);
-
- __ bind(&done_create);
- }
+ __ AssertFixedArray(a2);
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
- __ LoadRoot(t0, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(t1, Heap::kRealStackLimitRootIndex);
// Make ip the space we have left. The stack might already be overflowed
// here which will cause ip to become negative.
- __ Subu(t0, sp, t0);
+ __ Subu(t1, sp, t1);
// Check if the arguments will overflow the stack.
- __ sll(at, a2, kPointerSizeLog2);
- __ Branch(&done, gt, t0, Operand(at)); // Signed comparison.
+ __ sll(at, t0, kPointerSizeLog2);
+ __ Branch(&done, gt, t1, Operand(at)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- a1 : target
- // -- a0 : args (a FixedArray built from argumentsList)
- // -- a2 : len (number of elements to push from args)
- // -- a3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ mov(t0, zero_reg);
+ __ mov(t2, zero_reg);
Label done, push, loop;
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ bind(&loop);
- __ Branch(&done, eq, t0, Operand(a2));
- __ Lsa(at, a0, t0, kPointerSizeLog2);
+ __ Branch(&done, eq, t2, Operand(t0));
+ __ Lsa(at, a2, t2, kPointerSizeLog2);
__ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
__ Branch(&push, ne, t1, Operand(at));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(at);
- __ Addu(t0, t0, Operand(1));
+ __ Addu(t2, t2, Operand(1));
__ Branch(&loop);
__ bind(&done);
- __ Move(a0, t0);
+ __ Addu(a0, a0, t2);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- Label construct;
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&construct, ne, a3, Operand(at));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ bind(&construct);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a3 : the new.target (for [[Construct]] calls)
@@ -2195,11 +2159,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Just get the length from the ArgumentsAdaptorFrame.
__ lw(t2, MemOperand(t3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(t2);
}
__ bind(&arguments_done);
Label stack_done, stack_overflow;
- __ SmiUntag(t2);
__ Subu(t2, t2, a2);
__ Branch(&stack_done, le, t2, Operand(zero_reg));
{
@@ -2229,101 +2193,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ li(at, Operand(is_tail_call_elimination_enabled));
- __ lb(scratch1, MemOperand(at));
- __ Branch(&done, eq, scratch1, Operand(zero_reg));
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ lw(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_interpreter_frame, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ lw(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ Branch(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ lw(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ lw(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
@@ -2334,21 +2206,20 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
- __ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a3,
+ Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
{
// ----------- S t a t e -------------
@@ -2413,13 +2284,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ sra(a2, a2, kSmiTagSize); // Un-tag.
ParameterCount actual(a0);
ParameterCount expected(a2);
__ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2435,18 +2301,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(a1);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
// Patch the receiver to [[BoundThis]].
{
__ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
@@ -2528,8 +2389,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -2539,9 +2399,9 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
// Check if target has a [[Call]] internal method.
@@ -2549,21 +2409,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ And(t1, t1, Operand(1 << Map::kIsCallable));
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
+ // If the target is a JSProxy, tail call the CallProxy external builtin.
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
-
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(a1);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ Addu(a0, a0, 2);
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ li(t2, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ lw(t2, MemOperand(t2));
+ __ Jump(t2, Operand(Code::kHeaderSize - kHeapObjectTag));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2574,7 +2424,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2586,151 +2436,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = a0;
- Register constructor = a1;
- Register new_target = a3;
-
- Register scratch = t0;
- Register scratch2 = t1;
-
- Register spread = a2;
- Register spread_map = t3;
-
- Register spread_len = t3;
-
- Register native_context = t4;
-
- Label runtime_call, push_args;
- __ lw(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ lw(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
- __ lw(native_context, NativeContextMemOperand());
-
- // Check that the spread is an array.
- __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
- __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that we have the original ArrayPrototype.
- __ lw(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ lw(scratch2, ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Branch(&runtime_call, ne, scratch,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
- // Check that the map of the initial array iterator hasn't changed.
- __ lw(scratch,
- ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ lw(scratch2,
- ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
- // For non-FastHoley kinds, we can skip the protector check.
- __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
- __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Branch(&runtime_call, ne, scratch,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ lw(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ lw(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ Branch(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, v0);
- __ Pop(constructor, new_target, argc);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ lw(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ Addu(argc, argc, spread_len);
- __ Subu(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause ip to become negative.
- __ Subu(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ sll(at, spread_len, kPointerSizeLog2);
- __ Branch(&done, gt, scratch, Operand(at)); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ mov(scratch, zero_reg);
- Label done, push, loop;
- __ bind(&loop);
- __ Branch(&done, eq, scratch, Operand(spread_len));
- __ Lsa(scratch2, spread, scratch, kPointerSizeLog2);
- __ lw(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ Addu(scratch, scratch, Operand(1));
- __ Branch(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the target to call (can be any Object).
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push a3 to save it.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2911,19 +2616,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the constructor to call (can be any Object)
- // -- a3 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 4d80993952..5af11c3fc5 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -228,7 +228,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -380,7 +380,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -426,22 +426,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ LoadRoot(a4, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(a4));
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -541,16 +525,14 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(t2,
- FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(t2, t2,
- Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
t2, t3);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Branch(&post_instantiation_deopt_entry);
@@ -657,18 +639,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(t2,
- FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(t2, t2,
- Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ Branch(&use_receiver, eq, t2, Operand(zero_reg));
} else {
+ __ Branch(&use_receiver, ne, t2, Operand(zero_reg));
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ Branch(&use_receiver);
}
@@ -716,32 +700,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- v0 : the value to pass to the generator
// -- a1 : the JSGeneratorObject to resume
// -- a2 : the resume mode (tagged)
- // -- a3 : the SuspendFlags of the earlier suspend call (tagged)
// -- ra : return address
// -----------------------------------
- __ SmiUntag(a3);
- __ AssertGeneratorObject(a1, a3);
+ __ AssertGeneratorObject(a1);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ And(t8, a3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ Branch(&async_await, equal, t8,
- Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
-
__ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ jmp(&done_store_input);
-
- __ bind(&async_await);
- __ Sd(v0, FieldMemOperand(
- a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
- __ RecordWriteField(a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- v0, a3, kRAHasNotBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done_store_input);
- // `a3` no longer holds SuspendFlags
// Store resume mode into generator object.
__ Sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
@@ -892,7 +858,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ li(cp, Operand(context_address));
__ Ld(cp, MemOperand(cp));
@@ -1004,6 +970,115 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Daddu(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee if needed, and caller)
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = a1;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ Ld(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimization marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ Branch(&fallthrough, eq, optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ Assert(
+ eq, kExpectedOptimizationSentinel, optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ Branch(&fallthrough, hs, sp, Operand(t0));
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ Ld(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ Lw(a5, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg));
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1022,35 +1097,31 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = a1;
+ Register feedback_vector = a2;
+
+ // Load the feedback vector from the closure.
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(a1);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = a4;
- __ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ Ld(optimized_code_entry,
- FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ Ld(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- Register debug_info = kInterpreterBytecodeArrayRegister;
- DCHECK(!debug_info.is(a0));
- __ Ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
- __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
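+  // The DebugInfo field holds either a DebugInfo object or a Smi; if it is a
+  // heap object, a debug copy of the bytecode array may have to be used.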
+ __ Ld(a4, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+ __ JumpIfNotSmi(a4, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -1062,15 +1133,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
- __ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ Ld(a4, FieldMemOperand(
- a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ Ld(a4,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ Sd(a4, FieldMemOperand(
- a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ Sd(a4,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1142,10 +1213,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with
+ // SharedFunctionInfo::kFunctionDataOffset.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ Ld(a5, FieldMemOperand(a4, DebugInfo::kFlagsOffset));
+ __ SmiUntag(a5);
+ __ And(a5, a5, Operand(DebugInfo::kHasBreakInfo));
+ __ Branch(&bytecode_array_loaded, eq, a5, Operand(zero_reg));
__ Ld(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldMemOperand(a4, DebugInfo::kDebugBytecodeArrayOffset));
__ Branch(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -1153,35 +1230,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
__ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(a1, a4, a5);
+ __ Sd(a4, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, a4, a5);
__ Jump(a4);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ Lw(a5,
- FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
- __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, a5,
- t0);
- __ Jump(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1222,7 +1276,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1245,17 +1299,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// This function modifies a2, t0 and a4.
Generate_InterpreterPushArgs(masm, a3, a2, a4, t0);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(a2); // Pass the spread in a register
+ __ Dsubu(a0, a0, Operand(1)); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1287,7 +1345,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// This function modifies t0, a4 and a5.
Generate_InterpreterPushArgs(masm, a0, a4, a5, t0);
- __ AssertUndefinedOrAllocationSite(a2, t0);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(a2); // Pass the spread in a register
+ __ Dsubu(a0, a0, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(a1);
@@ -1416,6 +1480,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = a1;
+
+ // Get the feedback vector.
+ Register feedback_vector = a2;
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+ Operand(at));
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1424,43 +1516,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = a1;
- Register index = a2;
+ Register feedback_vector = a2;
// Do we have a valid feedback vector?
- __ Ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ Ld(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = a4;
- __ Ld(entry, FieldMemOperand(
- index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ Ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ Lw(a5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, a5, t0);
- __ Jump(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = a4;
__ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ Lbu(a5, FieldMemOperand(entry,
- SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ And(a5, a5,
- Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1477,15 +1549,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1621,30 +1684,68 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Preserve possible return result from lazy deopt.
+ __ push(v0);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(v0);
}
__ Daddu(sp, sp, Operand(kPointerSize)); // Ignore state
- __ Jump(ra); // Jump to miss handler
+ __ Jump(ra); // Jump to the ContinueToBuiltin stub
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ Sd(v0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
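+  // Restore the allocatable registers that the deoptimizer saved, in reverse
+  // order. For JavaScript builtins the argument count register holds a Smi
+  // and is untagged here.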
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
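+  // Restore fp and ra, pop the continuation builtin's Code object, drop the
+  // rest of the frame, and jump past the Code header to the builtin's entry.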
+ __ Ld(fp, MemOperand(
+ sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(t0);
+ __ Daddu(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ra);
+ __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t0);
}
+} // namespace
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1759,14 +1860,14 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -----------------------------------
Register argc = a0;
- Register arg_array = a0;
+ Register arg_array = a2;
Register receiver = a1;
- Register this_arg = a2;
+ Register this_arg = a5;
Register undefined_value = a3;
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // 1. Load receiver into a1, argArray into a0 (if present), remove all
+ // 1. Load receiver into a1, argArray into a2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
@@ -1786,29 +1887,24 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- a0 : argArray
+ // -- a2 : argArray
// -- a1 : receiver
// -- a3 : undefined root value
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(receiver, &receiver_not_callable);
- __ Ld(a4, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsCallable));
- __ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
__ JumpIfRoot(arg_array, Heap::kNullValueRootIndex, &no_arguments);
__ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- DCHECK(undefined_value.is(a3));
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1818,13 +1914,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
DCHECK(receiver.is(a1));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ Sd(receiver, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1879,14 +1968,14 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -----------------------------------
Register argc = a0;
- Register arguments_list = a0;
+ Register arguments_list = a2;
Register target = a1;
- Register this_argument = a2;
+ Register this_argument = a5;
Register undefined_value = a3;
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
@@ -1910,31 +1999,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- a0 : argumentsList
+ // -- a2 : argumentsList
// -- a1 : target
// -- a3 : undefined root value
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(target, &target_not_callable);
- __ Ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
- __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsCallable));
- __ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- DCHECK(undefined_value.is(a3));
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ Sd(target, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1946,13 +2023,13 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
Register argc = a0;
- Register arguments_list = a0;
+ Register arguments_list = a2;
Register target = a1;
Register new_target = a3;
Register undefined_value = a4;
Register scratch = a5;
- // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
// new.target into a3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -1977,44 +2054,23 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- a0 : argumentsList
+ // -- a2 : argumentsList
// -- a1 : target
// -- a3 : new.target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(target, &target_not_constructor);
- __ Ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
- __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsConstructor));
- __ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(new_target, &new_target_not_constructor);
- __ Ld(a4, FieldMemOperand(new_target, HeapObject::kMapOffset));
- __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsConstructor));
- __ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ Sd(target, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ Sd(new_target, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2043,135 +2099,45 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- a0 : argumentsList
- // -- a1 : target
- // -- a3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- a1 : target
+ // -- a0 : number of parameters on the stack (not including the receiver)
+ // -- a2 : arguments list (a FixedArray)
+ // -- a4 : len (number of elements to push from args)
+ // -- a3 : new.target (for [[Construct]])
// -----------------------------------
+ __ AssertFixedArray(a2);
- Register arguments_list = a0;
- Register target = a1;
- Register new_target = a3;
-
- Register args = a0;
- Register len = a2;
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(arguments_list, &create_runtime);
-
- // Load the map of argumentsList into a2.
- Register arguments_list_map = a2;
- __ Ld(arguments_list_map,
- FieldMemOperand(arguments_list, HeapObject::kMapOffset));
-
- // Load native context into a4.
- Register native_context = a4;
- __ Ld(native_context, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ Ld(at, ContextMemOperand(native_context,
- Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
- __ Ld(at, ContextMemOperand(native_context,
- Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
-
- // Check if argumentsList is a fast JSArray.
- __ Lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(target, new_target, arguments_list);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ mov(arguments_list, v0);
- __ Pop(target, new_target);
- __ Lw(len, UntagSmiFieldMemOperand(v0, FixedArray::kLengthOffset));
- }
- __ Branch(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ Lw(len, UntagSmiFieldMemOperand(arguments_list,
- JSArgumentsObject::kLengthOffset));
- __ Ld(a4, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
- __ Lw(at, UntagSmiFieldMemOperand(a4, FixedArray::kLengthOffset));
- __ Branch(&create_runtime, ne, len, Operand(at));
- __ mov(args, a4);
-
- __ Branch(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ Ld(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
- __ Ld(at, ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Branch(&create_runtime, ne, a2, Operand(at));
- __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
- __ Lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
- __ Branch(&create_runtime, ne, a2,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ Lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
- __ Ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
- __ Branch(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ Lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(t1);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
- __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
- __ Lw(a2, UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
- __ Ld(a0, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
-
- __ bind(&done_create);
- }
+ Register args = a2;
+ Register len = a4;
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
- __ LoadRoot(a4, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(a5, Heap::kRealStackLimitRootIndex);
// Make ip the space we have left. The stack might already be overflowed
// here which will cause ip to become negative.
- __ Dsubu(a4, sp, a4);
+ __ Dsubu(a5, sp, a5);
// Check if the arguments will overflow the stack.
__ dsll(at, len, kPointerSizeLog2);
- __ Branch(&done, gt, a4, Operand(at)); // Signed comparison.
+ __ Branch(&done, gt, a5, Operand(at)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- a1 : target
- // -- a0 : args (a FixedArray built from argumentsList)
- // -- a2 : len (number of elements to push from args)
- // -- a3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
Label done, push, loop;
- Register src = a4;
+ Register src = a6;
Register scratch = len;
__ daddiu(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
__ Branch(&done, eq, len, Operand(zero_reg), i::USE_DELAY_SLOT);
- __ mov(a0, len); // The 'len' argument for Call() or Construct().
+ __ Daddu(a0, a0, len); // The 'len' argument for Call() or Construct().
__ dsll(scratch, len, kPointerSizeLog2);
__ Dsubu(scratch, sp, Operand(scratch));
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
@@ -2186,31 +2152,13 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- a0 : argument count (len)
- // -- a1 : target
- // -- a3 : new.target (checked to be constructor or undefinded)
- // -- sp[0] : args[len-1]
- // -- sp[8] : args[len-2]
- // ... : ...
- // -- sp[8*(len-2)] : args[1]
- // -- sp[8*(len-1)] : args[0]
- // ----------------------------------
-
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- Label construct;
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&construct, ne, a3, Operand(at));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ bind(&construct);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a3 : the new.target (for [[Construct]] calls)
@@ -2270,99 +2218,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ li(at, Operand(is_tail_call_elimination_enabled));
- __ Lb(scratch1, MemOperand(at));
- __ Branch(&done, eq, scratch1, Operand(zero_reg));
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ Ld(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_interpreter_frame, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ Ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ld(scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ Lw(caller_args_count_reg,
- UntagSmiMemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Branch(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ Ld(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ Ld(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ Lw(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
@@ -2373,21 +2231,20 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that function is not a "classConstructor".
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ Lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
- __ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a3,
+ Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
{
// ----------- S t a t e -------------
@@ -2452,10 +2309,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
__ Lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(a0);
@@ -2473,18 +2326,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(a1);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
// Patch the receiver to [[BoundThis]].
{
__ Ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
@@ -2565,8 +2413,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -2576,9 +2423,9 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
// Check if target has a [[Call]] internal method.
@@ -2587,20 +2434,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
-
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(a1);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ Daddu(a0, a0, 2);
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
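+  // Load the Code object of the CallProxy builtin and tail call it, skipping
+  // the Code object header to reach the entry point.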
+ __ li(t2, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ Ld(t2, MemOperand(t2));
+ __ Daddu(t2, t2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t2);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2611,7 +2448,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2623,150 +2460,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = a0;
- Register constructor = a1;
- Register new_target = a3;
-
- Register scratch = t0;
- Register scratch2 = t1;
-
- Register spread = a2;
- Register spread_map = a4;
-
- Register spread_len = a4;
-
- Register native_context = a5;
-
- Label runtime_call, push_args;
- __ Ld(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ Ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
- __ Ld(native_context, NativeContextMemOperand());
-
- // Check that the spread is an array.
- __ Lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
- __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that we have the original ArrayPrototype.
- __ Ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ Ld(scratch2, ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Branch(&runtime_call, ne, scratch,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
- // Check that the map of the initial array iterator hasn't changed.
- __ Ld(scratch,
- ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ Ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ Ld(scratch2,
- ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ Lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
- // For non-FastHoley kinds, we can skip the protector check.
- __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
- __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Branch(&runtime_call, ne, scratch,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ Lw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
- __ Ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ Branch(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, v0);
- __ Pop(constructor, new_target, argc);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ Lw(spread_len,
- UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ Daddu(argc, argc, spread_len);
- __ Dsubu(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause ip to become negative.
- __ Dsubu(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ dsll(at, spread_len, kPointerSizeLog2);
- __ Branch(&done, gt, scratch, Operand(at)); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ mov(scratch, zero_reg);
- Label done, push, loop;
- __ bind(&loop);
- __ Branch(&done, eq, scratch, Operand(spread_len));
- __ Dlsa(scratch2, spread, scratch, kPointerSizeLog2);
- __ Ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ Daddu(scratch, scratch, Operand(1));
- __ Branch(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the target to call (can be any Object).
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push a3 to save it.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2946,19 +2639,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the constructor to call (can be any Object)
- // -- a3 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index dc2221e10b..33d734f3bb 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -227,7 +227,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r9);
__ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r5);
__ LeaveBuiltinFrame(cp, r4, r9);
@@ -379,7 +379,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r9);
__ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r5);
__ LeaveBuiltinFrame(cp, r4, r9);
@@ -427,23 +427,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ JumpToJSEntry(ip);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmpl(sp, ip);
- __ bge(&ok);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -552,16 +535,13 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7,
- FunctionKind::kDerivedConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r7, SharedFunctionInfo::kDerivedConstructorMask, r0);
__ bne(&not_create_implicit_receiver, cr0);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
r7, r8);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ b(&post_instantiation_deopt_entry);
@@ -679,10 +659,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7,
- FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r7, SharedFunctionInfo::kClassConstructorMask, r0);
__ beq(&use_receiver, cr0);
} else {
@@ -739,37 +716,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- r3 : the value to pass to the generator
// -- r4 : the JSGeneratorObject to resume
// -- r5 : the resume mode (tagged)
- // -- r6 : the SuspendFlags of the earlier suspend call (tagged)
// -- lr : return address
// -----------------------------------
- __ SmiUntag(r6);
- __ AssertGeneratorObject(r4, r6);
+ __ AssertGeneratorObject(r4);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ andi(r6, r6,
- Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ cmpi(r6, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ beq(&async_await);
-
__ StoreP(r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset),
r0);
__ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&async_await);
- __ StoreP(
- r3,
- FieldMemOperand(r4, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset),
- r0);
- __ RecordWriteField(r4, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- r3, r6, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&done_store_input);
- // `r6` no longer holds SuspendFlags
// Store resume mode into generator object.
__ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kResumeModeOffset), r0);
@@ -823,13 +778,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label loop, done_loop;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-#if V8_TARGET_ARCH_PPC64
__ cmpi(r3, Operand::Zero());
__ beq(&done_loop);
-#else
- __ SmiUntag(r3, SetRC);
- __ beq(&done_loop, cr0);
-#endif
__ mtctr(r3);
__ bind(&loop);
__ push(ip);
@@ -927,7 +877,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
@@ -1044,6 +994,121 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ add(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ bne(&no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+  //  -- r3 : argument count (preserved for callee if needed, and caller)
+  //  -- r6 : new target (preserved for callee if needed, and caller)
+  //  -- r4 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = r4;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+  // optimization marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ CmpSmiLiteral(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone), r0);
+ __ beq(&fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ CmpSmiLiteral(
+ optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmpl(sp, ip);
+ __ bge(&fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadWordArith(
+ scratch2,
+ FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
+ __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&found_deoptimized_code, cr0);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ mr(ip, optimized_code_entry);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1063,43 +1128,35 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = r4;
+ Register feedback_vector = r5;
+
+ // Load the feedback vector from the closure.
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r4);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
-
- Register optimized_code_entry = r7;
- __ LoadP(r3, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r3, FieldMemOperand(r3, Cell::kValueOffset));
- __ LoadP(
- optimized_code_entry,
- FieldMemOperand(r3, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- Label array_done;
- Register debug_info = r5;
- DCHECK(!debug_info.is(r3));
- __ LoadP(debug_info,
- FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
- __ TestIfSmi(debug_info, r0);
- __ beq(&array_done, cr0);
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
- __ bind(&array_done);
+ __ LoadP(r7, FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
+ __ TestIfSmi(r7, r0);
+ __ bne(&maybe_load_debug_bytecode_array, cr0);
+ __ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
// TODO(rmcilroy) Remove self healing once liveedit only has to deal with
@@ -1111,16 +1168,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&switch_to_different_code_kind);
// Increment invocation count for the function.
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset));
- __ LoadP(r8, FieldMemOperand(
- r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(
+ r8, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
- __ StoreP(r8, FieldMemOperand(
- r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
- r0);
+ __ StoreP(
+ r8,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize),
+ r0);
// Check function data field is actually a BytecodeArray object.
@@ -1193,40 +1251,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r5);
__ blr();
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with
+ // SharedFunctionInfo::kFunctionDataOffset.
+ Label done;
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ LoadP(ip, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
+ __ SmiUntag(ip);
+ __ andi(r0, ip, Operand(DebugInfo::kHasBreakInfo));
+ __ beq(&done, cr0);
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r7, DebugInfo::kDebugBytecodeArrayOffset));
+ __ bind(&done);
+ __ b(&bytecode_array_loaded);
+
// If the shared code is no longer this entry trampoline, then the underlying
// function has been switched to a different kind of code and we heal the
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset));
__ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
- __ RecordWriteCodeEntryField(r4, r7, r8);
+ __ StoreP(r7, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, r7, r8);
__ JumpToJSEntry(r7);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ lwz(r8, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&gotta_call_runtime, cr0);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r4, r9, r8,
- r5);
- __ JumpToJSEntry(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
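
The trampoline now defers the debug-bytecode decision to a cold path: the original bytecode array is loaded unconditionally, and only a DebugInfo whose flags include kHasBreakInfo swaps in the debug copy. A minimal standard-C++ sketch of that selection, with BytecodeArray/DebugInfo/SharedFunctionInfo reduced to hypothetical plain structs rather than V8 types:

#include <cstdint>
#include <optional>

struct BytecodeArray { const char* tag; };

struct DebugInfo {
  static constexpr uint32_t kHasBreakInfo = 1u << 0;  // assumed flag bit
  uint32_t flags;
  BytecodeArray debug_bytecode;
};

struct SharedFunctionInfo {
  BytecodeArray function_data;          // the original bytecode array
  std::optional<DebugInfo> debug_info;  // empty ~ the Smi-zero DebugInfo slot
};

// Start with the original bytecode and only switch to the debug copy when a
// DebugInfo with break info is attached, mirroring the cold path above.
const BytecodeArray* SelectBytecode(const SharedFunctionInfo& sfi) {
  const BytecodeArray* bytecode = &sfi.function_data;
  if (sfi.debug_info && (sfi.debug_info->flags & DebugInfo::kHasBreakInfo)) {
    bytecode = &sfi.debug_info->debug_bytecode;
  }
  return bytecode;
}
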
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1260,7 +1309,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r5 : the address of the first argument to be pushed. Subsequent
@@ -1284,17 +1333,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments. r5, r6, r7 will be modified.
Generate_InterpreterPushArgs(masm, r6, r5, r6, r7);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r5); // Pass the spread in a register
+ __ subi(r3, r3, Operand(1)); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
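
With TailCallMode gone, the only special case left in the push-args path is the final spread: after the arguments are pushed, the spread is popped back into a register and removed from the argument count before dispatch. A rough sketch of that bookkeeping, using a std::vector as a stand-in for the JS stack (Value and the modes are illustrative types only):

#include <cstddef>
#include <vector>

struct Value { int payload; };

enum class PushArgsMode { kOther, kJSFunction, kWithFinalSpread };

// After the receiver and arguments are pushed, the spread (if any) is on top
// of the stack; it is popped into its own slot and no longer counted as an
// argument before dispatching to Call/CallWithSpread.
Value PopSpreadIfPresent(std::vector<Value>& stack, std::size_t& argc,
                         PushArgsMode mode) {
  Value spread{0};  // stands in for "no spread"
  if (mode == PushArgsMode::kWithFinalSpread) {
    spread = stack.back();
    stack.pop_back();
    --argc;
  }
  return spread;
}
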
@@ -1330,8 +1383,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Push the arguments. r8, r7, r9 will be modified.
Generate_InterpreterPushArgs(masm, r3, r7, r3, r9);
__ bind(&skip);
-
- __ AssertUndefinedOrAllocationSite(r5, r8);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r5); // Pass the spread in a register
+ __ subi(r3, r3, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(r5, r8);
+ }
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r4);
@@ -1461,6 +1518,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -- r4 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = r4;
+
+ // Get the feedback vector.
+ Register feedback_vector = r5;
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
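
CheckOptimizationMarker only asserts that a feedback vector exists, lets the optimized-code-slot check run, and otherwise falls back to the SharedFunctionInfo code. A compact model of that ordering, with hypothetical structs standing in for the heap objects:

#include <cassert>
#include <optional>

struct FeedbackVector { bool has_optimized_code_or_marker; };
struct Closure { std::optional<FeedbackVector> feedback_vector; };

enum class Target { kOptimizedCodeOrRuntime, kSharedFunctionCode };

// Assert the feedback vector exists (kExpectedFeedbackVector), prefer whatever
// the optimized-code slot holds, and only then fall back to the SFI code.
Target CheckOptimizationMarker(const Closure& closure) {
  assert(closure.feedback_vector.has_value());
  if (closure.feedback_vector->has_optimized_code_or_marker) {
    return Target::kOptimizedCodeOrRuntime;
  }
  return Target::kSharedFunctionCode;
}
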
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1469,43 +1554,25 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = r4;
- Register index = r5;
+ Register feedback_vector = r5;
// Do we have a valid feedback vector?
- __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = r7;
- __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ lwz(r8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&gotta_call_runtime, cr0);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r9, r8, r5);
- __ JumpToJSEntry(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = r7;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ lbz(r8, FieldMemOperand(entry,
- SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ TestBit(r8, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
- __ bne(&gotta_call_runtime, cr0);
// If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1523,15 +1590,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
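
CompileLazy keeps the same shape on both ports: bail to the runtime without a feedback vector, prefer whatever the optimized-code slot says, and otherwise install the SFI's code on the closure unless it is still the CompileLazy builtin itself. A sketch of that decision, again with invented stand-in types:

struct Code { bool is_compile_lazy_builtin; };
struct SharedInfo { Code code; };
struct LazyClosure {
  bool has_feedback_vector;
  bool optimized_slot_has_code_or_marker;
  SharedInfo shared;
};

enum class LazyTarget {
  kRuntimeCompileLazy,   // no feedback vector, or SFI still points at CompileLazy
  kOptimizedSlot,        // handled by the optimized-code-slot dispatch
  kInstalledSharedCode   // SFI carries real code: install it on the closure
};

LazyTarget CompileLazy(const LazyClosure& closure) {
  if (!closure.has_feedback_vector) return LazyTarget::kRuntimeCompileLazy;
  if (closure.optimized_slot_has_code_or_marker) return LazyTarget::kOptimizedSlot;
  if (!closure.shared.code.is_compile_lazy_builtin) {
    // The real builtin also copies the code pointer into the closure's code
    // entry field (with a write barrier) before jumping to it.
    return LazyTarget::kInstalledSharedCode;
  }
  return LazyTarget::kRuntimeCompileLazy;
}
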
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1674,30 +1732,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Preserve possible return result from lazy deopt.
+ __ push(r3);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(r3);
}
__ addi(sp, sp, Operand(kPointerSize)); // Ignore state
- __ blr(); // Jump to miss handler
+ __ blr(); // Jump to ContinueToBuiltin stub
+}
+
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ StoreP(
+ r3, MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+ __ LoadP(
+ fp,
+ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ip);
+ __ addi(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(r0);
+ __ mtlr(r0);
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
}
+} // namespace
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
}
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
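
The ContinueToBuiltin helpers unwind the frame the deoptimizer built: the lazy-deopt result (if any) overwrites the hole above the spilled registers, the allocatable registers are popped in reverse allocation order, and the JavaScript argument count is Smi-untagged. A simplified model of the register restore, with the stack as a plain array and a 1-bit Smi tag (both assumptions of this sketch):

#include <cstdint>
#include <vector>

std::vector<uintptr_t> RestoreContinuationRegisters(
    std::vector<uintptr_t>& stack,   // index 0 == current sp
    std::size_t register_count,
    std::size_t hole_slot_offset,    // where the deoptimizer left the hole
    bool java_script_builtin,
    std::size_t argc_register_index,
    bool with_result, uintptr_t result) {
  if (with_result) {
    // *WithResult continuations overwrite the hole with the value returned
    // at the lazy-deopt point before any register is popped.
    stack[hole_slot_offset] = result;
  }
  std::vector<uintptr_t> registers(register_count);
  std::size_t sp = 0;
  // Registers were pushed in allocation order, so they pop in reverse.
  for (std::size_t i = register_count; i-- > 0;) {
    uintptr_t value = stack[sp++];
    if (java_script_builtin && i == argc_register_index) value >>= 1;  // untag argc
    registers[i] = value;
  }
  // The real builtin then reloads fp from the fixed frame, pops the target
  // Code object, drops the frame, restores the return address and jumps past
  // the Code header; those steps are omitted here.
  return registers;
}
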
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1824,52 +1922,47 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : receiver
// -----------------------------------
- // 1. Load receiver into r4, argArray into r3 (if present), remove all
+ // 1. Load receiver into r4, argArray into r5 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
Label skip;
- Register arg_size = r5;
+ Register arg_size = r8;
Register new_sp = r6;
Register scratch = r7;
__ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
__ add(new_sp, sp, arg_size);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ mr(scratch, r3);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ mr(r5, scratch);
__ LoadP(r4, MemOperand(new_sp, 0)); // receiver
__ cmpi(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
__ beq(&skip);
- __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ LoadP(r5, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
__ bind(&skip);
__ mr(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r3 : argArray
+ // -- r5 : argArray
// -- r4 : receiver
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(r4, &receiver_not_callable);
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsCallable, r0);
- __ beq(&receiver_not_callable, cr0);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r3, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r5, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1878,13 +1971,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ li(r3, Operand::Zero());
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ StoreP(r4, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
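
Function.prototype.apply now only shuffles its operands (receiver, optional thisArg, optional argArray, each defaulting to undefined) and leaves callability and iteration to CallWithArrayLike. A sketch of the shuffle over a vector model of the stack (Value/kUndefined are illustrative stand-ins):

#include <string>
#include <vector>

using Value = std::string;
static const Value kUndefined = "undefined";

struct ApplyArguments {
  Value receiver;   // ends up in the target register (r4 on ppc)
  Value this_arg;   // pushed back as the sole stack argument
  Value arg_array;  // ends up in the arguments-list register (r5 on ppc)
};

// `stack` models [receiver, thisArg?, argArray?] as pushed for
// Function.prototype.apply; argc is the count excluding the receiver.
ApplyArguments ShuffleForApply(const std::vector<Value>& stack, std::size_t argc) {
  ApplyArguments out{stack.at(0), kUndefined, kUndefined};
  if (argc >= 1) out.this_arg = stack.at(1);
  if (argc >= 2) out.arg_array = stack.at(2);
  // The builtin then drops all pushed arguments, pushes this_arg back as the
  // new receiver slot, and either calls the target with zero arguments (when
  // arg_array is null/undefined) or tail-calls CallWithArrayLike, which also
  // performs the callable check that used to live here.
  return out;
}
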
// static
@@ -1940,19 +2026,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // 1. Load target into r4 (if present), argumentsList into r5 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
Label skip;
- Register arg_size = r5;
+ Register arg_size = r8;
Register new_sp = r6;
Register scratch = r7;
__ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
__ add(new_sp, sp, arg_size);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mr(scratch, r4);
- __ mr(r3, r4);
+ __ mr(r5, r4);
__ cmpi(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
@@ -1960,37 +2046,25 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
__ cmpi(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
- __ LoadP(r3, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
__ bind(&skip);
__ mr(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r3 : argumentsList
+ // -- r5 : argumentsList
// -- r4 : target
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(r4, &target_not_callable);
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsCallable, r0);
- __ beq(&target_not_callable, cr0);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ StoreP(r4, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -2002,18 +2076,18 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // 1. Load target into r4 (if present), argumentsList into r5 (if present),
// new.target into r6 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
Label skip;
- Register arg_size = r5;
+ Register arg_size = r8;
Register new_sp = r7;
__ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
__ add(new_sp, sp, arg_size);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ mr(r3, r4);
+ __ mr(r5, r4);
__ mr(r6, r4);
__ StoreP(r4, MemOperand(new_sp, 0)); // receiver (undefined)
__ cmpi(arg_size, Operand(kPointerSize));
@@ -2021,7 +2095,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
__ mr(r6, r4); // new.target defaults to target
__ beq(&skip);
- __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
+ __ LoadP(r5, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
__ cmpi(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
__ LoadP(r6, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
@@ -2030,44 +2104,23 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- r3 : argumentsList
+ // -- r5 : argumentsList
// -- r6 : new.target
// -- r4 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(r4, &target_not_constructor);
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsConstructor, r0);
- __ beq(&target_not_constructor, cr0);
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(r6, &new_target_not_constructor);
- __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsConstructor, r0);
- __ beq(&new_target_not_constructor, cr0);
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ StoreP(r4, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ StoreP(r6, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2099,99 +2152,17 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r3 : argumentsList
- // -- r4 : target
- // -- r6 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- r4 : target
+ // -- r3 : number of parameters on the stack (not including the receiver)
+ // -- r5 : arguments list (a FixedArray)
+ // -- r7 : len (number of elements to push from args)
+ // -- r6 : new.target (for [[Construct]])
// -----------------------------------
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(r3, &create_runtime);
-
- // Load the map of argumentsList into r5.
- __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
-
- // Load native context into r7.
- __ LoadP(r7, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ LoadP(ip, ContextMemOperand(r7, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ cmp(ip, r5);
- __ beq(&create_arguments);
- __ LoadP(ip, ContextMemOperand(r7, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ cmp(ip, r5);
- __ beq(&create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(r5, ip, JS_ARRAY_TYPE);
- __ beq(&create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4, r6, r3);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(r4, r6);
- __ LoadP(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ SmiUntag(r5);
- }
- __ b(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ LoadP(r5, FieldMemOperand(r3, JSArgumentsObject::kLengthOffset));
- __ LoadP(r7, FieldMemOperand(r3, JSObject::kElementsOffset));
- __ LoadP(ip, FieldMemOperand(r7, FixedArray::kLengthOffset));
- __ cmp(r5, ip);
- __ bne(&create_runtime);
- __ SmiUntag(r5);
- __ mr(r3, r7);
- __ b(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ LoadP(r5, FieldMemOperand(r5, Map::kPrototypeOffset));
- __ LoadP(r7, ContextMemOperand(r7, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ cmp(r5, r7);
- __ bne(&create_runtime);
- __ LoadRoot(r7, Heap::kArrayProtectorRootIndex);
- __ LoadP(r5, FieldMemOperand(r7, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(r5, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&create_runtime);
- __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
- __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ SmiUntag(r5);
- __ b(&done_create);
-
- // Try to create the list from a JSArray object.
- // -- r5 and r7 must be preserved till bne create_holey_array.
- __ bind(&create_array);
- __ lbz(r8, FieldMemOperand(r5, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r8);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmpi(r8, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&create_runtime);
- // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
- __ TestBit(r8, Map::kHasNonInstancePrototype, r0);
- __ bne(&create_holey_array, cr0);
- // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
- __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
- __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ SmiUntag(r5);
-
- __ bind(&done_create);
- }
-
+ __ AssertFixedArray(r5);
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2202,53 +2173,40 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// here which will cause ip to become negative.
__ sub(ip, sp, ip);
// Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r0, r7, Operand(kPointerSizeLog2));
__ cmp(ip, r0); // Signed comparison.
__ bgt(&done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- r4 : target
- // -- r3 : args (a FixedArray built from argumentsList)
- // -- r5 : len (number of elements to push from args)
- // -- r6 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
Label loop, no_args, skip;
- __ cmpi(r5, Operand::Zero());
+ __ cmpi(r7, Operand::Zero());
__ beq(&no_args);
- __ addi(r3, r3,
+ __ addi(r5, r5,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ mtctr(r5);
+ __ mtctr(r7);
__ bind(&loop);
- __ LoadPU(ip, MemOperand(r3, kPointerSize));
+ __ LoadPU(ip, MemOperand(r5, kPointerSize));
__ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
__ bne(&skip);
- __ mr(ip, r9);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ bind(&skip);
__ push(ip);
__ bdnz(&loop);
__ bind(&no_args);
- __ mr(r3, r5);
+ __ add(r3, r3, r7);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(r6, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
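
CallOrConstructVarargs receives an already-materialized FixedArray, so all that remains is the overflow check and the push loop that turns holes into undefined and adds the element count to argc. A sketch under those assumptions (Value/kTheHole are stand-ins, and the stack is a std::vector):

#include <stdexcept>
#include <string>
#include <vector>

using Value = std::string;
static const Value kTheHole = "the_hole";
static const Value kUndefined = "undefined";

// Push the elements of the arguments FixedArray, replacing holes (from sparse
// arrays) with undefined, and fold the pushed count into the argument count
// before the tail call to Call or Construct.
std::size_t PushVarargs(std::vector<Value>& stack, std::size_t argc,
                        const std::vector<Value>& args, std::size_t stack_limit) {
  if (stack.size() + args.size() > stack_limit) {
    throw std::runtime_error("stack overflow");  // Runtime::kThrowStackOverflow
  }
  for (const Value& element : args) {
    stack.push_back(element == kTheHole ? kUndefined : element);
  }
  return argc + args.size();  // __ add(r3, r3, r7)
}
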
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r6 : the new.target (for [[Construct]] calls)
@@ -2275,16 +2233,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Load the length from the ArgumentsAdaptorFrame.
__ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
-#if V8_TARGET_ARCH_PPC64
__ SmiUntag(r8);
-#endif
}
__ bind(&arguments_done);
Label stack_done, stack_overflow;
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(r8);
-#endif
__ sub(r8, r8, r5);
__ cmpi(r8, Operand::Zero());
__ ble(&stack_done);
@@ -2317,107 +2270,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
- __ lbz(scratch1, MemOperand(scratch1));
- __ cmpi(scratch1, Operand::Zero());
- __ beq(&done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ LoadP(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmpi(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ bne(&no_interpreter_frame);
- __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(
- scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmpi(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mr(fp, scratch2);
- __ LoadP(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ LoadP(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ LoadP(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(caller_args_count_reg);
-#endif
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSFunction)
@@ -2429,9 +2284,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
__ bne(&class_constructor, cr0);
// Enter the context of the function; ToObject has to run in the function
@@ -2440,8 +2293,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ andi(r0, r6, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
- (1 << SharedFunctionInfo::kNativeBit)));
+ __ andi(r0, r6,
+ Operand(SharedFunctionInfo::IsStrictBit::kMask |
+ SharedFunctionInfo::IsNativeBit::kMask));
__ bne(&done_convert, cr0);
{
// ----------- S t a t e -------------
@@ -2506,15 +2360,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r3, r6, r7, r8);
- }
-
__ LoadWordArith(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(r5);
-#endif
ParameterCount actual(r3);
ParameterCount expected(r5);
__ InvokeFunctionCode(r4, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2612,18 +2459,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r4);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r3, r6, r7, r8);
- }
-
// Patch the receiver to [[BoundThis]].
__ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
@@ -2643,8 +2485,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the target to call (can be any Object).
@@ -2654,10 +2495,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(r4, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
__ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
@@ -2665,22 +2506,14 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ TestBit(r7, Map::kIsCallable, r0);
__ beq(&non_callable, cr0);
+ // Check if target is a proxy and call CallProxy external builtin
__ cmpi(r8, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r3, r6, r7, r8);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(r4);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ addi(r3, r3, Operand(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ mov(r8, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ LoadP(r8, MemOperand(r8));
+ __ addi(r8, r8, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r8);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2691,7 +2524,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2703,156 +2536,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
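
The Call dispatch is unchanged except for proxies, which now jump to the CallProxy builtin instead of falling back to Runtime::kJSProxyCall. A small enum-based model of the dispatch order (the enum names are invented for illustration):

enum class TargetKind {
  kJSFunction, kBoundFunction, kProxy, kOtherCallable, kNotCallable
};

enum class CallTarget {
  kCallFunction,            // Builtins::CallFunction(mode)
  kCallBoundFunction,       // Builtins::CallBoundFunction
  kCallProxy,               // the new CallProxy builtin replaces Runtime::kJSProxyCall
  kCallAsFunctionDelegate,  // CallFunction on the call-as-function delegate
  kThrowCalledNonCallable
};

// Fast paths for JSFunction and bound functions come first, then the generic
// callable check, then the proxy path, and finally the delegate for anything
// else that still has a [[Call]] internal method.
CallTarget DispatchCall(TargetKind kind) {
  switch (kind) {
    case TargetKind::kJSFunction:    return CallTarget::kCallFunction;
    case TargetKind::kBoundFunction: return CallTarget::kCallBoundFunction;
    case TargetKind::kProxy:         return CallTarget::kCallProxy;
    case TargetKind::kOtherCallable: return CallTarget::kCallAsFunctionDelegate;
    case TargetKind::kNotCallable:   return CallTarget::kThrowCalledNonCallable;
  }
  return CallTarget::kThrowCalledNonCallable;
}
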
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = r3;
- Register constructor = r4;
- Register new_target = r6;
-
- Register scratch = r5;
- Register scratch2 = r9;
-
- Register spread = r7;
- Register spread_map = r8;
- Register spread_len = r8;
- Label runtime_call, push_args;
- __ LoadP(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
- __ bne(&runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch2,
- ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ cmp(scratch, scratch2);
- __ bne(&runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch,
- ContextMemOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ LoadP(scratch2,
- ContextMemOperand(
- scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ cmp(scratch, scratch2);
- __ bne(&runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ lbz(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ cmpi(scratch, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmpi(scratch, Operand(FAST_SMI_ELEMENTS));
- __ beq(&no_protector_check);
- __ cmpi(scratch, Operand(FAST_ELEMENTS));
- __ beq(&no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ b(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mr(spread, r3);
- __ Pop(constructor, new_target, argc);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ add(argc, argc, spread_len);
- __ subi(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, spread_len, Operand(kPointerSizeLog2));
- __ cmp(scratch, r0);
- __ bgt(&done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ li(scratch, Operand::Zero());
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(scratch, spread_len);
- __ beq(&done);
- __ ShiftLeftImm(r0, scratch, Operand(kPointerSizeLog2));
- __ add(scratch2, spread, r0);
- __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ addi(scratch, scratch, Operand(1));
- __ b(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
- // -- r4 : the constructor to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push r6 to save it.
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2973,18 +2656,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
- // -- r4 : the constructor to call (can be any Object)
- // -- r6 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 2148f11105..f6bd0af3bf 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -226,7 +226,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
@@ -376,7 +376,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
@@ -424,22 +424,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ JumpToJSEntry(ip);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
- __ bge(&ok, Label::kNear);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -543,16 +527,13 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6,
- FunctionKind::kDerivedConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r6, SharedFunctionInfo::kDerivedConstructorMask, r0);
__ bne(&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
r6, r7);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ b(&post_instantiation_deopt_entry);
@@ -669,10 +650,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6,
- FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
__ beq(&use_receiver);
} else {
__ b(&use_receiver);
@@ -726,35 +704,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- r2 : the value to pass to the generator
// -- r3 : the JSGeneratorObject to resume
// -- r4 : the resume mode (tagged)
- // -- r5 : the SuspendFlags of the earlier suspend call (tagged)
// -- lr : return address
// -----------------------------------
- __ SmiUntag(r5);
- __ AssertGeneratorObject(r3, r5);
+ __ AssertGeneratorObject(r3);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ tmll(r5, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ b(Condition(1), &async_await);
-
__ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
r0);
__ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&async_await);
- __ StoreP(
- r2,
- FieldMemOperand(r3, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset),
- r0);
- __ RecordWriteField(r3, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- r2, r5, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&done_store_input);
- // `r5` no longer holds SuspendFlags
// Store resume mode into generator object.
__ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
@@ -811,7 +769,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CmpP(r2, Operand::Zero());
__ beq(&done_loop);
#else
- __ SmiUntag(r2);
__ LoadAndTestP(r2, r2);
__ beq(&done_loop);
#endif
@@ -913,7 +870,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
@@ -1036,6 +993,118 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ AddP(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ bne(&no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee if needed, and caller)
+ // -- r5 : new target (preserved for callee if needed, and caller)
+ // -- r3 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = r3;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimization marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ CmpSmiLiteral(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone), r0);
+ __ beq(&fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ CmpSmiLiteral(
+ optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+ __ bge(&fallthrough, Label::kNear);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadW(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
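
MaybeTailCallOptimizedCodeSlot folds the old InOptimizationQueue, CompileOptimized* and switch_to_optimized_code paths into one dispatcher over the feedback vector's optimized-code slot. A hedged sketch of that dispatch in standard C++, with std::variant modelling "Smi marker or weak cell" and std::optional modelling a cleared cell:

#include <optional>
#include <variant>

enum class Marker {
  kNone, kCompileOptimized, kCompileOptimizedConcurrent, kInOptimizationQueue
};
struct OptimizedCode { bool marked_for_deoptimization; };
using OptimizedCodeSlot = std::variant<Marker, std::optional<OptimizedCode>>;

enum class Action {
  kFallThrough,                        // no code, no marker: keep interpreting
  kRuntimeCompileOptimized,            // Runtime::kCompileOptimized_NotConcurrent
  kRuntimeCompileOptimizedConcurrent,  // Runtime::kCompileOptimized_Concurrent
  kTryInstallOptimizedCode,            // InOptimizationQueue, near the stack limit
  kTailCallOptimizedCode,              // install on the closure and jump
  kRuntimeEvictOptimizedCodeSlot       // code is marked for deoptimization
};

Action DispatchOptimizedCodeSlot(const OptimizedCodeSlot& slot,
                                 bool near_stack_limit) {
  if (const Marker* marker = std::get_if<Marker>(&slot)) {
    switch (*marker) {
      case Marker::kNone:             return Action::kFallThrough;
      case Marker::kCompileOptimized: return Action::kRuntimeCompileOptimized;
      case Marker::kCompileOptimizedConcurrent:
        return Action::kRuntimeCompileOptimizedConcurrent;
      case Marker::kInOptimizationQueue:
        // Only bother installing when the stack limit signals a pending
        // interrupt anyway; otherwise fall through and keep going.
        return near_stack_limit ? Action::kTryInstallOptimizedCode
                                : Action::kFallThrough;
    }
  }
  const auto& weak_code = std::get<std::optional<OptimizedCode>>(slot);
  if (!weak_code) return Action::kFallThrough;  // the WeakCell was cleared
  if (weak_code->marked_for_deoptimization) {
    return Action::kRuntimeEvictOptimizedCodeSlot;
  }
  return Action::kTailCallOptimizedCode;
}
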
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1055,43 +1124,35 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = r3;
+ Register feedback_vector = r4;
+
+ // Load the feedback vector from the closure.
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r3);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
-
- Register optimized_code_entry = r6;
- __ LoadP(r2, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r2, FieldMemOperand(r2, Cell::kValueOffset));
- __ LoadP(
- optimized_code_entry,
- FieldMemOperand(r2, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- Label array_done;
- Register debug_info = r4;
- DCHECK(!debug_info.is(r2));
- __ LoadP(debug_info,
- FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
- __ TestIfSmi(debug_info);
- __ beq(&array_done);
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
- __ bind(&array_done);
+ __ LoadP(r6, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+ __ TestIfSmi(r6);
+ __ bne(&maybe_load_debug_bytecode_array);
+ __ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
// TODO(rmcilroy) Remove self healing once liveedit only has to deal with
@@ -1102,15 +1163,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&switch_to_different_code_kind);
// Increment invocation count for the function.
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
- __ LoadP(r1, FieldMemOperand(
- r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(
+ r1, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
- __ StoreP(r1, FieldMemOperand(
- r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ StoreP(
+ r1, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1184,40 +1245,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r4);
__ Ret();
+ // Load debug copy of the bytecode array if it exists.
+  // kInterpreterBytecodeArrayRegister already holds the bytecode array
+  // loaded from SharedFunctionInfo::kFunctionDataOffset.
+ Label done;
+ __ bind(&maybe_load_debug_bytecode_array);
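+  // r6 still holds the DebugInfo; only switch to its debug bytecode array
+  // when break info is present.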
+ __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
+ __ SmiUntag(ip);
+ __ tmll(ip, Operand(DebugInfo::kHasBreakInfo));
+ __ beq(&done);
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
+ __ bind(&done);
+ __ b(&bytecode_array_loaded);
+
// If the shared code is no longer this entry trampoline, then the underlying
// function has been switched to a different kind of code and we heal the
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
__ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
- __ RecordWriteCodeEntryField(r3, r6, r7);
+ __ StoreP(r6, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, r6, r7);
__ JumpToJSEntry(r6);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ LoadlW(r7, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ bne(&gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r3, r8, r7,
- r4);
- __ JumpToJSEntry(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1253,7 +1305,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r4 : the address of the first argument to be pushed. Subsequent
@@ -1275,18 +1327,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments.
Generate_InterpreterPushArgs(masm, r5, r4, r5, r6);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r4); // Pass the spread in a register
+ __ SubP(r2, r2, Operand(1)); // Subtract one for spread
+ }
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1322,7 +1377,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Generate_InterpreterPushArgs(masm, r2, r6, r2, r7);
__ bind(&skip);
- __ AssertUndefinedOrAllocationSite(r4, r7);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r4); // Pass the spread in a register
+ __ SubP(r2, r2, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(r4, r7);
+ }
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r3);
@@ -1451,6 +1511,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -- r4 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = r3;
+
+ // Get the feedback vector.
+ Register feedback_vector = r4;
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1459,43 +1547,25 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
  // First look up code; maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = r3;
- Register index = r4;
+ Register feedback_vector = r4;
// Do we have a valid feedback vector?
- __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = r6;
- __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ LoadlW(r7, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ bne(&gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r8, r7, r4);
- __ JumpToJSEntry(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = r6;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ LoadlB(r7, FieldMemOperand(
- entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
- __ bne(&gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1513,15 +1583,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1668,30 +1729,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Preserve possible return result from lazy deopt.
+ __ push(r2);
    // Notify the runtime of the stub failure (no arguments are passed).
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(r2);
+ }
+
+ __ AddP(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ Ret(); // Jump to ContinueToBuiltin stub
+}
+
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ StoreP(
+ r2, MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
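+  // Restore the allocatable general registers saved in the builtin
+  // continuation frame; for JavaScript builtins the argument count register
+  // holds a Smi and must be untagged.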
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
}
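+  // Tear down the continuation frame: restore fp, load the builtin's Code
+  // object into ip, restore the return address into r14, and jump past the
+  // Code header to the builtin's entry point.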
+ __ LoadP(
+ fp,
+ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ip);
+ __ AddP(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(r0);
+ __ LoadRR(r14, r0);
+ __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+}
+} // namespace
- __ la(sp, MemOperand(sp, kPointerSize)); // Ignore state
- __ Ret(); // Jump to miss handler
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
}
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1811,52 +1912,47 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : receiver
// -----------------------------------
- // 1. Load receiver into r3, argArray into r2 (if present), remove all
+ // 1. Load receiver into r3, argArray into r4 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
Label skip;
- Register arg_size = r4;
+ Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ LoadRR(scratch, r2);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRR(r4, scratch);
__ LoadP(r3, MemOperand(new_sp, 0)); // receiver
__ CmpP(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
__ beq(&skip);
- __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r2 : argArray
+ // -- r4 : argArray
// -- r3 : receiver
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(r3, &receiver_not_callable);
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsCallable);
- __ beq(&receiver_not_callable);
+  // 2. We don't need to check explicitly for a callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r4, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r4, Heap::kUndefinedValueRootIndex, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1865,13 +1961,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadImmP(r2, Operand::Zero());
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1927,19 +2016,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+ // 1. Load target into r3 (if present), argumentsList into r4 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
Label skip;
- Register arg_size = r4;
+ Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ LoadRR(scratch, r3);
- __ LoadRR(r2, r3);
+ __ LoadRR(r4, r3);
__ CmpP(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
@@ -1947,37 +2036,25 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
- __ LoadP(r2, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ LoadP(r4, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r2 : argumentsList
+ // -- r4 : argumentsList
// -- r3 : target
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(r3, &target_not_callable);
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsCallable);
- __ beq(&target_not_callable);
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+  // 2. We don't need to check explicitly for a callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+  // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1989,18 +2066,18 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+ // 1. Load target into r3 (if present), argumentsList into r4 (if present),
// new.target into r5 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
Label skip;
- Register arg_size = r4;
+ Register arg_size = r7;
Register new_sp = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ LoadRR(r2, r3);
+ __ LoadRR(r4, r3);
__ LoadRR(r5, r3);
__ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined)
__ CmpP(arg_size, Operand(kPointerSize));
@@ -2008,7 +2085,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
__ LoadRR(r5, r3); // new.target defaults to target
__ beq(&skip);
- __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
+ __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
__ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
@@ -2017,44 +2094,23 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- r2 : argumentsList
+ // -- r4 : argumentsList
// -- r5 : new.target
// -- r3 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(r3, &target_not_constructor);
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsConstructor);
- __ beq(&target_not_constructor);
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(r5, &new_target_not_constructor);
- __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsConstructor);
- __ beq(&new_target_not_constructor);
+  // 2. We don't need to check explicitly that the target is a constructor here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+  // 3. We don't need to check explicitly that new.target is a constructor here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ StoreP(r5, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2095,99 +2151,17 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r2 : argumentsList
- // -- r3 : target
- // -- r5 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- r3 : target
+ // -- r2 : number of parameters on the stack (not including the receiver)
+ // -- r4 : arguments list (a FixedArray)
+ // -- r6 : len (number of elements to push from args)
+ // -- r5 : new.target (for [[Construct]])
// -----------------------------------
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(r2, &create_runtime);
-
- // Load the map of argumentsList into r4.
- __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
-
- // Load native context into r6.
- __ LoadP(r6, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ LoadP(ip, ContextMemOperand(r6, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ CmpP(ip, r4);
- __ beq(&create_arguments);
- __ LoadP(ip, ContextMemOperand(r6, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ CmpP(ip, r4);
- __ beq(&create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(r4, ip, JS_ARRAY_TYPE);
- __ beq(&create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3, r5, r2);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(r3, r5);
- __ LoadP(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ SmiUntag(r4);
- }
- __ b(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ LoadP(r4, FieldMemOperand(r2, JSArgumentsObject::kLengthOffset));
- __ LoadP(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ LoadP(ip, FieldMemOperand(r6, FixedArray::kLengthOffset));
- __ CmpP(r4, ip);
- __ bne(&create_runtime);
- __ SmiUntag(r4);
- __ LoadRR(r2, r6);
- __ b(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ LoadP(r4, FieldMemOperand(r4, Map::kPrototypeOffset));
- __ LoadP(r6, ContextMemOperand(r6, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ CmpP(r4, r6);
- __ bne(&create_runtime);
- __ LoadRoot(r6, Heap::kArrayProtectorRootIndex);
- __ LoadP(r4, FieldMemOperand(r6, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(r4, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&create_runtime);
- __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
- __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
- __ SmiUntag(r4);
- __ b(&done_create);
-
- // Try to create the list from a JSArray object.
- // -- r4 and r6 must be preserved till bne create_holey_array.
- __ bind(&create_array);
- __ LoadlB(r7, FieldMemOperand(r4, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r7);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ CmpP(r7, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&create_runtime);
- // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
- __ TestBit(r7, Map::kHasNonInstancePrototype, r0);
- __ bne(&create_holey_array);
- // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
- __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
- __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
- __ SmiUntag(r4);
-
- __ bind(&done_create);
- }
-
+ __ AssertFixedArray(r4);
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2198,54 +2172,41 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// here which will cause ip to become negative.
__ SubP(ip, sp, ip);
// Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r0, r6, Operand(kPointerSizeLog2));
__ CmpP(ip, r0); // Signed comparison.
__ bgt(&done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- r3 : target
- // -- r2 : args (a FixedArray built from argumentsList)
- // -- r4 : len (number of elements to push from args)
- // -- r5 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
Label loop, no_args, skip;
- __ CmpP(r4, Operand::Zero());
+ __ CmpP(r6, Operand::Zero());
__ beq(&no_args);
- __ AddP(r2, r2,
+ __ AddP(r4, r4,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ LoadRR(r1, r4);
+ __ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(ip, MemOperand(r2, kPointerSize));
- __ la(r2, MemOperand(r2, kPointerSize));
+ __ LoadP(ip, MemOperand(r4, kPointerSize));
+ __ la(r4, MemOperand(r4, kPointerSize));
__ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
__ bne(&skip, Label::kNear);
- __ LoadRR(ip, r8);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ bind(&skip);
__ push(ip);
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
- __ LoadRR(r2, r4);
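+    // Include the pushed elements in the argument count (argc += len).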
+ __ AddP(r2, r2, r6);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r5 : the new.target (for [[Construct]] calls)
@@ -2271,16 +2232,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Load the length from the ArgumentsAdaptorFrame.
__ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
-#if V8_TARGET_ARCH_S390X
__ SmiUntag(r7);
-#endif
}
__ bind(&arguments_done);
Label stack_done, stack_overflow;
-#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(r7);
-#endif
__ SubP(r7, r7, r4);
__ CmpP(r7, Operand::Zero());
__ ble(&stack_done);
@@ -2313,106 +2269,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is active.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
- __ LoadlB(scratch1, MemOperand(scratch1));
- __ CmpP(scratch1, Operand::Zero());
- __ beq(&done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ LoadP(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpP(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ bne(&no_interpreter_frame);
- __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(
- scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpP(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ LoadRR(fp, scratch2);
- __ LoadP(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ LoadP(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ LoadP(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(caller_args_count_reg);
-#endif
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSFunction)
@@ -2424,9 +2283,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r5, FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorMask, r0);
__ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2435,8 +2292,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ AndP(r0, r5, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
- (1 << SharedFunctionInfo::kNativeBit)));
+ __ AndP(r0, r5,
+ Operand(SharedFunctionInfo::IsStrictBit::kMask |
+ SharedFunctionInfo::IsNativeBit::kMask));
__ bne(&done_convert);
{
// ----------- S t a t e -------------
@@ -2501,15 +2359,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r2, r5, r6, r7);
- }
-
__ LoadW(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(r4);
-#endif
ParameterCount actual(r2);
ParameterCount expected(r4);
__ InvokeFunctionCode(r3, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2609,18 +2460,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r3);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r2, r5, r6, r7);
- }
-
// Patch the receiver to [[BoundThis]].
__ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
@@ -2640,8 +2486,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the target to call (can be any Object).
@@ -2651,10 +2496,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(r3, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
__ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
@@ -2662,22 +2507,14 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ TestBit(r6, Map::kIsCallable);
__ beq(&non_callable);
+  // If the target is a JSProxy, tail call the external CallProxy builtin.
__ CmpP(r7, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r2, r5, r6, r7);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(r3);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ AddP(r2, r2, Operand(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
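+  // Load the CallProxy builtin's code object through its external reference
+  // and jump to its entry point.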
+ __ mov(r7, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ LoadP(r7, MemOperand(r7));
+ __ AddP(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r7);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2688,7 +2525,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2700,156 +2537,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = r2;
- Register constructor = r3;
- Register new_target = r5;
-
- Register scratch = r4;
- Register scratch2 = r8;
-
- Register spread = r6;
- Register spread_map = r7;
- Register spread_len = r7;
- Label runtime_call, push_args;
- __ LoadP(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
- __ bne(&runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch2,
- ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ CmpP(scratch, scratch2);
- __ bne(&runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch,
- ContextMemOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ LoadP(scratch2,
- ContextMemOperand(
- scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ CmpP(scratch, scratch2);
- __ bne(&runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ LoadlB(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ CmpP(scratch, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ CmpP(scratch, Operand(FAST_SMI_ELEMENTS));
- __ beq(&no_protector_check);
- __ CmpP(scratch, Operand(FAST_ELEMENTS));
- __ beq(&no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ b(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ LoadRR(spread, r2);
- __ Pop(constructor, new_target, argc);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ AddP(argc, argc, spread_len);
- __ SubP(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ SubP(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, spread_len, Operand(kPointerSizeLog2));
- __ CmpP(scratch, r0);
- __ bgt(&done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ LoadImmP(scratch, Operand::Zero());
- Label done, push, loop;
- __ bind(&loop);
- __ CmpP(scratch, spread_len);
- __ beq(&done);
- __ ShiftLeftP(r0, scratch, Operand(kPointerSizeLog2));
- __ AddP(scratch2, spread, r0);
- __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ AddP(scratch, scratch, Operand(1));
- __ b(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
- // -- r3 : the constructor to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push r5 to save it.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2970,18 +2657,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
- // -- r3 : the constructor to call (can be any Object)
- // -- r5 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index ca88e6332b..a191bcadf5 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -41,13 +41,16 @@ Code* BuildWithMacroAssembler(Isolate* isolate,
MacroAssemblerGenerator generator,
Code::Flags flags, const char* s_name) {
HandleScope scope(isolate);
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate);
const size_t buffer_size = 32 * KB;
byte buffer[buffer_size]; // NOLINT(runtime/arrays)
MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
DCHECK(!masm.has_frame());
generator(&masm);
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm.CodeObject());
PostBuildProfileAndTracing(isolate, *code, s_name);
@@ -58,13 +61,16 @@ Code* BuildAdaptor(Isolate* isolate, Address builtin_address,
Builtins::ExitFrameType exit_frame_type, Code::Flags flags,
const char* name) {
HandleScope scope(isolate);
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate);
const size_t buffer_size = 32 * KB;
byte buffer[buffer_size]; // NOLINT(runtime/arrays)
MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
DCHECK(!masm.has_frame());
Builtins::Generate_Adaptor(&masm, builtin_address, exit_frame_type);
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm.CodeObject());
PostBuildProfileAndTracing(isolate, *code, name);
@@ -76,6 +82,9 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
CodeAssemblerGenerator generator, int argc,
Code::Flags flags, const char* name) {
HandleScope scope(isolate);
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
@@ -94,6 +103,9 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
Code::Flags flags, const char* name,
int result_size) {
HandleScope scope(isolate);
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME);
// The interface descriptor with given key must be initialized at this point
// and this construction just queries the details from the descriptors table.
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index d4fb131afc..bedfcfc59c 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -98,22 +98,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ jmp(rbx);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -215,13 +199,13 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
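+  // The derived-constructor bit now lives in the 32-bit compiler hints field.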
+ __ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kDerivedConstructorMask));
__ j(not_zero, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ jmp(&post_instantiation_deopt_entry, Label::kNear);
@@ -328,16 +312,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(above_equal, &leave_frame, Label::kNear);
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
+ __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+ __ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorMask));
+
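+  // The flags from the class-constructor test above are consumed by both
+  // branches below.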
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
- __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ j(Condition::zero, &use_receiver, Label::kNear);
} else {
+ __ j(not_zero, &use_receiver, Label::kNear);
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ jmp(&use_receiver, Label::kNear);
}
@@ -452,7 +440,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ movp(rsi, masm->ExternalOperand(context_address));
@@ -489,7 +477,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ movp(rsi, masm->ExternalOperand(context_address));
@@ -563,34 +551,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- rax : the value to pass to the generator
// -- rbx : the JSGeneratorObject to resume
// -- rdx : the resume mode (tagged)
- // -- rcx : the SuspendFlags of the earlier suspend call (tagged)
// -- rsp[0] : return address
// -----------------------------------
- // Untag rcx
- __ shrq(rcx, Immediate(kSmiTagSize + kSmiShiftSize));
- __ AssertGeneratorObject(rbx, rcx);
+ __ AssertGeneratorObject(rbx);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ andb(rcx, Immediate(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ cmpb(rcx, Immediate(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ j(equal, &async_await);
-
__ movp(FieldOperand(rbx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
__ RecordWriteField(rbx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
kDontSaveFPRegs);
- __ j(always, &done_store_input, Label::kNear);
-
- __ bind(&async_await);
- __ movp(
- FieldOperand(rbx, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset),
- rax);
- __ RecordWriteField(rbx, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- rax, rcx, kDontSaveFPRegs);
-
- __ bind(&done_store_input);
- // `rcx` no longer holds SuspendFlags
// Store resume mode into generator object.
__ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
@@ -637,8 +605,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// values have already been copied into the context and these dummy values
// will never be used.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movl(rcx,
+ FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
{
Label done_loop, loop;
__ bind(&loop);
@@ -661,8 +629,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
__ PushReturnAddressFrom(rax);
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movsxlq(rax, FieldOperand(
+ rax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -747,6 +715,117 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ PushReturnAddressFrom(return_pc);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ SmiCompare(smi_entry, Smi::FromEnum(marker));
+ __ j(not_equal, &no_match, Label::kNear);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- rax : argument count (preserved for callee if needed, and caller)
+ // -- rdx : new target (preserved for callee if needed, and caller)
+ // -- rdi : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, rax, rdx, rdi, scratch1, scratch2,
+ scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = rdi;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ movp(optimized_code_entry,
+ FieldOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+  // optimization marker. Otherwise, we interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ SmiCompare(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone));
+ __ j(equal, &fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ SmiCompare(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
+ __ Assert(equal, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ movp(optimized_code_entry,
+ FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ testl(
+ FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ jmp(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+  // Fall through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -764,6 +843,17 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = rdi;
+ Register feedback_vector = rbx;
+
+ // Load the feedback vector from the closure.
+ __ movp(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@@ -774,26 +864,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rdi); // Callee's JS function.
__ Push(rdx); // Callee's new target.
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = rcx;
- __ movp(rbx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
- __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
- __ movp(rbx,
- FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ movp(optimized_code_entry, FieldOperand(rbx, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
-
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- __ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
- &load_debug_bytecode_array);
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+ __ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
+ &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -805,11 +883,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
- __ movp(rcx, FieldOperand(rcx, Cell::kValueOffset));
__ SmiAddConstant(
- FieldOperand(rcx, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
+ FieldOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize),
Smi::FromInt(1));
// Check function data field is actually a BytecodeArray object.
@@ -881,12 +958,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
- Register debug_info = kInterpreterBytecodeArrayRegister;
- __ movp(debug_info, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
+ // Load the debug copy of the bytecode array if one exists.
+ // kInterpreterBytecodeArrayRegister already holds the value loaded from
+ // SharedFunctionInfo::kFunctionDataOffset.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ movp(rcx, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
+ __ SmiToInteger32(kScratchRegister,
+ FieldOperand(rcx, DebugInfo::kFlagsOffset));
+ __ testl(kScratchRegister, Immediate(DebugInfo::kHasBreakInfo));
+ __ j(zero, &bytecode_array_loaded);
__ movp(kInterpreterBytecodeArrayRegister,
- FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldOperand(rcx, DebugInfo::kDebugBytecodeArrayOffset));
__ jmp(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -900,28 +982,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
__ RecordWriteCodeEntryField(rdi, rcx, r15);
__ jmp(rcx);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ leave();
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ testl(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, rdi, r14,
- r15, rbx);
- __ jmp(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
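// A small stand-alone model of the bytecode selection done at the top of
// Generate_InterpreterEntryTrampoline above: the debug copy of the bytecode is
// used only when the DebugInfo actually carries break info. The struct layout
// and flag value here are stand-ins for illustration, not V8's real ones.
#include <cstdio>

struct DebugInfoModel {
  unsigned flags;
  static constexpr unsigned kHasBreakInfo = 1;  // assumed bit for the sketch
  const char* debug_bytecode;  // stands in for DebugInfo::kDebugBytecodeArrayOffset
};

struct SharedFunctionInfoModel {
  const char* function_data;         // SharedFunctionInfo::kFunctionDataOffset
  const DebugInfoModel* debug_info;  // a Smi (absent) is modeled as nullptr
};

const char* SelectBytecodeArray(const SharedFunctionInfoModel& sfi) {
  if (sfi.debug_info != nullptr &&
      (sfi.debug_info->flags & DebugInfoModel::kHasBreakInfo) != 0) {
    return sfi.debug_info->debug_bytecode;
  }
  return sfi.function_data;
}

int main() {
  DebugInfoModel di{0, "debug bytecode"};
  SharedFunctionInfoModel sfi{"regular bytecode", &di};
  std::printf("%s\n", SelectBytecodeArray(sfi));  // prints "regular bytecode"
  return 0;
}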
static void Generate_StackOverflowCheck(
@@ -967,7 +1027,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rbx : the address of the first argument to be pushed. Subsequent
@@ -996,18 +1056,22 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// rbx and rdx will be modified.
Generate_InterpreterPushArgs(masm, rcx, rbx, rdx);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(rbx); // Pass the spread in a register
+ __ subp(rax, Immediate(1)); // Subtract one for spread
+ }
+
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(receiver_mode,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(receiver_mode),
RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(receiver_mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(receiver_mode),
RelocInfo::CODE_TARGET);
}
@@ -1047,10 +1111,17 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// rcx and r8 will be modified.
Generate_InterpreterPushArgs(masm, rax, rcx, r8);
- // Push return address in preparation for the tail-call.
- __ PushReturnAddressFrom(kScratchRegister);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(rbx); // Pass the spread in a register
+ __ subp(rax, Immediate(1)); // Subtract one for spread
+
+ // Push return address in preparation for the tail-call.
+ __ PushReturnAddressFrom(kScratchRegister);
+ } else {
+ __ PushReturnAddressFrom(kScratchRegister);
+ __ AssertUndefinedOrAllocationSite(rbx);
+ }
- __ AssertUndefinedOrAllocationSite(rbx);
if (mode == InterpreterPushArgsMode::kJSFunction) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -1193,6 +1264,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argument count (preserved for callee)
+ // -- rdx : new target (preserved for callee)
+ // -- rdi : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = rdi;
+
+ // Get the feedback vector.
+ Register feedback_vector = rbx;
+ __ movp(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1201,40 +1299,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = rdi;
+ Register feedback_vector = rbx;
// Do we have a valid feedback vector?
- __ movp(rbx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
- __ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ movp(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = rcx;
- __ movp(entry,
- FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ testl(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r14, r15, rbx);
- __ jmp(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = rcx;
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
- Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ j(not_zero, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1252,15 +1333,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1398,31 +1470,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ Popad();
+ // Preserve possible return result from lazy deopt.
+ __ pushq(rax);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ popq(rax);
// Tear down internal frame.
}
__ DropUnderReturnAddress(1); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
+ __ ret(0); // Return to ContinueToBuiltin stub still on stack.
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ movq(Operand(rsp,
+ config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize),
+ rax);
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ popq(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiToInteger32(Register::from_code(code), Register::from_code(code));
+ }
+ }
+ __ movq(
+ rbp,
+ Operand(rsp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ const int offsetToPC =
+ BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp - kPointerSize;
+ __ popq(Operand(rsp, offsetToPC));
+ __ Drop(offsetToPC / kPointerSize);
+ __ addq(Operand(rsp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ Ret();
}
+} // namespace
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
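// The continuation helpers above unspill registers and rewrite the frame with
// a little pointer arithmetic. A rough stand-alone sketch of that arithmetic
// follows; kPointerSize is the real x64 value, but the register count and
// frame-constant values are assumptions made up for this illustration.
#include <cstdio>

constexpr int kPointerSize = 8;
constexpr int kAllocatableGeneralRegisters = 12;              // assumption
constexpr int kBCFC_FixedFrameSize = 4 * kPointerSize;        // assumption
constexpr int kBCFC_FixedFrameSizeFromFp = 2 * kPointerSize;  // assumption

int main() {
  // Slot above the spilled registers that the deoptimizer fills with a hole
  // and that the helper overwrites with rax when with_result is true.
  int result_slot_offset =
      kAllocatableGeneralRegisters * kPointerSize + kBCFC_FixedFrameSize;

  // After the registers are popped, the return PC sits one pointer below the
  // fp-relative fixed frame size (the "offsetToPC" computed in the helper).
  int offset_to_pc = kBCFC_FixedFrameSizeFromFp - kPointerSize;

  std::printf("result slot at rsp+%d, return pc at rsp+%d\n",
              result_slot_offset, offset_to_pc);
  return 0;
}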
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1482,7 +1593,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- rsp[24] : receiver
// -----------------------------------
- // 1. Load receiver into rdi, argArray into rax (if present), remove all
+ // 1. Load receiver into rdi, argArray into rbx (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
@@ -1505,34 +1616,28 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
- __ movp(rax, rbx);
}
// ----------- S t a t e -------------
- // -- rax : argArray
+ // -- rbx : argArray
// -- rdi : receiver
// -- rsp[0] : return address
// -- rsp[8] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(rdi, &receiver_not_callable, Label::kNear);
- __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &receiver_not_callable, Label::kNear);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(rax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
- __ JumpIfRoot(rax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ __ JumpIfRoot(rbx, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &no_arguments,
Label::kNear);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver. Since we did not create a frame for
@@ -1542,14 +1647,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Set(rax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(args.GetReceiverOperand(), rdi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1614,7 +1711,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- rsp[32] : receiver
// -----------------------------------
- // 1. Load target into rdi (if present), argumentsList into rax (if present),
+ // 1. Load target into rdi (if present), argumentsList into rbx (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
@@ -1636,36 +1733,22 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
- __ movp(rax, rbx);
}
// ----------- S t a t e -------------
- // -- rax : argumentsList
+ // -- rbx : argumentsList
// -- rdi : target
// -- rsp[0] : return address
// -- rsp[8] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(rdi, &target_not_callable, Label::kNear);
- __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &target_not_callable, Label::kNear);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(args.GetReceiverOperand(), rdi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1678,7 +1761,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- rsp[32] : receiver
// -----------------------------------
- // 1. Load target into rdi (if present), argumentsList into rax (if present),
+ // 1. Load target into rdi (if present), argumentsList into rbx (if present),
// new.target into rdx (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -1702,51 +1785,27 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ PushReturnAddressFrom(rcx);
- __ movp(rax, rbx);
}
// ----------- S t a t e -------------
- // -- rax : argumentsList
+ // -- rbx : argumentsList
// -- rdx : new.target
// -- rdi : target
// -- rsp[0] : return address
// -- rsp[8] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(rdi, &target_not_constructor, Label::kNear);
- __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &target_not_constructor, Label::kNear);
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
- __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &new_target_not_constructor, Label::kNear);
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(args.GetReceiverOperand(), rdi);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(args.GetReceiverOperand(), rdx);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
@@ -1918,7 +1977,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(rsi, rdi, r8);
@@ -2073,7 +2132,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(rsi, rdi, r8);
@@ -2268,94 +2327,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- rax : argumentsList
// -- rdi : target
- // -- rdx : new.target (checked to be constructor or undefined)
- // -- rsp[0] : return address.
- // -- rsp[8] : thisArgument
+ // -- rax : number of parameters on the stack (not including the receiver)
+ // -- rbx : arguments list (a FixedArray)
+ // -- rcx : len (number of elements to push from args)
+ // -- rdx : new.target (for [[Construct]])
+ // -- rsp[0] : return address
// -----------------------------------
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(rax, &create_runtime);
-
- // Load the map of argumentsList into rcx.
- __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
-
- // Load native context into rbx.
- __ movp(rbx, NativeContextOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ cmpp(rcx, ContextOperand(rbx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
- __ cmpp(rcx, ContextOperand(rbx, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
- __ j(equal, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdi);
- __ Push(rdx);
- __ Push(rax);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(rdx);
- __ Pop(rdi);
- __ SmiToInteger32(rbx, FieldOperand(rax, FixedArray::kLengthOffset));
- }
- __ jmp(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ movp(rbx, FieldOperand(rax, JSArgumentsObject::kLengthOffset));
- __ movp(rcx, FieldOperand(rax, JSObject::kElementsOffset));
- __ cmpp(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(not_equal, &create_runtime);
- __ SmiToInteger32(rbx, rbx);
- __ movp(rax, rcx);
- __ jmp(&done_create);
-
- __ bind(&create_holey_array);
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ cmpp(rcx, ContextOperand(rbx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &create_runtime);
- __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
- __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
- Smi::FromInt(Isolate::kProtectorValid));
- __ j(not_equal, &create_runtime);
- __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
- __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
- __ jmp(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(rcx);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmpl(rcx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_holey_array);
- __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(equal, &create_holey_array);
- __ j(above, &create_runtime);
- __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
- __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
-
- __ bind(&done_create);
- }
+ __ AssertFixedArray(rbx);
// Check for stack overflow.
{
@@ -2363,61 +2345,48 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movp(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subp(rcx, kScratchRegister);
- __ sarp(rcx, Immediate(kPointerSizeLog2));
+ __ movp(r8, rsp);
+ // Make r8 the space we have left. The stack might already be overflowed
+ // here which will cause r8 to become negative.
+ __ subp(r8, kScratchRegister);
+ __ sarp(r8, Immediate(kPointerSizeLog2));
// Check if the arguments will overflow the stack.
- __ cmpp(rcx, rbx);
+ __ cmpp(r8, rcx);
__ j(greater, &done, Label::kNear); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- rdi : target
- // -- rax : args (a FixedArray built from argumentsList)
- // -- rbx : len (number of elements to push from args)
- // -- rdx : new.target (checked to be constructor or undefined)
- // -- rsp[0] : return address.
- // -- rsp[8] : thisArgument
- // -----------------------------------
-
- // Push arguments onto the stack (thisArgument is already on the stack).
+ // Push additional arguments onto the stack.
{
__ PopReturnAddressTo(r8);
- __ Set(rcx, 0);
+ __ Set(r9, 0);
Label done, push, loop;
__ bind(&loop);
- __ cmpl(rcx, rbx);
+ __ cmpl(r9, rcx);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
- __ movp(r9, FieldOperand(rax, rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+ __ movp(r11,
+ FieldOperand(rbx, r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &push, Label::kNear);
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
__ bind(&push);
- __ Push(r9);
- __ incl(rcx);
+ __ Push(r11);
+ __ incl(r9);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(r8);
- __ Move(rax, rcx);
+ __ addq(rax, r9);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
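// Generate_CallOrConstructVarargs pushes `len` elements from a FixedArray and
// turns holes into undefined on the way. A stand-alone sketch of that loop,
// with value tags and names invented purely for illustration:
#include <cstdio>
#include <vector>

enum class Value { kUndefined, kTheHole, kObject };

// Models the push loop: copies len elements onto the stack, replacing holes
// with undefined, and bumps the argument count (the `addq(rax, r9)` above).
void PushVarargs(std::vector<Value>& stack, const std::vector<Value>& args,
                 int len, int& argc) {
  for (int i = 0; i < len; ++i) {
    Value v = args[i];
    if (v == Value::kTheHole) v = Value::kUndefined;
    stack.push_back(v);
  }
  argc += len;
}

int main() {
  std::vector<Value> stack;
  std::vector<Value> args = {Value::kObject, Value::kTheHole, Value::kObject};
  int argc = 0;  // arguments already on the stack, excluding the receiver
  PushVarargs(stack, args, static_cast<int>(args.size()), argc);
  std::printf("pushed %zu values, argc is now %d\n", stack.size(), argc);
  return 0;
}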
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdx : the new target (for [[Construct]] calls)
@@ -2434,8 +2403,8 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
__ movp(r8, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movp(r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- r8, r8, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movl(r8,
+ FieldOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rbx, rbp);
}
__ jmp(&arguments_done, Label::kNear);
@@ -2477,98 +2446,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg
-// | f()'s caller pc <- sp
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is active.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ Move(kScratchRegister, is_tail_call_elimination_enabled);
- __ cmpb(Operand(kScratchRegister, 0), Immediate(0));
- __ j(equal, &done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ j(not_equal, &no_interpreter_frame, Label::kNear);
- __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ cmpp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ movp(rbp, scratch2);
- __ SmiToInteger32(
- caller_args_count_reg,
- Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ movp(scratch1, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(scratch1,
- FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- caller_args_count_reg, scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSFunction)
@@ -2580,8 +2460,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorMask));
__ j(not_zero, &class_constructor);
// ----------- S t a t e -------------
@@ -2593,14 +2473,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
- Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
@@ -2666,12 +2544,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- rsi : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, rax, rbx, rcx, r8);
- }
-
- __ LoadSharedFunctionInfoSpecialField(
- rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movsxlq(
+ rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(rax);
ParameterCount expected(rbx);
@@ -2772,18 +2646,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(rdi);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, rax, rbx, rcx, r8);
- }
-
// Patch the receiver to [[BoundThis]].
StackArgumentsAccessor args(rsp, rax);
__ movp(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
@@ -2801,8 +2670,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the target to call (can be any Object)
@@ -2813,10 +2681,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(rdi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET);
// Check if target has a [[Call]] internal method.
@@ -2824,24 +2692,13 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
Immediate(1 << Map::kIsCallable));
__ j(zero, &non_callable);
+ // Check if the target is a proxy and, if so, call the CallProxy external
+ // builtin.
__ CmpInstanceType(rcx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, rax, rbx, rcx, r8);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ PopReturnAddressTo(kScratchRegister);
- __ Push(rdi);
- __ PushReturnAddressFrom(kScratchRegister);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ addp(rax, Immediate(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ Load(rcx, ExternalReference(Builtins::kCallProxy, masm->isolate()));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2851,7 +2708,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2863,148 +2720,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
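// Generate_Call above is essentially a type dispatch on the call target. A
// compact stand-alone model of that dispatch (the enum and function names are
// stand-ins; the builtin names in the strings are the ones used above):
#include <cstdio>

enum class TargetKind {
  kSmi,
  kJSFunction,
  kJSBoundFunction,
  kJSProxy,
  kOtherCallable,  // callable bit set in the map, none of the above
  kNotCallable
};

const char* CallDispatch(TargetKind kind) {
  switch (kind) {
    case TargetKind::kJSFunction:
      return "jump to the CallFunction builtin";
    case TargetKind::kJSBoundFunction:
      return "jump to the CallBoundFunction builtin";
    case TargetKind::kJSProxy:
      return "jump to the CallProxy builtin";
    case TargetKind::kOtherCallable:
      return "install CALL_AS_FUNCTION_DELEGATE as target, then CallFunction";
    case TargetKind::kSmi:
    case TargetKind::kNotCallable:
      return "throw a TypeError via the runtime";
  }
  return "unreachable";
}

int main() {
  std::printf("%s\n", CallDispatch(TargetKind::kJSProxy));
  return 0;
}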
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Label runtime_call, push_args;
- // Load the spread argument into rbx.
- __ movp(rbx, Operand(rsp, kPointerSize));
- __ JumpIfSmi(rbx, &runtime_call);
- // Load the map of the spread into r15.
- __ movp(r15, FieldOperand(rbx, HeapObject::kMapOffset));
- // Load native context into r14.
- __ movp(r14, NativeContextOperand());
-
- // Check that the spread is an array.
- __ CmpInstanceType(r15, JS_ARRAY_TYPE);
- __ j(not_equal, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ movp(rcx, FieldOperand(r15, Map::kPrototypeOffset));
- __ cmpp(rcx, ContextOperand(r14, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(rcx, Heap::kArrayIteratorProtectorRootIndex);
- __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
- Smi::FromInt(Isolate::kProtectorValid));
- __ j(not_equal, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ movp(rcx,
- ContextOperand(r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ cmpp(rcx, ContextOperand(
- r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ movzxbp(rcx, FieldOperand(r15, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(rcx);
- __ cmpp(rcx, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(above, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmpp(rcx, Immediate(FAST_SMI_ELEMENTS));
- __ j(equal, &no_protector_check);
- __ cmpp(rcx, Immediate(FAST_ELEMENTS));
- __ j(equal, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
- __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
- Smi::FromInt(Isolate::kProtectorValid));
- __ j(not_equal, &runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ SmiToInteger32(r9, FieldOperand(rbx, JSArray::kLengthOffset));
- __ movp(rbx, FieldOperand(rbx, JSArray::kElementsOffset));
- __ jmp(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdi); // target
- __ Push(rdx); // new target
- __ Integer32ToSmi(rax, rax);
- __ Push(rax); // nargs
- __ Push(rbx);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ movp(rbx, rax);
- __ Pop(rax); // nargs
- __ SmiToInteger32(rax, rax);
- __ Pop(rdx); // new target
- __ Pop(rdi); // target
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ SmiToInteger32(r9, FieldOperand(rbx, FixedArray::kLengthOffset));
-
- __ bind(&push_args);
- // rax += r9 - 1. Subtract 1 for the spread itself.
- __ leap(rax, Operand(rax, r9, times_1, -1));
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movp(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subp(rcx, kScratchRegister);
- __ sarp(rcx, Immediate(kPointerSizeLog2));
- // Check if the arguments will overflow the stack.
- __ cmpp(rcx, r9);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- // Pop the return address and spread argument.
- __ PopReturnAddressTo(r8);
- __ Pop(rcx);
-
- __ Set(rcx, 0);
- Label done, push, loop;
- __ bind(&loop);
- __ cmpl(rcx, r9);
- __ j(equal, &done, Label::kNear);
- __ movp(kScratchRegister, FieldOperand(rbx, rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(kScratchRegister);
- __ incl(rcx);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(r8);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
- // -- rdi : the target to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push rdx to save it.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3127,19 +2842,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
- // -- rdx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -- rdi : the constructor to call (can be any Object)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
diff --git a/deps/v8/src/builtins/x87/OWNERS b/deps/v8/src/builtins/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/builtins/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/builtins/x87/builtins-x87.cc b/deps/v8/src/builtins/x87/builtins-x87.cc
deleted file mode 100644
index 55b5dc4f56..0000000000
--- a/deps/v8/src/builtins/x87/builtins-x87.cc
+++ /dev/null
@@ -1,3183 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/code-factory.h"
-#include "src/codegen.h"
-#include "src/deoptimizer.h"
-#include "src/full-codegen/full-codegen.h"
-#include "src/x87/frames-x87.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments excluding receiver
- // -- edi : target
- // -- edx : new.target
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -- ...
- // -- esp[4 * argc] : first argument
- // -- esp[4 * (argc +1)] : receiver
- // -----------------------------------
- __ AssertFunction(edi);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // JumpToExternalReference expects eax to contain the number of arguments
- // including the receiver and the extra arguments.
- const int num_extra_args = 3;
- __ add(eax, Immediate(num_extra_args + 1));
-
- // Insert extra arguments.
- __ PopReturnAddressTo(ecx);
- __ SmiTag(eax);
- __ Push(eax);
- __ SmiUntag(eax);
- __ Push(edi);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
-
- __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
- exit_frame_type == BUILTIN_EXIT);
-}
-
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- eax : argument count (preserved for callee)
- // -- edx : new target (preserved for callee)
- // -- edi : target function (preserved for callee)
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the number of arguments to the callee.
- __ SmiTag(eax);
- __ push(eax);
- // Push a copy of the target function and the new target.
- __ push(edi);
- __ push(edx);
- // Function is also the parameter to the runtime call.
- __ push(edi);
-
- __ CallRuntime(function_id, 1);
- __ mov(ebx, eax);
-
- // Restore target function and new target.
- __ pop(edx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
- }
-
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
-}
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
-}
-
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
-namespace {
-
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
- // ----------- S t a t e -------------
- // -- eax: number of arguments
- // -- esi: context
- // -- edi: constructor function
- // -- edx: new target
- // -----------------------------------
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve the incoming parameters on the stack.
- __ SmiTag(eax);
- __ push(esi);
- __ push(eax);
-
- if (create_implicit_receiver) {
- // Allocate the new receiver object.
- __ Push(edi);
- __ Push(edx);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ mov(ebx, eax);
- __ Pop(edx);
- __ Pop(edi);
-
- // ----------- S t a t e -------------
- // -- edi: constructor function
- // -- ebx: newly allocated object
- // -- edx: new target
- // -----------------------------------
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
- }
-
- __ SmiUntag(eax);
-
- if (create_implicit_receiver) {
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
-
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- ParameterCount actual(eax);
- __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
-
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver, Label::kNear);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &exit, Label::kNear);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame. The
- // arguments count is stored below the receiver.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- } else {
- __ mov(ebx, Operand(esp, 0));
- }
-
- // Leave construct frame.
- }
-
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label dont_throw;
- __ JumpIfNotSmi(eax, &dont_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
-
- // Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
- if (create_implicit_receiver) {
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- }
- __ ret(0);
-}
-
-} // namespace
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
-}
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
-}
-
-void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edi);
- __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
-}
-
-enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
-
-// Clobbers ecx, edx, edi; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- IsTagged eax_is_tagged) {
- // eax : the number of items to be pushed to the stack
- //
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, eax);
- int smi_tag = eax_is_tagged == kEaxIsSmiTagged ? kSmiTagSize : 0;
- __ shl(edx, kPointerSizeLog2 - smi_tag);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow);
-
- __ bind(&okay);
-}
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
- masm->isolate());
- __ mov(esi, Operand::StaticVariable(context_address));
-
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
-
- // Push the function and the receiver onto the stack.
- __ push(Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
-
- // Load the number of arguments and setup pointer to the arguments.
- __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
- __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
-
- // Check if we have enough stack space to push all arguments.
- // Expects argument count in eax. Clobbers ecx, edx, edi.
- Generate_CheckStackOverflow(masm, kEaxIsUntaggedInt);
-
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Move(ecx, Immediate(0));
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
- __ inc(ecx);
- __ bind(&entry);
- __ cmp(ecx, eax);
- __ j(not_equal, &loop);
-
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
-
- // Get the new.target and function from the frame.
- __ mov(edx, Operand(ebx, EntryFrameConstants::kNewTargetArgOffset));
- __ mov(edi, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
-
- // Invoke the code.
- Handle<Code> builtin = is_construct
- ? masm->isolate()->builtins()->Construct()
- : masm->isolate()->builtins()->Call();
- __ Call(builtin, RelocInfo::CODE_TARGET);
-
- // Exit the internal frame. Notice that this also removes the empty
- // context and the function left on the stack by the code invocation.
- }
- __ ret(kPointerSize); // Remove receiver.
-}
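The argument-copy loop above dereferences each argv slot before pushing because the C++ entry code passes an array of handles, i.e. pointers to object slots. A small sketch of that double indirection (plain ints stand in for tagged values):

#include <cstdio>
#include <vector>

int main() {
  // What the C++ caller hands over: an array of handles (pointer-to-slot).
  int a = 1, b = 2, c = 3;
  int* handles[] = {&a, &b, &c};
  const int argc = 3;

  std::vector<int> js_stack;
  for (int i = 0; i < argc; ++i) {
    int* handle = handles[i];     // mov edx, [ebx + ecx*4]  (parameter from argv)
    js_stack.push_back(*handle);  // push [edx]              (dereference handle)
  }
  for (int v : js_stack) std::printf("%d\n", v);
}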
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-// static
-void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the value to pass to the generator
- // -- ebx : the JSGeneratorObject to resume
- // -- edx : the resume mode (tagged)
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertGeneratorObject(ebx);
-
- // Store input value into generator object.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
- __ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
- kDontSaveFPRegs);
-
- // Store resume mode into generator object.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
-
- // Load suspended function and context.
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Flood function if we are stepping.
- Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
- Label stepping_prepared;
- ExternalReference debug_hook =
- ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ cmpb(Operand::StaticVariable(debug_hook), Immediate(0));
- __ j(not_equal, &prepare_step_in_if_stepping);
-
- // Flood function if we need to continue stepping in the suspended generator.
- ExternalReference debug_suspended_generator =
- ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ cmp(ebx, Operand::StaticVariable(debug_suspended_generator));
- __ j(equal, &prepare_step_in_suspended_generator);
- __ bind(&stepping_prepared);
-
- // Pop return address.
- __ PopReturnAddressTo(eax);
-
- // Push receiver.
- __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
-
- // ----------- S t a t e -------------
- // -- eax : return address
- // -- ebx : the JSGeneratorObject to resume
- // -- edx : the resume mode (tagged)
- // -- edi : generator function
- // -- esi : generator context
- // -- esp[0] : generator receiver
- // -----------------------------------
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- {
- Label done_loop, loop;
- __ bind(&loop);
- __ sub(ecx, Immediate(Smi::FromInt(1)));
- __ j(carry, &done_loop, Label::kNear);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Underlying function needs to have bytecode available.
- if (FLAG_debug_code) {
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
- __ Assert(equal, kMissingBytecodeArray);
- }
-
- // Resume (Ignition/TurboFan) generator object.
- {
- __ PushReturnAddressFrom(eax);
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax,
- FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
- // We abuse new.target both to indicate that this is a resume call and to
- // pass in the generator object. In ordinary calls, new.target is always
- // undefined because generator functions are non-constructable.
- __ mov(edx, ebx);
- __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
-
- __ bind(&prepare_step_in_if_stepping);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edx);
- __ Push(edi);
- __ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(edx);
- __ Pop(ebx);
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
- }
- __ jmp(&stepping_prepared);
-
- __ bind(&prepare_step_in_suspended_generator);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edx);
- __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(edx);
- __ Pop(ebx);
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
- }
- __ jmp(&stepping_prepared);
-}
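The hole-pushing loop counts down the Smi-tagged formal parameter count by repeatedly subtracting Smi::FromInt(1) and stopping on the carry (borrow) flag. A sketch of why that pushes exactly one hole per parameter (the ia32 tagging convention value << 1 is assumed; the count is an example value):

#include <cstdint>
#include <cstdio>

int main() {
  const int formal_parameter_count = 3;  // example value
  // On ia32 a Smi stores value << 1, so Smi(3) is raw bits 6 and Smi(1) is 2.
  uint32_t tagged_count = uint32_t(formal_parameter_count) << 1;
  const uint32_t smi_one = 1u << 1;

  int holes_pushed = 0;
  for (;;) {
    uint32_t before = tagged_count;
    tagged_count -= smi_one;
    bool carry = before < smi_one;  // borrow == x86 carry flag after sub
    if (carry) break;               // done_loop in the builtin
    ++holes_pushed;                 // PushRoot(kTheHoleValueRootIndex)
  }
  std::printf("pushed %d holes for %d formal parameters\n",
              holes_pushed, formal_parameter_count);
}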
-
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
- Register scratch2) {
- Register args_count = scratch1;
- Register return_pc = scratch2;
-
- // Get the arguments + receiver count.
- __ mov(args_count,
- Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ mov(args_count,
- FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
-
- // Leave the frame (also dropping the register file).
- __ leave();
-
- // Drop receiver + arguments.
- __ pop(return_pc);
- __ add(esp, args_count);
- __ push(return_pc);
-}
-
-// Generate code for entering a JS function with the interpreter.
-// On entry to the function the receiver and arguments have been pushed on the
-// stack left to right. The actual argument count matches the formal parameter
-// count expected by the function.
-//
-// The live registers are:
-// o edi: the JS function object being called
-// o edx: the new target
-// o esi: our context
-// o ebp: the caller's frame pointer
-// o esp: stack pointer (pointing to return address)
-//
-// The function builds an interpreter frame. See InterpreterFrameConstants in
-// frames.h for its layout.
-void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS function.
- __ push(edx); // Callee's new target.
-
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
- &load_debug_bytecode_array);
- __ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
- __ bind(&bytecode_array_loaded);
-
- // Check whether we should continue to use the interpreter.
- // TODO(rmcilroy) Remove self healing once liveedit only has to deal with
- // Ignition bytecode.
- Label switch_to_different_code_kind;
- __ Move(ecx, masm->CodeObject()); // Self-reference to this code.
- __ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
- __ j(not_equal, &switch_to_different_code_kind);
-
- // Increment invocation count for the function.
- __ EmitLoadFeedbackVector(ecx);
- __ add(
- FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
- Immediate(Smi::FromInt(1)));
-
- // Check function data field is actually a BytecodeArray object.
- if (FLAG_debug_code) {
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
- __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
- eax);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- }
-
- // Reset code age.
- __ mov_b(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kBytecodeAgeOffset),
- Immediate(BytecodeArray::kNoAgeBytecodeAge));
-
- // Push bytecode array.
- __ push(kInterpreterBytecodeArrayRegister);
- // Push Smi tagged initial bytecode array offset.
- __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
-
- // Allocate the local and temporary register file on the stack.
- {
- // Load frame size from the BytecodeArray object.
- __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kFrameSizeOffset));
-
- // Do a stack check to ensure we don't go over the limit.
- Label ok;
- __ mov(ecx, esp);
- __ sub(ecx, ebx);
- ExternalReference stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ cmp(ecx, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
-
- // If ok, push undefined as the initial value for all register file entries.
- Label loop_header;
- Label loop_check;
- __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
- __ jmp(&loop_check);
- __ bind(&loop_header);
- // TODO(rmcilroy): Consider doing more than one push per loop iteration.
- __ push(eax);
- // Continue loop if not done.
- __ bind(&loop_check);
- __ sub(ebx, Immediate(kPointerSize));
- __ j(greater_equal, &loop_header);
- }
-
- // Load accumulator, bytecode offset and dispatch table into registers.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ mov(kInterpreterBytecodeOffsetRegister,
- Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ mov(kInterpreterDispatchTableRegister,
- Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
-
- // Dispatch to the first bytecode handler for the function.
- __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ call(ebx);
- masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
-
- // The return value is in eax.
- LeaveInterpreterFrame(masm, ebx, ecx);
- __ ret(0);
-
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
- Register debug_info = kInterpreterBytecodeArrayRegister;
- __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
- __ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
- __ jmp(&bytecode_array_loaded);
-
- // If the shared code is no longer this entry trampoline, then the underlying
- // function has been switched to a different kind of code and we heal the
- // closure by switching the code entry field over to the new code as well.
- __ bind(&switch_to_different_code_kind);
- __ pop(edx); // Callee's new target.
- __ pop(edi); // Callee's JS function.
- __ pop(esi); // Callee's context.
- __ leave(); // Leave the frame so we can tail call.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
- __ RecordWriteCodeEntryField(edi, ecx, ebx);
- __ jmp(ecx);
-}
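The final dispatch is a table lookup: the byte at the current bytecode offset indexes the dispatch table, and the selected handler is called. A toy model with function pointers (both the bytecodes and the handlers are invented for illustration; real handlers are generated code):

#include <cstdint>
#include <cstdio>

void HandleLdaZero() { std::puts("LdaZero handler"); }
void HandleReturn()  { std::puts("Return handler"); }

int main() {
  using Handler = void (*)();
  const Handler dispatch_table[] = {HandleLdaZero, HandleReturn};
  const uint8_t bytecode_array[] = {0, 1};  // two made-up bytecodes
  const int bytecode_offset = 0;            // header-size bookkeeping elided

  // movzx_b + dispatch-table load + call in the builtin:
  uint8_t bytecode = bytecode_array[bytecode_offset];
  dispatch_table[bytecode]();
}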
-
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow,
- bool include_receiver = false) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(scratch1, Operand::StaticVariable(real_stack_limit));
- // Make scratch2 the space we have left. The stack might already have
- // overflowed here, in which case scratch2 becomes negative.
- __ mov(scratch2, esp);
- __ sub(scratch2, scratch1);
- // Make scratch1 the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(scratch1, num_args);
- if (include_receiver) {
- __ add(scratch1, Immediate(1));
- }
- __ shl(scratch1, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch2, scratch1);
- __ j(less_equal, stack_overflow); // Signed comparison.
-}
-
-static void Generate_InterpreterPushArgs(MacroAssembler* masm,
- Register array_limit,
- Register start_address) {
- // ----------- S t a t e -------------
- // -- start_address : Pointer to the last argument in the args array.
- // -- array_limit : Pointer to one before the first argument in the
- // args array.
- // -----------------------------------
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ Push(Operand(start_address, 0));
- __ sub(start_address, Immediate(kPointerSize));
- __ bind(&loop_check);
- __ cmp(start_address, array_limit);
- __ j(greater, &loop_header, Label::kNear);
-}
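The push loop walks from start_address down to (but not including) array_limit, pushing one stack slot per step, so the first argument ends up nearest the top of the machine stack. A small model with indices instead of addresses (the argument values are arbitrary):

#include <cstdio>
#include <vector>

int main() {
  // args[0] is the first argument; start points at the last argument and
  // limit points one slot before the first one, mirroring the register setup.
  const std::vector<int> args = {10, 20, 30};
  int start = static_cast<int>(args.size()) - 1;  // start_address
  const int limit = -1;                           // array_limit

  std::vector<int> pushed;                        // records each Push()
  while (start > limit) {                         // j(greater, &loop_header)
    pushed.push_back(args[start]);
    --start;                                      // sub(start_address, kPointerSize)
  }
  // Prints 30 20 10; the last value pushed (the first argument) would sit on
  // top of the downward-growing machine stack.
  for (int v : pushed) std::printf("%d\n", v);
}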
-
-// static
-void Builtins::Generate_InterpreterPushArgsThenCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode,
- InterpreterPushArgsMode mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- ebx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- edi : the target to call (can be any Object).
- // -----------------------------------
- Label stack_overflow;
- // Compute the expected number of arguments.
- __ mov(ecx, eax);
- __ add(ecx, Immediate(1)); // Add one for receiver.
-
- // Add a stack check before pushing the arguments. We need an extra register
- // to perform the check, so push edi onto the stack temporarily. This push
- // might itself overflow the stack, but that will be detected by the check.
- __ Push(edi);
- Generate_StackOverflowCheck(masm, ecx, edx, edi, &stack_overflow);
- __ Pop(edi);
-
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(edx);
-
- // Find the address of the last argument.
- __ shl(ecx, kPointerSizeLog2);
- __ neg(ecx);
- __ add(ecx, ebx);
- Generate_InterpreterPushArgs(masm, ecx, ebx);
-
- // Call the target.
- __ Push(edx); // Re-push return address.
-
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Jump(masm->isolate()->builtins()->CallWithSpread(),
- RelocInfo::CODE_TARGET);
- } else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
- }
-
- __ bind(&stack_overflow);
- {
- // Pop the temporary registers, so that return address is on top of stack.
- __ Pop(edi);
-
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
-
- // This should be unreachable.
- __ int3();
- }
-}
-
-namespace {
-
- // This function modifies start_addr and only reads the contents of the
- // num_args register. scratch1 and scratch2 are used as temporary registers;
- // their original values are restored after use.
-void Generate_InterpreterPushArgsThenReturnAddress(
- MacroAssembler* masm, Register num_args, Register start_addr,
- Register scratch1, Register scratch2, bool receiver_in_args,
- int num_slots_above_ret_addr, Label* stack_overflow) {
- // We have to move return address and the temporary registers above it
- // before we can copy arguments onto the stack. To achieve this:
- // Step 1: Increment the stack pointer by num_args + 1 (for receiver).
- // Step 2: Move the return address and values above it to the top of stack.
- // Step 3: Copy the arguments into the correct locations.
- // current stack =====> required stack layout
- // | | | scratch1 | (2) <-- esp(1)
- // | | | .... | (2)
- // | | | scratch-n | (2)
- // | | | return addr | (2)
- // | | | arg N | (3)
- // | scratch1 | <-- esp | .... |
- // | .... | | arg 0 |
- // | scratch-n | | arg 0 |
- // | return addr | | receiver slot |
-
- // Check for stack overflow before we increment the stack pointer.
- Generate_StackOverflowCheck(masm, num_args, scratch1, scratch2,
- stack_overflow, true);
-
-// Step 1: Update the stack pointer. scratch1 already contains the required
-// increase in stack size, i.e. num_args + 1 stack slots, as computed in
-// Generate_StackOverflowCheck.
-
-#ifdef _MSC_VER
- // TODO(mythria): Move this to the macro assembler.
- // On Windows, we cannot grow the stack by more than one page
- // (minimum page size is 4KB) at a time without touching at least one byte
- // on each new page. See:
- // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
- const int page_size = 4 * 1024;
- Label check_offset, update_stack_pointer;
- __ bind(&check_offset);
- __ cmp(scratch1, page_size);
- __ j(less, &update_stack_pointer);
- __ sub(esp, Immediate(page_size));
- // Just to touch the page, before we increment further.
- __ mov(Operand(esp, 0), Immediate(0));
- __ sub(scratch1, Immediate(page_size));
- __ jmp(&check_offset);
- __ bind(&update_stack_pointer);
-#endif
-
- __ sub(esp, scratch1);
-
- // Step 2: Move the return address and the slots above it to their new
- // locations. Move from top to bottom, otherwise we may overwrite entries
- // when num_args is 0 or 1, i.e. when the source and destination ranges
- // overlap. There is always at least one extra slot for the receiver, so no
- // additional overlap checks are required.
- for (int i = 0; i < num_slots_above_ret_addr + 1; i++) {
- __ mov(scratch1,
- Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize));
- __ mov(Operand(esp, i * kPointerSize), scratch1);
- }
-
- // Step 3: Copy the arguments to their correct locations.
- if (receiver_in_args) {
- __ mov(scratch1, num_args);
- __ add(scratch1, Immediate(1));
- } else {
- // The slot meant for the receiver contains the return address. Reset it so
- // that we do not incorrectly interpret the return address as an object.
- __ mov(Operand(esp, num_args, times_pointer_size,
- (num_slots_above_ret_addr + 1) * kPointerSize),
- Immediate(0));
- __ mov(scratch1, num_args);
- }
-
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ mov(scratch2, Operand(start_addr, 0));
- __ mov(Operand(esp, scratch1, times_pointer_size,
- num_slots_above_ret_addr * kPointerSize),
- scratch2);
- __ sub(start_addr, Immediate(kPointerSize));
- __ sub(scratch1, Immediate(1));
- __ bind(&loop_check);
- __ cmp(scratch1, Immediate(0));
- __ j(greater, &loop_header, Label::kNear);
-}
-
-} // end anonymous namespace
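The _MSC_VER branch above splits a large stack reservation into page-sized steps and touches one byte on each new page before the final sub. The chunking is plain arithmetic, sketched here without actually moving a stack pointer (the total size is a made-up value):

#include <cstdio>

int main() {
  const int kPageSize = 4 * 1024;
  int remaining = 10 * 1024;  // bytes the builtin wants to reserve
  int probes = 0;

  // The check_offset / update_stack_pointer loop from the builtin:
  while (remaining >= kPageSize) {
    // __ sub(esp, Immediate(page_size)) followed by a dummy store that
    // touches the freshly exposed page.
    ++probes;
    remaining -= kPageSize;
  }
  // The final __ sub(esp, scratch1) covers the remainder (< one page).
  std::printf("touched %d guard pages, %d bytes left for the final sub\n",
              probes, remaining);
}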
-
-// static
-void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
- MacroAssembler* masm, InterpreterPushArgsMode mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target
- // -- edi : the constructor
- // -- ebx : allocation site feedback (if available or undefined)
- // -- ecx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -----------------------------------
- Label stack_overflow;
- // We need two scratch registers, so push edi and edx onto the stack.
- __ Push(edi);
- __ Push(edx);
-
- // Push arguments and move return address to the top of stack.
- // The eax register is readonly. The ecx register will be modified. The edx
- // and edi registers will be modified but restored to their original values.
- Generate_InterpreterPushArgsThenReturnAddress(masm, eax, ecx, edx, edi, false,
- 2, &stack_overflow);
-
- // Restore edi and edx
- __ Pop(edx);
- __ Pop(edi);
-
- __ AssertUndefinedOrAllocationSite(ebx);
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
- __ AssertFunction(edi);
-
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- // Call the constructor with unmodified eax, edi, edx values.
- __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
- // Call the constructor with unmodified eax, edi, edx values.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&stack_overflow);
- {
- // Pop the temporary registers, so that return address is on top of stack.
- __ Pop(edx);
- __ Pop(edi);
-
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
-
- // This should be unreachable.
- __ int3();
- }
-}
-
-// static
-void Builtins::Generate_InterpreterPushArgsThenConstructArray(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the target to call, checked to be the Array function.
- // -- ebx : the allocation site feedback
- // -- ecx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -----------------------------------
- Label stack_overflow;
- // We need two scratch registers. Register edi is available; push edx onto
- // the stack.
- __ Push(edx);
-
- // Push arguments and move return address to the top of stack.
- // The eax register is readonly. The ecx register will be modified. The edx
- // and edi registers will be modified but restored to their original values.
- Generate_InterpreterPushArgsThenReturnAddress(masm, eax, ecx, edx, edi, true,
- 1, &stack_overflow);
-
- // Restore edx.
- __ Pop(edx);
-
- // The Array constructor expects the constructor in edi, which is the same
- // as edx here.
- __ Move(edi, edx);
-
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-
- __ bind(&stack_overflow);
- {
- // Pop the temporary registers, so that return address is on top of stack.
- __ Pop(edx);
-
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
-
- // This should be unreachable.
- __ int3();
- }
-}
-
-static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
- // Set the return address to the correct point in the interpreter entry
- // trampoline.
- Smi* interpreter_entry_return_pc_offset(
- masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
- __ LoadHeapObject(ebx,
- masm->isolate()->builtins()->InterpreterEntryTrampoline());
- __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
- __ push(ebx);
-
- // Initialize the dispatch table register.
- __ mov(kInterpreterDispatchTableRegister,
- Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
-
- // Get the bytecode array pointer from the frame.
- __ mov(kInterpreterBytecodeArrayRegister,
- Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-
- if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
- __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
- ebx);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- }
-
- // Get the target bytecode offset from the frame.
- __ mov(kInterpreterBytecodeOffsetRegister,
- Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
-
- // Dispatch to the target bytecode.
- __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ jmp(ebx);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
- // Advance the current bytecode offset stored within the given interpreter
- // stack frame. This simulates what all bytecode handlers do upon completion
- // of the underlying operation.
- __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ mov(edx, Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(kInterpreterAccumulatorRegister);
- __ Push(ebx); // First argument is the bytecode array.
- __ Push(edx); // Second argument is the bytecode offset.
- __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
- __ Move(edx, eax); // Result is the new bytecode offset.
- __ Pop(kInterpreterAccumulatorRegister);
- }
- __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), edx);
-
- Generate_InterpreterEnterBytecode(masm);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- Generate_InterpreterEnterBytecode(masm);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argument count (preserved for callee)
- // -- edx : new target (preserved for callee)
- // -- edi : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime, gotta_call_runtime_no_stack;
- Label try_shared;
- Label loop_top, loop_bottom;
-
- Register closure = edi;
- Register new_target = edx;
- Register argument_count = eax;
-
- // Do we have a valid feedback vector?
- __ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(ebx, masm->isolate()->factory()->undefined_value());
- __ j(equal, &gotta_call_runtime_no_stack);
-
- __ push(argument_count);
- __ push(new_target);
- __ push(closure);
-
- Register map = argument_count;
- Register index = ebx;
- __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
- __ cmp(index, Immediate(Smi::FromInt(2)));
- __ j(less, &try_shared);
-
- // edx : native context
- // ebx : length / index
- // eax : optimized code map
- // stack[0] : new target
- // stack[4] : closure
- Register native_context = edx;
- __ mov(native_context, NativeContextOperand());
-
- __ bind(&loop_top);
- Register temp = edi;
-
- // Does the native context match?
- __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
- __ cmp(temp, native_context);
- __ j(not_equal, &loop_bottom);
- // Code available?
- Register entry = ecx;
- __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
- __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code. Get it into the closure and return.
- __ pop(closure);
- // Store code entry in the closure.
- __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
- __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
- __ RecordWriteCodeEntryField(closure, entry, eax);
-
- // Link the closure into the optimized function list.
- // ecx : code entry
- // edx : native context
- // edi : closure
- __ mov(ebx,
- ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
- closure);
- // Save closure before the write barrier.
- __ mov(ebx, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
- kDontSaveFPRegs);
- __ mov(closure, ebx);
- __ pop(new_target);
- __ pop(argument_count);
- __ jmp(entry);
-
- __ bind(&loop_bottom);
- __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ cmp(index, Immediate(Smi::FromInt(1)));
- __ j(greater, &loop_top);
-
- // We found no code.
- __ jmp(&gotta_call_runtime);
-
- __ bind(&try_shared);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
- Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ j(not_zero, &gotta_call_runtime_no_stack);
-
- // If SFI points to anything other than CompileLazy, install that.
- __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ Move(ebx, masm->CodeObject());
- __ cmp(entry, ebx);
- __ j(equal, &gotta_call_runtime_no_stack);
-
- // Install the SFI's code entry.
- __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
- __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
- __ RecordWriteCodeEntryField(closure, entry, ebx);
- __ jmp(entry);
-
- __ bind(&gotta_call_runtime);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ bind(&gotta_call_runtime_no_stack);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
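The lookup loop in CompileLazy scans the optimized-code map from the end for an entry whose context WeakCell still matches the current native context, falling back to the shared code when no entry matches or the cached code has been cleared. A rough model with simplified stand-in types (not the real V8 layout):

#include <cstdio>
#include <vector>

struct Entry {
  const void* native_context;  // stand-in for the context WeakCell
  const void* code;            // stand-in for the cached code WeakCell
};

// Returns the cached code for `context`, or nullptr (the &try_shared path).
const void* LookupOptimizedCode(const std::vector<Entry>& map,
                                const void* context) {
  // The builtin walks the map backwards, kEntryLength at a time.
  for (auto it = map.rbegin(); it != map.rend(); ++it) {
    if (it->native_context != context) continue;  // loop_bottom
    if (it->code == nullptr) return nullptr;      // cleared WeakCell
    return it->code;                              // found code
  }
  return nullptr;
}

int main() {
  int ctx_a, ctx_b, code_b;
  std::vector<Entry> map = {{&ctx_a, nullptr}, {&ctx_b, &code_b}};
  std::printf("hit: %d\n", LookupOptimizedCode(map, &ctx_b) == &code_b);
  std::printf("miss: %d\n", LookupOptimizedCode(map, &ctx_a) == nullptr);
}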
-
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
-void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argument count (preserved for callee)
- // -- edx : new target (preserved for callee)
- // -- edi : target function (preserved for callee)
- // -----------------------------------
- Label failed;
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve argument count for later compare.
- __ mov(ecx, eax);
- // Push the number of arguments to the callee.
- __ SmiTag(eax);
- __ push(eax);
- // Push a copy of the target function and the new target.
- __ push(edi);
- __ push(edx);
-
- // The function.
- __ push(edi);
- // Copy arguments from caller (stdlib, foreign, heap).
- Label args_done;
- for (int j = 0; j < 4; ++j) {
- Label over;
- if (j < 3) {
- __ cmp(ecx, Immediate(j));
- __ j(not_equal, &over, Label::kNear);
- }
- for (int i = j - 1; i >= 0; --i) {
- __ Push(Operand(
- ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
- }
- for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
- if (j < 3) {
- __ jmp(&args_done, Label::kNear);
- __ bind(&over);
- }
- }
- __ bind(&args_done);
-
- // Call runtime, on success unwind frame, and parent frame.
- __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
- // A smi 0 is returned on failure, an object on success.
- __ JumpIfSmi(eax, &failed, Label::kNear);
-
- __ Drop(2);
- __ Pop(ecx);
- __ SmiUntag(ecx);
- scope.GenerateLeaveFrame();
-
- __ PopReturnAddressTo(ebx);
- __ inc(ecx);
- __ lea(esp, Operand(esp, ecx, times_pointer_size, 0));
- __ PushReturnAddressFrom(ebx);
- __ ret(0);
-
- __ bind(&failed);
- // Restore target function and new target.
- __ pop(edx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
- }
- // On failure, tail call back to regular js.
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
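The nested loops above pass exactly three values (stdlib, foreign, heap) to Runtime::kInstantiateAsmJs, taking whatever the caller supplied and padding the rest with undefined. The selection logic, sketched with strings as placeholder values:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  const std::string UNDEFINED = "undefined";  // placeholder for the root value
  // Caller arguments as seen by the constructor (example: only stdlib given).
  std::vector<std::string> caller_args = {"stdlib"};

  // The builtin forwards exactly three values, padding missing ones.
  const int kExpected = 3;
  int j = static_cast<int>(caller_args.size());
  if (j > kExpected) j = kExpected;

  std::vector<std::string> runtime_args(caller_args.begin(),
                                        caller_args.begin() + j);
  while (static_cast<int>(runtime_args.size()) < kExpected)
    runtime_args.push_back(UNDEFINED);

  for (const auto& a : runtime_args) std::printf("%s\n", a.c_str());
}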
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // Re-execute the code that was patched back to the young age when
- // the stub returns.
- __ sub(Operand(esp, 0), Immediate(5));
- __ pushad();
- __ mov(eax, Operand(esp, 8 * kPointerSize));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(2, ebx);
- __ mov(Operand(esp, 1 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- }
- __ popad();
- __ ret(0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- }
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
- // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
- // that make_code_young doesn't do any garbage collection which allows us to
- // save/restore the registers without worrying about which of them contain
- // pointers.
- __ pushad();
- __ mov(eax, Operand(esp, 8 * kPointerSize));
- __ sub(eax, Immediate(Assembler::kCallInstructionLength));
- { // NOLINT
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(2, ebx);
- __ mov(Operand(esp, 1 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(
- ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
- 2);
- }
- __ popad();
-
- // Perform prologue operations usually performed by the young code stub.
- __ pop(eax); // Pop return address into scratch register.
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
- __ push(eax); // Push return address after frame prologue.
-
- // Jump to point after the code-age stub.
- __ ret(0);
-}
-
-void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
- GenerateMakeCodeYoungAgainCommon(masm);
-}
-
-void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
- Generate_MarkCodeAsExecutedOnce(masm);
-}
-
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across the notification; this is important for
- // compiled stubs that tail call the runtime on deopts, passing their
- // parameters in registers.
- __ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ popad();
- // Tear down internal frame.
- }
-
- __ pop(MemOperand(esp, 0)); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
-}
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
-}
-
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
-}
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass deoptimization type to the runtime system.
- __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- // Tear down internal frame.
- }
-
- // Get the full codegen state from the stack and untag it.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ SmiUntag(ecx);
-
- // Switch on the state.
- Label not_no_registers, not_tos_eax;
- __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
- __ j(not_equal, &not_no_registers, Label::kNear);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
- DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
- __ j(not_equal, &not_tos_eax, Label::kNear);
- __ ret(2 * kPointerSize); // Remove state, eax.
-
- __ bind(&not_tos_eax);
- __ Abort(kNoCasesLeft);
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-// static
-void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : argArray
- // -- esp[8] : thisArg
- // -- esp[12] : receiver
- // -----------------------------------
-
- // 1. Load receiver into edi, argArray into eax (if present), remove all
- // arguments from the stack (including the receiver), and push thisArg (if
- // present) instead.
- {
- Label no_arg_array, no_this_arg;
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ mov(ebx, edx);
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ test(eax, eax);
- __ j(zero, &no_this_arg, Label::kNear);
- {
- __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
- __ cmp(eax, Immediate(1));
- __ j(equal, &no_arg_array, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
- __ bind(&no_arg_array);
- }
- __ bind(&no_this_arg);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
- }
-
- // ----------- S t a t e -------------
- // -- eax : argArray
- // -- edi : receiver
- // -- esp[0] : return address
- // -- esp[4] : thisArg
- // -----------------------------------
-
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &receiver_not_callable, Label::kNear);
-
- // 3. Tail call with no arguments if argArray is null or undefined.
- Label no_arguments;
- __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
- __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
- Label::kNear);
-
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The argArray is either null or undefined, so we tail call without any
- // arguments to the receiver.
- __ bind(&no_arguments);
- {
- __ Set(eax, 0);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- }
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
-}
-
-// static
-void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
- // Stack Layout:
- // esp[0] : Return address
- // esp[4] : Argument n
- // esp[8] : Argument n-1
- // ...
- // esp[4 * n] : Argument 1
- // esp[4 * (n + 1)] : Receiver (callable to call)
- //
- // eax contains the number of arguments, n, not counting the receiver.
- //
- // 1. Make sure we have at least one argument.
- {
- Label done;
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
- __ PopReturnAddressTo(ebx);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- __ PushReturnAddressFrom(ebx);
- __ inc(eax);
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- {
- Label loop;
- __ mov(ecx, eax);
- __ bind(&loop);
- __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
- __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), ebx);
- __ dec(ecx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(ebx); // Discard copy of return address.
- __ dec(eax); // One fewer argument (first argument is new receiver).
- }
-
- // 4. Call the callable.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
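Step 3 above slides the return address and every argument one slot toward the receiver, then drops the duplicated bottom slot, so the original first argument becomes the new receiver. The same shuffle on a plain array (values are invented):

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Stack modeled from esp upwards: [return addr, arg n, ..., arg 1, receiver].
  std::vector<std::string> stack = {"ret", "arg2", "arg1", "callable"};
  int argc = 2;

  // The copy loop: move every slot one position up, overwriting the receiver.
  for (int i = argc; i >= 0; --i) stack[i + 1] = stack[i];
  stack.erase(stack.begin());  // pop(ebx): discard the duplicated bottom slot
  --argc;                      // arg1 is now the receiver

  // Prints ret, arg2, arg1 -- arg1 now occupies the receiver slot.
  for (const auto& s : stack) std::printf("%s\n", s.c_str());
}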
-
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : argumentsList
- // -- esp[8] : thisArgument
- // -- esp[12] : target
- // -- esp[16] : receiver
- // -----------------------------------
-
- // 1. Load target into edi (if present), argumentsList into eax (if present),
- // remove all arguments from the stack (including the receiver), and push
- // thisArgument (if present) instead.
- {
- Label done;
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
- __ mov(edx, edi);
- __ mov(ebx, edi);
- __ cmp(eax, Immediate(1));
- __ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
- __ j(equal, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
- __ cmp(eax, Immediate(3));
- __ j(below, &done, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
- __ bind(&done);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
- }
-
- // ----------- S t a t e -------------
- // -- eax : argumentsList
- // -- edi : target
- // -- esp[0] : return address
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &target_not_callable, Label::kNear);
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
-}
-
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : new.target (optional)
- // -- esp[8] : argumentsList
- // -- esp[12] : target
- // -- esp[16] : receiver
- // -----------------------------------
-
- // 1. Load target into edi (if present), argumentsList into eax (if present),
- // new.target into edx (if present, otherwise use target), remove all
- // arguments from the stack (including the receiver), and push undefined as
- // the receiver instead.
- {
- Label done;
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
- __ mov(edx, edi);
- __ mov(ebx, edi);
- __ cmp(eax, Immediate(1));
- __ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
- __ mov(edx, edi);
- __ j(equal, &done, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
- __ cmp(eax, Immediate(3));
- __ j(below, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
- __ bind(&done);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- __ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
- }
-
- // ----------- S t a t e -------------
- // -- eax : argumentsList
- // -- edx : new.target
- // -- edi : target
- // -- esp[0] : return address
- // -- esp[4] : receiver (undefined)
- // -----------------------------------
-
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &target_not_constructor, Label::kNear);
-
- // 3. Make sure the new.target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &new_target_not_constructor, Label::kNear);
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
-
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ mov(Operand(esp, kPointerSize), edx);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
-}
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // The following Smi-tag check will catch both a NULL pointer and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function, by tail calling a stub.
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
- __ mov(edx, edi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // The following Smi-tag check will catch both a NULL pointer and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
- }
-
- // Run the native code for the Array function called as a normal function,
- // by tail calling a stub.
- __ mov(ebx, masm->isolate()->factory()->undefined_value());
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into ebx.
- Label no_arguments;
- {
- __ test(eax, eax);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- }
-
- // 2a. Convert the first argument to a number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(eax);
- __ EnterBuiltinFrame(esi, edi, eax);
- __ mov(eax, ebx);
- __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(esi, edi, ebx); // Argc popped to ebx.
- __ SmiUntag(ebx);
- }
-
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ Ret();
- }
-
- // 2b. No arguments, return +0 (already in eax).
- __ bind(&no_arguments);
- __ ret(1 * kPointerSize);
-}
-
-// static
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- edx : new target
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Store the Smi-tagged argc in ecx.
- __ mov(ecx, eax);
- __ SmiTag(ecx);
-
- // 2. Load the first argument into ebx.
- {
- Label no_arguments, done;
- __ test(eax, eax);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&no_arguments);
- __ Move(ebx, Smi::kZero);
- __ bind(&done);
- }
-
- // 3. Make sure ebx is a number.
- {
- Label done_convert;
- __ JumpIfSmi(ebx, &done_convert);
- __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(equal, &done_convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(esi, edi, ecx);
- __ Push(edx);
- __ Move(eax, ebx);
- __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
- __ Move(ebx, eax);
- __ Pop(edx);
- __ LeaveBuiltinFrame(esi, edi, ecx);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, done_alloc, new_object;
- __ cmp(edx, edi);
- __ j(not_equal, &new_object);
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(eax, edi, ebx, esi, &done_alloc);
- __ jmp(&drop_frame_and_ret);
-
- __ bind(&done_alloc);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); // Restore esi.
-
- // 6. Fall back to the runtime to create a new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(esi, edi, ecx);
- __ Push(ebx); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ Pop(FieldOperand(eax, JSValue::kValueOffset));
- __ LeaveBuiltinFrame(esi, edi, ecx);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(esi);
- __ SmiUntag(ecx);
- __ lea(esp, Operand(esp, ecx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(esi);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into eax.
- Label no_arguments;
- {
- __ mov(ebx, eax); // Store argc in ebx.
- __ test(eax, eax);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(eax, Operand(esp, eax, times_pointer_size, 0));
- }
-
- // 2a. At least one argument, return eax if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(eax, &to_string, Label::kNear);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
- __ j(above, &to_string, Label::kNear);
- __ j(equal, &symbol_descriptive_string, Label::kNear);
- __ jmp(&drop_frame_and_ret, Label::kNear);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ bind(&no_arguments);
- {
- __ LoadRoot(eax, Heap::kempty_stringRootIndex);
- __ ret(1 * kPointerSize);
- }
-
- // 3a. Convert eax to a string.
- __ bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(ebx);
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(esi, edi, ebx);
- __ SmiUntag(ebx);
- }
- __ jmp(&drop_frame_and_ret, Label::kNear);
-
- // 3b. Convert symbol in eax to a string.
- __ bind(&symbol_descriptive_string);
- {
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ Push(eax);
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- edx : new target
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- __ mov(ebx, eax);
-
- // 2. Load the first argument into eax.
- {
- Label no_arguments, done;
- __ test(ebx, ebx);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(eax, Operand(esp, ebx, times_pointer_size, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&no_arguments);
- __ LoadRoot(eax, Heap::kempty_stringRootIndex);
- __ bind(&done);
- }
-
- // 3. Make sure eax is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
- __ j(below, &done_convert);
- __ bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(ebx);
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ Push(edx);
- __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
- __ Pop(edx);
- __ LeaveBuiltinFrame(esi, edi, ebx);
- __ SmiUntag(ebx);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, done_alloc, new_object;
- __ cmp(edx, edi);
- __ j(not_equal, &new_object);
-
- // 5. Allocate a JSValue wrapper for the string.
- // AllocateJSValue can't handle src == dst register. Reuse esi and restore it
- // as needed after the call.
- __ mov(esi, eax);
- __ AllocateJSValue(eax, edi, esi, ecx, &done_alloc);
- __ jmp(&drop_frame_and_ret);
-
- __ bind(&done_alloc);
- {
- // Restore eax to the first argument and esi to the context.
- __ mov(eax, esi);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- }
-
- // 6. Fall back to the runtime to create a new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(ebx);
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ Push(eax); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ Pop(FieldOperand(eax, JSValue::kValueOffset));
- __ LeaveBuiltinFrame(esi, edi, ebx);
- __ SmiUntag(ebx);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ Ret();
- }
-}
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(ebp);
- __ mov(ebp, esp);
-
- // Store the arguments adaptor context sentinel.
- __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Push the function on the stack.
- __ push(edi);
-
- // Preserve the number of arguments on the stack. Must preserve eax,
- // ebx and ecx because these registers are used when copying the
- // arguments and the receiver.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
- __ push(edi);
-}
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack.
- __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ leave();
-
- // Remove caller arguments from the stack.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
-}
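
The adaptor-frame helpers above rely on the ia32 Smi encoding (kSmiTagSize == 1, kSmiTag == 0): lea(edi, Operand(eax, eax, times_1, kSmiTag)) computes eax * 2 + 0, i.e. it Smi-tags the argument count, and the times_2 scaling in the frame teardown undoes it. A minimal standalone sketch of that encoding, assuming plain 32-bit integers (the helper names here are illustrative, not V8's):

    #include <cassert>
    #include <cstdint>

    // 32-bit Smi encoding as assumed by the adaptor-frame code above: the low
    // bit is the tag (0 for a Smi), the value lives in the upper 31 bits.
    constexpr int kSmiTag = 0;
    constexpr int kSmiTagSize = 1;

    int32_t SmiTag(int32_t value) { return (value << kSmiTagSize) | kSmiTag; }
    int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

    int main() {
      // lea(edi, Operand(eax, eax, times_1, kSmiTag)) is eax + eax + 0, which
      // is exactly SmiTag(eax) under this encoding.
      int32_t argc = 3;
      assert(SmiTag(argc) == argc * 2);
      assert(SmiUntag(SmiTag(argc)) == argc);
      return 0;
    }
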
-
-// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argumentsList
- // -- edi : target
- // -- edx : new.target (checked to be constructor or undefined)
- // -- esp[0] : return address.
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(eax, &create_runtime);
-
- // Load the map of argumentsList into ecx.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-
- // Load native context into ebx.
- __ mov(ebx, NativeContextOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
- __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
- __ j(equal, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ Push(edx);
- __ Push(eax);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(edx);
- __ Pop(edi);
- __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
- }
- __ jmp(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
- __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
- __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(not_equal, &create_runtime);
- __ SmiUntag(ebx);
- __ mov(eax, ecx);
- __ jmp(&done_create);
-
-    // For holey JSArrays we need to check that the array prototype chain
-    // protector is intact and that our prototype is actually Array.prototype.
- __ bind(&create_holey_array);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
- __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &create_runtime);
- __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &create_runtime);
- __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
- __ jmp(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(ecx);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_holey_array, Label::kNear);
- __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(equal, &create_holey_array, Label::kNear);
- __ j(above, &create_runtime);
- __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
-
- __ bind(&done_create);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(ecx, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ neg(ecx);
- __ add(ecx, esp);
- __ sar(ecx, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, ebx);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // ----------- S t a t e -------------
- // -- edi : target
- // -- eax : args (a FixedArray built from argumentsList)
- // -- ebx : len (number of elements to push from args)
- // -- edx : new.target (checked to be constructor or undefined)
- // -- esp[0] : return address.
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // Push arguments onto the stack (thisArgument is already on the stack).
- {
- // Save edx/edi to stX0/stX1.
- __ push(edx);
- __ push(edi);
- __ fld_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 4));
- __ lea(esp, Operand(esp, 2 * kFloatSize));
-
- __ PopReturnAddressTo(edx);
- __ Move(ecx, Immediate(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(ecx, ebx);
- __ j(equal, &done, Label::kNear);
- // Turn the hole into undefined as we go.
- __ mov(edi,
- FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
- __ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(edi);
- __ inc(ecx);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(edx);
-
- // Restore edx/edi from stX0/stX1.
- __ lea(esp, Operand(esp, -2 * kFloatSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fstp_s(MemOperand(esp, 4));
- __ pop(edx);
- __ pop(edi);
-
- __ Move(eax, ebx);
- }
-
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
- __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
-}
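
Generate_Apply's JSArray fast path keys off the packed/holey ordering asserted above (FAST_SMI_ELEMENTS == 0 through FAST_HOLEY_ELEMENTS == 3): packed kinds are copied directly, holey kinds additionally require Array.prototype and the array protector cell to be intact, and anything above FAST_HOLEY_ELEMENTS falls back to the runtime. A rough standalone restatement of that decision, with the enum values mirroring the STATIC_ASSERTs and everything else purely illustrative:

    #include <iostream>

    // Mirrors the STATIC_ASSERTed ordering used by Generate_Apply.
    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3,
      // Anything above this goes to the runtime.
    };

    enum class ApplyPath { kDirect, kHoleyWithProtectorCheck, kRuntime };

    ApplyPath ClassifyArgumentsList(int elements_kind) {
      if (elements_kind == FAST_HOLEY_SMI_ELEMENTS ||
          elements_kind == FAST_HOLEY_ELEMENTS) {
        // Holey arrays are only safe if Array.prototype is unmodified and the
        // array protector cell is still valid; otherwise fall back to runtime.
        return ApplyPath::kHoleyWithProtectorCheck;
      }
      if (elements_kind > FAST_HOLEY_ELEMENTS) return ApplyPath::kRuntime;
      return ApplyPath::kDirect;  // FAST_SMI_ELEMENTS or FAST_ELEMENTS.
    }

    int main() {
      std::cout << (ClassifyArgumentsList(FAST_ELEMENTS) == ApplyPath::kDirect)
                << "\n";
      return 0;
    }
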
-
-// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
- // ----------- S t a t e -------------
- // -- edi : the target to call (can be any Object)
- // -- ecx : start index (to support rest parameters)
- // -- esp[0] : return address.
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &arguments_adaptor, Label::kNear);
- {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax,
- FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(ebx, ebp);
- }
- __ jmp(&arguments_done, Label::kNear);
- __ bind(&arguments_adaptor);
- {
- // Just load the length from the ArgumentsAdaptorFrame.
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- }
- __ bind(&arguments_done);
-
- Label stack_empty, stack_done;
- __ SmiUntag(eax);
- __ sub(eax, ecx);
- __ j(less_equal, &stack_empty);
- {
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack
- // limit".
- Label done;
- __ LoadRoot(ecx, Heap::kRealStackLimitRootIndex);
- // Make ecx the space we have left. The stack might already be
- // overflowed here which will cause ecx to become negative.
- __ neg(ecx);
- __ add(ecx, esp);
- __ sar(ecx, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, eax);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Forward the arguments from the caller frame.
- {
- Label loop;
- __ mov(ecx, eax);
- __ pop(edx);
- __ bind(&loop);
- {
- __ Push(Operand(ebx, ecx, times_pointer_size, 1 * kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
- }
- __ push(edx);
- }
- }
- __ jmp(&stack_done, Label::kNear);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ Move(eax, Immediate(0));
- }
- __ bind(&stack_done);
-
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
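
Both Generate_Apply and Generate_CallForwardVarargs use the same overflow test against the real stack limit: negate the limit, add the current stack pointer and shift right by kPointerSizeLog2, which yields the number of free pointer-sized slots; the call proceeds only if that count exceeds the number of arguments to push. Roughly, in plain C++ (the addresses below are arbitrary illustrative values):

    #include <cstdint>
    #include <iostream>

    constexpr int kPointerSizeLog2 = 2;  // ia32: 4-byte pointers.

    // neg(scratch); add(scratch, esp); sar(scratch, kPointerSizeLog2) computes
    // the number of free pointer-sized slots between esp and the limit.
    bool ArgumentsFitOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                             int nargs) {
      intptr_t free_slots =
          static_cast<intptr_t>(sp - real_stack_limit) >> kPointerSizeLog2;
      return free_slots > nargs;  // Signed comparison, as in the builtin.
    }

    int main() {
      std::cout << ArgumentsFitOnStack(0x10000000, 0x0FFFF000, 100) << "\n";  // 1
      std::cout << ArgumentsFitOnStack(0x10000000, 0x0FFFFFF0, 100) << "\n";  // 0
      return 0;
    }
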
-
-namespace {
-
-// Drops the top JavaScript frame and an arguments adaptor frame below it (if
-// present), preserving all the arguments prepared for the current call.
-// Does nothing if the debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg
-// | f()'s caller pc <- sp
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ movzx_b(scratch1,
- Operand::StaticVariable(is_tail_call_elimination_enabled));
- __ cmp(scratch1, Immediate(0));
- __ j(equal, &done, Label::kNear);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::STUB)));
- __ j(not_equal, &no_interpreter_frame, Label::kNear);
- __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(ebp, scratch2);
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(scratch1,
- FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ mov(
- caller_args_count_reg,
- FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack, 0);
- __ bind(&done);
-}
-} // namespace
-
-// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the function to call (checked to be a JSFunction)
- // -----------------------------------
- __ AssertFunction(edi);
-
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
- Label class_constructor;
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
- __ j(not_zero, &class_constructor);
-
- // Enter the context of the function; ToObject has to run in the function
- // context, and we also need to take the global proxy from the function
- // context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // We need to convert the receiver for non-native sloppy mode functions.
- Label done_convert;
- __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
- Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
- __ j(not_zero, &done_convert);
- {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the shared function info.
- // -- edi : the function to call (checked to be a JSFunction)
- // -- esi : the function context.
- // -----------------------------------
-
- if (mode == ConvertReceiverMode::kNullOrUndefined) {
- // Patch receiver to global proxy.
- __ LoadGlobalProxy(ecx);
- } else {
- Label convert_to_object, convert_receiver;
- __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
- __ j(above_equal, &done_convert);
- if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
- Label convert_global_proxy;
- __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy, Label::kNear);
- __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
- Label::kNear);
- __ bind(&convert_global_proxy);
- {
- // Patch receiver to global proxy.
- __ LoadGlobalProxy(ecx);
- }
- __ jmp(&convert_receiver);
- }
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ Push(eax);
- __ Push(edi);
- __ mov(eax, ecx);
- __ Push(esi);
- __ Call(masm->isolate()->builtins()->ToObject(),
- RelocInfo::CODE_TARGET);
- __ Pop(esi);
- __ mov(ecx, eax);
- __ Pop(edi);
- __ Pop(eax);
- __ SmiUntag(eax);
- }
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
- }
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
- }
- __ bind(&done_convert);
-
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the shared function info.
- // -- edi : the function to call (checked to be a JSFunction)
- // -- esi : the function context.
- // -----------------------------------
-
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- // Reload shared function info.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- }
-
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(ebx);
- ParameterCount actual(eax);
- ParameterCount expected(ebx);
- __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
- CheckDebugStepCallWrapper());
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ push(edi);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
-}
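
Generate_CallFunction only touches the receiver for sloppy-mode, non-native functions: null or undefined is replaced with the global proxy, other primitives go through ToObject, and values that are already JS receivers are left alone; strict-mode and native functions always get the receiver unchanged. A small decision-table sketch of those rules (the types and strings below are illustrative stand-ins, not V8 API):

    #include <string>

    // Illustrative receiver model; "global proxy" stands in for LoadGlobalProxy.
    enum class Receiver { kUndefinedOrNull, kPrimitive, kJSReceiver };

    std::string ConvertReceiver(Receiver r, bool sloppy_and_not_native) {
      if (!sloppy_and_not_native) return "unchanged";  // strict or native.
      switch (r) {
        case Receiver::kUndefinedOrNull: return "global proxy";
        case Receiver::kPrimitive:       return "ToObject(receiver)";
        case Receiver::kJSReceiver:      return "unchanged";
      }
      return "unchanged";
    }

    int main() { return ConvertReceiver(Receiver::kPrimitive, true).empty(); }
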
-
-namespace {
-
-void Generate_PushBoundArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : new.target (only in case of [[Construct]])
- // -- edi : target (checked to be a JSBoundFunction)
- // -----------------------------------
-
- // Load [[BoundArguments]] into ecx and length of that into ebx.
- Label no_bound_arguments;
- __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
- __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ test(ebx, ebx);
- __ j(zero, &no_bound_arguments);
- {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : new.target (only in case of [[Construct]])
- // -- edi : target (checked to be a JSBoundFunction)
- // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
- // -- ebx : the number of [[BoundArguments]]
- // -----------------------------------
-
- // Reserve stack space for the [[BoundArguments]].
- {
- Label done;
- __ lea(ecx, Operand(ebx, times_pointer_size, 0));
- __ sub(esp, ecx);
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack
- // limit".
- __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- // Restore the stack pointer.
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- }
- __ bind(&done);
- }
-
- // Adjust effective number of arguments to include return address.
- __ inc(eax);
-
- // Relocate arguments and return address down the stack.
- {
- Label loop;
- __ Set(ecx, 0);
- __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
- __ bind(&loop);
- __ fld_s(Operand(ebx, ecx, times_pointer_size, 0));
- __ fstp_s(Operand(esp, ecx, times_pointer_size, 0));
- __ inc(ecx);
- __ cmp(ecx, eax);
- __ j(less, &loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
- __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ bind(&loop);
- __ dec(ebx);
- __ fld_s(
- FieldOperand(ecx, ebx, times_pointer_size, FixedArray::kHeaderSize));
- __ fstp_s(Operand(esp, eax, times_pointer_size, 0));
- __ lea(eax, Operand(eax, 1));
- __ j(greater, &loop);
- }
-
-    // Adjust the effective number of arguments: eax currently counts the
-    // call-site arguments, the return address slot and the [[BoundArguments]],
-    // so subtract one for the return address.
- __ dec(eax);
- }
- __ bind(&no_bound_arguments);
-}
-
-} // namespace
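
Generate_PushBoundArguments splices the [[BoundArguments]] in between the receiver and the arguments supplied at the call site, so the bound target always sees the bound arguments first. A small vector-based sketch of the resulting argument order, modelling only the effect and not the stack relocation itself (names are illustrative):

    #include <cassert>
    #include <string>
    #include <vector>

    using Args = std::vector<std::string>;

    // The callee of a bound function sees [[BoundArguments]] followed by the
    // arguments supplied at the call site, which is what the relocation loop
    // above arranges (bound arguments end up closer to the receiver).
    Args EffectiveArguments(const Args& bound_args, const Args& call_args) {
      Args result = bound_args;
      result.insert(result.end(), call_args.begin(), call_args.end());
      return result;
    }

    int main() {
      Args effective = EffectiveArguments({"b1", "b2"}, {"a1", "a2"});
      assert((effective == Args{"b1", "b2", "a1", "a2"}));
      return 0;
    }
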
-
-// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the function to call (checked to be a JSBoundFunction)
- // -----------------------------------
- __ AssertBoundFunction(edi);
-
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- }
-
- // Patch the receiver to [[BoundThis]].
- __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
-
- // Push the [[BoundArguments]] onto the stack.
- Generate_PushBoundArguments(masm);
-
- // Call the [[BoundTargetFunction]] via the Call builtin.
- __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
- __ mov(ecx, Operand::StaticVariable(ExternalReference(
- Builtins::kCall_ReceiverIsAny, masm->isolate())));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
-}
-
-// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the target to call (can be any Object).
- // -----------------------------------
-
- Label non_callable, non_function, non_smi;
- __ JumpIfSmi(edi, &non_callable);
- __ bind(&non_smi);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
- RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
- RelocInfo::CODE_TARGET);
-
- // Check if target has a [[Call]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &non_callable);
-
- __ CmpInstanceType(ecx, JS_PROXY_TYPE);
- __ j(not_equal, &non_function);
-
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ PopReturnAddressTo(ecx);
- __ Push(edi);
- __ PushReturnAddressFrom(ecx);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ add(eax, Immediate(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
-
- // 2. Call to something else, which might have a [[Call]] internal method (if
- // not we raise an exception).
- __ bind(&non_function);
- // Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
- // Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
- __ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
- RelocInfo::CODE_TARGET);
-
- // 3. Call to something that is not callable.
- __ bind(&non_callable);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable);
- }
-}
-
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- // Free up some registers.
- // Save edx/edi to stX0/stX1.
- __ push(edx);
- __ push(edi);
- __ fld_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 4));
- __ lea(esp, Operand(esp, 2 * kFloatSize));
-
- Register argc = eax;
-
- Register scratch = ecx;
- Register scratch2 = edi;
-
- Register spread = ebx;
- Register spread_map = edx;
-
- Register spread_len = edx;
-
- Label runtime_call, push_args;
- __ mov(spread, Operand(esp, kPointerSize));
- __ JumpIfSmi(spread, &runtime_call);
- __ mov(spread_map, FieldOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CmpInstanceType(spread_map, JS_ARRAY_TYPE);
- __ j(not_equal, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ mov(scratch, FieldOperand(spread_map, Map::kPrototypeOffset));
- __ mov(scratch2, NativeContextOperand());
- __ cmp(scratch,
- ContextOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ mov(scratch2, NativeContextOperand());
- __ mov(scratch,
- ContextOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ cmp(scratch,
- ContextOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ mov(scratch, FieldOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ cmp(scratch, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(above, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmp(scratch, Immediate(FAST_SMI_ELEMENTS));
- __ j(equal, &no_protector_check);
- __ cmp(scratch, Immediate(FAST_ELEMENTS));
- __ j(equal, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ mov(spread_len, FieldOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ mov(spread, FieldOperand(spread, JSArray::kElementsOffset));
- __ jmp(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
-    // edx and edi need to be pushed for the runtime call below, so first
-    // restore them from stX0/stX1.
- __ lea(esp, Operand(esp, -2 * kFloatSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fstp_s(MemOperand(esp, 4));
- __ pop(edx);
- __ pop(edi);
-
- __ Push(edi);
- __ Push(edx);
- __ SmiTag(argc);
- __ Push(argc);
- __ Push(spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, eax);
- __ Pop(argc);
- __ SmiUntag(argc);
- __ Pop(edx);
- __ Pop(edi);
- // Free up some registers.
- // Save edx/edi to stX0/stX1.
- __ push(edx);
- __ push(edi);
- __ fld_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 4));
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ mov(spread_len, FieldOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ lea(argc, Operand(argc, spread_len, times_1, -1));
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ neg(scratch);
- __ add(scratch, esp);
- __ sar(scratch, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, spread_len);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- Register return_address = edi;
- // Pop the return address and spread argument.
- __ PopReturnAddressTo(return_address);
- __ Pop(scratch);
-
- Register scratch2 = esi;
-    // Save esi to stX0; edx/edi now shift to stX1/stX2.
- __ push(esi);
- __ fld_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, 1 * kFloatSize));
-
- __ mov(scratch, Immediate(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(scratch, spread_len);
- __ j(equal, &done, Label::kNear);
- __ mov(scratch2, FieldOperand(spread, scratch, times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ inc(scratch);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(return_address);
-
-    // Now restore esi from stX0 and edx/edi from stX1/stX2.
- __ lea(esp, Operand(esp, -3 * kFloatSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fstp_s(MemOperand(esp, 4));
- __ fstp_s(MemOperand(esp, 8));
- __ pop(esi);
- __ pop(edx);
- __ pop(edi);
- }
-}
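
CheckSpreadAndPushToStack only takes the inlined path when iterating the spread cannot observe anything user-visible: the spread is a JSArray whose prototype is the unmodified initial Array.prototype, the array-iterator protector and the initial iterator map are intact, and the elements kind is fast (with the array protector additionally required for holey kinds); everything else goes through Runtime::kSpreadIterableFixed. A condensed restatement of that predicate (plain C++, all names illustrative):

    #include <iostream>

    // Illustrative summary of the fast-path conditions checked above.
    struct SpreadState {
      bool is_js_array;
      bool has_initial_array_prototype;
      bool array_iterator_protector_valid;
      bool initial_iterator_map_unchanged;
      int elements_kind;           // 0..3 = fast kinds, higher = slow.
      bool array_protector_valid;  // Needed only for holey fast kinds.
    };

    bool CanInlineSpread(const SpreadState& s) {
      if (!s.is_js_array || !s.has_initial_array_prototype) return false;
      if (!s.array_iterator_protector_valid) return false;
      if (!s.initial_iterator_map_unchanged) return false;
      if (s.elements_kind > 3) return false;  // Not a fast elements kind.
      bool holey = (s.elements_kind == 1 || s.elements_kind == 3);
      return !holey || s.array_protector_valid;
    }

    int main() {
      SpreadState s{true, true, true, true, 3, true};
      std::cout << CanInlineSpread(s) << "\n";  // 1
      return 0;
    }
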
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the target to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push edx to save it.
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
-// static
-void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (checked to be a constructor)
- // -- edi : the constructor to call (checked to be a JSFunction)
- // -----------------------------------
- __ AssertFunction(edi);
-
-  // The calling convention for function-specific ConstructStubs requires
-  // ebx to contain either an AllocationSite or undefined.
- __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
-
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
-}
-
-// static
-void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (checked to be a constructor)
- // -- edi : the constructor to call (checked to be a JSBoundFunction)
- // -----------------------------------
- __ AssertBoundFunction(edi);
-
- // Push the [[BoundArguments]] onto the stack.
- Generate_PushBoundArguments(masm);
-
- // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
- {
- Label done;
- __ cmp(edi, edx);
- __ j(not_equal, &done, Label::kNear);
- __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
- __ bind(&done);
- }
-
- // Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
- __ mov(ecx, Operand::StaticVariable(
- ExternalReference(Builtins::kConstruct, masm->isolate())));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
-}
-
-// static
-void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the constructor to call (checked to be a JSProxy)
- // -- edx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- // Call into the Runtime for Proxy [[Construct]].
- __ PopReturnAddressTo(ecx);
- __ Push(edi);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- // Include the pushed new_target, constructor and the receiver.
- __ add(eax, Immediate(3));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
-}
-
-// static
-void Builtins::Generate_Construct(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (can be any Object)
- // -----------------------------------
-
- // Check if target is a Smi.
- Label non_constructor;
- __ JumpIfSmi(edi, &non_constructor, Label::kNear);
-
- // Dispatch based on instance type.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
- RelocInfo::CODE_TARGET);
-
- // Check if target has a [[Construct]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &non_constructor, Label::kNear);
-
- // Only dispatch to bound functions after checking whether they are
- // constructors.
- __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
- RelocInfo::CODE_TARGET);
-
- // Only dispatch to proxies after checking whether they are constructors.
- __ CmpInstanceType(ecx, JS_PROXY_TYPE);
- __ j(equal, masm->isolate()->builtins()->ConstructProxy(),
- RelocInfo::CODE_TARGET);
-
- // Called Construct on an exotic Object with a [[Construct]] internal method.
- {
- // Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
- // Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, edi);
- __ Jump(masm->isolate()->builtins()->CallFunction(),
- RelocInfo::CODE_TARGET);
- }
-
- // Called Construct on an Object that doesn't have a [[Construct]] internal
- // method.
- __ bind(&non_constructor);
- __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
- RelocInfo::CODE_TARGET);
-}
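
Generate_Construct is purely a dispatcher: Smis and objects without the constructor bit end up at ConstructedNonConstructable, JSFunctions, bound functions and proxies go to their dedicated construct builtins, and any other object with a [[Construct]] internal method is routed through the call-as-constructor delegate. The decision tree, restated in plain C++ (the enums and names are illustrative, not V8 API):

    #include <iostream>

    // Illustrative stand-ins for the instance-type / map-bit checks above.
    enum class TargetKind { kSmi, kJSFunction, kBoundFunction, kProxy, kOther };

    enum class ConstructPath {
      kConstructFunction,
      kConstructBoundFunction,
      kConstructProxy,
      kCallAsConstructorDelegate,
      kThrowNonConstructable,
    };

    ConstructPath DispatchConstruct(TargetKind kind, bool has_construct_bit) {
      if (kind == TargetKind::kSmi) return ConstructPath::kThrowNonConstructable;
      if (kind == TargetKind::kJSFunction)
        return ConstructPath::kConstructFunction;
      if (!has_construct_bit) return ConstructPath::kThrowNonConstructable;
      if (kind == TargetKind::kBoundFunction)
        return ConstructPath::kConstructBoundFunction;
      if (kind == TargetKind::kProxy) return ConstructPath::kConstructProxy;
      return ConstructPath::kCallAsConstructorDelegate;
    }

    int main() {
      std::cout << (DispatchConstruct(TargetKind::kProxy, true) ==
                    ConstructPath::kConstructProxy)
                << "\n";
      return 0;
    }
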
-
-// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (can be any Object)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : requested object size (untagged)
- // -- esp[0] : return address
- // -----------------------------------
- __ SmiTag(edx);
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : requested object size (untagged)
- // -- esp[0] : return address
- // -----------------------------------
- __ SmiTag(edx);
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : message_id as Smi
- // -- esp[0] : return address
- // -----------------------------------
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : actual number of arguments
- // -- ebx : expected number of arguments
- // -- edx : new target (passed through to callee)
- // -- edi : function (passed through to callee)
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments, stack_overflow;
- __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
-
- Label enough, too_few;
- __ cmp(eax, ebx);
- __ j(less, &too_few);
- __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- __ j(equal, &dont_adapt_arguments);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
- // edi is used as a scratch register. It should be restored from the frame
- // when needed.
- Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_4, offset));
- __ mov(eax, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ inc(eax);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kPointerSize));
- __ cmp(eax, ebx);
- __ j(less, &copy);
- // eax now contains the expected number of arguments.
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
- // edi is used as a scratch register. It should be restored from the frame
- // when needed.
- Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
-
- // Remember expected arguments in ecx.
- __ mov(ecx, ebx);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_4, offset));
- // ebx = expected - actual.
- __ sub(ebx, eax);
- // eax = -actual - 1
- __ neg(eax);
- __ sub(eax, Immediate(1));
-
- Label copy;
- __ bind(&copy);
- __ inc(eax);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kPointerSize));
- __ test(eax, eax);
- __ j(not_zero, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ bind(&fill);
- __ inc(eax);
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, ebx);
- __ j(less, &fill);
-
- // Restore expected arguments.
- __ mov(eax, ecx);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- // Restore function pointer.
- __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- // eax : expected number of arguments
- // edx : new target (passed through to callee)
- // edi : function (passed through to callee)
- __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ call(ecx);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ jmp(ecx);
-
- __ bind(&stack_overflow);
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ int3();
- }
-}
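
The adaptor trampoline guarantees the callee sees exactly the expected number of arguments: when enough were passed it copies the first expected ones, and when too few were passed it copies all actuals and pads the rest with undefined (the actual count is recorded in the adaptor frame by EnterArgumentsAdaptorFrame). A compact vector model of that adaptation, illustrative only; the real code works in place on the caller frame:

    #include <cassert>
    #include <string>
    #include <vector>

    using Value = std::string;
    const Value kUndefined = "undefined";

    // Model of ArgumentsAdaptorTrampoline: give the callee `expected` values,
    // copying what the caller supplied and filling the rest with undefined.
    std::vector<Value> AdaptArguments(const std::vector<Value>& actual,
                                      size_t expected) {
      std::vector<Value> adapted;
      for (size_t i = 0; i < expected; ++i) {
        adapted.push_back(i < actual.size() ? actual[i] : kUndefined);
      }
      return adapted;
    }

    int main() {
      assert((AdaptArguments({"a"}, 3) ==
              std::vector<Value>{"a", kUndefined, kUndefined}));
      assert((AdaptArguments({"a", "b", "c"}, 2) ==
              std::vector<Value>{"a", "b"}));
      return 0;
    }
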
-
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
- // Lookup the function in the JavaScript frame.
- if (has_handler_frame) {
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
- }
-
- Label skip;
- // If the code object is null, just return to the caller.
- __ cmp(eax, Immediate(0));
- __ j(not_equal, &skip, Label::kNear);
- __ ret(0);
-
- __ bind(&skip);
-
-  // Drop any potential handler frame that may be sitting on top of the actual
-  // JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ leave();
- }
-
- // Load deoptimization data from the code object.
- __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
-
- // Load the OSR entrypoint offset from the deoptimization data.
- __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
- __ SmiUntag(ebx);
-
- // Compute the target address = code_obj + header_size + osr_offset
- __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
-
- // Overwrite the return address on the stack.
- __ mov(Operand(esp, 0), eax);
-
- // And "return" to the OSR entry point of the function.
- __ ret(0);
-}
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87