summaryrefslogtreecommitdiff
path: root/deps/v8/src/builtins
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/builtins')
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc156
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc164
-rw-r--r--deps/v8/src/builtins/builtins-api.cc1
-rw-r--r--deps/v8/src/builtins/builtins-array.cc1806
-rw-r--r--deps/v8/src/builtins/builtins-boolean.cc29
-rw-r--r--deps/v8/src/builtins/builtins-constructor.cc772
-rw-r--r--deps/v8/src/builtins/builtins-constructor.h68
-rw-r--r--deps/v8/src/builtins/builtins-conversion.cc321
-rw-r--r--deps/v8/src/builtins/builtins-date.cc308
-rw-r--r--deps/v8/src/builtins/builtins-function.cc208
-rw-r--r--deps/v8/src/builtins/builtins-generator.cc36
-rw-r--r--deps/v8/src/builtins/builtins-global.cc107
-rw-r--r--deps/v8/src/builtins/builtins-handler.cc239
-rw-r--r--deps/v8/src/builtins/builtins-ic.cc78
-rw-r--r--deps/v8/src/builtins/builtins-internal.cc268
-rw-r--r--deps/v8/src/builtins/builtins-iterator.cc68
-rw-r--r--deps/v8/src/builtins/builtins-math.cc515
-rw-r--r--deps/v8/src/builtins/builtins-number.cc1663
-rw-r--r--deps/v8/src/builtins/builtins-object.cc698
-rw-r--r--deps/v8/src/builtins/builtins-promise.cc1574
-rw-r--r--deps/v8/src/builtins/builtins-promise.h120
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc34
-rw-r--r--deps/v8/src/builtins/builtins-regexp.cc3066
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc155
-rw-r--r--deps/v8/src/builtins/builtins-string.cc1451
-rw-r--r--deps/v8/src/builtins/builtins-symbol.cc76
-rw-r--r--deps/v8/src/builtins/builtins-typedarray.cc152
-rw-r--r--deps/v8/src/builtins/builtins-utils.h35
-rw-r--r--deps/v8/src/builtins/builtins.cc17
-rw-r--r--deps/v8/src/builtins/builtins.h1396
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc160
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc171
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc469
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc174
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc168
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc175
-rw-r--r--deps/v8/src/builtins/x87/OWNERS1
-rw-r--r--deps/v8/src/builtins/x87/builtins-x87.cc168
38 files changed, 10113 insertions, 6954 deletions
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 6103971787..240d271b2b 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -326,11 +326,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(r6);
__ EnterBuiltinFrame(cp, r1, r6);
__ Push(r2); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r2);
__ LeaveBuiltinFrame(cp, r1, r6);
__ SmiUntag(r6);
@@ -474,11 +474,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(r6);
__ EnterBuiltinFrame(cp, r1, r6);
__ Push(r2); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r2);
__ LeaveBuiltinFrame(cp, r1, r6);
__ SmiUntag(r6);
@@ -574,8 +574,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
if (create_implicit_receiver) {
// Allocate the new receiver object.
__ Push(r1, r3);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(r4, r0);
__ Pop(r1, r3);
@@ -737,19 +737,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+ __ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ mov(ip, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ mov(ip, Operand(debug_hook));
__ ldrsb(ip, MemOperand(ip));
- __ cmp(ip, Operand(StepIn));
- __ b(ge, &prepare_step_in_if_stepping);
+ __ cmp(ip, Operand(0));
+ __ b(ne, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -790,14 +789,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
- __ b(ne, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
__ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(
@@ -812,54 +812,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Jump(r5);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- DCHECK(!FLAG_enable_embedded_constant_pool);
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(lr, fp);
- __ Move(fp, sp);
- __ Push(cp, r4);
-
- // Restore the operand stack.
- __ ldr(r0, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
- __ ldr(r3, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ add(r0, r0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r3, r0, Operand(r3, LSL, kPointerSizeLog2 - 1));
- {
- Label done_loop, loop;
- __ bind(&loop);
- __ cmp(r0, r3);
- __ b(eq, &done_loop);
- __ ldr(ip, MemOperand(r0, kPointerSize, PostIndex));
- __ Push(ip);
- __ b(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
- __ str(ip, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
-
- // Resume the generator function at the continuation.
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ add(r3, r3, Operand(r2, ASR, 1));
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ Move(r0, r1); // Continuation expects generator object in r0.
- __ Jump(r3);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r2, r4);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r1, r2);
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
}
@@ -1078,6 +1035,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov(r9, Operand(BytecodeArray::kNoAgeBytecodeAge));
+ __ strb(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset));
+
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1407,12 +1369,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ b(ne, &loop_bottom);
- // OSR id set to none?
- __ ldr(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
- __ b(ne, &loop_bottom);
// Literals available?
__ ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1485,14 +1441,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ b(ne, &gotta_call_runtime_no_stack);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
- __ and_(r5, r5, Operand(Code::KindField::kMask));
- __ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
- __ cmp(r5, Operand(Code::BUILTIN));
+ __ Move(r5, masm->CodeObject());
+ __ cmp(entry, r5);
__ b(eq, &gotta_call_runtime_no_stack);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, r5);
@@ -1609,14 +1565,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ mov(pc, r0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2158,7 +2109,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(r0, &create_runtime);
// Load the map of argumentsList into r2.
@@ -2202,17 +2154,37 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mov(r0, r4);
__ b(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and our prototype is the Array.prototype actually.
+ __ bind(&create_holey_array);
+ __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+ __ ldr(r4, ContextMemOperand(r4, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ cmp(r2, r4);
+ __ b(ne, &create_runtime);
+ __ LoadRoot(r4, Heap::kArrayProtectorRootIndex);
+ __ ldr(r2, FieldMemOperand(r4, PropertyCell::kValueOffset));
+ __ cmp(r2, Operand(Smi::FromInt(Isolate::kProtectorValid)));
+ __ b(ne, &create_runtime);
+ __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ SmiUntag(r2);
+ __ b(&done_create);
+
// Try to create the list from a JSArray object.
+ // -- r2 and r4 must be preserved till bne create_holey_array.
__ bind(&create_array);
- __ ldr(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r2);
+ __ ldr(r5, FieldMemOperand(r2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r5);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmp(r2, Operand(FAST_ELEMENTS));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ cmp(r5, Operand(FAST_HOLEY_ELEMENTS));
__ b(hi, &create_runtime);
- __ cmp(r2, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ b(eq, &create_runtime);
+ // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
+ __ tst(r5, Operand(1));
+ __ b(ne, &create_holey_array);
+ // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
__ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
__ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
__ SmiUntag(r2);
@@ -2247,12 +2219,16 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
__ mov(r4, Operand(0));
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
Label done, loop;
__ bind(&loop);
__ cmp(r4, r2);
__ b(eq, &done);
__ add(ip, r0, Operand(r4, LSL, kPointerSizeLog2));
__ ldr(ip, FieldMemOperand(ip, FixedArray::kHeaderSize));
+ __ cmp(r5, ip);
+ __ mov(ip, r6, LeaveCC, eq);
__ Push(ip);
__ add(r4, r4, Operand(1));
__ b(&loop);
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index aeb0508a20..08cf664724 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -315,11 +315,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(x6);
__ EnterBuiltinFrame(cp, x1, x6);
__ Push(x2); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(x2);
__ LeaveBuiltinFrame(cp, x1, x6);
__ SmiUntag(x6);
@@ -467,11 +467,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(x6);
__ EnterBuiltinFrame(cp, x1, x6);
__ Push(x2); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(x2);
__ LeaveBuiltinFrame(cp, x1, x6);
__ SmiUntag(x6);
@@ -569,8 +569,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
if (create_implicit_receiver) {
// Allocate the new receiver object.
__ Push(constructor, new_target);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Mov(x4, x0);
__ Pop(new_target, constructor);
@@ -744,18 +744,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Str(x2, FieldMemOperand(x1, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ Ldr(cp, FieldMemOperand(x1, JSGeneratorObject::kContextOffset));
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ Ldr(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ Mov(x10, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ Mov(x10, Operand(debug_hook));
__ Ldrsb(x10, MemOperand(x10));
- __ CompareAndBranch(x10, Operand(StepIn), ge, &prepare_step_in_if_stepping);
+ __ CompareAndBranch(x10, Operand(0), ne, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -789,14 +788,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
__ PushMultipleTimes(x11, w10);
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
- __ B(ne, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
__ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w0, FieldMemOperand(
@@ -810,54 +810,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Jump(x5);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(lr, fp);
- __ Move(fp, jssp);
- __ Push(cp, x4);
-
- // Restore the operand stack.
- __ Ldr(x0, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
- __ Ldr(w3, UntagSmiFieldMemOperand(x0, FixedArray::kLengthOffset));
- __ Add(x0, x0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Add(x3, x0, Operand(x3, LSL, kPointerSizeLog2));
- {
- Label done_loop, loop;
- __ Bind(&loop);
- __ Cmp(x0, x3);
- __ B(eq, &done_loop);
- __ Ldr(x10, MemOperand(x0, kPointerSize, PostIndex));
- __ Push(x10);
- __ B(&loop);
- __ Bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x10, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
-
- // Resume the generator function at the continuation.
- __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x10, FieldMemOperand(x10, SharedFunctionInfo::kCodeOffset));
- __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
- __ Ldrsw(x11, UntagSmiFieldMemOperand(
- x1, JSGeneratorObject::kContinuationOffset));
- __ Add(x10, x10, x11);
- __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ Str(x12, FieldMemOperand(x1, JSGeneratorObject::kContinuationOffset));
- __ Move(x0, x1); // Continuation expects generator object in x0.
- __ Br(x10);
- }
-
__ Bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(x1, x2, x4);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(x2, x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
@@ -1082,6 +1039,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ Mov(x10, Operand(BytecodeArray::kNoAgeBytecodeAge));
+ __ Strb(x10, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset));
+
// Load the initial bytecode offset.
__ Mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1411,12 +1373,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Cmp(temp, native_context);
__ B(ne, &loop_bottom);
- // OSR id set to none?
- __ Ldr(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ Cmp(temp, Operand(Smi::FromInt(bailout_id)));
- __ B(ne, &loop_bottom);
// Literals available?
__ Ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1478,14 +1434,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ TestAndBranchIfAnySet(
temp, 1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte,
&gotta_call_runtime);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
- __ and_(x5, x5, Operand(Code::KindField::kMask));
- __ Mov(x5, Operand(x5, LSR, Code::KindField::kShift));
- __ Cmp(x5, Operand(Code::BUILTIN));
+ __ Move(temp, masm->CodeObject());
+ __ Cmp(entry, temp);
__ B(eq, &gotta_call_runtime);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, x5);
@@ -1599,14 +1555,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Br(x0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2218,7 +2169,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(arguments_list, &create_runtime);
// Load native context.
@@ -2240,7 +2192,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ B(eq, &create_arguments);
// Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(arguments_list_map, native_context, JS_ARRAY_TYPE);
+ __ CompareInstanceType(arguments_list_map, x10, JS_ARRAY_TYPE);
__ B(eq, &create_array);
// Ask the runtime to create the list (actually a FixedArray).
@@ -2265,14 +2217,42 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Mov(args, x10);
__ B(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and our prototype is the Array.prototype actually.
+ __ Bind(&create_holey_array);
+ // -- x2 : arguments_list_map
+ // -- x4 : native_context
+ Register arguments_list_prototype = x2;
+ __ Ldr(arguments_list_prototype,
+ FieldMemOperand(arguments_list_map, Map::kPrototypeOffset));
+ __ Ldr(x10, ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ Cmp(arguments_list_prototype, x10);
+ __ B(ne, &create_runtime);
+ __ LoadRoot(x10, Heap::kArrayProtectorRootIndex);
+ __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, PropertyCell::kValueOffset));
+ __ Cmp(x11, Isolate::kProtectorValid);
+ __ B(ne, &create_runtime);
+ __ Ldrsw(len,
+ UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+ __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
+ __ B(&done_create);
+
// Try to create the list from a JSArray object.
__ Bind(&create_array);
__ Ldr(x10, FieldMemOperand(arguments_list_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(x10);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- // Branch for anything that's not FAST_{SMI_}ELEMENTS.
- __ TestAndBranchIfAnySet(x10, ~FAST_ELEMENTS, &create_runtime);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ // Check if it is a holey array, the order of the cmp is important as
+ // anything higher than FAST_HOLEY_ELEMENTS will fall back to runtime.
+ __ Cmp(x10, FAST_HOLEY_ELEMENTS);
+ __ B(hi, &create_runtime);
+ // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
+ __ Tbnz(x10, 0, &create_holey_array);
+ // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
__ Ldrsw(len,
UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
__ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
@@ -2306,16 +2286,24 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
- Label done, loop;
+ Label done, push, loop;
Register src = x4;
__ Add(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
__ Mov(x0, len); // The 'len' argument for Call() or Construct().
__ Cbz(len, &done);
+ Register the_hole_value = x11;
+ Register undefined_value = x12;
+ // We do not use the CompareRoot macro as it would do a LoadRoot behind the
+ // scenes and we want to avoid that in a loop.
+ __ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
__ Claim(len);
__ Bind(&loop);
__ Sub(len, len, 1);
__ Ldr(x10, MemOperand(src, kPointerSize, PostIndex));
+ __ Cmp(x10, the_hole_value);
+ __ Csel(x10, x10, undefined_value, ne);
__ Poke(x10, Operand(len, LSL, kPointerSizeLog2));
__ Cbnz(len, &loop);
__ Bind(&done);
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index defc4dcf62..d3798c3857 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -77,6 +77,7 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
!isolate->MayAccess(handle(isolate->context()), js_receiver)) {
isolate->ReportFailedAccessCheck(js_receiver);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
raw_holder = GetCompatibleReceiver(isolate, *fun_data, *js_receiver);
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index c09f11b2e8..047d88ecea 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/contexts.h"
#include "src/elements.h"
@@ -32,7 +33,7 @@ inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) {
*out = static_cast<int>(value);
}
return true;
- } else if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
+ } else if (object->IsNullOrUndefined(isolate)) {
*out = 0;
return true;
} else if (object->IsBoolean()) {
@@ -55,7 +56,13 @@ inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
if (!len_obj->IsSmi()) return false;
*out = Max(0, Smi::cast(len_obj)->value());
- return *out <= object->elements()->length();
+
+ FixedArray* parameters = FixedArray::cast(object->elements());
+ if (object->HasSloppyArgumentsElements()) {
+ FixedArray* arguments = FixedArray::cast(parameters->get(1));
+ return *out <= arguments->length();
+ }
+ return *out <= parameters->length();
}
inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
@@ -144,14 +151,15 @@ MUST_USE_RESULT static Object* CallJsIntrinsic(Isolate* isolate,
int argc = args.length() - 1;
ScopedVector<Handle<Object>> argv(argc);
for (int i = 0; i < argc; ++i) {
- argv[i] = args.at<Object>(i + 1);
+ argv[i] = args.at(i + 1);
}
RETURN_RESULT_OR_FAILURE(
isolate,
Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
}
+} // namespace
-Object* DoArrayPush(Isolate* isolate, BuiltinArguments args) {
+BUILTIN(ArrayPush) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
@@ -174,19 +182,158 @@ Object* DoArrayPush(Isolate* isolate, BuiltinArguments args) {
int new_length = accessor->Push(array, &args, to_add);
return Smi::FromInt(new_length);
}
-} // namespace
-BUILTIN(ArrayPush) { return DoArrayPush(isolate, args); }
-
-// TODO(verwaest): This is a temporary helper until the FastArrayPush stub can
-// tailcall to the builtin directly.
-RUNTIME_FUNCTION(Runtime_ArrayPush) {
- DCHECK_EQ(2, args.length());
- Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
- // Rewrap the arguments as builtins arguments.
- int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
- BuiltinArguments caller_args(argc, incoming->arguments() + 1);
- return DoArrayPush(isolate, caller_args);
+void Builtins::Generate_FastArrayPush(compiler::CodeAssemblerState* state) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
+ Variable arg_index(&assembler, MachineType::PointerRepresentation());
+ Label default_label(&assembler, &arg_index);
+ Label smi_transition(&assembler);
+ Label object_push_pre(&assembler);
+ Label object_push(&assembler, &arg_index);
+ Label double_push(&assembler, &arg_index);
+ Label double_transition(&assembler);
+ Label runtime(&assembler, Label::kDeferred);
+
+ Node* argc = assembler.Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = assembler.Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = assembler.Parameter(BuiltinDescriptor::kNewTarget);
+
+ CodeStubArguments args(&assembler, argc);
+ Node* receiver = args.GetReceiver();
+ Node* kind = nullptr;
+
+ Label fast(&assembler);
+ {
+ assembler.BranchIfFastJSArray(
+ receiver, context, CodeStubAssembler::FastJSArrayAccessMode::ANY_ACCESS,
+ &fast, &runtime);
+ }
+
+ assembler.Bind(&fast);
+ {
+ // Disallow pushing onto prototypes. It might be the JSArray prototype.
+ // Disallow pushing onto non-extensible objects.
+ assembler.Comment("Disallow pushing onto prototypes");
+ Node* map = assembler.LoadMap(receiver);
+ Node* bit_field2 = assembler.LoadMapBitField2(map);
+ int mask = static_cast<int>(Map::IsPrototypeMapBits::kMask) |
+ (1 << Map::kIsExtensible);
+ Node* test = assembler.Word32And(bit_field2, assembler.Int32Constant(mask));
+ assembler.GotoIf(
+ assembler.Word32NotEqual(
+ test, assembler.Int32Constant(1 << Map::kIsExtensible)),
+ &runtime);
+
+ // Disallow pushing onto arrays in dictionary named property mode. We need
+ // to figure out whether the length property is still writable.
+ assembler.Comment(
+ "Disallow pushing onto arrays in dictionary named property mode");
+ assembler.GotoIf(assembler.IsDictionaryMap(map), &runtime);
+
+ // Check whether the length property is writable. The length property is the
+ // only default named property on arrays. It's nonconfigurable, hence is
+ // guaranteed to stay the first property.
+ Node* descriptors = assembler.LoadMapDescriptors(map);
+ Node* details = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToDetailsIndex(0));
+ assembler.GotoIf(
+ assembler.IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask),
+ &runtime);
+
+ arg_index.Bind(assembler.IntPtrConstant(0));
+ kind = assembler.DecodeWord32<Map::ElementsKindBits>(bit_field2);
+
+ assembler.GotoIf(
+ assembler.Int32GreaterThan(
+ kind, assembler.Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+ &object_push_pre);
+
+ Node* new_length = assembler.BuildAppendJSArray(
+ FAST_SMI_ELEMENTS, context, receiver, args, arg_index, &smi_transition);
+ args.PopAndReturn(new_length);
+ }
+
+ // If the argument is not a smi, then use a heavyweight SetProperty to
+ // transition the array for only the single next element. If the argument is
+ // a smi, the failure is due to some other reason and we should fall back on
+ // the most generic implementation for the rest of the array.
+ assembler.Bind(&smi_transition);
+ {
+ Node* arg = args.AtIndex(arg_index.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(arg), &default_label);
+ Node* length = assembler.LoadJSArrayLength(receiver);
+ // TODO(danno): Use the KeyedStoreGeneric stub here when possible,
+ // calling into the runtime to do the elements transition is overkill.
+ assembler.CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ assembler.SmiConstant(STRICT));
+ assembler.Increment(arg_index);
+ assembler.GotoIfNotNumber(arg, &object_push);
+ assembler.Goto(&double_push);
+ }
+
+ assembler.Bind(&object_push_pre);
+ {
+ assembler.Branch(assembler.Int32GreaterThan(
+ kind, assembler.Int32Constant(FAST_HOLEY_ELEMENTS)),
+ &double_push, &object_push);
+ }
+
+ assembler.Bind(&object_push);
+ {
+ Node* new_length = assembler.BuildAppendJSArray(
+ FAST_ELEMENTS, context, receiver, args, arg_index, &default_label);
+ args.PopAndReturn(new_length);
+ }
+
+ assembler.Bind(&double_push);
+ {
+ Node* new_length =
+ assembler.BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, context, receiver,
+ args, arg_index, &double_transition);
+ args.PopAndReturn(new_length);
+ }
+
+ // If the argument is not a double, then use a heavyweight SetProperty to
+ // transition the array for only the single next element. If the argument is
+ // a double, the failure is due to some other reason and we should fall back
+ // on the most generic implementation for the rest of the array.
+ assembler.Bind(&double_transition);
+ {
+ Node* arg = args.AtIndex(arg_index.value());
+ assembler.GotoIfNumber(arg, &default_label);
+ Node* length = assembler.LoadJSArrayLength(receiver);
+ // TODO(danno): Use the KeyedStoreGeneric stub here when possible,
+ // calling into the runtime to do the elements transition is overkill.
+ assembler.CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ assembler.SmiConstant(STRICT));
+ assembler.Increment(arg_index);
+ assembler.Goto(&object_push);
+ }
+
+ // Fallback that stores un-processed arguments using the full, heavyweight
+ // SetProperty machinery.
+ assembler.Bind(&default_label);
+ {
+ args.ForEach(
+ [&assembler, receiver, context, &arg_index](Node* arg) {
+ Node* length = assembler.LoadJSArrayLength(receiver);
+ assembler.CallRuntime(Runtime::kSetProperty, context, receiver,
+ length, arg, assembler.SmiConstant(STRICT));
+ },
+ arg_index.value());
+ args.PopAndReturn(assembler.LoadJSArrayLength(receiver));
+ }
+
+ assembler.Bind(&runtime);
+ {
+ Node* target = assembler.LoadFromFrame(
+ StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer());
+ assembler.TailCallStub(CodeFactory::ArrayPush(assembler.isolate()), context,
+ target, new_target, argc);
+ }
}
BUILTIN(ArrayPop) {
@@ -461,8 +608,9 @@ class ArrayConcatVisitor {
SeededNumberDictionary::cast(*storage_));
// The object holding this backing store has just been allocated, so
// it cannot yet be used as a prototype.
- Handle<SeededNumberDictionary> result =
- SeededNumberDictionary::AtNumberPut(dict, index, elm, false);
+ Handle<JSObject> not_a_prototype_holder;
+ Handle<SeededNumberDictionary> result = SeededNumberDictionary::AtNumberPut(
+ dict, index, elm, not_a_prototype_holder);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
@@ -533,9 +681,10 @@ class ArrayConcatVisitor {
if (!element->IsTheHole(isolate_)) {
// The object holding this backing store has just been allocated, so
// it cannot yet be used as a prototype.
+ Handle<JSObject> not_a_prototype_holder;
Handle<SeededNumberDictionary> new_storage =
SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
- false);
+ not_a_prototype_holder);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
@@ -1001,8 +1150,9 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
// If estimated number of elements is more than half of length, a
// fixed array (fast case) is more time and space-efficient than a
// dictionary.
- bool fast_case =
- is_array_species && (estimate_nof_elements * 2) >= estimate_result_length;
+ bool fast_case = is_array_species &&
+ (estimate_nof_elements * 2) >= estimate_result_length &&
+ isolate->IsIsConcatSpreadableLookupChainIntact();
if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
Handle<FixedArrayBase> storage =
@@ -1202,7 +1352,7 @@ BUILTIN(ArrayConcat) {
Handle<Object> receiver = args.receiver();
// TODO(bmeurer): Do we really care about the exact exception message here?
- if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+ if (receiver->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
@@ -1237,146 +1387,149 @@ BUILTIN(ArrayConcat) {
return Slow_ArrayConcat(&args, species, isolate);
}
-void Builtins::Generate_ArrayIsArray(CodeStubAssembler* assembler) {
+void Builtins::Generate_ArrayIsArray(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
+ CodeStubAssembler assembler(state);
- Node* object = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
+ Node* object = assembler.Parameter(1);
+ Node* context = assembler.Parameter(4);
- Label call_runtime(assembler), return_true(assembler),
- return_false(assembler);
+ Label call_runtime(&assembler), return_true(&assembler),
+ return_false(&assembler);
- assembler->GotoIf(assembler->TaggedIsSmi(object), &return_false);
- Node* instance_type = assembler->LoadInstanceType(object);
+ assembler.GotoIf(assembler.TaggedIsSmi(object), &return_false);
+ Node* instance_type = assembler.LoadInstanceType(object);
- assembler->GotoIf(assembler->Word32Equal(
- instance_type, assembler->Int32Constant(JS_ARRAY_TYPE)),
- &return_true);
+ assembler.GotoIf(assembler.Word32Equal(
+ instance_type, assembler.Int32Constant(JS_ARRAY_TYPE)),
+ &return_true);
// TODO(verwaest): Handle proxies in-place.
- assembler->Branch(assembler->Word32Equal(
- instance_type, assembler->Int32Constant(JS_PROXY_TYPE)),
- &call_runtime, &return_false);
+ assembler.Branch(assembler.Word32Equal(
+ instance_type, assembler.Int32Constant(JS_PROXY_TYPE)),
+ &call_runtime, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
- assembler->Bind(&call_runtime);
- assembler->Return(
- assembler->CallRuntime(Runtime::kArrayIsArray, context, object));
+ assembler.Bind(&call_runtime);
+ assembler.Return(
+ assembler.CallRuntime(Runtime::kArrayIsArray, context, object));
}
-void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
+void Builtins::Generate_ArrayIncludes(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* array = assembler->Parameter(0);
- Node* search_element = assembler->Parameter(1);
- Node* start_from = assembler->Parameter(2);
- Node* context = assembler->Parameter(3 + 2);
+ Node* array = assembler.Parameter(0);
+ Node* search_element = assembler.Parameter(1);
+ Node* start_from = assembler.Parameter(2);
+ Node* context = assembler.Parameter(3 + 2);
- Node* intptr_zero = assembler->IntPtrConstant(0);
- Node* intptr_one = assembler->IntPtrConstant(1);
+ Node* intptr_zero = assembler.IntPtrConstant(0);
+ Node* intptr_one = assembler.IntPtrConstant(1);
- Node* the_hole = assembler->TheHoleConstant();
- Node* undefined = assembler->UndefinedConstant();
- Node* heap_number_map = assembler->HeapNumberMapConstant();
+ Node* the_hole = assembler.TheHoleConstant();
+ Node* undefined = assembler.UndefinedConstant();
- Variable len_var(assembler, MachineType::PointerRepresentation()),
- index_var(assembler, MachineType::PointerRepresentation()),
- start_from_var(assembler, MachineType::PointerRepresentation());
+ Variable len_var(&assembler, MachineType::PointerRepresentation()),
+ index_var(&assembler, MachineType::PointerRepresentation()),
+ start_from_var(&assembler, MachineType::PointerRepresentation());
- Label init_k(assembler), return_true(assembler), return_false(assembler),
- call_runtime(assembler);
+ Label init_k(&assembler), return_true(&assembler), return_false(&assembler),
+ call_runtime(&assembler);
- Label init_len(assembler);
+ Label init_len(&assembler);
index_var.Bind(intptr_zero);
len_var.Bind(intptr_zero);
// Take slow path if not a JSArray, if retrieving elements requires
// traversing prototype, or if access checks are required.
- assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+ assembler.BranchIfFastJSArray(
+ array, context, CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
+ &init_len, &call_runtime);
- assembler->Bind(&init_len);
+ assembler.Bind(&init_len);
{
// Handle case where JSArray length is not an Smi in the runtime
- Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
- assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
+ Node* len = assembler.LoadObjectField(array, JSArray::kLengthOffset);
+ assembler.GotoUnless(assembler.TaggedIsSmi(len), &call_runtime);
- len_var.Bind(assembler->SmiToWord(len));
- assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
- &return_false, &init_k);
+ len_var.Bind(assembler.SmiToWord(len));
+ assembler.Branch(assembler.WordEqual(len_var.value(), intptr_zero),
+ &return_false, &init_k);
}
- assembler->Bind(&init_k);
+ assembler.Bind(&init_k);
{
- Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
- init_k_zero(assembler), init_k_n(assembler);
- Node* tagged_n = assembler->ToInteger(context, start_from);
+ Label done(&assembler), init_k_smi(&assembler), init_k_heap_num(&assembler),
+ init_k_zero(&assembler), init_k_n(&assembler);
+ Node* tagged_n = assembler.ToInteger(context, start_from);
- assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
- &init_k_heap_num);
+ assembler.Branch(assembler.TaggedIsSmi(tagged_n), &init_k_smi,
+ &init_k_heap_num);
- assembler->Bind(&init_k_smi);
+ assembler.Bind(&init_k_smi);
{
- start_from_var.Bind(assembler->SmiUntag(tagged_n));
- assembler->Goto(&init_k_n);
+ start_from_var.Bind(assembler.SmiUntag(tagged_n));
+ assembler.Goto(&init_k_n);
}
- assembler->Bind(&init_k_heap_num);
+ assembler.Bind(&init_k_heap_num);
{
- Label do_return_false(assembler);
+ Label do_return_false(&assembler);
// This round is lossless for all valid lengths.
- Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
- Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
- assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
- &do_return_false);
- start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
- assembler->TruncateFloat64ToWord32(fp_n)));
- assembler->Goto(&init_k_n);
-
- assembler->Bind(&do_return_false);
+ Node* fp_len = assembler.RoundIntPtrToFloat64(len_var.value());
+ Node* fp_n = assembler.LoadHeapNumberValue(tagged_n);
+ assembler.GotoIf(assembler.Float64GreaterThanOrEqual(fp_n, fp_len),
+ &do_return_false);
+ start_from_var.Bind(assembler.ChangeInt32ToIntPtr(
+ assembler.TruncateFloat64ToWord32(fp_n)));
+ assembler.Goto(&init_k_n);
+
+ assembler.Bind(&do_return_false);
{
index_var.Bind(intptr_zero);
- assembler->Goto(&return_false);
+ assembler.Goto(&return_false);
}
}
- assembler->Bind(&init_k_n);
+ assembler.Bind(&init_k_n);
{
- Label if_positive(assembler), if_negative(assembler), done(assembler);
- assembler->Branch(
- assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
+ Label if_positive(&assembler), if_negative(&assembler), done(&assembler);
+ assembler.Branch(
+ assembler.IntPtrLessThan(start_from_var.value(), intptr_zero),
&if_negative, &if_positive);
- assembler->Bind(&if_positive);
+ assembler.Bind(&if_positive);
{
index_var.Bind(start_from_var.value());
- assembler->Goto(&done);
+ assembler.Goto(&done);
}
- assembler->Bind(&if_negative);
+ assembler.Bind(&if_negative);
{
index_var.Bind(
- assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
- assembler->Branch(
- assembler->IntPtrLessThan(index_var.value(), intptr_zero),
+ assembler.IntPtrAdd(len_var.value(), start_from_var.value()));
+ assembler.Branch(
+ assembler.IntPtrLessThan(index_var.value(), intptr_zero),
&init_k_zero, &done);
}
- assembler->Bind(&init_k_zero);
+ assembler.Bind(&init_k_zero);
{
index_var.Bind(intptr_zero);
- assembler->Goto(&done);
+ assembler.Goto(&done);
}
- assembler->Bind(&done);
+ assembler.Bind(&done);
}
}
@@ -1385,443 +1538,435 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
};
- Label if_smiorobjects(assembler), if_packed_doubles(assembler),
- if_holey_doubles(assembler);
+ Label if_smiorobjects(&assembler), if_packed_doubles(&assembler),
+ if_holey_doubles(&assembler);
Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
&if_smiorobjects, &if_smiorobjects,
&if_packed_doubles, &if_holey_doubles};
- Node* map = assembler->LoadMap(array);
- Node* elements_kind = assembler->LoadMapElementsKind(map);
- Node* elements = assembler->LoadElements(array);
- assembler->Switch(elements_kind, &return_false, kElementsKind,
- element_kind_handlers, arraysize(kElementsKind));
+ Node* map = assembler.LoadMap(array);
+ Node* elements_kind = assembler.LoadMapElementsKind(map);
+ Node* elements = assembler.LoadElements(array);
+ assembler.Switch(elements_kind, &return_false, kElementsKind,
+ element_kind_handlers, arraysize(kElementsKind));
- assembler->Bind(&if_smiorobjects);
+ assembler.Bind(&if_smiorobjects);
{
- Variable search_num(assembler, MachineRepresentation::kFloat64);
- Label ident_loop(assembler, &index_var),
- heap_num_loop(assembler, &search_num),
- string_loop(assembler, &index_var), simd_loop(assembler),
- undef_loop(assembler, &index_var), not_smi(assembler),
- not_heap_num(assembler);
-
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&heap_num_loop);
-
- assembler->Bind(&not_smi);
- assembler->GotoIf(assembler->WordEqual(search_element, undefined),
- &undef_loop);
- Node* map = assembler->LoadMap(search_element);
- assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
- &not_heap_num);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
- assembler->Goto(&heap_num_loop);
-
- assembler->Bind(&not_heap_num);
- Node* search_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(assembler->IsStringInstanceType(search_type),
- &string_loop);
- assembler->GotoIf(
- assembler->Word32Equal(search_type,
- assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
+ Label ident_loop(&assembler, &index_var),
+ heap_num_loop(&assembler, &search_num),
+ string_loop(&assembler, &index_var), simd_loop(&assembler),
+ undef_loop(&assembler, &index_var), not_smi(&assembler),
+ not_heap_num(&assembler);
+
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &not_smi);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&heap_num_loop);
+
+ assembler.Bind(&not_smi);
+ assembler.GotoIf(assembler.WordEqual(search_element, undefined),
+ &undef_loop);
+ Node* map = assembler.LoadMap(search_element);
+ assembler.GotoUnless(assembler.IsHeapNumberMap(map), &not_heap_num);
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
+ assembler.Goto(&heap_num_loop);
+
+ assembler.Bind(&not_heap_num);
+ Node* search_type = assembler.LoadMapInstanceType(map);
+ assembler.GotoIf(assembler.IsStringInstanceType(search_type), &string_loop);
+ assembler.GotoIf(
+ assembler.Word32Equal(search_type,
+ assembler.Int32Constant(SIMD128_VALUE_TYPE)),
&simd_loop);
- assembler->Goto(&ident_loop);
+ assembler.Goto(&ident_loop);
- assembler->Bind(&ident_loop);
+ assembler.Bind(&ident_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(element_k, search_element),
- &return_true);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.WordEqual(element_k, search_element),
+ &return_true);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&ident_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&ident_loop);
}
- assembler->Bind(&undef_loop);
+ assembler.Bind(&undef_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(element_k, undefined),
- &return_true);
- assembler->GotoIf(assembler->WordEqual(element_k, the_hole),
- &return_true);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.WordEqual(element_k, undefined), &return_true);
+ assembler.GotoIf(assembler.WordEqual(element_k, the_hole), &return_true);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&undef_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&undef_loop);
}
- assembler->Bind(&heap_num_loop);
+ assembler.Bind(&heap_num_loop);
{
- Label nan_loop(assembler, &index_var),
- not_nan_loop(assembler, &index_var);
- assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
- &not_nan_loop);
+ Label nan_loop(&assembler, &index_var),
+ not_nan_loop(&assembler, &index_var);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+ &not_nan_loop);
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler), not_smi(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler), not_smi(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
- assembler->Branch(
- assembler->Float64Equal(search_num.value(),
- assembler->SmiToFloat64(element_k)),
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoUnless(assembler.TaggedIsSmi(element_k), &not_smi);
+ assembler.Branch(
+ assembler.Float64Equal(search_num.value(),
+ assembler.SmiToFloat64(element_k)),
&return_true, &continue_loop);
- assembler->Bind(&not_smi);
- assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
- heap_number_map),
- &continue_loop);
- assembler->Branch(
- assembler->Float64Equal(search_num.value(),
- assembler->LoadHeapNumberValue(element_k)),
+ assembler.Bind(&not_smi);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
+ &continue_loop);
+ assembler.Branch(
+ assembler.Float64Equal(search_num.value(),
+ assembler.LoadHeapNumberValue(element_k)),
&return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
- assembler->Bind(&nan_loop);
+ assembler.Bind(&nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
- assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
- heap_number_map),
- &continue_loop);
- assembler->BranchIfFloat64IsNaN(
- assembler->LoadHeapNumberValue(element_k), &return_true,
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
&continue_loop);
+ assembler.BranchIfFloat64IsNaN(assembler.LoadHeapNumberValue(element_k),
+ &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&nan_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&nan_loop);
}
}
- assembler->Bind(&string_loop);
+ assembler.Bind(&string_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
- assembler->GotoUnless(assembler->IsStringInstanceType(
- assembler->LoadInstanceType(element_k)),
- &continue_loop);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
+ assembler.GotoUnless(
+ assembler.IsStringInstanceType(assembler.LoadInstanceType(element_k)),
+ &continue_loop);
// TODO(bmeurer): Consider inlining the StringEqual logic here.
- Callable callable = CodeFactory::StringEqual(assembler->isolate());
+ Callable callable = CodeFactory::StringEqual(assembler.isolate());
Node* result =
- assembler->CallStub(callable, context, search_element, element_k);
- assembler->Branch(
- assembler->WordEqual(assembler->BooleanConstant(true), result),
+ assembler.CallStub(callable, context, search_element, element_k);
+ assembler.Branch(
+ assembler.WordEqual(assembler.BooleanConstant(true), result),
&return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&string_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&string_loop);
}
- assembler->Bind(&simd_loop);
+ assembler.Bind(&simd_loop);
{
- Label continue_loop(assembler, &index_var),
- loop_body(assembler, &index_var);
- Node* map = assembler->LoadMap(search_element);
-
- assembler->Goto(&loop_body);
- assembler->Bind(&loop_body);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler, &index_var),
+ loop_body(&assembler, &index_var);
+ Node* map = assembler.LoadMap(search_element);
+
+ assembler.Goto(&loop_body);
+ assembler.Bind(&loop_body);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
- Node* map_k = assembler->LoadMap(element_k);
- assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
- &return_true, &continue_loop);
+ Node* map_k = assembler.LoadMap(element_k);
+ assembler.BranchIfSimd128Equal(search_element, map, element_k, map_k,
+ &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&loop_body);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&loop_body);
}
}
- assembler->Bind(&if_packed_doubles);
+ assembler.Bind(&if_packed_doubles);
{
- Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
- hole_loop(assembler, &index_var), search_notnan(assembler);
- Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label nan_loop(&assembler, &index_var),
+ not_nan_loop(&assembler, &index_var), hole_loop(&assembler, &index_var),
+ search_notnan(&assembler);
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
- &search_notnan);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&not_nan_loop);
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&not_nan_loop);
- assembler->Bind(&search_notnan);
- assembler->GotoIf(assembler->WordNotEqual(
- assembler->LoadMap(search_element), heap_number_map),
- &return_false);
+ assembler.Bind(&search_notnan);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+ &return_false);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
- assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
- &not_nan_loop);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+ &not_nan_loop);
// Search for HeapNumber
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
- &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+ &return_true, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
// Search for NaN
- assembler->Bind(&nan_loop);
+ assembler.Bind(&nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&nan_loop);
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler.BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&nan_loop);
}
}
- assembler->Bind(&if_holey_doubles);
+ assembler.Bind(&if_holey_doubles);
{
- Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
- hole_loop(assembler, &index_var), search_notnan(assembler);
- Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label nan_loop(&assembler, &index_var),
+ not_nan_loop(&assembler, &index_var), hole_loop(&assembler, &index_var),
+ search_notnan(&assembler);
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
- &search_notnan);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&not_nan_loop);
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&not_nan_loop);
- assembler->Bind(&search_notnan);
- assembler->GotoIf(assembler->WordEqual(search_element, undefined),
- &hole_loop);
- assembler->GotoIf(assembler->WordNotEqual(
- assembler->LoadMap(search_element), heap_number_map),
- &return_false);
+ assembler.Bind(&search_notnan);
+ assembler.GotoIf(assembler.WordEqual(search_element, undefined),
+ &hole_loop);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+ &return_false);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
- assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
- &not_nan_loop);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+ &not_nan_loop);
// Search for HeapNumber
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
// Load double value or continue if it contains a double hole.
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
- &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+ &return_true, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
// Search for NaN
- assembler->Bind(&nan_loop);
+ assembler.Bind(&nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
// Load double value or continue if it contains a double hole.
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&nan_loop);
+ assembler.BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&nan_loop);
}
// Search for the Hole
- assembler->Bind(&hole_loop);
+ assembler.Bind(&hole_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
// Check if the element is a double hole, but don't load it.
- assembler->LoadFixedDoubleArrayElement(
+ assembler.LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::None(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &return_true);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&hole_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&hole_loop);
}
}
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
- assembler->Bind(&call_runtime);
- assembler->Return(assembler->CallRuntime(Runtime::kArrayIncludes_Slow,
- context, array, search_element,
- start_from));
+ assembler.Bind(&call_runtime);
+ assembler.Return(assembler.CallRuntime(Runtime::kArrayIncludes_Slow, context,
+ array, search_element, start_from));
}
-void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
+void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* array = assembler->Parameter(0);
- Node* search_element = assembler->Parameter(1);
- Node* start_from = assembler->Parameter(2);
- Node* context = assembler->Parameter(3 + 2);
+ Node* array = assembler.Parameter(0);
+ Node* search_element = assembler.Parameter(1);
+ Node* start_from = assembler.Parameter(2);
+ Node* context = assembler.Parameter(3 + 2);
- Node* intptr_zero = assembler->IntPtrConstant(0);
- Node* intptr_one = assembler->IntPtrConstant(1);
+ Node* intptr_zero = assembler.IntPtrConstant(0);
+ Node* intptr_one = assembler.IntPtrConstant(1);
- Node* undefined = assembler->UndefinedConstant();
- Node* heap_number_map = assembler->HeapNumberMapConstant();
+ Node* undefined = assembler.UndefinedConstant();
- Variable len_var(assembler, MachineType::PointerRepresentation()),
- index_var(assembler, MachineType::PointerRepresentation()),
- start_from_var(assembler, MachineType::PointerRepresentation());
+ Variable len_var(&assembler, MachineType::PointerRepresentation()),
+ index_var(&assembler, MachineType::PointerRepresentation()),
+ start_from_var(&assembler, MachineType::PointerRepresentation());
- Label init_k(assembler), return_found(assembler), return_not_found(assembler),
- call_runtime(assembler);
+ Label init_k(&assembler), return_found(&assembler),
+ return_not_found(&assembler), call_runtime(&assembler);
- Label init_len(assembler);
+ Label init_len(&assembler);
index_var.Bind(intptr_zero);
len_var.Bind(intptr_zero);
// Take slow path if not a JSArray, if retrieving elements requires
// traversing prototype, or if access checks are required.
- assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+ assembler.BranchIfFastJSArray(
+ array, context, CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
+ &init_len, &call_runtime);
- assembler->Bind(&init_len);
+ assembler.Bind(&init_len);
{
// Handle case where JSArray length is not an Smi in the runtime
- Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
- assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
+ Node* len = assembler.LoadObjectField(array, JSArray::kLengthOffset);
+ assembler.GotoUnless(assembler.TaggedIsSmi(len), &call_runtime);
- len_var.Bind(assembler->SmiToWord(len));
- assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
- &return_not_found, &init_k);
+ len_var.Bind(assembler.SmiToWord(len));
+ assembler.Branch(assembler.WordEqual(len_var.value(), intptr_zero),
+ &return_not_found, &init_k);
}
- assembler->Bind(&init_k);
+ assembler.Bind(&init_k);
{
- Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
- init_k_zero(assembler), init_k_n(assembler);
- Node* tagged_n = assembler->ToInteger(context, start_from);
+ Label done(&assembler), init_k_smi(&assembler), init_k_heap_num(&assembler),
+ init_k_zero(&assembler), init_k_n(&assembler);
+ Node* tagged_n = assembler.ToInteger(context, start_from);
- assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
- &init_k_heap_num);
+ assembler.Branch(assembler.TaggedIsSmi(tagged_n), &init_k_smi,
+ &init_k_heap_num);
- assembler->Bind(&init_k_smi);
+ assembler.Bind(&init_k_smi);
{
- start_from_var.Bind(assembler->SmiUntag(tagged_n));
- assembler->Goto(&init_k_n);
+ start_from_var.Bind(assembler.SmiUntag(tagged_n));
+ assembler.Goto(&init_k_n);
}
- assembler->Bind(&init_k_heap_num);
+ assembler.Bind(&init_k_heap_num);
{
- Label do_return_not_found(assembler);
+ Label do_return_not_found(&assembler);
// This round is lossless for all valid lengths.
- Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
- Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
- assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
- &do_return_not_found);
- start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
- assembler->TruncateFloat64ToWord32(fp_n)));
- assembler->Goto(&init_k_n);
-
- assembler->Bind(&do_return_not_found);
+ Node* fp_len = assembler.RoundIntPtrToFloat64(len_var.value());
+ Node* fp_n = assembler.LoadHeapNumberValue(tagged_n);
+ assembler.GotoIf(assembler.Float64GreaterThanOrEqual(fp_n, fp_len),
+ &do_return_not_found);
+ start_from_var.Bind(assembler.ChangeInt32ToIntPtr(
+ assembler.TruncateFloat64ToWord32(fp_n)));
+ assembler.Goto(&init_k_n);
+
+ assembler.Bind(&do_return_not_found);
{
index_var.Bind(intptr_zero);
- assembler->Goto(&return_not_found);
+ assembler.Goto(&return_not_found);
}
}
- assembler->Bind(&init_k_n);
+ assembler.Bind(&init_k_n);
{
- Label if_positive(assembler), if_negative(assembler), done(assembler);
- assembler->Branch(
- assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
+ Label if_positive(&assembler), if_negative(&assembler), done(&assembler);
+ assembler.Branch(
+ assembler.IntPtrLessThan(start_from_var.value(), intptr_zero),
&if_negative, &if_positive);
- assembler->Bind(&if_positive);
+ assembler.Bind(&if_positive);
{
index_var.Bind(start_from_var.value());
- assembler->Goto(&done);
+ assembler.Goto(&done);
}
- assembler->Bind(&if_negative);
+ assembler.Bind(&if_negative);
{
index_var.Bind(
- assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
- assembler->Branch(
- assembler->IntPtrLessThan(index_var.value(), intptr_zero),
+ assembler.IntPtrAdd(len_var.value(), start_from_var.value()));
+ assembler.Branch(
+ assembler.IntPtrLessThan(index_var.value(), intptr_zero),
&init_k_zero, &done);
}
- assembler->Bind(&init_k_zero);
+ assembler.Bind(&init_k_zero);
{
index_var.Bind(intptr_zero);
- assembler->Goto(&done);
+ assembler.Goto(&done);
}
- assembler->Bind(&done);
+ assembler.Bind(&done);
}
}
@@ -1830,384 +1975,387 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
};
- Label if_smiorobjects(assembler), if_packed_doubles(assembler),
- if_holey_doubles(assembler);
+ Label if_smiorobjects(&assembler), if_packed_doubles(&assembler),
+ if_holey_doubles(&assembler);
Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
&if_smiorobjects, &if_smiorobjects,
&if_packed_doubles, &if_holey_doubles};
- Node* map = assembler->LoadMap(array);
- Node* elements_kind = assembler->LoadMapElementsKind(map);
- Node* elements = assembler->LoadElements(array);
- assembler->Switch(elements_kind, &return_not_found, kElementsKind,
- element_kind_handlers, arraysize(kElementsKind));
+ Node* map = assembler.LoadMap(array);
+ Node* elements_kind = assembler.LoadMapElementsKind(map);
+ Node* elements = assembler.LoadElements(array);
+ assembler.Switch(elements_kind, &return_not_found, kElementsKind,
+ element_kind_handlers, arraysize(kElementsKind));
- assembler->Bind(&if_smiorobjects);
+ assembler.Bind(&if_smiorobjects);
{
- Variable search_num(assembler, MachineRepresentation::kFloat64);
- Label ident_loop(assembler, &index_var),
- heap_num_loop(assembler, &search_num),
- string_loop(assembler, &index_var), simd_loop(assembler),
- undef_loop(assembler, &index_var), not_smi(assembler),
- not_heap_num(assembler);
-
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&heap_num_loop);
-
- assembler->Bind(&not_smi);
- assembler->GotoIf(assembler->WordEqual(search_element, undefined),
- &undef_loop);
- Node* map = assembler->LoadMap(search_element);
- assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
- &not_heap_num);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
- assembler->Goto(&heap_num_loop);
-
- assembler->Bind(&not_heap_num);
- Node* search_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(assembler->IsStringInstanceType(search_type),
- &string_loop);
- assembler->GotoIf(
- assembler->Word32Equal(search_type,
- assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
+ Label ident_loop(&assembler, &index_var),
+ heap_num_loop(&assembler, &search_num),
+ string_loop(&assembler, &index_var), simd_loop(&assembler),
+ undef_loop(&assembler, &index_var), not_smi(&assembler),
+ not_heap_num(&assembler);
+
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &not_smi);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&heap_num_loop);
+
+ assembler.Bind(&not_smi);
+ assembler.GotoIf(assembler.WordEqual(search_element, undefined),
+ &undef_loop);
+ Node* map = assembler.LoadMap(search_element);
+ assembler.GotoUnless(assembler.IsHeapNumberMap(map), &not_heap_num);
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
+ assembler.Goto(&heap_num_loop);
+
+ assembler.Bind(&not_heap_num);
+ Node* search_type = assembler.LoadMapInstanceType(map);
+ assembler.GotoIf(assembler.IsStringInstanceType(search_type), &string_loop);
+ assembler.GotoIf(
+ assembler.Word32Equal(search_type,
+ assembler.Int32Constant(SIMD128_VALUE_TYPE)),
&simd_loop);
- assembler->Goto(&ident_loop);
+ assembler.Goto(&ident_loop);
- assembler->Bind(&ident_loop);
+ assembler.Bind(&ident_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(element_k, search_element),
- &return_found);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.WordEqual(element_k, search_element),
+ &return_found);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&ident_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&ident_loop);
}
- assembler->Bind(&undef_loop);
+ assembler.Bind(&undef_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(element_k, undefined),
- &return_found);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.WordEqual(element_k, undefined),
+ &return_found);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&undef_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&undef_loop);
}
- assembler->Bind(&heap_num_loop);
+ assembler.Bind(&heap_num_loop);
{
- Label not_nan_loop(assembler, &index_var);
- assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
- &not_nan_loop);
+ Label not_nan_loop(&assembler, &index_var);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+ &not_nan_loop);
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler), not_smi(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler), not_smi(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
- assembler->Branch(
- assembler->Float64Equal(search_num.value(),
- assembler->SmiToFloat64(element_k)),
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoUnless(assembler.TaggedIsSmi(element_k), &not_smi);
+ assembler.Branch(
+ assembler.Float64Equal(search_num.value(),
+ assembler.SmiToFloat64(element_k)),
&return_found, &continue_loop);
- assembler->Bind(&not_smi);
- assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
- heap_number_map),
- &continue_loop);
- assembler->Branch(
- assembler->Float64Equal(search_num.value(),
- assembler->LoadHeapNumberValue(element_k)),
+ assembler.Bind(&not_smi);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
+ &continue_loop);
+ assembler.Branch(
+ assembler.Float64Equal(search_num.value(),
+ assembler.LoadHeapNumberValue(element_k)),
&return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
}
- assembler->Bind(&string_loop);
+ assembler.Bind(&string_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
- assembler->GotoUnless(assembler->IsStringInstanceType(
- assembler->LoadInstanceType(element_k)),
- &continue_loop);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
+ assembler.GotoUnless(
+ assembler.IsStringInstanceType(assembler.LoadInstanceType(element_k)),
+ &continue_loop);
// TODO(bmeurer): Consider inlining the StringEqual logic here.
- Callable callable = CodeFactory::StringEqual(assembler->isolate());
+ Callable callable = CodeFactory::StringEqual(assembler.isolate());
Node* result =
- assembler->CallStub(callable, context, search_element, element_k);
- assembler->Branch(
- assembler->WordEqual(assembler->BooleanConstant(true), result),
+ assembler.CallStub(callable, context, search_element, element_k);
+ assembler.Branch(
+ assembler.WordEqual(assembler.BooleanConstant(true), result),
&return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&string_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&string_loop);
}
- assembler->Bind(&simd_loop);
+ assembler.Bind(&simd_loop);
{
- Label continue_loop(assembler, &index_var),
- loop_body(assembler, &index_var);
- Node* map = assembler->LoadMap(search_element);
-
- assembler->Goto(&loop_body);
- assembler->Bind(&loop_body);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler, &index_var),
+ loop_body(&assembler, &index_var);
+ Node* map = assembler.LoadMap(search_element);
+
+ assembler.Goto(&loop_body);
+ assembler.Bind(&loop_body);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
- Node* map_k = assembler->LoadMap(element_k);
- assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
- &return_found, &continue_loop);
+ Node* map_k = assembler.LoadMap(element_k);
+ assembler.BranchIfSimd128Equal(search_element, map, element_k, map_k,
+ &return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&loop_body);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&loop_body);
}
}
- assembler->Bind(&if_packed_doubles);
+ assembler.Bind(&if_packed_doubles);
{
- Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
- Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label not_nan_loop(&assembler, &index_var), search_notnan(&assembler);
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
- &search_notnan);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&not_nan_loop);
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&not_nan_loop);
- assembler->Bind(&search_notnan);
- assembler->GotoIf(assembler->WordNotEqual(
- assembler->LoadMap(search_element), heap_number_map),
- &return_not_found);
+ assembler.Bind(&search_notnan);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+ &return_not_found);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
- assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
- &not_nan_loop);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+ &not_nan_loop);
// Search for HeapNumber
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
- &return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+ &return_found, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
}
- assembler->Bind(&if_holey_doubles);
+ assembler.Bind(&if_holey_doubles);
{
- Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
- Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label not_nan_loop(&assembler, &index_var), search_notnan(&assembler);
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
- &search_notnan);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&not_nan_loop);
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&not_nan_loop);
- assembler->Bind(&search_notnan);
- assembler->GotoIf(assembler->WordNotEqual(
- assembler->LoadMap(search_element), heap_number_map),
- &return_not_found);
+ assembler.Bind(&search_notnan);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+ &return_not_found);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
- assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
- &not_nan_loop);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+ &not_nan_loop);
// Search for HeapNumber
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
// Load double value or continue if it contains a double hole.
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
- &return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+ &return_found, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
}
- assembler->Bind(&return_found);
- assembler->Return(assembler->ChangeInt32ToTagged(index_var.value()));
+ assembler.Bind(&return_found);
+ assembler.Return(assembler.SmiTag(index_var.value()));
- assembler->Bind(&return_not_found);
- assembler->Return(assembler->NumberConstant(-1));
+ assembler.Bind(&return_not_found);
+ assembler.Return(assembler.NumberConstant(-1));
- assembler->Bind(&call_runtime);
- assembler->Return(assembler->CallRuntime(Runtime::kArrayIndexOf, context,
- array, search_element, start_from));
+ assembler.Bind(&call_runtime);
+ assembler.Return(assembler.CallRuntime(Runtime::kArrayIndexOf, context, array,
+ search_element, start_from));
}
namespace {
template <IterationKind kIterationKind>
-void Generate_ArrayPrototypeIterationMethod(CodeStubAssembler* assembler) {
+void Generate_ArrayPrototypeIterationMethod(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Variable var_array(assembler, MachineRepresentation::kTagged);
- Variable var_map(assembler, MachineRepresentation::kTagged);
- Variable var_type(assembler, MachineRepresentation::kWord32);
+ Variable var_array(&assembler, MachineRepresentation::kTagged);
+ Variable var_map(&assembler, MachineRepresentation::kTagged);
+ Variable var_type(&assembler, MachineRepresentation::kWord32);
- Label if_isnotobject(assembler, Label::kDeferred);
- Label create_array_iterator(assembler);
+ Label if_isnotobject(&assembler, Label::kDeferred);
+ Label create_array_iterator(&assembler);
- assembler->GotoIf(assembler->TaggedIsSmi(receiver), &if_isnotobject);
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &if_isnotobject);
var_array.Bind(receiver);
- var_map.Bind(assembler->LoadMap(receiver));
- var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
- assembler->Branch(assembler->IsJSReceiverInstanceType(var_type.value()),
- &create_array_iterator, &if_isnotobject);
+ var_map.Bind(assembler.LoadMap(receiver));
+ var_type.Bind(assembler.LoadMapInstanceType(var_map.value()));
+ assembler.Branch(assembler.IsJSReceiverInstanceType(var_type.value()),
+ &create_array_iterator, &if_isnotobject);
- assembler->Bind(&if_isnotobject);
+ assembler.Bind(&if_isnotobject);
{
- Callable callable = CodeFactory::ToObject(assembler->isolate());
- Node* result = assembler->CallStub(callable, context, receiver);
+ Callable callable = CodeFactory::ToObject(assembler.isolate());
+ Node* result = assembler.CallStub(callable, context, receiver);
var_array.Bind(result);
- var_map.Bind(assembler->LoadMap(result));
- var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
- assembler->Goto(&create_array_iterator);
+ var_map.Bind(assembler.LoadMap(result));
+ var_type.Bind(assembler.LoadMapInstanceType(var_map.value()));
+ assembler.Goto(&create_array_iterator);
}
- assembler->Bind(&create_array_iterator);
- assembler->Return(assembler->CreateArrayIterator(
- var_array.value(), var_map.value(), var_type.value(), context,
- kIterationKind));
+ assembler.Bind(&create_array_iterator);
+ assembler.Return(
+ assembler.CreateArrayIterator(var_array.value(), var_map.value(),
+ var_type.value(), context, kIterationKind));
}
} // namespace
-void Builtins::Generate_ArrayPrototypeValues(CodeStubAssembler* assembler) {
- Generate_ArrayPrototypeIterationMethod<IterationKind::kValues>(assembler);
+void Builtins::Generate_ArrayPrototypeValues(
+ compiler::CodeAssemblerState* state) {
+ Generate_ArrayPrototypeIterationMethod<IterationKind::kValues>(state);
}
-void Builtins::Generate_ArrayPrototypeEntries(CodeStubAssembler* assembler) {
- Generate_ArrayPrototypeIterationMethod<IterationKind::kEntries>(assembler);
+void Builtins::Generate_ArrayPrototypeEntries(
+ compiler::CodeAssemblerState* state) {
+ Generate_ArrayPrototypeIterationMethod<IterationKind::kEntries>(state);
}
-void Builtins::Generate_ArrayPrototypeKeys(CodeStubAssembler* assembler) {
- Generate_ArrayPrototypeIterationMethod<IterationKind::kKeys>(assembler);
+void Builtins::Generate_ArrayPrototypeKeys(
+ compiler::CodeAssemblerState* state) {
+ Generate_ArrayPrototypeIterationMethod<IterationKind::kKeys>(state);
}
void Builtins::Generate_ArrayIteratorPrototypeNext(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
+
+ Handle<String> operation = assembler.factory()->NewStringFromAsciiChecked(
+ "Array Iterator.prototype.next", TENURED);
- Node* iterator = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* iterator = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Variable var_value(assembler, MachineRepresentation::kTagged);
- Variable var_done(assembler, MachineRepresentation::kTagged);
+ Variable var_value(&assembler, MachineRepresentation::kTagged);
+ Variable var_done(&assembler, MachineRepresentation::kTagged);
// Required, or else `throw_bad_receiver` fails a DCHECK due to these
// variables not being bound along all paths, despite not being used.
- var_done.Bind(assembler->TrueConstant());
- var_value.Bind(assembler->UndefinedConstant());
+ var_done.Bind(assembler.TrueConstant());
+ var_value.Bind(assembler.UndefinedConstant());
- Label throw_bad_receiver(assembler, Label::kDeferred);
- Label set_done(assembler);
- Label allocate_key_result(assembler);
- Label allocate_entry_if_needed(assembler);
- Label allocate_iterator_result(assembler);
- Label generic_values(assembler);
+ Label throw_bad_receiver(&assembler, Label::kDeferred);
+ Label set_done(&assembler);
+ Label allocate_key_result(&assembler);
+ Label allocate_entry_if_needed(&assembler);
+ Label allocate_iterator_result(&assembler);
+ Label generic_values(&assembler);
// If O does not have all of the internal slots of an Array Iterator Instance
// (22.1.5.3), throw a TypeError exception
- assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
- Node* instance_type = assembler->LoadInstanceType(iterator);
- assembler->GotoIf(
- assembler->Uint32LessThan(
- assembler->Int32Constant(LAST_ARRAY_ITERATOR_TYPE -
- FIRST_ARRAY_ITERATOR_TYPE),
- assembler->Int32Sub(instance_type, assembler->Int32Constant(
- FIRST_ARRAY_ITERATOR_TYPE))),
+ assembler.GotoIf(assembler.TaggedIsSmi(iterator), &throw_bad_receiver);
+ Node* instance_type = assembler.LoadInstanceType(iterator);
+ assembler.GotoIf(
+ assembler.Uint32LessThan(
+ assembler.Int32Constant(LAST_ARRAY_ITERATOR_TYPE -
+ FIRST_ARRAY_ITERATOR_TYPE),
+ assembler.Int32Sub(instance_type, assembler.Int32Constant(
+ FIRST_ARRAY_ITERATOR_TYPE))),
&throw_bad_receiver);
// Let a be O.[[IteratedObject]].
- Node* array = assembler->LoadObjectField(
+ Node* array = assembler.LoadObjectField(
iterator, JSArrayIterator::kIteratedObjectOffset);
// Let index be O.[[ArrayIteratorNextIndex]].
Node* index =
- assembler->LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
- Node* orig_map = assembler->LoadObjectField(
+ assembler.LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
+ Node* orig_map = assembler.LoadObjectField(
iterator, JSArrayIterator::kIteratedObjectMapOffset);
- Node* array_map = assembler->LoadMap(array);
+ Node* array_map = assembler.LoadMap(array);
- Label if_isfastarray(assembler), if_isnotfastarray(assembler);
+ Label if_isfastarray(&assembler), if_isnotfastarray(&assembler),
+ if_isdetached(&assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(orig_map, array_map), &if_isfastarray,
- &if_isnotfastarray);
+ assembler.Branch(assembler.WordEqual(orig_map, array_map), &if_isfastarray,
+ &if_isnotfastarray);
- assembler->Bind(&if_isfastarray);
+ assembler.Bind(&if_isfastarray);
{
- CSA_ASSERT(assembler,
- assembler->Word32Equal(assembler->LoadMapInstanceType(array_map),
- assembler->Int32Constant(JS_ARRAY_TYPE)));
+ CSA_ASSERT(&assembler,
+ assembler.Word32Equal(assembler.LoadMapInstanceType(array_map),
+ assembler.Int32Constant(JS_ARRAY_TYPE)));
- Node* length = assembler->LoadObjectField(array, JSArray::kLengthOffset);
+ Node* length = assembler.LoadObjectField(array, JSArray::kLengthOffset);
- CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
- CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
+ CSA_ASSERT(&assembler, assembler.TaggedIsSmi(length));
+ CSA_ASSERT(&assembler, assembler.TaggedIsSmi(index));
- assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
+ assembler.GotoUnless(assembler.SmiBelow(index, length), &set_done);
- Node* one = assembler->SmiConstant(Smi::FromInt(1));
- assembler->StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kNextIndexOffset,
- assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
- assembler->BitcastTaggedToWord(one)));
+ Node* one = assembler.SmiConstant(Smi::FromInt(1));
+ assembler.StoreObjectFieldNoWriteBarrier(iterator,
+ JSArrayIterator::kNextIndexOffset,
+ assembler.SmiAdd(index, one));
- var_done.Bind(assembler->FalseConstant());
- Node* elements = assembler->LoadElements(array);
+ var_done.Bind(assembler.FalseConstant());
+ Node* elements = assembler.LoadElements(array);
static int32_t kInstanceType[] = {
JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
@@ -2225,8 +2373,8 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
};
- Label packed_object_values(assembler), holey_object_values(assembler),
- packed_double_values(assembler), holey_double_values(assembler);
+ Label packed_object_values(&assembler), holey_object_values(&assembler),
+ packed_double_values(&assembler), holey_double_values(&assembler);
Label* kInstanceTypeHandlers[] = {
&allocate_key_result, &packed_object_values, &holey_object_values,
&packed_object_values, &holey_object_values, &packed_double_values,
@@ -2234,216 +2382,192 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
&packed_object_values, &holey_object_values, &packed_double_values,
&holey_double_values};
- assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
- kInstanceTypeHandlers, arraysize(kInstanceType));
+ assembler.Switch(instance_type, &throw_bad_receiver, kInstanceType,
+ kInstanceTypeHandlers, arraysize(kInstanceType));
- assembler->Bind(&packed_object_values);
+ assembler.Bind(&packed_object_values);
{
- var_value.Bind(assembler->LoadFixedArrayElement(
+ var_value.Bind(assembler.LoadFixedArrayElement(
elements, index, 0, CodeStubAssembler::SMI_PARAMETERS));
- assembler->Goto(&allocate_entry_if_needed);
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&packed_double_values);
+ assembler.Bind(&packed_double_values);
{
- Node* value = assembler->LoadFixedDoubleArrayElement(
+ Node* value = assembler.LoadFixedDoubleArrayElement(
elements, index, MachineType::Float64(), 0,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.AllocateHeapNumberWithValue(value));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&holey_object_values);
+ assembler.Bind(&holey_object_values);
{
// Check the array_protector cell, and take the slow path if it's invalid.
Node* invalid =
- assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
- Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+ assembler.SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* cell = assembler.LoadRoot(Heap::kArrayProtectorRootIndex);
Node* cell_value =
- assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
- assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
- &generic_values);
+ assembler.LoadObjectField(cell, PropertyCell::kValueOffset);
+ assembler.GotoIf(assembler.WordEqual(cell_value, invalid),
+ &generic_values);
- var_value.Bind(assembler->UndefinedConstant());
- Node* value = assembler->LoadFixedArrayElement(
+ var_value.Bind(assembler.UndefinedConstant());
+ Node* value = assembler.LoadFixedArrayElement(
elements, index, 0, CodeStubAssembler::SMI_PARAMETERS);
- assembler->GotoIf(
- assembler->WordEqual(value, assembler->TheHoleConstant()),
- &allocate_entry_if_needed);
+ assembler.GotoIf(assembler.WordEqual(value, assembler.TheHoleConstant()),
+ &allocate_entry_if_needed);
var_value.Bind(value);
- assembler->Goto(&allocate_entry_if_needed);
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&holey_double_values);
+ assembler.Bind(&holey_double_values);
{
// Check the array_protector cell, and take the slow path if it's invalid.
Node* invalid =
- assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
- Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+ assembler.SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* cell = assembler.LoadRoot(Heap::kArrayProtectorRootIndex);
Node* cell_value =
- assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
- assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
- &generic_values);
+ assembler.LoadObjectField(cell, PropertyCell::kValueOffset);
+ assembler.GotoIf(assembler.WordEqual(cell_value, invalid),
+ &generic_values);
- var_value.Bind(assembler->UndefinedConstant());
- Node* value = assembler->LoadFixedDoubleArrayElement(
+ var_value.Bind(assembler.UndefinedConstant());
+ Node* value = assembler.LoadFixedDoubleArrayElement(
elements, index, MachineType::Float64(), 0,
CodeStubAssembler::SMI_PARAMETERS, &allocate_entry_if_needed);
- var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.AllocateHeapNumberWithValue(value));
+ assembler.Goto(&allocate_entry_if_needed);
}
}
- assembler->Bind(&if_isnotfastarray);
+ assembler.Bind(&if_isnotfastarray);
{
- Label if_istypedarray(assembler), if_isgeneric(assembler);
+ Label if_istypedarray(&assembler), if_isgeneric(&assembler);
// If a is undefined, return CreateIterResultObject(undefined, true)
- assembler->GotoIf(
- assembler->WordEqual(array, assembler->UndefinedConstant()),
- &allocate_iterator_result);
+ assembler.GotoIf(assembler.WordEqual(array, assembler.UndefinedConstant()),
+ &allocate_iterator_result);
- Node* array_type = assembler->LoadInstanceType(array);
- assembler->Branch(
- assembler->Word32Equal(array_type,
- assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ Node* array_type = assembler.LoadInstanceType(array);
+ assembler.Branch(
+ assembler.Word32Equal(array_type,
+ assembler.Int32Constant(JS_TYPED_ARRAY_TYPE)),
&if_istypedarray, &if_isgeneric);
- assembler->Bind(&if_isgeneric);
+ assembler.Bind(&if_isgeneric);
{
- Label if_wasfastarray(assembler);
+ Label if_wasfastarray(&assembler);
Node* length = nullptr;
{
- Variable var_length(assembler, MachineRepresentation::kTagged);
- Label if_isarray(assembler), if_isnotarray(assembler), done(assembler);
- assembler->Branch(
- assembler->Word32Equal(array_type,
- assembler->Int32Constant(JS_ARRAY_TYPE)),
+ Variable var_length(&assembler, MachineRepresentation::kTagged);
+ Label if_isarray(&assembler), if_isnotarray(&assembler),
+ done(&assembler);
+ assembler.Branch(
+ assembler.Word32Equal(array_type,
+ assembler.Int32Constant(JS_ARRAY_TYPE)),
&if_isarray, &if_isnotarray);
- assembler->Bind(&if_isarray);
+ assembler.Bind(&if_isarray);
{
var_length.Bind(
- assembler->LoadObjectField(array, JSArray::kLengthOffset));
+ assembler.LoadObjectField(array, JSArray::kLengthOffset));
// Invalidate protector cell if needed
- assembler->Branch(
- assembler->WordNotEqual(orig_map, assembler->UndefinedConstant()),
+ assembler.Branch(
+ assembler.WordNotEqual(orig_map, assembler.UndefinedConstant()),
&if_wasfastarray, &done);
- assembler->Bind(&if_wasfastarray);
+ assembler.Bind(&if_wasfastarray);
{
- Label if_invalid(assembler, Label::kDeferred);
+ Label if_invalid(&assembler, Label::kDeferred);
// A fast array iterator transitioned to a slow iterator during
// iteration. Invalidate fast_array_iteration_prtoector cell to
// prevent potential deopt loops.
- assembler->StoreObjectFieldNoWriteBarrier(
+ assembler.StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kIteratedObjectMapOffset,
- assembler->UndefinedConstant());
- assembler->GotoIf(
- assembler->Uint32LessThanOrEqual(
- instance_type, assembler->Int32Constant(
+ assembler.UndefinedConstant());
+ assembler.GotoIf(
+ assembler.Uint32LessThanOrEqual(
+ instance_type, assembler.Int32Constant(
JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
&done);
- Node* invalid = assembler->SmiConstant(
- Smi::FromInt(Isolate::kProtectorInvalid));
- Node* cell = assembler->LoadRoot(
- Heap::kFastArrayIterationProtectorRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset,
- invalid);
- assembler->Goto(&done);
+ Node* invalid =
+ assembler.SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* cell =
+ assembler.LoadRoot(Heap::kFastArrayIterationProtectorRootIndex);
+ assembler.StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset,
+ invalid);
+ assembler.Goto(&done);
}
}
- assembler->Bind(&if_isnotarray);
+ assembler.Bind(&if_isnotarray);
{
- Node* length_string = assembler->HeapConstant(
- assembler->isolate()->factory()->length_string());
- Callable get_property =
- CodeFactory::GetProperty(assembler->isolate());
+ Node* length_string = assembler.HeapConstant(
+ assembler.isolate()->factory()->length_string());
+ Callable get_property = CodeFactory::GetProperty(assembler.isolate());
Node* length =
- assembler->CallStub(get_property, context, array, length_string);
- Callable to_length = CodeFactory::ToLength(assembler->isolate());
- var_length.Bind(assembler->CallStub(to_length, context, length));
- assembler->Goto(&done);
+ assembler.CallStub(get_property, context, array, length_string);
+ Callable to_length = CodeFactory::ToLength(assembler.isolate());
+ var_length.Bind(assembler.CallStub(to_length, context, length));
+ assembler.Goto(&done);
}
- assembler->Bind(&done);
+ assembler.Bind(&done);
length = var_length.value();
}
- assembler->GotoUnlessNumberLessThan(index, length, &set_done);
+ assembler.GotoUnlessNumberLessThan(index, length, &set_done);
- assembler->StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
- assembler->NumberInc(index));
- var_done.Bind(assembler->FalseConstant());
+ assembler.StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
+ assembler.NumberInc(index));
+ var_done.Bind(assembler.FalseConstant());
- assembler->Branch(
- assembler->Uint32LessThanOrEqual(
+ assembler.Branch(
+ assembler.Uint32LessThanOrEqual(
instance_type,
- assembler->Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
+ assembler.Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
&allocate_key_result, &generic_values);
- assembler->Bind(&generic_values);
+ assembler.Bind(&generic_values);
{
- Callable get_property = CodeFactory::GetProperty(assembler->isolate());
- var_value.Bind(
- assembler->CallStub(get_property, context, array, index));
- assembler->Goto(&allocate_entry_if_needed);
+ Callable get_property = CodeFactory::GetProperty(assembler.isolate());
+ var_value.Bind(assembler.CallStub(get_property, context, array, index));
+ assembler.Goto(&allocate_entry_if_needed);
}
}
- assembler->Bind(&if_istypedarray);
+ assembler.Bind(&if_istypedarray);
{
- Node* length = nullptr;
- {
- Variable var_length(assembler, MachineRepresentation::kTagged);
- Label if_isdetached(assembler, Label::kDeferred),
- if_isnotdetached(assembler), done(assembler);
+ Node* buffer =
+ assembler.LoadObjectField(array, JSTypedArray::kBufferOffset);
+ assembler.GotoIf(assembler.IsDetachedBuffer(buffer), &if_isdetached);
- Node* buffer =
- assembler->LoadObjectField(array, JSTypedArray::kBufferOffset);
- assembler->Branch(assembler->IsDetachedBuffer(buffer), &if_isdetached,
- &if_isnotdetached);
+ Node* length =
+ assembler.LoadObjectField(array, JSTypedArray::kLengthOffset);
- assembler->Bind(&if_isnotdetached);
- {
- var_length.Bind(
- assembler->LoadObjectField(array, JSTypedArray::kLengthOffset));
- assembler->Goto(&done);
- }
+ CSA_ASSERT(&assembler, assembler.TaggedIsSmi(length));
+ CSA_ASSERT(&assembler, assembler.TaggedIsSmi(index));
- assembler->Bind(&if_isdetached);
- {
- // TODO(caitp): If IsDetached(buffer) is true, throw a TypeError, per
- // https://github.com/tc39/ecma262/issues/713
- var_length.Bind(assembler->SmiConstant(Smi::kZero));
- assembler->Goto(&done);
- }
-
- assembler->Bind(&done);
- length = var_length.value();
- }
- CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
- CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
-
- assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
+ assembler.GotoUnless(assembler.SmiBelow(index, length), &set_done);
- Node* one = assembler->SmiConstant(Smi::FromInt(1));
- assembler->StoreObjectFieldNoWriteBarrier(
+ Node* one = assembler.SmiConstant(1);
+ assembler.StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kNextIndexOffset,
- assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
- assembler->BitcastTaggedToWord(one)));
- var_done.Bind(assembler->FalseConstant());
+ assembler.SmiAdd(index, one));
+ var_done.Bind(assembler.FalseConstant());
- Node* elements = assembler->LoadElements(array);
- Node* base_ptr = assembler->LoadObjectField(
+ Node* elements = assembler.LoadElements(array);
+ Node* base_ptr = assembler.LoadObjectField(
elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* external_ptr = assembler->LoadObjectField(
- elements, FixedTypedArrayBase::kExternalPointerOffset);
- Node* data_ptr = assembler->IntPtrAdd(base_ptr, external_ptr);
+ Node* external_ptr = assembler.LoadObjectField(
+ elements, FixedTypedArrayBase::kExternalPointerOffset,
+ MachineType::Pointer());
+ Node* data_ptr = assembler.IntPtrAdd(
+ assembler.BitcastTaggedToWord(base_ptr), external_ptr);
static int32_t kInstanceType[] = {
JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
@@ -2467,10 +2591,10 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
};
- Label uint8_values(assembler), int8_values(assembler),
- uint16_values(assembler), int16_values(assembler),
- uint32_values(assembler), int32_values(assembler),
- float32_values(assembler), float64_values(assembler);
+ Label uint8_values(&assembler), int8_values(&assembler),
+ uint16_values(&assembler), int16_values(&assembler),
+ uint32_values(&assembler), int32_values(&assembler),
+ float32_values(&assembler), float64_values(&assembler);
Label* kInstanceTypeHandlers[] = {
&allocate_key_result, &uint8_values, &uint8_values,
&int8_values, &uint16_values, &int16_values,
@@ -2481,152 +2605,158 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
&float64_values,
};
- var_done.Bind(assembler->FalseConstant());
- assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
- kInstanceTypeHandlers, arraysize(kInstanceType));
+ var_done.Bind(assembler.FalseConstant());
+ assembler.Switch(instance_type, &throw_bad_receiver, kInstanceType,
+ kInstanceTypeHandlers, arraysize(kInstanceType));
- assembler->Bind(&uint8_values);
+ assembler.Bind(&uint8_values);
{
- Node* value_uint8 = assembler->LoadFixedTypedArrayElement(
+ Node* value_uint8 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, UINT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->SmiFromWord(value_uint8));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.SmiFromWord32(value_uint8));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&int8_values);
+ assembler.Bind(&int8_values);
{
- Node* value_int8 = assembler->LoadFixedTypedArrayElement(
+ Node* value_int8 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, INT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->SmiFromWord(value_int8));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.SmiFromWord32(value_int8));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&uint16_values);
+ assembler.Bind(&uint16_values);
{
- Node* value_uint16 = assembler->LoadFixedTypedArrayElement(
+ Node* value_uint16 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, UINT16_ELEMENTS,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->SmiFromWord(value_uint16));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.SmiFromWord32(value_uint16));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&int16_values);
+ assembler.Bind(&int16_values);
{
- Node* value_int16 = assembler->LoadFixedTypedArrayElement(
+ Node* value_int16 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, INT16_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->SmiFromWord(value_int16));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.SmiFromWord32(value_int16));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&uint32_values);
+ assembler.Bind(&uint32_values);
{
- Node* value_uint32 = assembler->LoadFixedTypedArrayElement(
+ Node* value_uint32 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, UINT32_ELEMENTS,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->ChangeUint32ToTagged(value_uint32));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.ChangeUint32ToTagged(value_uint32));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&int32_values);
+ assembler.Bind(&int32_values);
{
- Node* value_int32 = assembler->LoadFixedTypedArrayElement(
+ Node* value_int32 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, INT32_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->ChangeInt32ToTagged(value_int32));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.ChangeInt32ToTagged(value_int32));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&float32_values);
+ assembler.Bind(&float32_values);
{
- Node* value_float32 = assembler->LoadFixedTypedArrayElement(
+ Node* value_float32 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, FLOAT32_ELEMENTS,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->AllocateHeapNumberWithValue(
- assembler->ChangeFloat32ToFloat64(value_float32)));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.AllocateHeapNumberWithValue(
+ assembler.ChangeFloat32ToFloat64(value_float32)));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&float64_values);
+ assembler.Bind(&float64_values);
{
- Node* value_float64 = assembler->LoadFixedTypedArrayElement(
+ Node* value_float64 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, FLOAT64_ELEMENTS,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->AllocateHeapNumberWithValue(value_float64));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.AllocateHeapNumberWithValue(value_float64));
+ assembler.Goto(&allocate_entry_if_needed);
}
}
}
- assembler->Bind(&set_done);
+ assembler.Bind(&set_done);
{
- assembler->StoreObjectFieldNoWriteBarrier(
+ assembler.StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kIteratedObjectOffset,
- assembler->UndefinedConstant());
- assembler->Goto(&allocate_iterator_result);
+ assembler.UndefinedConstant());
+ assembler.Goto(&allocate_iterator_result);
}
- assembler->Bind(&allocate_key_result);
+ assembler.Bind(&allocate_key_result);
{
var_value.Bind(index);
- var_done.Bind(assembler->FalseConstant());
- assembler->Goto(&allocate_iterator_result);
+ var_done.Bind(assembler.FalseConstant());
+ assembler.Goto(&allocate_iterator_result);
}
- assembler->Bind(&allocate_entry_if_needed);
+ assembler.Bind(&allocate_entry_if_needed);
{
- assembler->GotoIf(
- assembler->Int32GreaterThan(
+ assembler.GotoIf(
+ assembler.Int32GreaterThan(
instance_type,
- assembler->Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
+ assembler.Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
&allocate_iterator_result);
- Node* elements = assembler->AllocateFixedArray(FAST_ELEMENTS,
- assembler->Int32Constant(2));
- assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(0),
- index, SKIP_WRITE_BARRIER);
- assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(1),
- var_value.value(), SKIP_WRITE_BARRIER);
-
- Node* entry = assembler->Allocate(JSArray::kSize);
- Node* map = assembler->LoadContextElement(
- assembler->LoadNativeContext(context),
- Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
-
- assembler->StoreMapNoWriteBarrier(entry, map);
- assembler->StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset,
- elements);
- assembler->StoreObjectFieldNoWriteBarrier(
- entry, JSArray::kLengthOffset, assembler->SmiConstant(Smi::FromInt(2)));
+ Node* elements = assembler.AllocateFixedArray(FAST_ELEMENTS,
+ assembler.IntPtrConstant(2));
+ assembler.StoreFixedArrayElement(elements, 0, index, SKIP_WRITE_BARRIER);
+ assembler.StoreFixedArrayElement(elements, 1, var_value.value(),
+ SKIP_WRITE_BARRIER);
+
+ Node* entry = assembler.Allocate(JSArray::kSize);
+ Node* map =
+ assembler.LoadContextElement(assembler.LoadNativeContext(context),
+ Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
+
+ assembler.StoreMapNoWriteBarrier(entry, map);
+ assembler.StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset,
+ elements);
+ assembler.StoreObjectFieldNoWriteBarrier(
+ entry, JSArray::kLengthOffset, assembler.SmiConstant(Smi::FromInt(2)));
var_value.Bind(entry);
- assembler->Goto(&allocate_iterator_result);
+ assembler.Goto(&allocate_iterator_result);
}
- assembler->Bind(&allocate_iterator_result);
+ assembler.Bind(&allocate_iterator_result);
{
- Node* result = assembler->Allocate(JSIteratorResult::kSize);
+ Node* result = assembler.Allocate(JSIteratorResult::kSize);
Node* map =
- assembler->LoadContextElement(assembler->LoadNativeContext(context),
- Context::ITERATOR_RESULT_MAP_INDEX);
- assembler->StoreMapNoWriteBarrier(result, map);
- assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(
+ assembler.LoadContextElement(assembler.LoadNativeContext(context),
+ Context::ITERATOR_RESULT_MAP_INDEX);
+ assembler.StoreMapNoWriteBarrier(result, map);
+ assembler.StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectFieldNoWriteBarrier(
result, JSIteratorResult::kValueOffset, var_value.value());
- assembler->StoreObjectFieldNoWriteBarrier(
+ assembler.StoreObjectFieldNoWriteBarrier(
result, JSIteratorResult::kDoneOffset, var_done.value());
- assembler->Return(result);
+ assembler.Return(result);
}
- assembler->Bind(&throw_bad_receiver);
+ assembler.Bind(&throw_bad_receiver);
{
// The {receiver} is not a valid JSArrayIterator.
- Node* result = assembler->CallRuntime(
+ Node* result = assembler.CallRuntime(
Runtime::kThrowIncompatibleMethodReceiver, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
- "Array Iterator.prototype.next", TENURED)),
- iterator);
- assembler->Return(result);
+ assembler.HeapConstant(operation), iterator);
+ assembler.Return(result);
+ }
+
+ assembler.Bind(&if_isdetached);
+ {
+ Node* message = assembler.SmiConstant(MessageTemplate::kDetachedOperation);
+ Node* result =
+ assembler.CallRuntime(Runtime::kThrowTypeError, context, message,
+ assembler.HeapConstant(operation));
+ assembler.Return(result);
}
}
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index e7ccf95973..81232230ff 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -34,28 +35,32 @@ BUILTIN(BooleanConstructor_ConstructStub) {
}
// ES6 section 19.3.3.2 Boolean.prototype.toString ( )
-void Builtins::Generate_BooleanPrototypeToString(CodeStubAssembler* assembler) {
+void Builtins::Generate_BooleanPrototypeToString(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Node* value = assembler->ToThisValue(
+ Node* value = assembler.ToThisValue(
context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.toString");
- Node* result = assembler->LoadObjectField(value, Oddball::kToStringOffset);
- assembler->Return(result);
+ Node* result = assembler.LoadObjectField(value, Oddball::kToStringOffset);
+ assembler.Return(result);
}
// ES6 section 19.3.3.3 Boolean.prototype.valueOf ( )
-void Builtins::Generate_BooleanPrototypeValueOf(CodeStubAssembler* assembler) {
+void Builtins::Generate_BooleanPrototypeValueOf(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Node* result = assembler->ToThisValue(
+ Node* result = assembler.ToThisValue(
context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.valueOf");
- assembler->Return(result);
+ assembler.Return(result);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-constructor.cc b/deps/v8/src/builtins/builtins-constructor.cc
new file mode 100644
index 0000000000..db3ffb0b91
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-constructor.cc
@@ -0,0 +1,772 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-constructor.h"
+#include "src/ast/ast.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
+ Node* feedback_vector,
+ Node* slot,
+ Node* context) {
+ typedef compiler::CodeAssembler::Label Label;
+ typedef compiler::CodeAssembler::Variable Variable;
+
+ Isolate* isolate = this->isolate();
+ Factory* factory = isolate->factory();
+ IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
+
+ // Create a new closure from the given function info in new space
+ Node* result = Allocate(JSFunction::kSize);
+
+ // Calculate the index of the map we should install on the function based on
+ // the FunctionKind and LanguageMode of the function.
+ // Note: Must be kept in sync with Context::FunctionMapIndex
+ Node* compiler_hints =
+ LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+ MachineType::Uint32());
+ Node* is_strict = Word32And(
+ compiler_hints, Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
+
+ Label if_normal(this), if_generator(this), if_async(this),
+ if_class_constructor(this), if_function_without_prototype(this),
+ load_map(this);
+ Variable map_index(this, MachineType::PointerRepresentation());
+
+ STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
+ Node* is_not_normal =
+ Word32And(compiler_hints,
+ Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
+ GotoUnless(is_not_normal, &if_normal);
+
+ Node* is_generator = Word32And(
+ compiler_hints, Int32Constant(FunctionKind::kGeneratorFunction
+ << SharedFunctionInfo::kFunctionKindShift));
+ GotoIf(is_generator, &if_generator);
+
+ Node* is_async = Word32And(
+ compiler_hints, Int32Constant(FunctionKind::kAsyncFunction
+ << SharedFunctionInfo::kFunctionKindShift));
+ GotoIf(is_async, &if_async);
+
+ Node* is_class_constructor = Word32And(
+ compiler_hints, Int32Constant(FunctionKind::kClassConstructor
+ << SharedFunctionInfo::kFunctionKindShift));
+ GotoIf(is_class_constructor, &if_class_constructor);
+
+ if (FLAG_debug_code) {
+ // Function must be a function without a prototype.
+ CSA_ASSERT(
+ this,
+ Word32And(compiler_hints,
+ Int32Constant((FunctionKind::kAccessorFunction |
+ FunctionKind::kArrowFunction |
+ FunctionKind::kConciseMethod)
+ << SharedFunctionInfo::kFunctionKindShift)));
+ }
+ Goto(&if_function_without_prototype);
+
+ Bind(&if_normal);
+ {
+ map_index.Bind(SelectIntPtrConstant(is_strict,
+ Context::STRICT_FUNCTION_MAP_INDEX,
+ Context::SLOPPY_FUNCTION_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&if_generator);
+ {
+ map_index.Bind(IntPtrConstant(Context::GENERATOR_FUNCTION_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&if_async);
+ {
+ map_index.Bind(IntPtrConstant(Context::ASYNC_FUNCTION_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&if_class_constructor);
+ {
+ map_index.Bind(IntPtrConstant(Context::CLASS_FUNCTION_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&if_function_without_prototype);
+ {
+ map_index.Bind(
+ IntPtrConstant(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&load_map);
+
+ // Get the function map in the current native context and set that
+ // as the map of the allocated object.
+ Node* native_context = LoadNativeContext(context);
+ Node* map_slot_value =
+ LoadFixedArrayElement(native_context, map_index.value());
+ StoreMapNoWriteBarrier(result, map_slot_value);
+
+ // Initialize the rest of the function.
+ Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
+ Node* empty_literals_array = HeapConstant(factory->empty_literals_array());
+ StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
+ empty_fixed_array);
+ StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
+ empty_fixed_array);
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
+ empty_literals_array);
+ StoreObjectFieldNoWriteBarrier(
+ result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
+ shared_info);
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
+ Handle<Code> lazy_builtin_handle(
+ isolate->builtins()->builtin(Builtins::kCompileLazy));
+ Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
+ Node* lazy_builtin_entry =
+ IntPtrAdd(BitcastTaggedToWord(lazy_builtin),
+ IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeEntryOffset,
+ lazy_builtin_entry,
+ MachineType::PointerRepresentation());
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kNextFunctionLinkOffset,
+ UndefinedConstant());
+
+ return result;
+}
+
+TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
+ Node* shared = Parameter(FastNewClosureDescriptor::kSharedFunctionInfo);
+ Node* context = Parameter(FastNewClosureDescriptor::kContext);
+ Node* vector = Parameter(FastNewClosureDescriptor::kVector);
+ Node* slot = Parameter(FastNewClosureDescriptor::kSlot);
+ Return(EmitFastNewClosure(shared, vector, slot, context));
+}
+
+TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
+ typedef FastNewObjectDescriptor Descriptor;
+ Node* context = Parameter(Descriptor::kContext);
+ Node* target = Parameter(Descriptor::kTarget);
+ Node* new_target = Parameter(Descriptor::kNewTarget);
+
+ Label call_runtime(this);
+
+ Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
+ Return(result);
+
+ Bind(&call_runtime);
+ TailCallRuntime(Runtime::kNewObject, context, target, new_target);
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
+ Node* target,
+ Node* new_target) {
+ Variable var_obj(this, MachineRepresentation::kTagged);
+ Label call_runtime(this), end(this);
+
+ Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
+ var_obj.Bind(result);
+ Goto(&end);
+
+ Bind(&call_runtime);
+ var_obj.Bind(CallRuntime(Runtime::kNewObject, context, target, new_target));
+ Goto(&end);
+
+ Bind(&end);
+ return var_obj.value();
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewObject(
+ Node* context, Node* target, Node* new_target,
+ CodeAssemblerLabel* call_runtime) {
+ CSA_ASSERT(this, HasInstanceType(target, JS_FUNCTION_TYPE));
+ CSA_ASSERT(this, IsJSReceiver(new_target));
+
+ // Verify that the new target is a JSFunction.
+ Label fast(this), end(this);
+ GotoIf(HasInstanceType(new_target, JS_FUNCTION_TYPE), &fast);
+ Goto(call_runtime);
+
+ Bind(&fast);
+
+ // Load the initial map and verify that it's in fact a map.
+ Node* initial_map =
+ LoadObjectField(new_target, JSFunction::kPrototypeOrInitialMapOffset);
+ GotoIf(TaggedIsSmi(initial_map), call_runtime);
+ GotoIf(DoesntHaveInstanceType(initial_map, MAP_TYPE), call_runtime);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ Node* new_target_constructor =
+ LoadObjectField(initial_map, Map::kConstructorOrBackPointerOffset);
+ GotoIf(WordNotEqual(target, new_target_constructor), call_runtime);
+
+ Node* instance_size_words = ChangeUint32ToWord(LoadObjectField(
+ initial_map, Map::kInstanceSizeOffset, MachineType::Uint8()));
+ Node* instance_size =
+ WordShl(instance_size_words, IntPtrConstant(kPointerSizeLog2));
+
+ Node* object = Allocate(instance_size);
+ StoreMapNoWriteBarrier(object, initial_map);
+ Node* empty_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOffset,
+ empty_array);
+ StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset,
+ empty_array);
+
+ instance_size_words = ChangeUint32ToWord(LoadObjectField(
+ initial_map, Map::kInstanceSizeOffset, MachineType::Uint8()));
+ instance_size =
+ WordShl(instance_size_words, IntPtrConstant(kPointerSizeLog2));
+
+ // Perform in-object slack tracking if requested.
+ Node* bit_field3 = LoadMapBitField3(initial_map);
+ Label slack_tracking(this), finalize(this, Label::kDeferred), done(this);
+ GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
+
+ // Initialize remaining fields.
+ {
+ Comment("no slack tracking");
+ InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
+ instance_size, Heap::kUndefinedValueRootIndex);
+ Goto(&end);
+ }
+
+ {
+ Bind(&slack_tracking);
+
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ Comment("update allocation count");
+ Node* new_bit_field3 = Int32Sub(
+ bit_field3, Int32Constant(1 << Map::ConstructionCounter::kShift));
+ StoreObjectFieldNoWriteBarrier(initial_map, Map::kBitField3Offset,
+ new_bit_field3,
+ MachineRepresentation::kWord32);
+ GotoIf(IsClearWord32<Map::ConstructionCounter>(new_bit_field3), &finalize);
+
+ Node* unused_fields = LoadObjectField(
+ initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
+ Node* used_size =
+ IntPtrSub(instance_size, WordShl(ChangeUint32ToWord(unused_fields),
+ IntPtrConstant(kPointerSizeLog2)));
+
+ Comment("initialize filler fields (no finalize)");
+ InitializeFieldsWithRoot(object, used_size, instance_size,
+ Heap::kOnePointerFillerMapRootIndex);
+
+ Comment("initialize undefined fields (no finalize)");
+ InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
+ used_size, Heap::kUndefinedValueRootIndex);
+ Goto(&end);
+ }
+
+ {
+ // Finalize the instance size.
+ Bind(&finalize);
+
+ Node* unused_fields = LoadObjectField(
+ initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
+ Node* used_size =
+ IntPtrSub(instance_size, WordShl(ChangeUint32ToWord(unused_fields),
+ IntPtrConstant(kPointerSizeLog2)));
+
+ Comment("initialize filler fields (finalize)");
+ InitializeFieldsWithRoot(object, used_size, instance_size,
+ Heap::kOnePointerFillerMapRootIndex);
+
+ Comment("initialize undefined fields (finalize)");
+ InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
+ used_size, Heap::kUndefinedValueRootIndex);
+
+ CallRuntime(Runtime::kFinalizeInstanceSize, context, initial_map);
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return object;
+}
+
+// Allocates a new function or eval context in new space and initializes its
+// fixed slots (closure, previous context, extension, native context); all
+// remaining slots are filled with the undefined value.
+// |slots| is the number of user slots in addition to
+// Context::MIN_CONTEXT_SLOTS; it arrives as an untagged uint32 and is
+// widened to word size below.
+Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
+    Node* function, Node* slots, Node* context, ScopeType scope_type) {
+  slots = ChangeUint32ToWord(slots);
+
+  // TODO(ishell): Use CSA::OptimalParameterMode() here.
+  CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+  Node* min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
+  Node* length = IntPtrAdd(slots, min_context_slots);
+  Node* size = GetFixedArrayAllocationSize(length, FAST_ELEMENTS, mode);
+
+  // Create a new closure from the given function info in new space
+  Node* function_context = Allocate(size);
+
+  // Contexts have FixedArray layout; select the map for the scope type.
+  Heap::RootListIndex context_type;
+  switch (scope_type) {
+    case EVAL_SCOPE:
+      context_type = Heap::kEvalContextMapRootIndex;
+      break;
+    case FUNCTION_SCOPE:
+      context_type = Heap::kFunctionContextMapRootIndex;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  StoreMapNoWriteBarrier(function_context, context_type);
+  StoreObjectFieldNoWriteBarrier(function_context, Context::kLengthOffset,
+                                 SmiTag(length));
+
+  // Set up the fixed slots. Write barriers are skipped because the context
+  // was just allocated in new space (see Allocate above).
+  StoreFixedArrayElement(function_context, Context::CLOSURE_INDEX, function,
+                         SKIP_WRITE_BARRIER);
+  StoreFixedArrayElement(function_context, Context::PREVIOUS_INDEX, context,
+                         SKIP_WRITE_BARRIER);
+  StoreFixedArrayElement(function_context, Context::EXTENSION_INDEX,
+                         TheHoleConstant(), SKIP_WRITE_BARRIER);
+
+  // Copy the native context from the previous context.
+  Node* native_context = LoadNativeContext(context);
+  StoreFixedArrayElement(function_context, Context::NATIVE_CONTEXT_INDEX,
+                         native_context, SKIP_WRITE_BARRIER);
+
+  // Initialize the rest of the slots to undefined.
+  Node* undefined = UndefinedConstant();
+  BuildFastFixedArrayForEach(
+      function_context, FAST_ELEMENTS, min_context_slots, length,
+      [this, undefined](Node* context, Node* offset) {
+        StoreNoWriteBarrier(MachineRepresentation::kTagged, context, offset,
+                            undefined);
+      },
+      mode);
+
+  return function_context;
+}
+
+// static
+// Returns the largest slot count the FastNewFunctionContext builtins handle;
+// the test-only flag shrinks the limit to exercise the fallback path.
+int ConstructorBuiltinsAssembler::MaximumFunctionContextSlots() {
+  return FLAG_test_small_max_function_context_stub_size ? kSmallMaximumSlots
+                                                        : kMaximumSlots;
+}
+
+// Builtin entry point: allocate a new context for an eval scope.
+TF_BUILTIN(FastNewFunctionContextEval, ConstructorBuiltinsAssembler) {
+  Node* function = Parameter(FastNewFunctionContextDescriptor::kFunction);
+  Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
+  Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
+  Return(EmitFastNewFunctionContext(function, slots, context,
+                                    ScopeType::EVAL_SCOPE));
+}
+
+// Builtin entry point: allocate a new context for a function scope.
+TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
+  Node* function = Parameter(FastNewFunctionContextDescriptor::kFunction);
+  Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
+  Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
+  Return(EmitFastNewFunctionContext(function, slots, context,
+                                    ScopeType::FUNCTION_SCOPE));
+}
+
+// Maps a ScopeType to the matching FastNewFunctionContext builtin code
+// object. Only eval and function scopes are supported.
+Handle<Code> Builtins::NewFunctionContext(ScopeType scope_type) {
+  switch (scope_type) {
+    case ScopeType::EVAL_SCOPE:
+      return FastNewFunctionContextEval();
+    case ScopeType::FUNCTION_SCOPE:
+      return FastNewFunctionContextFunction();
+    default:
+      UNREACHABLE();
+  }
+  // UNREACHABLE() aborts; this return only silences compilers that require
+  // a return statement on all paths.
+  return Handle<Code>::null();
+}
+
+// Attempts to shallow-clone the regexp boilerplate stored in the closure's
+// literals array; falls back to Runtime::kCreateRegExpLiteral when no
+// boilerplate has been created for this literal slot yet.
+Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
+                                                        Node* literal_index,
+                                                        Node* pattern,
+                                                        Node* flags,
+                                                        Node* context) {
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+  typedef compiler::Node Node;
+
+  Label call_runtime(this, Label::kDeferred), end(this);
+
+  Variable result(this, MachineRepresentation::kTagged);
+
+  Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* boilerplate =
+      LoadFixedArrayElement(literals_array, literal_index,
+                            LiteralsArray::kFirstLiteralIndex * kPointerSize,
+                            CodeStubAssembler::SMI_PARAMETERS);
+  // An undefined literal slot means the boilerplate does not exist yet.
+  GotoIf(IsUndefined(boilerplate), &call_runtime);
+
+  {
+    // Field-wise shallow copy of the boilerplate JSRegExp, including its
+    // in-object fields.
+    int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+    Node* copy = Allocate(size);
+    for (int offset = 0; offset < size; offset += kPointerSize) {
+      Node* value = LoadObjectField(boilerplate, offset);
+      StoreObjectFieldNoWriteBarrier(copy, offset, value);
+    }
+    result.Bind(copy);
+    Goto(&end);
+  }
+
+  Bind(&call_runtime);
+  {
+    result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context, closure,
+                            literal_index, pattern, flags));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return result.value();
+}
+
+// Builtin entry point: clone a regexp literal, falling back to the runtime
+// inside EmitFastCloneRegExp when needed.
+TF_BUILTIN(FastCloneRegExp, ConstructorBuiltinsAssembler) {
+  Node* closure = Parameter(FastCloneRegExpDescriptor::kClosure);
+  Node* literal_index = Parameter(FastCloneRegExpDescriptor::kLiteralIndex);
+  Node* pattern = Parameter(FastCloneRegExpDescriptor::kPattern);
+  Node* flags = Parameter(FastCloneRegExpDescriptor::kFlags);
+  Node* context = Parameter(FastCloneRegExpDescriptor::kContext);
+
+  Return(EmitFastCloneRegExp(closure, literal_index, pattern, flags, context));
+}
+
+// Clones a non-empty boilerplate JSArray together with its elements backing
+// store. |capacity| is tagged on entry and converted to the optimal
+// parameter mode; |kind| selects the element-copy flavour (tagged vs.
+// double). Returns the freshly allocated array.
+Node* ConstructorBuiltinsAssembler::NonEmptyShallowClone(
+    Node* boilerplate, Node* boilerplate_map, Node* boilerplate_elements,
+    Node* allocation_site, Node* capacity, ElementsKind kind) {
+  typedef CodeStubAssembler::ParameterMode ParameterMode;
+
+  ParameterMode param_mode = OptimalParameterMode();
+
+  Node* length = LoadJSArrayLength(boilerplate);
+  capacity = TaggedToParameter(capacity, param_mode);
+
+  Node *array, *elements;
+  std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+      kind, boilerplate_map, length, allocation_site, capacity, param_mode);
+
+  Comment("copy elements header");
+  // Header consists of map and length.
+  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
+  StoreMap(elements, LoadMap(boilerplate_elements));
+  {
+    int offset = FixedArrayBase::kLengthOffset;
+    StoreObjectFieldNoWriteBarrier(
+        elements, offset, LoadObjectField(boilerplate_elements, offset));
+  }
+
+  length = TaggedToParameter(length, param_mode);
+
+  Comment("copy boilerplate elements");
+  CopyFixedArrayElements(kind, boilerplate_elements, elements, length,
+                         SKIP_WRITE_BARRIER, param_mode);
+  IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1);
+
+  return array;
+}
+
+// Attempts an in-assembler shallow clone of the array boilerplate referenced
+// by the closure's literals array. Jumps to |call_runtime| when no
+// allocation site (and hence no boilerplate) exists yet. Handles four cases:
+// fast (tagged) elements, fast double elements, COW elements (backing store
+// shared, not copied), and zero-capacity arrays (share the empty fixed
+// array).
+Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
+    Node* closure, Node* literal_index, Node* context,
+    CodeAssemblerLabel* call_runtime, AllocationSiteMode allocation_site_mode) {
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+  typedef compiler::Node Node;
+
+  Label zero_capacity(this), cow_elements(this), fast_elements(this),
+      return_result(this);
+  Variable result(this, MachineRepresentation::kTagged);
+
+  Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* allocation_site =
+      LoadFixedArrayElement(literals_array, literal_index,
+                            LiteralsArray::kFirstLiteralIndex * kPointerSize,
+                            CodeStubAssembler::SMI_PARAMETERS);
+
+  // An undefined literal slot means no allocation site has been created yet;
+  // defer to the runtime. The value loaded above is reused on the
+  // fall-through path (the previous identical re-load was redundant).
+  GotoIf(IsUndefined(allocation_site), call_runtime);
+
+  Node* boilerplate =
+      LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+  Node* boilerplate_map = LoadMap(boilerplate);
+  Node* boilerplate_elements = LoadElements(boilerplate);
+  Node* capacity = LoadFixedArrayBaseLength(boilerplate_elements);
+  // Only pass the allocation site onward when the caller tracks sites.
+  allocation_site =
+      allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
+
+  Node* zero = SmiConstant(Smi::kZero);
+  GotoIf(SmiEqual(capacity, zero), &zero_capacity);
+
+  Node* elements_map = LoadMap(boilerplate_elements);
+  GotoIf(IsFixedCOWArrayMap(elements_map), &cow_elements);
+
+  GotoIf(IsFixedArrayMap(elements_map), &fast_elements);
+  {
+    Comment("fast double elements path");
+    if (FLAG_debug_code) {
+      // Debug-only sanity check: at this point the elements must be a
+      // FixedDoubleArray; abort otherwise.
+      Label correct_elements_map(this), abort(this, Label::kDeferred);
+      Branch(IsFixedDoubleArrayMap(elements_map), &correct_elements_map,
+             &abort);
+
+      Bind(&abort);
+      {
+        Node* abort_id = SmiConstant(
+            Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
+        CallRuntime(Runtime::kAbort, context, abort_id);
+        result.Bind(UndefinedConstant());
+        Goto(&return_result);
+      }
+      Bind(&correct_elements_map);
+    }
+
+    Node* array =
+        NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
+                             allocation_site, capacity, FAST_DOUBLE_ELEMENTS);
+    result.Bind(array);
+    Goto(&return_result);
+  }
+
+  Bind(&fast_elements);
+  {
+    Comment("fast elements path");
+    Node* array =
+        NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
+                             allocation_site, capacity, FAST_ELEMENTS);
+    result.Bind(array);
+    Goto(&return_result);
+  }
+
+  Variable length(this, MachineRepresentation::kTagged),
+      elements(this, MachineRepresentation::kTagged);
+  Label allocate_without_elements(this);
+
+  Bind(&cow_elements);
+  {
+    Comment("fixed cow path");
+    // COW elements can be shared directly without copying.
+    length.Bind(LoadJSArrayLength(boilerplate));
+    elements.Bind(boilerplate_elements);
+
+    Goto(&allocate_without_elements);
+  }
+
+  Bind(&zero_capacity);
+  {
+    Comment("zero capacity path");
+    length.Bind(zero);
+    elements.Bind(LoadRoot(Heap::kEmptyFixedArrayRootIndex));
+
+    Goto(&allocate_without_elements);
+  }
+
+  Bind(&allocate_without_elements);
+  {
+    Node* array = AllocateUninitializedJSArrayWithoutElements(
+        FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
+    StoreObjectField(array, JSObject::kElementsOffset, elements.value());
+    result.Bind(array);
+    Goto(&return_result);
+  }
+
+  Bind(&return_result);
+  return result.value();
+}
+
+// Emits the body of a FastCloneShallowArray builtin: tries the fast clone
+// and falls back to Runtime::kCreateArrayLiteral with flags matching the
+// requested allocation-site tracking mode.
+void ConstructorBuiltinsAssembler::CreateFastCloneShallowArrayBuiltin(
+    AllocationSiteMode allocation_site_mode) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+
+  Node* closure = Parameter(FastCloneShallowArrayDescriptor::kClosure);
+  Node* literal_index =
+      Parameter(FastCloneShallowArrayDescriptor::kLiteralIndex);
+  Node* constant_elements =
+      Parameter(FastCloneShallowArrayDescriptor::kConstantElements);
+  Node* context = Parameter(FastCloneShallowArrayDescriptor::kContext);
+  Label call_runtime(this, Label::kDeferred);
+  Return(EmitFastCloneShallowArray(closure, literal_index, context,
+                                   &call_runtime, allocation_site_mode));
+
+  Bind(&call_runtime);
+  {
+    Comment("call runtime");
+    // kDisableMementos is set when the caller does not track allocation
+    // sites, so the runtime skips memento creation too.
+    Node* flags =
+        SmiConstant(Smi::FromInt(ArrayLiteral::kShallowElements |
+                                 (allocation_site_mode == TRACK_ALLOCATION_SITE
+                                      ? 0
+                                      : ArrayLiteral::kDisableMementos)));
+    Return(CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
+                       literal_index, constant_elements, flags));
+  }
+}
+
+// Builtin entry point: shallow array clone with allocation-site tracking.
+TF_BUILTIN(FastCloneShallowArrayTrack, ConstructorBuiltinsAssembler) {
+  CreateFastCloneShallowArrayBuiltin(TRACK_ALLOCATION_SITE);
+}
+
+// Builtin entry point: shallow array clone without allocation-site tracking.
+TF_BUILTIN(FastCloneShallowArrayDontTrack, ConstructorBuiltinsAssembler) {
+  CreateFastCloneShallowArrayBuiltin(DONT_TRACK_ALLOCATION_SITE);
+}
+
+// Maps an AllocationSiteMode to the matching FastCloneShallowArray builtin
+// code object.
+Handle<Code> Builtins::NewCloneShallowArray(
+    AllocationSiteMode allocation_mode) {
+  switch (allocation_mode) {
+    case TRACK_ALLOCATION_SITE:
+      return FastCloneShallowArrayTrack();
+    case DONT_TRACK_ALLOCATION_SITE:
+      return FastCloneShallowArrayDontTrack();
+    default:
+      UNREACHABLE();
+  }
+  // UNREACHABLE() aborts; this return only silences compilers that require
+  // a return statement on all paths.
+  return Handle<Code>::null();
+}
+
+// static
+// Translates a literal's declared property count into the number of
+// in-object properties the clone should be allocated with.
+int ConstructorBuiltinsAssembler::FastCloneShallowObjectPropertiesCount(
+    int literal_length) {
+  // This heuristic of setting empty literals to have
+  // kInitialGlobalObjectUnusedPropertiesCount must remain in-sync with the
+  // runtime.
+  // TODO(verwaest): Unify this with the heuristic in the runtime.
+  return literal_length == 0
+             ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
+             : literal_length;
+}
+
+// Attempts an in-assembler shallow clone of the object boilerplate
+// referenced by the closure's literals array. Jumps to |call_runtime| if no
+// allocation site exists yet or the boilerplate's instance size doesn't
+// match the |properties_count| this stub was specialized for. When
+// allocation-site pretenuring is enabled, an AllocationMemento is appended
+// right after the copy and the site's create count is bumped.
+Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
+    CodeAssemblerLabel* call_runtime, Node* closure, Node* literals_index,
+    Node* properties_count) {
+  Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* allocation_site =
+      LoadFixedArrayElement(literals_array, literals_index,
+                            LiteralsArray::kFirstLiteralIndex * kPointerSize,
+                            CodeStubAssembler::SMI_PARAMETERS);
+  GotoIf(IsUndefined(allocation_site), call_runtime);
+
+  // Calculate the object and allocation size based on the properties count.
+  Node* object_size = IntPtrAdd(WordShl(properties_count, kPointerSizeLog2),
+                                IntPtrConstant(JSObject::kHeaderSize));
+  Node* allocation_size = object_size;
+  if (FLAG_allocation_site_pretenuring) {
+    allocation_size =
+        IntPtrAdd(object_size, IntPtrConstant(AllocationMemento::kSize));
+  }
+  Node* boilerplate =
+      LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+  Node* boilerplate_map = LoadMap(boilerplate);
+  Node* instance_size = LoadMapInstanceSize(boilerplate_map);
+  Node* size_in_words = WordShr(object_size, kPointerSizeLog2);
+  // Bail out if the boilerplate's size doesn't match this specialization.
+  GotoUnless(WordEqual(instance_size, size_in_words), call_runtime);
+
+  Node* copy = Allocate(allocation_size);
+
+  // Copy boilerplate elements.
+  // offset starts at -kHeapObjectTag so that tagged-pointer loads/stores
+  // land on word boundaries of the untagged object.
+  Variable offset(this, MachineType::PointerRepresentation());
+  offset.Bind(IntPtrConstant(-kHeapObjectTag));
+  Node* end_offset = IntPtrAdd(object_size, offset.value());
+  Label loop_body(this, &offset), loop_check(this, &offset);
+  // We should always have an object size greater than zero.
+  Goto(&loop_body);
+  Bind(&loop_body);
+  {
+    // The Allocate above guarantees that the copy lies in new space. This
+    // allows us to skip write barriers. This is necessary since we may also be
+    // copying unboxed doubles.
+    Node* field = Load(MachineType::IntPtr(), boilerplate, offset.value())
+    StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
+                        offset.value(), field);
+    Goto(&loop_check);
+  }
+  Bind(&loop_check);
+  {
+    offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
+    GotoUnless(IntPtrGreaterThanOrEqual(offset.value(), end_offset),
+               &loop_body);
+  }
+
+  if (FLAG_allocation_site_pretenuring) {
+    // Attach an AllocationMemento directly after the object and record one
+    // more creation on the allocation site.
+    Node* memento = InnerAllocate(copy, object_size);
+    StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex);
+    StoreObjectFieldNoWriteBarrier(
+        memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
+    Node* memento_create_count = LoadObjectField(
+        allocation_site, AllocationSite::kPretenureCreateCountOffset);
+    memento_create_count =
+        SmiAdd(memento_create_count, SmiConstant(Smi::FromInt(1)));
+    StoreObjectFieldNoWriteBarrier(allocation_site,
+                                   AllocationSite::kPretenureCreateCountOffset,
+                                   memento_create_count);
+  }
+
+  // TODO(verwaest): Allocate and fill in double boxes.
+  return copy;
+}
+
+// Emits a FastCloneShallowObject builtin specialized for |properties_count|
+// in-object properties, with a tail-call fallback to
+// Runtime::kCreateObjectLiteral.
+void ConstructorBuiltinsAssembler::CreateFastCloneShallowObjectBuiltin(
+    int properties_count) {
+  DCHECK_GE(properties_count, 0);
+  DCHECK_LE(properties_count, kMaximumClonedShallowObjectProperties);
+  Label call_runtime(this);
+  // NOTE(review): raw Parameter indices — presumably (closure, literal
+  // index, constant properties, flags, context); confirm against the
+  // builtin's call descriptor.
+  Node* closure = Parameter(0);
+  Node* literals_index = Parameter(1);
+
+  Node* properties_count_node =
+      IntPtrConstant(FastCloneShallowObjectPropertiesCount(properties_count));
+  Node* copy = EmitFastCloneShallowObject(
+      &call_runtime, closure, literals_index, properties_count_node);
+  Return(copy);
+
+  Bind(&call_runtime);
+  Node* constant_properties = Parameter(2);
+  Node* flags = Parameter(3);
+  Node* context = Parameter(4);
+  TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+                  literals_index, constant_properties, flags);
+}
+
+// Stamps out one FastCloneShallowObject builtin per supported property
+// count (0..kMaximumClonedShallowObjectProperties).
+#define SHALLOW_OBJECT_BUILTIN(props)                                       \
+  TF_BUILTIN(FastCloneShallowObject##props, ConstructorBuiltinsAssembler) { \
+    CreateFastCloneShallowObjectBuiltin(props);                             \
+  }
+
+SHALLOW_OBJECT_BUILTIN(0);
+SHALLOW_OBJECT_BUILTIN(1);
+SHALLOW_OBJECT_BUILTIN(2);
+SHALLOW_OBJECT_BUILTIN(3);
+SHALLOW_OBJECT_BUILTIN(4);
+SHALLOW_OBJECT_BUILTIN(5);
+SHALLOW_OBJECT_BUILTIN(6);
+
+// Maps a property count (0..6, i.e. up to
+// kMaximumClonedShallowObjectProperties) to the specialized
+// FastCloneShallowObject builtin code object.
+Handle<Code> Builtins::NewCloneShallowObject(int length) {
+  switch (length) {
+    case 0:
+      return FastCloneShallowObject0();
+    case 1:
+      return FastCloneShallowObject1();
+    case 2:
+      return FastCloneShallowObject2();
+    case 3:
+      return FastCloneShallowObject3();
+    case 4:
+      return FastCloneShallowObject4();
+    case 5:
+      return FastCloneShallowObject5();
+    case 6:
+      return FastCloneShallowObject6();
+    default:
+      UNREACHABLE();
+  }
+  // UNREACHABLE() aborts; this return only silences compilers that require
+  // a return statement on all paths.
+  return Handle<Code>::null();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-constructor.h b/deps/v8/src/builtins/builtins-constructor.h
new file mode 100644
index 0000000000..68629a7bd3
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-constructor.h
@@ -0,0 +1,68 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Guard against multiple inclusion: without this, any TU that includes the
+// header twice would hit redefinition errors for the file-scope typedefs
+// and ConstructorBuiltinsAssembler below.
+// TODO(review): convert to a V8-style #ifndef
+// V8_BUILTINS_BUILTINS_CONSTRUCTOR_H_ guard for project consistency.
+#pragma once
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// NOTE(review): these file-scope typedefs leak into every translation unit
+// that includes this header; consider scoping them inside the class.
+typedef compiler::Node Node;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+
+// CSA helpers for constructor-related builtins: fast-path creation of
+// closures, function contexts and new objects, plus fast shallow cloning of
+// regexp, array and object literals.
+class ConstructorBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit ConstructorBuiltinsAssembler(CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+  Node* EmitFastNewClosure(Node* shared_info, Node* feedback_vector, Node* slot,
+                           Node* context);
+  Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context,
+                                   ScopeType scope_type);
+  static int MaximumFunctionContextSlots();
+
+  Node* EmitFastCloneRegExp(Node* closure, Node* literal_index, Node* pattern,
+                            Node* flags, Node* context);
+  // Jumps to |call_runtime| when the literal has no boilerplate yet.
+  Node* EmitFastCloneShallowArray(Node* closure, Node* literal_index,
+                                  Node* context,
+                                  CodeAssemblerLabel* call_runtime,
+                                  AllocationSiteMode allocation_site_mode);
+
+  // Maximum number of elements in copied array (chosen so that even an array
+  // backed by a double backing store will fit into new-space).
+  static const int kMaximumClonedShallowArrayElements =
+      JSArray::kInitialMaxFastElementArray * kPointerSize / kDoubleSize;
+
+  void CreateFastCloneShallowArrayBuiltin(
+      AllocationSiteMode allocation_site_mode);
+
+  // Maximum number of properties in copied objects.
+  static const int kMaximumClonedShallowObjectProperties = 6;
+  static int FastCloneShallowObjectPropertiesCount(int literal_length);
+  Node* EmitFastCloneShallowObject(CodeAssemblerLabel* call_runtime,
+                                   Node* closure, Node* literals_index,
+                                   Node* properties_count);
+  void CreateFastCloneShallowObjectBuiltin(int properties_count);
+
+  Node* EmitFastNewObject(Node* context, Node* target, Node* new_target);
+
+  // Overload taking an explicit runtime-fallback label.
+  Node* EmitFastNewObject(Node* context, Node* target, Node* new_target,
+                          CodeAssemblerLabel* call_runtime);
+
+ private:
+  static const int kMaximumSlots = 0x8000;
+  static const int kSmallMaximumSlots = 10;
+
+  Node* NonEmptyShallowClone(Node* boilerplate, Node* boilerplate_map,
+                             Node* boilerplate_elements, Node* allocation_site,
+                             Node* capacity, ElementsKind kind);
+
+  // FastNewFunctionContext can only allocate closures which fit in the
+  // new space.
+  STATIC_ASSERT(((kMaximumSlots + Context::MIN_CONTEXT_SLOTS) * kPointerSize +
+                 FixedArray::kHeaderSize) < kMaxRegularHeapObjectSize);
+};
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-conversion.cc b/deps/v8/src/builtins/builtins-conversion.cc
index 0eaf79ca23..177b739c4b 100644
--- a/deps/v8/src/builtins/builtins-conversion.cc
+++ b/deps/v8/src/builtins/builtins-conversion.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -95,110 +96,116 @@ void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
} // anonymous namespace
void Builtins::Generate_NonPrimitiveToPrimitive_Default(
- CodeStubAssembler* assembler) {
- Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kDefault);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_NonPrimitiveToPrimitive(&assembler, ToPrimitiveHint::kDefault);
}
void Builtins::Generate_NonPrimitiveToPrimitive_Number(
- CodeStubAssembler* assembler) {
- Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kNumber);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_NonPrimitiveToPrimitive(&assembler, ToPrimitiveHint::kNumber);
}
void Builtins::Generate_NonPrimitiveToPrimitive_String(
- CodeStubAssembler* assembler) {
- Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kString);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_NonPrimitiveToPrimitive(&assembler, ToPrimitiveHint::kString);
}
-void Builtins::Generate_StringToNumber(CodeStubAssembler* assembler) {
+void Builtins::Generate_StringToNumber(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->StringToNumber(context, input));
+ assembler.Return(assembler.StringToNumber(context, input));
}
-void Builtins::Generate_ToName(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToName(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->ToName(context, input));
+ assembler.Return(assembler.ToName(context, input));
}
// static
-void Builtins::Generate_NonNumberToNumber(CodeStubAssembler* assembler) {
+void Builtins::Generate_NonNumberToNumber(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->NonNumberToNumber(context, input));
+ assembler.Return(assembler.NonNumberToNumber(context, input));
}
// ES6 section 7.1.3 ToNumber ( argument )
-void Builtins::Generate_ToNumber(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToNumber(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->ToNumber(context, input));
+ assembler.Return(assembler.ToNumber(context, input));
}
-void Builtins::Generate_ToString(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToString(compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label is_number(assembler);
- Label runtime(assembler);
+ Label is_number(&assembler);
+ Label runtime(&assembler);
- assembler->GotoIf(assembler->TaggedIsSmi(input), &is_number);
+ assembler.GotoIf(assembler.TaggedIsSmi(input), &is_number);
- Node* input_map = assembler->LoadMap(input);
- Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
+ Node* input_map = assembler.LoadMap(input);
+ Node* input_instance_type = assembler.LoadMapInstanceType(input_map);
- Label not_string(assembler);
- assembler->GotoUnless(assembler->IsStringInstanceType(input_instance_type),
- &not_string);
- assembler->Return(input);
+ Label not_string(&assembler);
+ assembler.GotoUnless(assembler.IsStringInstanceType(input_instance_type),
+ &not_string);
+ assembler.Return(input);
- Label not_heap_number(assembler);
+ Label not_heap_number(&assembler);
- assembler->Bind(&not_string);
+ assembler.Bind(&not_string);
{
- assembler->GotoUnless(
- assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
- &not_heap_number);
- assembler->Goto(&is_number);
+ assembler.GotoUnless(assembler.IsHeapNumberMap(input_map),
+ &not_heap_number);
+ assembler.Goto(&is_number);
}
- assembler->Bind(&is_number);
- { assembler->Return(assembler->NumberToString(context, input)); }
+ assembler.Bind(&is_number);
+ { assembler.Return(assembler.NumberToString(context, input)); }
- assembler->Bind(&not_heap_number);
+ assembler.Bind(&not_heap_number);
{
- assembler->GotoIf(
- assembler->Word32NotEqual(input_instance_type,
- assembler->Int32Constant(ODDBALL_TYPE)),
+ assembler.GotoIf(
+ assembler.Word32NotEqual(input_instance_type,
+ assembler.Int32Constant(ODDBALL_TYPE)),
&runtime);
- assembler->Return(
- assembler->LoadObjectField(input, Oddball::kToStringOffset));
+ assembler.Return(
+ assembler.LoadObjectField(input, Oddball::kToStringOffset));
}
- assembler->Bind(&runtime);
+ assembler.Bind(&runtime);
{
- assembler->Return(
- assembler->CallRuntime(Runtime::kToString, context, input));
+ assembler.Return(assembler.CallRuntime(Runtime::kToString, context, input));
}
}
@@ -283,194 +290,200 @@ void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
} // anonymous namespace
void Builtins::Generate_OrdinaryToPrimitive_Number(
- CodeStubAssembler* assembler) {
- Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kNumber);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_OrdinaryToPrimitive(&assembler, OrdinaryToPrimitiveHint::kNumber);
}
void Builtins::Generate_OrdinaryToPrimitive_String(
- CodeStubAssembler* assembler) {
- Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kString);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_OrdinaryToPrimitive(&assembler, OrdinaryToPrimitiveHint::kString);
}
// ES6 section 7.1.2 ToBoolean ( argument )
-void Builtins::Generate_ToBoolean(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToBoolean(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* value = assembler->Parameter(Descriptor::kArgument);
+ Node* value = assembler.Parameter(Descriptor::kArgument);
- Label return_true(assembler), return_false(assembler);
- assembler->BranchIfToBooleanIsTrue(value, &return_true, &return_false);
+ Label return_true(&assembler), return_false(&assembler);
+ assembler.BranchIfToBooleanIsTrue(value, &return_true, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
}
-void Builtins::Generate_ToLength(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToLength(compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* context = assembler->Parameter(1);
+ Node* context = assembler.Parameter(1);
// We might need to loop once for ToNumber conversion.
- Variable var_len(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_len);
- var_len.Bind(assembler->Parameter(0));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_len(&assembler, MachineRepresentation::kTagged);
+ Label loop(&assembler, &var_len);
+ var_len.Bind(assembler.Parameter(0));
+ assembler.Goto(&loop);
+ assembler.Bind(&loop);
{
// Shared entry points.
- Label return_len(assembler),
- return_two53minus1(assembler, Label::kDeferred),
- return_zero(assembler, Label::kDeferred);
+ Label return_len(&assembler),
+ return_two53minus1(&assembler, Label::kDeferred),
+ return_zero(&assembler, Label::kDeferred);
// Load the current {len} value.
Node* len = var_len.value();
// Check if {len} is a positive Smi.
- assembler->GotoIf(assembler->WordIsPositiveSmi(len), &return_len);
+ assembler.GotoIf(assembler.TaggedIsPositiveSmi(len), &return_len);
// Check if {len} is a (negative) Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(len), &return_zero);
+ assembler.GotoIf(assembler.TaggedIsSmi(len), &return_zero);
// Check if {len} is a HeapNumber.
- Label if_lenisheapnumber(assembler),
- if_lenisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(assembler->LoadMap(len)),
- &if_lenisheapnumber, &if_lenisnotheapnumber);
+ Label if_lenisheapnumber(&assembler),
+ if_lenisnotheapnumber(&assembler, Label::kDeferred);
+ assembler.Branch(assembler.IsHeapNumberMap(assembler.LoadMap(len)),
+ &if_lenisheapnumber, &if_lenisnotheapnumber);
- assembler->Bind(&if_lenisheapnumber);
+ assembler.Bind(&if_lenisheapnumber);
{
// Load the floating-point value of {len}.
- Node* len_value = assembler->LoadHeapNumberValue(len);
+ Node* len_value = assembler.LoadHeapNumberValue(len);
// Check if {len} is not greater than zero.
- assembler->GotoUnless(assembler->Float64GreaterThan(
- len_value, assembler->Float64Constant(0.0)),
- &return_zero);
+ assembler.GotoUnless(assembler.Float64GreaterThan(
+ len_value, assembler.Float64Constant(0.0)),
+ &return_zero);
// Check if {len} is greater than or equal to 2^53-1.
- assembler->GotoIf(
- assembler->Float64GreaterThanOrEqual(
- len_value, assembler->Float64Constant(kMaxSafeInteger)),
+ assembler.GotoIf(
+ assembler.Float64GreaterThanOrEqual(
+ len_value, assembler.Float64Constant(kMaxSafeInteger)),
&return_two53minus1);
// Round the {len} towards -Infinity.
- Node* value = assembler->Float64Floor(len_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Node* value = assembler.Float64Floor(len_value);
+ Node* result = assembler.ChangeFloat64ToTagged(value);
+ assembler.Return(result);
}
- assembler->Bind(&if_lenisnotheapnumber);
+ assembler.Bind(&if_lenisnotheapnumber);
{
// Need to convert {len} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
- var_len.Bind(assembler->CallStub(callable, context, len));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(assembler.isolate());
+ var_len.Bind(assembler.CallStub(callable, context, len));
+ assembler.Goto(&loop);
}
- assembler->Bind(&return_len);
- assembler->Return(var_len.value());
+ assembler.Bind(&return_len);
+ assembler.Return(var_len.value());
- assembler->Bind(&return_two53minus1);
- assembler->Return(assembler->NumberConstant(kMaxSafeInteger));
+ assembler.Bind(&return_two53minus1);
+ assembler.Return(assembler.NumberConstant(kMaxSafeInteger));
- assembler->Bind(&return_zero);
- assembler->Return(assembler->SmiConstant(Smi::kZero));
+ assembler.Bind(&return_zero);
+ assembler.Return(assembler.SmiConstant(Smi::kZero));
}
}
-void Builtins::Generate_ToInteger(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToInteger(compiler::CodeAssemblerState* state) {
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- compiler::Node* input = assembler->Parameter(Descriptor::kArgument);
- compiler::Node* context = assembler->Parameter(Descriptor::kContext);
+ compiler::Node* input = assembler.Parameter(Descriptor::kArgument);
+ compiler::Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->ToInteger(context, input));
+ assembler.Return(assembler.ToInteger(context, input));
}
// ES6 section 7.1.13 ToObject (argument)
-void Builtins::Generate_ToObject(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToObject(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Label if_number(assembler, Label::kDeferred), if_notsmi(assembler),
- if_jsreceiver(assembler), if_noconstructor(assembler, Label::kDeferred),
- if_wrapjsvalue(assembler);
+ Label if_number(&assembler, Label::kDeferred), if_notsmi(&assembler),
+ if_jsreceiver(&assembler), if_noconstructor(&assembler, Label::kDeferred),
+ if_wrapjsvalue(&assembler);
- Node* object = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* object = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Variable constructor_function_index_var(assembler,
+ Variable constructor_function_index_var(&assembler,
MachineType::PointerRepresentation());
- assembler->Branch(assembler->TaggedIsSmi(object), &if_number, &if_notsmi);
+ assembler.Branch(assembler.TaggedIsSmi(object), &if_number, &if_notsmi);
- assembler->Bind(&if_notsmi);
- Node* map = assembler->LoadMap(object);
+ assembler.Bind(&if_notsmi);
+ Node* map = assembler.LoadMap(object);
- assembler->GotoIf(assembler->IsHeapNumberMap(map), &if_number);
+ assembler.GotoIf(assembler.IsHeapNumberMap(map), &if_number);
- Node* instance_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
- &if_jsreceiver);
+ Node* instance_type = assembler.LoadMapInstanceType(map);
+ assembler.GotoIf(assembler.IsJSReceiverInstanceType(instance_type),
+ &if_jsreceiver);
Node* constructor_function_index =
- assembler->LoadMapConstructorFunctionIndex(map);
- assembler->GotoIf(assembler->WordEqual(constructor_function_index,
- assembler->IntPtrConstant(
- Map::kNoConstructorFunctionIndex)),
- &if_noconstructor);
+ assembler.LoadMapConstructorFunctionIndex(map);
+ assembler.GotoIf(assembler.WordEqual(constructor_function_index,
+ assembler.IntPtrConstant(
+ Map::kNoConstructorFunctionIndex)),
+ &if_noconstructor);
constructor_function_index_var.Bind(constructor_function_index);
- assembler->Goto(&if_wrapjsvalue);
+ assembler.Goto(&if_wrapjsvalue);
- assembler->Bind(&if_number);
+ assembler.Bind(&if_number);
constructor_function_index_var.Bind(
- assembler->IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
- assembler->Goto(&if_wrapjsvalue);
-
- assembler->Bind(&if_wrapjsvalue);
- Node* native_context = assembler->LoadNativeContext(context);
- Node* constructor = assembler->LoadFixedArrayElement(
- native_context, constructor_function_index_var.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- Node* initial_map = assembler->LoadObjectField(
+ assembler.IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
+ assembler.Goto(&if_wrapjsvalue);
+
+ assembler.Bind(&if_wrapjsvalue);
+ Node* native_context = assembler.LoadNativeContext(context);
+ Node* constructor = assembler.LoadFixedArrayElement(
+ native_context, constructor_function_index_var.value());
+ Node* initial_map = assembler.LoadObjectField(
constructor, JSFunction::kPrototypeOrInitialMapOffset);
- Node* js_value = assembler->Allocate(JSValue::kSize);
- assembler->StoreMapNoWriteBarrier(js_value, initial_map);
- assembler->StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectField(js_value, JSValue::kValueOffset, object);
- assembler->Return(js_value);
-
- assembler->Bind(&if_noconstructor);
- assembler->TailCallRuntime(
+ Node* js_value = assembler.Allocate(JSValue::kSize);
+ assembler.StoreMapNoWriteBarrier(js_value, initial_map);
+ assembler.StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectField(js_value, JSValue::kValueOffset, object);
+ assembler.Return(js_value);
+
+ assembler.Bind(&if_noconstructor);
+ assembler.TailCallRuntime(
Runtime::kThrowUndefinedOrNullToObject, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
- "ToObject", TENURED)));
+ assembler.HeapConstant(
+ assembler.factory()->NewStringFromAsciiChecked("ToObject", TENURED)));
- assembler->Bind(&if_jsreceiver);
- assembler->Return(object);
+ assembler.Bind(&if_jsreceiver);
+ assembler.Return(object);
}
// ES6 section 12.5.5 typeof operator
-void Builtins::Generate_Typeof(CodeStubAssembler* assembler) {
+void Builtins::Generate_Typeof(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeofDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* object = assembler.Parameter(Descriptor::kObject);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->Typeof(object, context));
+ assembler.Return(assembler.Typeof(object, context));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 949620b6b2..df74321093 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/dateparser-inl.h"
namespace v8 {
@@ -209,7 +210,7 @@ BUILTIN(DateConstructor_ConstructStub) {
if (argc == 0) {
time_val = JSDate::CurrentTimeValue(isolate);
} else if (argc == 1) {
- Handle<Object> value = args.at<Object>(1);
+ Handle<Object> value = args.at(1);
if (value->IsJSDate()) {
time_val = Handle<JSDate>::cast(value)->value()->Number();
} else {
@@ -226,37 +227,37 @@ BUILTIN(DateConstructor_ConstructStub) {
} else {
Handle<Object> year_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at<Object>(1)));
+ Object::ToNumber(args.at(1)));
Handle<Object> month_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at<Object>(2)));
+ Object::ToNumber(args.at(2)));
double year = year_object->Number();
double month = month_object->Number();
double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
if (argc >= 3) {
Handle<Object> date_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
- Object::ToNumber(args.at<Object>(3)));
+ Object::ToNumber(args.at(3)));
date = date_object->Number();
if (argc >= 4) {
Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
+ Object::ToNumber(args.at(4)));
hours = hours_object->Number();
if (argc >= 5) {
Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
+ Object::ToNumber(args.at(5)));
minutes = minutes_object->Number();
if (argc >= 6) {
Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, seconds_object, Object::ToNumber(args.at<Object>(6)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
+ Object::ToNumber(args.at(6)));
seconds = seconds_object->Number();
if (argc >= 7) {
Handle<Object> ms_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms_object,
+ Object::ToNumber(args.at(7)));
ms = ms_object->Number();
}
}
@@ -306,38 +307,37 @@ BUILTIN(DateUTC) {
if (argc >= 1) {
Handle<Object> year_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at<Object>(1)));
+ Object::ToNumber(args.at(1)));
year = year_object->Number();
if (argc >= 2) {
Handle<Object> month_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at<Object>(2)));
+ Object::ToNumber(args.at(2)));
month = month_object->Number();
if (argc >= 3) {
Handle<Object> date_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, date_object, Object::ToNumber(args.at<Object>(3)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+ Object::ToNumber(args.at(3)));
date = date_object->Number();
if (argc >= 4) {
Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
+ Object::ToNumber(args.at(4)));
hours = hours_object->Number();
if (argc >= 5) {
Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
+ Object::ToNumber(args.at(5)));
minutes = minutes_object->Number();
if (argc >= 6) {
Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, seconds_object,
- Object::ToNumber(args.at<Object>(6)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
+ Object::ToNumber(args.at(6)));
seconds = seconds_object->Number();
if (argc >= 7) {
Handle<Object> ms_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ isolate, ms_object, Object::ToNumber(args.at(7)));
ms = ms_object->Number();
}
}
@@ -394,11 +394,11 @@ BUILTIN(DatePrototypeSetFullYear) {
dt = day;
}
if (argc >= 2) {
- Handle<Object> month = args.at<Object>(2);
+ Handle<Object> month = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
m = month->Number();
if (argc >= 3) {
- Handle<Object> date = args.at<Object>(3);
+ Handle<Object> date = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
dt = date->Number();
}
@@ -425,15 +425,15 @@ BUILTIN(DatePrototypeSetHours) {
double s = (time_within_day / 1000) % 60;
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> min = args.at<Object>(2);
+ Handle<Object> min = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
m = min->Number();
if (argc >= 3) {
- Handle<Object> sec = args.at<Object>(3);
+ Handle<Object> sec = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
s = sec->Number();
if (argc >= 4) {
- Handle<Object> ms = args.at<Object>(4);
+ Handle<Object> ms = args.at(4);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -482,11 +482,11 @@ BUILTIN(DatePrototypeSetMinutes) {
double s = (time_within_day / 1000) % 60;
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> sec = args.at<Object>(2);
+ Handle<Object> sec = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
s = sec->Number();
if (argc >= 3) {
- Handle<Object> ms = args.at<Object>(3);
+ Handle<Object> ms = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -514,7 +514,7 @@ BUILTIN(DatePrototypeSetMonth) {
double m = month->Number();
double dt = day;
if (argc >= 2) {
- Handle<Object> date = args.at<Object>(2);
+ Handle<Object> date = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
dt = date->Number();
}
@@ -541,7 +541,7 @@ BUILTIN(DatePrototypeSetSeconds) {
double s = sec->Number();
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> ms = args.at<Object>(2);
+ Handle<Object> ms = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -595,11 +595,11 @@ BUILTIN(DatePrototypeSetUTCFullYear) {
dt = day;
}
if (argc >= 2) {
- Handle<Object> month = args.at<Object>(2);
+ Handle<Object> month = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
m = month->Number();
if (argc >= 3) {
- Handle<Object> date = args.at<Object>(3);
+ Handle<Object> date = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
dt = date->Number();
}
@@ -625,15 +625,15 @@ BUILTIN(DatePrototypeSetUTCHours) {
double s = (time_within_day / 1000) % 60;
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> min = args.at<Object>(2);
+ Handle<Object> min = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
m = min->Number();
if (argc >= 3) {
- Handle<Object> sec = args.at<Object>(3);
+ Handle<Object> sec = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
s = sec->Number();
if (argc >= 4) {
- Handle<Object> ms = args.at<Object>(4);
+ Handle<Object> ms = args.at(4);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -680,11 +680,11 @@ BUILTIN(DatePrototypeSetUTCMinutes) {
double s = (time_within_day / 1000) % 60;
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> sec = args.at<Object>(2);
+ Handle<Object> sec = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
s = sec->Number();
if (argc >= 3) {
- Handle<Object> ms = args.at<Object>(3);
+ Handle<Object> ms = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -711,7 +711,7 @@ BUILTIN(DatePrototypeSetUTCMonth) {
double m = month->Number();
double dt = day;
if (argc >= 2) {
- Handle<Object> date = args.at<Object>(2);
+ Handle<Object> date = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
dt = date->Number();
}
@@ -737,7 +737,7 @@ BUILTIN(DatePrototypeSetUTCSeconds) {
double s = sec->Number();
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> ms = args.at<Object>(2);
+ Handle<Object> ms = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -825,22 +825,6 @@ BUILTIN(DatePrototypeToUTCString) {
return *isolate->factory()->NewStringFromAsciiChecked(buffer);
}
-// ES6 section 20.3.4.44 Date.prototype.valueOf ( )
-BUILTIN(DatePrototypeValueOf) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.valueOf");
- return date->value();
-}
-
-// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
-BUILTIN(DatePrototypeToPrimitive) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CHECK_RECEIVER(JSReceiver, receiver, "Date.prototype [ @@toPrimitive ]");
- Handle<Object> hint = args.at<Object>(1);
- RETURN_RESULT_OR_FAILURE(isolate, JSDate::ToPrimitive(receiver, hint));
-}
-
// ES6 section B.2.4.1 Date.prototype.getYear ( )
BUILTIN(DatePrototypeGetYear) {
HandleScope scope(isolate);
@@ -908,9 +892,10 @@ BUILTIN(DatePrototypeToJson) {
}
}
-// static
-void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
- int field_index) {
+namespace {
+
+void Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
+ int field_index) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
@@ -952,7 +937,7 @@ void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
Node* function = assembler->ExternalConstant(
ExternalReference::get_date_field_function(assembler->isolate()));
Node* result = assembler->CallCFunction2(
- MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), function, receiver, field_index_smi);
assembler->Return(result);
}
@@ -965,100 +950,223 @@ void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
}
}
+} // namespace
+
// static
-void Builtins::Generate_DatePrototypeGetDate(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kDay);
+void Builtins::Generate_DatePrototypeGetDate(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kDay);
}
// static
-void Builtins::Generate_DatePrototypeGetDay(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kWeekday);
+void Builtins::Generate_DatePrototypeGetDay(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kWeekday);
}
// static
-void Builtins::Generate_DatePrototypeGetFullYear(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kYear);
+void Builtins::Generate_DatePrototypeGetFullYear(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kYear);
}
// static
-void Builtins::Generate_DatePrototypeGetHours(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kHour);
+void Builtins::Generate_DatePrototypeGetHours(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kHour);
}
// static
void Builtins::Generate_DatePrototypeGetMilliseconds(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMillisecond);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMillisecond);
}
// static
-void Builtins::Generate_DatePrototypeGetMinutes(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMinute);
+void Builtins::Generate_DatePrototypeGetMinutes(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMinute);
}
// static
-void Builtins::Generate_DatePrototypeGetMonth(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMonth);
+void Builtins::Generate_DatePrototypeGetMonth(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMonth);
}
// static
-void Builtins::Generate_DatePrototypeGetSeconds(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kSecond);
+void Builtins::Generate_DatePrototypeGetSeconds(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kSecond);
}
// static
-void Builtins::Generate_DatePrototypeGetTime(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kDateValue);
+void Builtins::Generate_DatePrototypeGetTime(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kDateValue);
}
// static
void Builtins::Generate_DatePrototypeGetTimezoneOffset(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kTimezoneOffset);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kTimezoneOffset);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCDate(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kDayUTC);
+void Builtins::Generate_DatePrototypeGetUTCDate(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kDayUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCDay(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kWeekdayUTC);
+void Builtins::Generate_DatePrototypeGetUTCDay(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kWeekdayUTC);
}
// static
void Builtins::Generate_DatePrototypeGetUTCFullYear(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kYearUTC);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kYearUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCHours(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kHourUTC);
+void Builtins::Generate_DatePrototypeGetUTCHours(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kHourUTC);
}
// static
void Builtins::Generate_DatePrototypeGetUTCMilliseconds(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMillisecondUTC);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMillisecondUTC);
}
// static
void Builtins::Generate_DatePrototypeGetUTCMinutes(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMinuteUTC);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMinuteUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCMonth(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMonthUTC);
+void Builtins::Generate_DatePrototypeGetUTCMonth(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMonthUTC);
}
// static
void Builtins::Generate_DatePrototypeGetUTCSeconds(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kSecondUTC);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kSecondUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeValueOf(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kDateValue);
+}
+
+// static
+void Builtins::Generate_DatePrototypeToPrimitive(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler.Parameter(0);
+ Node* hint = assembler.Parameter(1);
+ Node* context = assembler.Parameter(4);
+
+ // Check if the {receiver} is actually a JSReceiver.
+ Label receiver_is_invalid(&assembler, Label::kDeferred);
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &receiver_is_invalid);
+ assembler.GotoUnless(assembler.IsJSReceiver(receiver), &receiver_is_invalid);
+
+ // Dispatch to the appropriate OrdinaryToPrimitive builtin.
+ Label hint_is_number(&assembler), hint_is_string(&assembler),
+ hint_is_invalid(&assembler, Label::kDeferred);
+
+ // Fast cases for internalized strings.
+ Node* number_string = assembler.LoadRoot(Heap::knumber_stringRootIndex);
+ assembler.GotoIf(assembler.WordEqual(hint, number_string), &hint_is_number);
+ Node* default_string = assembler.LoadRoot(Heap::kdefault_stringRootIndex);
+ assembler.GotoIf(assembler.WordEqual(hint, default_string), &hint_is_string);
+ Node* string_string = assembler.LoadRoot(Heap::kstring_stringRootIndex);
+ assembler.GotoIf(assembler.WordEqual(hint, string_string), &hint_is_string);
+
+ // Slow-case with actual string comparisons.
+ Callable string_equal = CodeFactory::StringEqual(assembler.isolate());
+ assembler.GotoIf(assembler.TaggedIsSmi(hint), &hint_is_invalid);
+ assembler.GotoUnless(assembler.IsString(hint), &hint_is_invalid);
+ assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
+ hint, number_string),
+ assembler.TrueConstant()),
+ &hint_is_number);
+ assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
+ hint, default_string),
+ assembler.TrueConstant()),
+ &hint_is_string);
+ assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
+ hint, string_string),
+ assembler.TrueConstant()),
+ &hint_is_string);
+ assembler.Goto(&hint_is_invalid);
+
+ // Use the OrdinaryToPrimitive builtin to convert to a Number.
+ assembler.Bind(&hint_is_number);
+ {
+ Callable callable = CodeFactory::OrdinaryToPrimitive(
+ assembler.isolate(), OrdinaryToPrimitiveHint::kNumber);
+ Node* result = assembler.CallStub(callable, context, receiver);
+ assembler.Return(result);
+ }
+
+ // Use the OrdinaryToPrimitive builtin to convert to a String.
+ assembler.Bind(&hint_is_string);
+ {
+ Callable callable = CodeFactory::OrdinaryToPrimitive(
+ assembler.isolate(), OrdinaryToPrimitiveHint::kString);
+ Node* result = assembler.CallStub(callable, context, receiver);
+ assembler.Return(result);
+ }
+
+ // Raise a TypeError if the {hint} is invalid.
+ assembler.Bind(&hint_is_invalid);
+ {
+ Node* result =
+ assembler.CallRuntime(Runtime::kThrowInvalidHint, context, hint);
+ assembler.Return(result);
+ }
+
+ // Raise a TypeError if the {receiver} is not a JSReceiver instance.
+ assembler.Bind(&receiver_is_invalid);
+ {
+ Node* result = assembler.CallRuntime(
+ Runtime::kThrowIncompatibleMethodReceiver, context,
+ assembler.HeapConstant(assembler.factory()->NewStringFromAsciiChecked(
+ "Date.prototype [ @@toPrimitive ]", TENURED)),
+ receiver);
+ assembler.Return(result);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 9a8ee796b5..818e09a722 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/compiler.h"
#include "src/string-builder.h"
@@ -42,8 +43,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
if (i > 1) builder.AppendCharacter(',');
Handle<String> param;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, param, Object::ToString(isolate, args.at<Object>(i)),
- Object);
+ isolate, param, Object::ToString(isolate, args.at(i)), Object);
param = String::Flatten(param);
builder.AppendString(param);
// If the formal parameters string include ) - an illegal
@@ -61,14 +61,13 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
// If the formal parameters include an unbalanced block comment, the
// function must be rejected. Since JavaScript does not allow nested
// comments we can include a trailing block comment to catch this.
- builder.AppendCString("\n/**/");
+ builder.AppendCString("\n/*``*/");
}
builder.AppendCString(") {\n");
if (argc > 0) {
Handle<String> body;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, body, Object::ToString(isolate, args.at<Object>(argc)),
- Object);
+ isolate, body, Object::ToString(isolate, args.at(argc)), Object);
builder.AppendString(body);
}
builder.AppendCString("\n})");
@@ -179,9 +178,9 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
Handle<Object> this_arg = isolate->factory()->undefined_value();
ScopedVector<Handle<Object>> argv(std::max(0, args.length() - 2));
if (args.length() > 1) {
- this_arg = args.at<Object>(1);
+ this_arg = args.at(1);
for (int i = 2; i < args.length(); ++i) {
- argv[i - 2] = args.at<Object>(i);
+ argv[i - 2] = args.at(i);
}
}
Handle<JSBoundFunction> function;
@@ -255,6 +254,184 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
BUILTIN(FunctionPrototypeBind) { return DoFunctionBind(isolate, args); }
+void Builtins::Generate_FastFunctionPrototypeBind(
+ compiler::CodeAssemblerState* state) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ CodeStubAssembler assembler(state);
+ Label slow(&assembler);
+
+ Node* argc = assembler.Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = assembler.Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = assembler.Parameter(BuiltinDescriptor::kNewTarget);
+
+ CodeStubArguments args(&assembler, argc);
+
+ // Check that receiver has instance type of JS_FUNCTION_TYPE
+ Node* receiver = args.GetReceiver();
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &slow);
+
+ Node* receiver_map = assembler.LoadMap(receiver);
+ Node* instance_type = assembler.LoadMapInstanceType(receiver_map);
+ assembler.GotoIf(
+ assembler.Word32NotEqual(instance_type,
+ assembler.Int32Constant(JS_FUNCTION_TYPE)),
+ &slow);
+
+ // Disallow binding of slow-mode functions. We need to figure out whether the
+ // length and name property are in the original state.
+ assembler.Comment("Disallow binding of slow-mode functions");
+ assembler.GotoIf(assembler.IsDictionaryMap(receiver_map), &slow);
+
+ // Check whether the length and name properties are still present as
+ // AccessorInfo objects. In that case, their value can be recomputed even if
+ // the actual value on the object changes.
+ assembler.Comment("Check descriptor array length");
+ Node* descriptors = assembler.LoadMapDescriptors(receiver_map);
+ Node* descriptors_length = assembler.LoadFixedArrayBaseLength(descriptors);
+ assembler.GotoIf(assembler.SmiLessThanOrEqual(descriptors_length,
+ assembler.SmiConstant(1)),
+ &slow);
+
+ // Check whether the length and name properties are still present as
+ // AccessorInfo objects. In that case, their value can be recomputed even if
+ // the actual value on the object changes.
+ assembler.Comment("Check name and length properties");
+ const int length_index = JSFunction::kLengthDescriptorIndex;
+ Node* maybe_length = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(length_index));
+ assembler.GotoIf(
+ assembler.WordNotEqual(maybe_length,
+ assembler.LoadRoot(Heap::klength_stringRootIndex)),
+ &slow);
+
+ Node* maybe_length_accessor = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(length_index));
+ assembler.GotoIf(assembler.TaggedIsSmi(maybe_length_accessor), &slow);
+ Node* length_value_map = assembler.LoadMap(maybe_length_accessor);
+ assembler.GotoUnless(assembler.IsAccessorInfoMap(length_value_map), &slow);
+
+ const int name_index = JSFunction::kNameDescriptorIndex;
+ Node* maybe_name = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(name_index));
+ assembler.GotoIf(
+ assembler.WordNotEqual(maybe_name,
+ assembler.LoadRoot(Heap::kname_stringRootIndex)),
+ &slow);
+
+ Node* maybe_name_accessor = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(name_index));
+ assembler.GotoIf(assembler.TaggedIsSmi(maybe_name_accessor), &slow);
+ Node* name_value_map = assembler.LoadMap(maybe_name_accessor);
+ assembler.GotoUnless(assembler.IsAccessorInfoMap(name_value_map), &slow);
+
+ // Choose the right bound function map based on whether the target is
+ // constructable.
+ assembler.Comment("Choose the right bound function map");
+ Variable bound_function_map(&assembler, MachineRepresentation::kTagged);
+ Label with_constructor(&assembler);
+ CodeStubAssembler::VariableList vars({&bound_function_map}, assembler.zone());
+ Node* native_context = assembler.LoadNativeContext(context);
+
+ Label map_done(&assembler, vars);
+ Node* bit_field = assembler.LoadMapBitField(receiver_map);
+ int mask = static_cast<int>(1 << Map::kIsConstructor);
+ assembler.GotoIf(assembler.IsSetWord32(bit_field, mask), &with_constructor);
+
+ bound_function_map.Bind(assembler.LoadContextElement(
+ native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
+ assembler.Goto(&map_done);
+
+ assembler.Bind(&with_constructor);
+ bound_function_map.Bind(assembler.LoadContextElement(
+ native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
+ assembler.Goto(&map_done);
+
+ assembler.Bind(&map_done);
+
+ // Verify that __proto__ matches that of a the target bound function.
+ assembler.Comment("Verify that __proto__ matches target bound function");
+ Node* prototype = assembler.LoadMapPrototype(receiver_map);
+ Node* expected_prototype =
+ assembler.LoadMapPrototype(bound_function_map.value());
+ assembler.GotoIf(assembler.WordNotEqual(prototype, expected_prototype),
+ &slow);
+
+ // Allocate the arguments array.
+ assembler.Comment("Allocate the arguments array");
+ Variable argument_array(&assembler, MachineRepresentation::kTagged);
+ Label empty_arguments(&assembler);
+ Label arguments_done(&assembler, &argument_array);
+ assembler.GotoIf(
+ assembler.Uint32LessThanOrEqual(argc, assembler.Int32Constant(1)),
+ &empty_arguments);
+ Node* elements_length = assembler.ChangeUint32ToWord(
+ assembler.Int32Sub(argc, assembler.Int32Constant(1)));
+ Node* elements = assembler.AllocateFixedArray(FAST_ELEMENTS, elements_length);
+ Variable index(&assembler, MachineType::PointerRepresentation());
+ index.Bind(assembler.IntPtrConstant(0));
+ CodeStubAssembler::VariableList foreach_vars({&index}, assembler.zone());
+ args.ForEach(foreach_vars,
+ [&assembler, elements, &index](compiler::Node* arg) {
+ assembler.StoreFixedArrayElement(elements, index.value(), arg);
+ assembler.Increment(index);
+ },
+ assembler.IntPtrConstant(1));
+ argument_array.Bind(elements);
+ assembler.Goto(&arguments_done);
+
+ assembler.Bind(&empty_arguments);
+ argument_array.Bind(assembler.EmptyFixedArrayConstant());
+ assembler.Goto(&arguments_done);
+
+ assembler.Bind(&arguments_done);
+
+ // Determine bound receiver.
+ assembler.Comment("Determine bound receiver");
+ Variable bound_receiver(&assembler, MachineRepresentation::kTagged);
+ Label has_receiver(&assembler);
+ Label receiver_done(&assembler, &bound_receiver);
+ assembler.GotoIf(assembler.Word32NotEqual(argc, assembler.Int32Constant(0)),
+ &has_receiver);
+ bound_receiver.Bind(assembler.UndefinedConstant());
+ assembler.Goto(&receiver_done);
+
+ assembler.Bind(&has_receiver);
+ bound_receiver.Bind(args.AtIndex(0));
+ assembler.Goto(&receiver_done);
+
+ assembler.Bind(&receiver_done);
+
+ // Allocate the resulting bound function.
+ assembler.Comment("Allocate the resulting bound function");
+ Node* bound_function = assembler.Allocate(JSBoundFunction::kSize);
+ assembler.StoreMapNoWriteBarrier(bound_function, bound_function_map.value());
+ assembler.StoreObjectFieldNoWriteBarrier(
+ bound_function, JSBoundFunction::kBoundTargetFunctionOffset, receiver);
+ assembler.StoreObjectFieldNoWriteBarrier(bound_function,
+ JSBoundFunction::kBoundThisOffset,
+ bound_receiver.value());
+ assembler.StoreObjectFieldNoWriteBarrier(
+ bound_function, JSBoundFunction::kBoundArgumentsOffset,
+ argument_array.value());
+ Node* empty_fixed_array = assembler.EmptyFixedArrayConstant();
+ assembler.StoreObjectFieldNoWriteBarrier(
+ bound_function, JSObject::kPropertiesOffset, empty_fixed_array);
+ assembler.StoreObjectFieldNoWriteBarrier(
+ bound_function, JSObject::kElementsOffset, empty_fixed_array);
+
+ args.PopAndReturn(bound_function);
+ assembler.Bind(&slow);
+
+ Node* target = assembler.LoadFromFrame(
+ StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer());
+ assembler.TailCallStub(
+ CodeFactory::FunctionPrototypeBind(assembler.isolate()), context, target,
+ new_target, argc);
+}
+
// TODO(verwaest): This is a temporary helper until the FastFunctionBind stub
// can tailcall to the builtin directly.
RUNTIME_FUNCTION(Runtime_FunctionBind) {
@@ -283,14 +460,15 @@ BUILTIN(FunctionPrototypeToString) {
// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
void Builtins::Generate_FunctionPrototypeHasInstance(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
using compiler::Node;
+ CodeStubAssembler assembler(state);
- Node* f = assembler->Parameter(0);
- Node* v = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* result = assembler->OrdinaryHasInstance(context, f, v);
- assembler->Return(result);
+ Node* f = assembler.Parameter(0);
+ Node* v = assembler.Parameter(1);
+ Node* context = assembler.Parameter(4);
+ Node* result = assembler.OrdinaryHasInstance(context, f, v);
+ assembler.Return(result);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-generator.cc b/deps/v8/src/builtins/builtins-generator.cc
index fe1f2d2304..d22c3cdd64 100644
--- a/deps/v8/src/builtins/builtins-generator.cc
+++ b/deps/v8/src/builtins/builtins-generator.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -65,18 +65,20 @@ void Generate_GeneratorPrototypeResume(
assembler->Bind(&if_receiverisclosed);
{
+ Callable create_iter_result_object =
+ CodeFactory::CreateIterResultObject(assembler->isolate());
+
// The {receiver} is closed already.
Node* result = nullptr;
switch (resume_mode) {
case JSGeneratorObject::kNext:
- result = assembler->CallRuntime(Runtime::kCreateIterResultObject,
- context, assembler->UndefinedConstant(),
- assembler->BooleanConstant(true));
+ result = assembler->CallStub(create_iter_result_object, context,
+ assembler->UndefinedConstant(),
+ assembler->TrueConstant());
break;
case JSGeneratorObject::kReturn:
- result =
- assembler->CallRuntime(Runtime::kCreateIterResultObject, context,
- value, assembler->BooleanConstant(true));
+ result = assembler->CallStub(create_iter_result_object, context, value,
+ assembler->TrueConstant());
break;
case JSGeneratorObject::kThrow:
result = assembler->CallRuntime(Runtime::kThrow, context, value);
@@ -96,20 +98,26 @@ void Generate_GeneratorPrototypeResume(
} // anonymous namespace
// ES6 section 25.3.1.2 Generator.prototype.next ( value )
-void Builtins::Generate_GeneratorPrototypeNext(CodeStubAssembler* assembler) {
- Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kNext,
+void Builtins::Generate_GeneratorPrototypeNext(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_GeneratorPrototypeResume(&assembler, JSGeneratorObject::kNext,
"[Generator].prototype.next");
}
// ES6 section 25.3.1.3 Generator.prototype.return ( value )
-void Builtins::Generate_GeneratorPrototypeReturn(CodeStubAssembler* assembler) {
- Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kReturn,
+void Builtins::Generate_GeneratorPrototypeReturn(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_GeneratorPrototypeResume(&assembler, JSGeneratorObject::kReturn,
"[Generator].prototype.return");
}
// ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
-void Builtins::Generate_GeneratorPrototypeThrow(CodeStubAssembler* assembler) {
- Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kThrow,
+void Builtins::Generate_GeneratorPrototypeThrow(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_GeneratorPrototypeResume(&assembler, JSGeneratorObject::kThrow,
"[Generator].prototype.throw");
}
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index 1fa0967aa9..6c97a0bbad 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/compiler.h"
#include "src/uri.h"
@@ -101,111 +101,110 @@ BUILTIN(GlobalEval) {
}
// ES6 section 18.2.2 isFinite ( number )
-void Builtins::Generate_GlobalIsFinite(CodeStubAssembler* assembler) {
+void Builtins::Generate_GlobalIsFinite(compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* context = assembler->Parameter(4);
+ Node* context = assembler.Parameter(4);
- Label return_true(assembler), return_false(assembler);
+ Label return_true(&assembler), return_false(&assembler);
// We might need to loop once for ToNumber conversion.
- Variable var_num(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_num);
- var_num.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_num(&assembler, MachineRepresentation::kTagged);
+ Label loop(&assembler, &var_num);
+ var_num.Bind(assembler.Parameter(1));
+ assembler.Goto(&loop);
+ assembler.Bind(&loop);
{
// Load the current {num} value.
Node* num = var_num.value();
// Check if {num} is a Smi or a HeapObject.
- assembler->GotoIf(assembler->TaggedIsSmi(num), &return_true);
+ assembler.GotoIf(assembler.TaggedIsSmi(num), &return_true);
// Check if {num} is a HeapNumber.
- Label if_numisheapnumber(assembler),
- if_numisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
- assembler->HeapNumberMapConstant()),
- &if_numisheapnumber, &if_numisnotheapnumber);
+ Label if_numisheapnumber(&assembler),
+ if_numisnotheapnumber(&assembler, Label::kDeferred);
+ assembler.Branch(assembler.IsHeapNumberMap(assembler.LoadMap(num)),
+ &if_numisheapnumber, &if_numisnotheapnumber);
- assembler->Bind(&if_numisheapnumber);
+ assembler.Bind(&if_numisheapnumber);
{
// Check if {num} contains a finite, non-NaN value.
- Node* num_value = assembler->LoadHeapNumberValue(num);
- assembler->BranchIfFloat64IsNaN(
- assembler->Float64Sub(num_value, num_value), &return_false,
- &return_true);
+ Node* num_value = assembler.LoadHeapNumberValue(num);
+ assembler.BranchIfFloat64IsNaN(assembler.Float64Sub(num_value, num_value),
+ &return_false, &return_true);
}
- assembler->Bind(&if_numisnotheapnumber);
+ assembler.Bind(&if_numisnotheapnumber);
{
// Need to convert {num} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
- var_num.Bind(assembler->CallStub(callable, context, num));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(assembler.isolate());
+ var_num.Bind(assembler.CallStub(callable, context, num));
+ assembler.Goto(&loop);
}
}
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
}
// ES6 section 18.2.3 isNaN ( number )
-void Builtins::Generate_GlobalIsNaN(CodeStubAssembler* assembler) {
+void Builtins::Generate_GlobalIsNaN(compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* context = assembler->Parameter(4);
+ Node* context = assembler.Parameter(4);
- Label return_true(assembler), return_false(assembler);
+ Label return_true(&assembler), return_false(&assembler);
// We might need to loop once for ToNumber conversion.
- Variable var_num(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_num);
- var_num.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_num(&assembler, MachineRepresentation::kTagged);
+ Label loop(&assembler, &var_num);
+ var_num.Bind(assembler.Parameter(1));
+ assembler.Goto(&loop);
+ assembler.Bind(&loop);
{
// Load the current {num} value.
Node* num = var_num.value();
// Check if {num} is a Smi or a HeapObject.
- assembler->GotoIf(assembler->TaggedIsSmi(num), &return_false);
+ assembler.GotoIf(assembler.TaggedIsSmi(num), &return_false);
// Check if {num} is a HeapNumber.
- Label if_numisheapnumber(assembler),
- if_numisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
- assembler->HeapNumberMapConstant()),
- &if_numisheapnumber, &if_numisnotheapnumber);
+ Label if_numisheapnumber(&assembler),
+ if_numisnotheapnumber(&assembler, Label::kDeferred);
+ assembler.Branch(assembler.IsHeapNumberMap(assembler.LoadMap(num)),
+ &if_numisheapnumber, &if_numisnotheapnumber);
- assembler->Bind(&if_numisheapnumber);
+ assembler.Bind(&if_numisheapnumber);
{
// Check if {num} contains a NaN.
- Node* num_value = assembler->LoadHeapNumberValue(num);
- assembler->BranchIfFloat64IsNaN(num_value, &return_true, &return_false);
+ Node* num_value = assembler.LoadHeapNumberValue(num);
+ assembler.BranchIfFloat64IsNaN(num_value, &return_true, &return_false);
}
- assembler->Bind(&if_numisnotheapnumber);
+ assembler.Bind(&if_numisnotheapnumber);
{
// Need to convert {num} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
- var_num.Bind(assembler->CallStub(callable, context, num));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(assembler.isolate());
+ var_num.Bind(assembler.CallStub(callable, context, num));
+ assembler.Goto(&loop);
}
}
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-handler.cc b/deps/v8/src/builtins/builtins-handler.cc
index 88597f8add..42b35d0d2f 100644
--- a/deps/v8/src/builtins/builtins-handler.cc
+++ b/deps/v8/src/builtins/builtins-handler.cc
@@ -4,6 +4,8 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/ic/accessor-assembler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/keyed-store-generic.h"
@@ -12,59 +14,46 @@ namespace v8 {
namespace internal {
void Builtins::Generate_KeyedLoadIC_Megamorphic_TF(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler::GenerateKeyedLoadICMegamorphic(state);
+}
+
+void Builtins::Generate_KeyedLoadIC_Miss(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->KeyedLoadICGeneric(&p);
-}
-
-void Builtins::Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm);
-}
-void Builtins::Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
- KeyedLoadIC::GenerateRuntimeGetProperty(masm);
-}
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
-void Builtins::Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
+ assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name,
+ slot, vector);
}
-void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
-}
-
-void KeyedStoreICMegamorphic(CodeStubAssembler* assembler, LanguageMode mode) {
+void Builtins::Generate_KeyedLoadIC_Slow(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
- typedef StoreWithVectorDescriptor Descriptor;
+ typedef LoadWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- KeyedStoreGenericGenerator::Generate(assembler, &p, mode);
+ assembler.TailCallRuntime(Runtime::kKeyedGetProperty, context, receiver,
+ name);
}
void Builtins::Generate_KeyedStoreIC_Megamorphic_TF(
- CodeStubAssembler* assembler) {
- KeyedStoreICMegamorphic(assembler, SLOPPY);
+ compiler::CodeAssemblerState* state) {
+ KeyedStoreGenericGenerator::Generate(state, SLOPPY);
}
void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict_TF(
- CodeStubAssembler* assembler) {
- KeyedStoreICMegamorphic(assembler, STRICT);
+ compiler::CodeAssemblerState* state) {
+ KeyedStoreGenericGenerator::Generate(state, STRICT);
}
void Builtins::Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
@@ -75,80 +64,149 @@ void Builtins::Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
KeyedStoreIC::GenerateSlow(masm);
}
-void Builtins::Generate_LoadGlobalIC_Miss(CodeStubAssembler* assembler) {
+void Builtins::Generate_LoadGlobalIC_Miss(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadGlobalWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, slot,
- vector);
+ assembler.TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name, slot,
+ vector);
}
-void Builtins::Generate_LoadGlobalIC_Slow(CodeStubAssembler* assembler) {
+void Builtins::Generate_LoadGlobalIC_Slow(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadGlobalWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, slot,
- vector);
+ assembler.TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, name);
}
void Builtins::Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
}
-void Builtins::Generate_LoadIC_Miss(CodeStubAssembler* assembler) {
+void Builtins::Generate_LoadIC_Miss(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
- slot, vector);
+ assembler.TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
+ slot, vector);
}
-void Builtins::Generate_LoadIC_Normal(MacroAssembler* masm) {
- LoadIC::GenerateNormal(masm);
-}
+TF_BUILTIN(LoadIC_Normal, CodeStubAssembler) {
+ typedef LoadWithVectorDescriptor Descriptor;
-void Builtins::Generate_LoadIC_Slow(CodeStubAssembler* assembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label slow(this);
+ {
+ Node* properties = LoadProperties(receiver);
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, name, &found,
+ &var_name_index, &slow);
+ Bind(&found);
+ {
+ Variable var_details(this, MachineRepresentation::kWord32);
+ Variable var_value(this, MachineRepresentation::kTagged);
+ LoadPropertyFromNameDictionary(properties, var_name_index.value(),
+ &var_details, &var_value);
+ Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
+ context, receiver, &slow);
+ Return(value);
+ }
+ }
+
+ Bind(&slow);
+ TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+}
+
+void Builtins::Generate_LoadIC_Slow(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+ assembler.TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
}
-void Builtins::Generate_StoreIC_Miss(CodeStubAssembler* assembler) {
+void Builtins::Generate_StoreIC_Miss(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef StoreWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- vector, receiver, name);
+ assembler.TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+ vector, receiver, name);
}
-void Builtins::Generate_StoreIC_Normal(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
+TF_BUILTIN(StoreIC_Normal, CodeStubAssembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label slow(this);
+ {
+ Node* properties = LoadProperties(receiver);
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, name, &found,
+ &var_name_index, &slow);
+ Bind(&found);
+ {
+ const int kNameToDetailsOffset = (NameDictionary::kEntryDetailsIndex -
+ NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ Node* details = LoadFixedArrayElement(properties, var_name_index.value(),
+ kNameToDetailsOffset);
+ // Check that the property is a writable data property (no accessor).
+ const int kTypeAndReadOnlyMask = PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask;
+ STATIC_ASSERT(kData == 0);
+ GotoIf(IsSetSmi(details, kTypeAndReadOnlyMask), &slow);
+ const int kNameToValueOffset =
+ (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ StoreFixedArrayElement(properties, var_name_index.value(), value,
+ UPDATE_WRITE_BARRIER, kNameToValueOffset);
+ Return(value);
+ }
+ }
+
+ Bind(&slow);
+ TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
+ receiver, name);
}
void Builtins::Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
@@ -156,30 +214,33 @@ void Builtins::Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
}
namespace {
-void Generate_StoreIC_Slow(CodeStubAssembler* assembler,
+void Generate_StoreIC_Slow(compiler::CodeAssemblerState* state,
LanguageMode language_mode) {
typedef compiler::Node Node;
typedef StoreWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* lang_mode = assembler->SmiConstant(Smi::FromInt(language_mode));
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* lang_mode = assembler.SmiConstant(Smi::FromInt(language_mode));
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- assembler->TailCallRuntime(Runtime::kSetProperty, context, receiver, name,
- value, lang_mode);
+ assembler.TailCallRuntime(Runtime::kSetProperty, context, receiver, name,
+ value, lang_mode);
}
} // anonymous namespace
-void Builtins::Generate_StoreIC_SlowSloppy(CodeStubAssembler* assembler) {
- Generate_StoreIC_Slow(assembler, SLOPPY);
+void Builtins::Generate_StoreIC_SlowSloppy(
+ compiler::CodeAssemblerState* state) {
+ Generate_StoreIC_Slow(state, SLOPPY);
}
-void Builtins::Generate_StoreIC_SlowStrict(CodeStubAssembler* assembler) {
- Generate_StoreIC_Slow(assembler, STRICT);
+void Builtins::Generate_StoreIC_SlowStrict(
+ compiler::CodeAssemblerState* state) {
+ Generate_StoreIC_Slow(state, STRICT);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-ic.cc b/deps/v8/src/builtins/builtins-ic.cc
new file mode 100644
index 0000000000..398d512dcf
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-ic.cc
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/ic/accessor-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+TF_BUILTIN(LoadIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadIC(state());
+}
+
+TF_BUILTIN(KeyedLoadIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedLoadICTF(state());
+}
+
+TF_BUILTIN(LoadICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadICTrampoline(state());
+}
+
+TF_BUILTIN(KeyedLoadICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedLoadICTrampolineTF(state());
+}
+
+TF_BUILTIN(StoreIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateStoreIC(state());
+}
+
+TF_BUILTIN(StoreICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateStoreICTrampoline(state());
+}
+
+TF_BUILTIN(StoreICStrict, CodeStubAssembler) {
+ AccessorAssembler::GenerateStoreIC(state());
+}
+
+TF_BUILTIN(StoreICStrictTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateStoreICTrampoline(state());
+}
+
+TF_BUILTIN(KeyedStoreIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedStoreICTF(state(), SLOPPY);
+}
+
+TF_BUILTIN(KeyedStoreICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedStoreICTrampolineTF(state(), SLOPPY);
+}
+
+TF_BUILTIN(KeyedStoreICStrict, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedStoreICTF(state(), STRICT);
+}
+
+TF_BUILTIN(KeyedStoreICStrictTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedStoreICTrampolineTF(state(), STRICT);
+}
+
+TF_BUILTIN(LoadGlobalIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadGlobalIC(state(), NOT_INSIDE_TYPEOF);
+}
+
+TF_BUILTIN(LoadGlobalICInsideTypeof, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadGlobalIC(state(), INSIDE_TYPEOF);
+}
+
+TF_BUILTIN(LoadGlobalICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadGlobalICTrampoline(state(), NOT_INSIDE_TYPEOF);
+}
+
+TF_BUILTIN(LoadGlobalICInsideTypeofTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadGlobalICTrampoline(state(), INSIDE_TYPEOF);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc
index bec6ff3645..f94ed0c16f 100644
--- a/deps/v8/src/builtins/builtins-internal.cc
+++ b/deps/v8/src/builtins/builtins-internal.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
#include "src/interface-descriptors.h"
#include "src/macro-assembler.h"
@@ -54,86 +55,263 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) {
// TurboFan support builtins.
void Builtins::Generate_CopyFastSmiOrObjectElements(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef CopyFastSmiOrObjectElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* object = assembler->Parameter(Descriptor::kObject);
+ Node* object = assembler.Parameter(Descriptor::kObject);
// Load the {object}s elements.
- Node* source = assembler->LoadObjectField(object, JSObject::kElementsOffset);
+ Node* source = assembler.LoadObjectField(object, JSObject::kElementsOffset);
- CodeStubAssembler::ParameterMode mode = assembler->OptimalParameterMode();
- Node* length = assembler->UntagParameter(
- assembler->LoadFixedArrayBaseLength(source), mode);
+ CodeStubAssembler::ParameterMode mode = assembler.OptimalParameterMode();
+ Node* length = assembler.TaggedToParameter(
+ assembler.LoadFixedArrayBaseLength(source), mode);
// Check if we can allocate in new space.
ElementsKind kind = FAST_ELEMENTS;
int max_elements = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
- Label if_newspace(assembler), if_oldspace(assembler);
- assembler->Branch(
- assembler->UintPtrLessThan(
- length, assembler->IntPtrOrSmiConstant(max_elements, mode)),
+ Label if_newspace(&assembler), if_oldspace(&assembler);
+ assembler.Branch(
+ assembler.UintPtrOrSmiLessThan(
+ length, assembler.IntPtrOrSmiConstant(max_elements, mode), mode),
&if_newspace, &if_oldspace);
- assembler->Bind(&if_newspace);
+ assembler.Bind(&if_newspace);
{
- Node* target = assembler->AllocateFixedArray(kind, length, mode);
- assembler->CopyFixedArrayElements(kind, source, target, length,
- SKIP_WRITE_BARRIER, mode);
- assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
- assembler->Return(target);
+ Node* target = assembler.AllocateFixedArray(kind, length, mode);
+ assembler.CopyFixedArrayElements(kind, source, target, length,
+ SKIP_WRITE_BARRIER, mode);
+ assembler.StoreObjectField(object, JSObject::kElementsOffset, target);
+ assembler.Return(target);
}
- assembler->Bind(&if_oldspace);
+ assembler.Bind(&if_oldspace);
{
- Node* target = assembler->AllocateFixedArray(
- kind, length, mode, CodeStubAssembler::kPretenured);
- assembler->CopyFixedArrayElements(kind, source, target, length,
- UPDATE_WRITE_BARRIER, mode);
- assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
- assembler->Return(target);
+ Node* target = assembler.AllocateFixedArray(kind, length, mode,
+ CodeStubAssembler::kPretenured);
+ assembler.CopyFixedArrayElements(kind, source, target, length,
+ UPDATE_WRITE_BARRIER, mode);
+ assembler.StoreObjectField(object, JSObject::kElementsOffset, target);
+ assembler.Return(target);
}
}
-void Builtins::Generate_GrowFastDoubleElements(CodeStubAssembler* assembler) {
+void Builtins::Generate_GrowFastDoubleElements(
+ compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef GrowArrayElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* object = assembler.Parameter(Descriptor::kObject);
+ Node* key = assembler.Parameter(Descriptor::kKey);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
- Node* elements = assembler->LoadElements(object);
- elements = assembler->TryGrowElementsCapacity(
+ Label runtime(&assembler, CodeStubAssembler::Label::kDeferred);
+ Node* elements = assembler.LoadElements(object);
+ elements = assembler.TryGrowElementsCapacity(
object, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
- assembler->Return(elements);
+ assembler.Return(elements);
- assembler->Bind(&runtime);
- assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+ assembler.Bind(&runtime);
+ assembler.TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
}
void Builtins::Generate_GrowFastSmiOrObjectElements(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef GrowArrayElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
+
+ Node* object = assembler.Parameter(Descriptor::kObject);
+ Node* key = assembler.Parameter(Descriptor::kKey);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+
+ Label runtime(&assembler, CodeStubAssembler::Label::kDeferred);
+ Node* elements = assembler.LoadElements(object);
+ elements = assembler.TryGrowElementsCapacity(object, elements, FAST_ELEMENTS,
+ key, &runtime);
+ assembler.Return(elements);
+
+ assembler.Bind(&runtime);
+ assembler.TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+}
+
+namespace {
+
+void Generate_NewArgumentsElements(CodeStubAssembler* assembler,
+ compiler::Node* frame,
+ compiler::Node* length) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef compiler::Node Node;
+
+ // Check if we can allocate in new space.
+ ElementsKind kind = FAST_ELEMENTS;
+ int max_elements = FixedArray::GetMaxLengthForNewSpaceAllocation(kind);
+ Label if_newspace(assembler), if_oldspace(assembler, Label::kDeferred);
+ assembler->Branch(assembler->IntPtrLessThan(
+ length, assembler->IntPtrConstant(max_elements)),
+ &if_newspace, &if_oldspace);
+
+ assembler->Bind(&if_newspace);
+ {
+ // Prefer EmptyFixedArray in case of non-positive {length} (the {length}
+ // can be negative here for rest parameters).
+ Label if_empty(assembler), if_notempty(assembler);
+ assembler->Branch(
+ assembler->IntPtrLessThanOrEqual(length, assembler->IntPtrConstant(0)),
+ &if_empty, &if_notempty);
+
+ assembler->Bind(&if_empty);
+ assembler->Return(assembler->EmptyFixedArrayConstant());
+
+ assembler->Bind(&if_notempty);
+ {
+ // Allocate a FixedArray in new space.
+ Node* result = assembler->AllocateFixedArray(kind, length);
+
+ // Compute the effective {offset} into the {frame}.
+ Node* offset = assembler->IntPtrAdd(length, assembler->IntPtrConstant(1));
+
+ // Copy the parameters from {frame} (starting at {offset}) to {result}.
+ Variable var_index(assembler, MachineType::PointerRepresentation());
+ Label loop(assembler, &var_index), done_loop(assembler);
+ var_index.Bind(assembler->IntPtrConstant(0));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {index}.
+ Node* index = var_index.value();
+
+ // Check if we are done.
+ assembler->GotoIf(assembler->WordEqual(index, length), &done_loop);
+
+ // Load the parameter at the given {index}.
+ Node* value = assembler->Load(
+ MachineType::AnyTagged(), frame,
+ assembler->WordShl(assembler->IntPtrSub(offset, index),
+ assembler->IntPtrConstant(kPointerSizeLog2)));
+
+ // Store the {value} into the {result}.
+ assembler->StoreFixedArrayElement(result, index, value,
+ SKIP_WRITE_BARRIER);
+
+ // Continue with next {index}.
+ var_index.Bind(
+ assembler->IntPtrAdd(index, assembler->IntPtrConstant(1)));
+ assembler->Goto(&loop);
+ }
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ assembler->Bind(&done_loop);
+ assembler->Return(result);
+ }
+ }
+
+ assembler->Bind(&if_oldspace);
+ {
+ // Allocate in old space (or large object space).
+ assembler->TailCallRuntime(
+ Runtime::kNewArgumentsElements, assembler->NoContextConstant(),
+ assembler->BitcastWordToTagged(frame), assembler->SmiFromWord(length));
+ }
+}
+
+} // namespace
+
+void Builtins::Generate_NewUnmappedArgumentsElements(
+ compiler::CodeAssemblerState* state) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef compiler::Node Node;
+ typedef NewArgumentsElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
+
+ Node* formal_parameter_count =
+ assembler.Parameter(Descriptor::kFormalParameterCount);
+
+ // Determine the frame that holds the parameters.
+ Label done(&assembler);
+ Variable var_frame(&assembler, MachineType::PointerRepresentation()),
+ var_length(&assembler, MachineType::PointerRepresentation());
+ var_frame.Bind(assembler.LoadParentFramePointer());
+ var_length.Bind(formal_parameter_count);
+ Node* parent_frame = assembler.Load(
+ MachineType::Pointer(), var_frame.value(),
+ assembler.IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
+ Node* parent_frame_type =
+ assembler.Load(MachineType::AnyTagged(), parent_frame,
+ assembler.IntPtrConstant(
+ CommonFrameConstants::kContextOrFrameTypeOffset));
+ assembler.GotoUnless(
+ assembler.WordEqual(
+ parent_frame_type,
+ assembler.SmiConstant(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))),
+ &done);
+ {
+ // Determine the length from the ArgumentsAdaptorFrame.
+ Node* length = assembler.LoadAndUntagSmi(
+ parent_frame, ArgumentsAdaptorFrameConstants::kLengthOffset);
- Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
- Node* elements = assembler->LoadElements(object);
- elements = assembler->TryGrowElementsCapacity(object, elements, FAST_ELEMENTS,
- key, &runtime);
- assembler->Return(elements);
+ // Take the arguments from the ArgumentsAdaptorFrame.
+ var_frame.Bind(parent_frame);
+ var_length.Bind(length);
+ }
+ assembler.Goto(&done);
+
+ // Allocate the actual FixedArray for the elements.
+ assembler.Bind(&done);
+ Generate_NewArgumentsElements(&assembler, var_frame.value(),
+ var_length.value());
+}
+
+void Builtins::Generate_NewRestParameterElements(
+ compiler::CodeAssemblerState* state) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef NewArgumentsElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
+
+ Node* formal_parameter_count =
+ assembler.Parameter(Descriptor::kFormalParameterCount);
+
+ // Check if we have an ArgumentsAdaptorFrame, as we will only have rest
+ // parameters in that case.
+ Label if_empty(&assembler);
+ Node* frame = assembler.Load(
+ MachineType::Pointer(), assembler.LoadParentFramePointer(),
+ assembler.IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
+ Node* frame_type =
+ assembler.Load(MachineType::AnyTagged(), frame,
+ assembler.IntPtrConstant(
+ CommonFrameConstants::kContextOrFrameTypeOffset));
+ assembler.GotoUnless(
+ assembler.WordEqual(frame_type, assembler.SmiConstant(Smi::FromInt(
+ StackFrame::ARGUMENTS_ADAPTOR))),
+ &if_empty);
+
+ // Determine the length from the ArgumentsAdaptorFrame.
+ Node* frame_length = assembler.LoadAndUntagSmi(
+ frame, ArgumentsAdaptorFrameConstants::kLengthOffset);
+
+ // Compute the actual rest parameter length (may be negative).
+ Node* length = assembler.IntPtrSub(frame_length, formal_parameter_count);
+
+ // Allocate the actual FixedArray for the elements.
+ Generate_NewArgumentsElements(&assembler, frame, length);
+
+ // No rest parameters, return an empty FixedArray.
+ assembler.Bind(&if_empty);
+ assembler.Return(assembler.EmptyFixedArrayConstant());
+}
- assembler->Bind(&runtime);
- assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+void Builtins::Generate_ReturnReceiver(compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ assembler.Return(assembler.Parameter(0));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-iterator.cc b/deps/v8/src/builtins/builtins-iterator.cc
deleted file mode 100644
index 7f74c20667..0000000000
--- a/deps/v8/src/builtins/builtins-iterator.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils.h"
-#include "src/builtins/builtins.h"
-#include "src/frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void Builtins::Generate_IteratorPrototypeIterator(
- CodeStubAssembler* assembler) {
- assembler->Return(assembler->Parameter(0));
-}
-
-BUILTIN(ModuleNamespaceIterator) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- Handle<Object> receiver = args.at<Object>(0);
-
- if (!receiver->IsJSModuleNamespace()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->iterator_symbol(), receiver));
- }
- auto ns = Handle<JSModuleNamespace>::cast(receiver);
-
- Handle<FixedArray> names =
- KeyAccumulator::GetKeys(ns, KeyCollectionMode::kOwnOnly, SKIP_SYMBOLS)
- .ToHandleChecked();
- return *isolate->factory()->NewJSFixedArrayIterator(names);
-}
-
-BUILTIN(FixedArrayIteratorNext) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- Handle<Object> receiver = args.at<Object>(0);
-
- // It is an error if this function is called on anything other than the
- // particular iterator object for which the function was created.
- if (!receiver->IsJSFixedArrayIterator() ||
- Handle<JSFixedArrayIterator>::cast(receiver)->initial_next() !=
- *args.target()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->next_string(), receiver));
- }
-
- auto iterator = Handle<JSFixedArrayIterator>::cast(receiver);
- Handle<Object> value;
- bool done;
-
- int index = iterator->index();
- if (index < iterator->array()->length()) {
- value = handle(iterator->array()->get(index), isolate);
- done = false;
- iterator->set_index(index + 1);
- } else {
- value = isolate->factory()->undefined_value();
- done = true;
- }
-
- return *isolate->factory()->NewJSIteratorResult(value, done);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
index 30f12ba12c..1305e73db0 100644
--- a/deps/v8/src/builtins/builtins-math.cc
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -13,332 +13,300 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 20.2.2 Function Properties of the Math Object
-// ES6 section - 20.2.2.1 Math.abs ( x )
-void Builtins::Generate_MathAbs(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+class MathBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit MathBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
- Node* context = assembler->Parameter(4);
+ protected:
+ void MathRoundingOperation(Node* (CodeStubAssembler::*float64op)(Node*));
+ void MathUnaryOperation(Node* (CodeStubAssembler::*float64op)(Node*));
+};
+
+// ES6 section - 20.2.2.1 Math.abs ( x )
+TF_BUILTIN(MathAbs, CodeStubAssembler) {
+ Node* context = Parameter(4);
// We might need to loop once for ToNumber conversion.
- Variable var_x(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_x);
- var_x.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_x(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_x);
+ var_x.Bind(Parameter(1));
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {x} value.
Node* x = var_x.value();
// Check if {x} is a Smi or a HeapObject.
- Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
+ Label if_xissmi(this), if_xisnotsmi(this);
+ Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
- assembler->Bind(&if_xissmi);
+ Bind(&if_xissmi);
{
// Check if {x} is already positive.
- Label if_xispositive(assembler), if_xisnotpositive(assembler);
- assembler->BranchIfSmiLessThanOrEqual(
- assembler->SmiConstant(Smi::FromInt(0)), x, &if_xispositive,
- &if_xisnotpositive);
+ Label if_xispositive(this), if_xisnotpositive(this);
+ BranchIfSmiLessThanOrEqual(SmiConstant(Smi::FromInt(0)), x,
+ &if_xispositive, &if_xisnotpositive);
- assembler->Bind(&if_xispositive);
+ Bind(&if_xispositive);
{
// Just return the input {x}.
- assembler->Return(x);
+ Return(x);
}
- assembler->Bind(&if_xisnotpositive);
+ Bind(&if_xisnotpositive);
{
// Try to negate the {x} value.
- Node* pair = assembler->IntPtrSubWithOverflow(
- assembler->IntPtrConstant(0), assembler->BitcastTaggedToWord(x));
- Node* overflow = assembler->Projection(1, pair);
- Label if_overflow(assembler, Label::kDeferred),
- if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
- assembler->Bind(&if_notoverflow);
+ Node* pair =
+ IntPtrSubWithOverflow(IntPtrConstant(0), BitcastTaggedToWord(x));
+ Node* overflow = Projection(1, pair);
+ Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+
+ Bind(&if_notoverflow);
{
// There is a Smi representation for negated {x}.
- Node* result = assembler->Projection(0, pair);
- result = assembler->BitcastWordToTagged(result);
- assembler->Return(result);
+ Node* result = Projection(0, pair);
+ Return(BitcastWordToTagged(result));
}
- assembler->Bind(&if_overflow);
- {
- Node* result = assembler->NumberConstant(0.0 - Smi::kMinValue);
- assembler->Return(result);
- }
+ Bind(&if_overflow);
+ { Return(NumberConstant(0.0 - Smi::kMinValue)); }
}
}
- assembler->Bind(&if_xisnotsmi);
+ Bind(&if_xisnotsmi);
{
// Check if {x} is a HeapNumber.
- Label if_xisheapnumber(assembler),
- if_xisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(assembler->LoadMap(x),
- assembler->HeapNumberMapConstant()),
- &if_xisheapnumber, &if_xisnotheapnumber);
-
- assembler->Bind(&if_xisheapnumber);
+ Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
+ &if_xisnotheapnumber);
+
+ Bind(&if_xisheapnumber);
{
- Node* x_value = assembler->LoadHeapNumberValue(x);
- Node* value = assembler->Float64Abs(x_value);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
- assembler->Return(result);
+ Node* x_value = LoadHeapNumberValue(x);
+ Node* value = Float64Abs(x_value);
+ Node* result = AllocateHeapNumberWithValue(value);
+ Return(result);
}
- assembler->Bind(&if_xisnotheapnumber);
+ Bind(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_x.Bind(assembler->CallStub(callable, context, x));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_x.Bind(CallStub(callable, context, x));
+ Goto(&loop);
}
}
}
}
-namespace {
-
-void Generate_MathRoundingOperation(
- CodeStubAssembler* assembler,
- compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(4);
+void MathBuiltinsAssembler::MathRoundingOperation(
+ Node* (CodeStubAssembler::*float64op)(Node*)) {
+ Node* context = Parameter(4);
// We might need to loop once for ToNumber conversion.
- Variable var_x(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_x);
- var_x.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_x(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_x);
+ var_x.Bind(Parameter(1));
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {x} value.
Node* x = var_x.value();
// Check if {x} is a Smi or a HeapObject.
- Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
+ Label if_xissmi(this), if_xisnotsmi(this);
+ Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
- assembler->Bind(&if_xissmi);
+ Bind(&if_xissmi);
{
// Nothing to do when {x} is a Smi.
- assembler->Return(x);
+ Return(x);
}
- assembler->Bind(&if_xisnotsmi);
+ Bind(&if_xisnotsmi);
{
// Check if {x} is a HeapNumber.
- Label if_xisheapnumber(assembler),
- if_xisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(assembler->LoadMap(x),
- assembler->HeapNumberMapConstant()),
- &if_xisheapnumber, &if_xisnotheapnumber);
-
- assembler->Bind(&if_xisheapnumber);
+ Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
+ &if_xisnotheapnumber);
+
+ Bind(&if_xisheapnumber);
{
- Node* x_value = assembler->LoadHeapNumberValue(x);
- Node* value = (assembler->*float64op)(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Node* x_value = LoadHeapNumberValue(x);
+ Node* value = (this->*float64op)(x_value);
+ Node* result = ChangeFloat64ToTagged(value);
+ Return(result);
}
- assembler->Bind(&if_xisnotheapnumber);
+ Bind(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_x.Bind(assembler->CallStub(callable, context, x));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_x.Bind(CallStub(callable, context, x));
+ Goto(&loop);
}
}
}
}
-void Generate_MathUnaryOperation(
- CodeStubAssembler* assembler,
- compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
- typedef compiler::Node Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = (assembler->*float64op)(x_value);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
- assembler->Return(result);
+void MathBuiltinsAssembler::MathUnaryOperation(
+ Node* (CodeStubAssembler::*float64op)(Node*)) {
+ Node* x = Parameter(1);
+ Node* context = Parameter(4);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
+ Node* value = (this->*float64op)(x_value);
+ Node* result = AllocateHeapNumberWithValue(value);
+ Return(result);
}
-} // namespace
-
// ES6 section 20.2.2.2 Math.acos ( x )
-void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acos);
+TF_BUILTIN(MathAcos, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Acos);
}
// ES6 section 20.2.2.3 Math.acosh ( x )
-void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acosh);
+TF_BUILTIN(MathAcosh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Acosh);
}
// ES6 section 20.2.2.4 Math.asin ( x )
-void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asin);
+TF_BUILTIN(MathAsin, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Asin);
}
// ES6 section 20.2.2.5 Math.asinh ( x )
-void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asinh);
+TF_BUILTIN(MathAsinh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Asinh);
}
-
// ES6 section 20.2.2.6 Math.atan ( x )
-void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atan);
+TF_BUILTIN(MathAtan, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Atan);
}
// ES6 section 20.2.2.7 Math.atanh ( x )
-void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atanh);
+TF_BUILTIN(MathAtanh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Atanh);
}
// ES6 section 20.2.2.8 Math.atan2 ( y, x )
-void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
- using compiler::Node;
+TF_BUILTIN(MathAtan2, CodeStubAssembler) {
+ Node* y = Parameter(1);
+ Node* x = Parameter(2);
+ Node* context = Parameter(5);
- Node* y = assembler->Parameter(1);
- Node* x = assembler->Parameter(2);
- Node* context = assembler->Parameter(5);
- Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Atan2(y_value, x_value);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
- assembler->Return(result);
+ Node* y_value = TruncateTaggedToFloat64(context, y);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
+ Node* value = Float64Atan2(y_value, x_value);
+ Node* result = AllocateHeapNumberWithValue(value);
+ Return(result);
}
// ES6 section 20.2.2.10 Math.ceil ( x )
-void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
+TF_BUILTIN(MathCeil, MathBuiltinsAssembler) {
+ MathRoundingOperation(&CodeStubAssembler::Float64Ceil);
}
// ES6 section 20.2.2.9 Math.cbrt ( x )
-void Builtins::Generate_MathCbrt(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cbrt);
+TF_BUILTIN(MathCbrt, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Cbrt);
}
// ES6 section 20.2.2.11 Math.clz32 ( x )
-void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(4);
+TF_BUILTIN(MathClz32, CodeStubAssembler) {
+ Node* context = Parameter(4);
// Shared entry point for the clz32 operation.
- Variable var_clz32_x(assembler, MachineRepresentation::kWord32);
- Label do_clz32(assembler);
+ Variable var_clz32_x(this, MachineRepresentation::kWord32);
+ Label do_clz32(this);
// We might need to loop once for ToNumber conversion.
- Variable var_x(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_x);
- var_x.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_x(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_x);
+ var_x.Bind(Parameter(1));
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {x} value.
Node* x = var_x.value();
// Check if {x} is a Smi or a HeapObject.
- Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
+ Label if_xissmi(this), if_xisnotsmi(this);
+ Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
- assembler->Bind(&if_xissmi);
+ Bind(&if_xissmi);
{
- var_clz32_x.Bind(assembler->SmiToWord32(x));
- assembler->Goto(&do_clz32);
+ var_clz32_x.Bind(SmiToWord32(x));
+ Goto(&do_clz32);
}
- assembler->Bind(&if_xisnotsmi);
+ Bind(&if_xisnotsmi);
{
// Check if {x} is a HeapNumber.
- Label if_xisheapnumber(assembler),
- if_xisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(assembler->LoadMap(x),
- assembler->HeapNumberMapConstant()),
- &if_xisheapnumber, &if_xisnotheapnumber);
-
- assembler->Bind(&if_xisheapnumber);
+ Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
+ &if_xisnotheapnumber);
+
+ Bind(&if_xisheapnumber);
{
- var_clz32_x.Bind(assembler->TruncateHeapNumberValueToWord32(x));
- assembler->Goto(&do_clz32);
+ var_clz32_x.Bind(TruncateHeapNumberValueToWord32(x));
+ Goto(&do_clz32);
}
- assembler->Bind(&if_xisnotheapnumber);
+ Bind(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_x.Bind(assembler->CallStub(callable, context, x));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_x.Bind(CallStub(callable, context, x));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_clz32);
+ Bind(&do_clz32);
{
Node* x_value = var_clz32_x.value();
- Node* value = assembler->Word32Clz(x_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+ Node* value = Word32Clz(x_value);
+ Node* result = ChangeInt32ToTagged(value);
+ Return(result);
}
}
// ES6 section 20.2.2.12 Math.cos ( x )
-void Builtins::Generate_MathCos(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cos);
+TF_BUILTIN(MathCos, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Cos);
}
// ES6 section 20.2.2.13 Math.cosh ( x )
-void Builtins::Generate_MathCosh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cosh);
+TF_BUILTIN(MathCosh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Cosh);
}
// ES6 section 20.2.2.14 Math.exp ( x )
-void Builtins::Generate_MathExp(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Exp);
+TF_BUILTIN(MathExp, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Exp);
}
// ES6 section 20.2.2.15 Math.expm1 ( x )
-void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Expm1);
+TF_BUILTIN(MathExpm1, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Expm1);
}
// ES6 section 20.2.2.16 Math.floor ( x )
-void Builtins::Generate_MathFloor(CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Floor);
+TF_BUILTIN(MathFloor, MathBuiltinsAssembler) {
+ MathRoundingOperation(&CodeStubAssembler::Float64Floor);
}
// ES6 section 20.2.2.17 Math.fround ( x )
-void Builtins::Generate_MathFround(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value32 = assembler->TruncateFloat64ToFloat32(x_value);
- Node* value = assembler->ChangeFloat32ToFloat64(value32);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
- assembler->Return(result);
+TF_BUILTIN(MathFround, CodeStubAssembler) {
+ Node* x = Parameter(1);
+ Node* context = Parameter(4);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
+ Node* value32 = TruncateFloat64ToFloat32(x_value);
+ Node* value = ChangeFloat32ToFloat64(value32);
+ Node* result = AllocateHeapNumberWithValue(value);
+ Return(result);
}
// ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values )
@@ -351,7 +319,7 @@ BUILTIN(MathHypot) {
bool one_arg_is_nan = false;
List<double> abs_values(length);
for (int i = 0; i < length; i++) {
- Handle<Object> x = args.at<Object>(i + 1);
+ Handle<Object> x = args.at(i + 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
double abs_value = std::abs(x->Number());
@@ -394,153 +362,134 @@ BUILTIN(MathHypot) {
}
// ES6 section 20.2.2.19 Math.imul ( x, y )
-void Builtins::Generate_MathImul(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* y = assembler->Parameter(2);
- Node* context = assembler->Parameter(5);
- Node* x_value = assembler->TruncateTaggedToWord32(context, x);
- Node* y_value = assembler->TruncateTaggedToWord32(context, y);
- Node* value = assembler->Int32Mul(x_value, y_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(MathImul, CodeStubAssembler) {
+ Node* x = Parameter(1);
+ Node* y = Parameter(2);
+ Node* context = Parameter(5);
+ Node* x_value = TruncateTaggedToWord32(context, x);
+ Node* y_value = TruncateTaggedToWord32(context, y);
+ Node* value = Int32Mul(x_value, y_value);
+ Node* result = ChangeInt32ToTagged(value);
+ Return(result);
}
// ES6 section 20.2.2.20 Math.log ( x )
-void Builtins::Generate_MathLog(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log);
+TF_BUILTIN(MathLog, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Log);
}
// ES6 section 20.2.2.21 Math.log1p ( x )
-void Builtins::Generate_MathLog1p(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log1p);
+TF_BUILTIN(MathLog1p, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Log1p);
}
// ES6 section 20.2.2.22 Math.log10 ( x )
-void Builtins::Generate_MathLog10(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log10);
+TF_BUILTIN(MathLog10, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Log10);
}
// ES6 section 20.2.2.23 Math.log2 ( x )
-void Builtins::Generate_MathLog2(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log2);
+TF_BUILTIN(MathLog2, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Log2);
}
// ES6 section 20.2.2.26 Math.pow ( x, y )
-void Builtins::Generate_MathPow(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* y = assembler->Parameter(2);
- Node* context = assembler->Parameter(5);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
- Node* value = assembler->Float64Pow(x_value, y_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(MathPow, CodeStubAssembler) {
+ Node* x = Parameter(1);
+ Node* y = Parameter(2);
+ Node* context = Parameter(5);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
+ Node* y_value = TruncateTaggedToFloat64(context, y);
+ Node* value = Float64Pow(x_value, y_value);
+ Node* result = ChangeFloat64ToTagged(value);
+ Return(result);
}
// ES6 section 20.2.2.27 Math.random ( )
-void Builtins::Generate_MathRandom(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* context = assembler->Parameter(3);
- Node* native_context = assembler->LoadNativeContext(context);
+TF_BUILTIN(MathRandom, CodeStubAssembler) {
+ Node* context = Parameter(3);
+ Node* native_context = LoadNativeContext(context);
// Load cache index.
- CodeStubAssembler::Variable smi_index(assembler,
- MachineRepresentation::kTagged);
- smi_index.Bind(assembler->LoadContextElement(
- native_context, Context::MATH_RANDOM_INDEX_INDEX));
+ Variable smi_index(this, MachineRepresentation::kTagged);
+ smi_index.Bind(
+ LoadContextElement(native_context, Context::MATH_RANDOM_INDEX_INDEX));
// Cached random numbers are exhausted if index is 0. Go to slow path.
- CodeStubAssembler::Label if_cached(assembler);
- assembler->GotoIf(assembler->SmiAbove(smi_index.value(),
- assembler->SmiConstant(Smi::kZero)),
- &if_cached);
+ Label if_cached(this);
+ GotoIf(SmiAbove(smi_index.value(), SmiConstant(Smi::kZero)), &if_cached);
// Cache exhausted, populate the cache. Return value is the new index.
- smi_index.Bind(
- assembler->CallRuntime(Runtime::kGenerateRandomNumbers, context));
- assembler->Goto(&if_cached);
+ smi_index.Bind(CallRuntime(Runtime::kGenerateRandomNumbers, context));
+ Goto(&if_cached);
// Compute next index by decrement.
- assembler->Bind(&if_cached);
- Node* new_smi_index = assembler->SmiSub(
- smi_index.value(), assembler->SmiConstant(Smi::FromInt(1)));
- assembler->StoreContextElement(
- native_context, Context::MATH_RANDOM_INDEX_INDEX, new_smi_index);
+ Bind(&if_cached);
+ Node* new_smi_index = SmiSub(smi_index.value(), SmiConstant(Smi::FromInt(1)));
+ StoreContextElement(native_context, Context::MATH_RANDOM_INDEX_INDEX,
+ new_smi_index);
// Load and return next cached random number.
- Node* array = assembler->LoadContextElement(native_context,
- Context::MATH_RANDOM_CACHE_INDEX);
- Node* random = assembler->LoadFixedDoubleArrayElement(
- array, new_smi_index, MachineType::Float64(), 0,
- CodeStubAssembler::SMI_PARAMETERS);
- assembler->Return(assembler->AllocateHeapNumberWithValue(random));
+ Node* array =
+ LoadContextElement(native_context, Context::MATH_RANDOM_CACHE_INDEX);
+ Node* random = LoadFixedDoubleArrayElement(
+ array, new_smi_index, MachineType::Float64(), 0, SMI_PARAMETERS);
+ Return(AllocateHeapNumberWithValue(random));
}
// ES6 section 20.2.2.28 Math.round ( x )
-void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
+TF_BUILTIN(MathRound, MathBuiltinsAssembler) {
+ MathRoundingOperation(&CodeStubAssembler::Float64Round);
}
// ES6 section 20.2.2.29 Math.sign ( x )
-void Builtins::Generate_MathSign(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- using compiler::Node;
-
+TF_BUILTIN(MathSign, CodeStubAssembler) {
// Convert the {x} value to a Number.
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* x = Parameter(1);
+ Node* context = Parameter(4);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
// Return -1 if {x} is negative, 1 if {x} is positive, or {x} itself.
- Label if_xisnegative(assembler), if_xispositive(assembler);
- assembler->GotoIf(
- assembler->Float64LessThan(x_value, assembler->Float64Constant(0.0)),
- &if_xisnegative);
- assembler->GotoIf(
- assembler->Float64LessThan(assembler->Float64Constant(0.0), x_value),
- &if_xispositive);
- assembler->Return(assembler->ChangeFloat64ToTagged(x_value));
+ Label if_xisnegative(this), if_xispositive(this);
+ GotoIf(Float64LessThan(x_value, Float64Constant(0.0)), &if_xisnegative);
+ GotoIf(Float64LessThan(Float64Constant(0.0), x_value), &if_xispositive);
+ Return(ChangeFloat64ToTagged(x_value));
- assembler->Bind(&if_xisnegative);
- assembler->Return(assembler->SmiConstant(Smi::FromInt(-1)));
+ Bind(&if_xisnegative);
+ Return(SmiConstant(Smi::FromInt(-1)));
- assembler->Bind(&if_xispositive);
- assembler->Return(assembler->SmiConstant(Smi::FromInt(1)));
+ Bind(&if_xispositive);
+ Return(SmiConstant(Smi::FromInt(1)));
}
// ES6 section 20.2.2.30 Math.sin ( x )
-void Builtins::Generate_MathSin(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sin);
+TF_BUILTIN(MathSin, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Sin);
}
// ES6 section 20.2.2.31 Math.sinh ( x )
-void Builtins::Generate_MathSinh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sinh);
+TF_BUILTIN(MathSinh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Sinh);
}
// ES6 section 20.2.2.32 Math.sqrt ( x )
-void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sqrt);
+TF_BUILTIN(MathSqrt, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Sqrt);
}
// ES6 section 20.2.2.33 Math.tan ( x )
-void Builtins::Generate_MathTan(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tan);
+TF_BUILTIN(MathTan, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Tan);
}
// ES6 section 20.2.2.34 Math.tanh ( x )
-void Builtins::Generate_MathTanh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tanh);
+TF_BUILTIN(MathTanh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Tanh);
}
// ES6 section 20.2.2.35 Math.trunc ( x )
-void Builtins::Generate_MathTrunc(CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Trunc);
+TF_BUILTIN(MathTrunc, MathBuiltinsAssembler) {
+ MathRoundingOperation(&CodeStubAssembler::Float64Trunc);
}
void Builtins::Generate_MathMax(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 3e2bc556b6..7e750139de 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -5,253 +5,251 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
+class NumberBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit NumberBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ template <Signedness signed_result = kSigned>
+ void BitwiseOp(std::function<Node*(Node* lhs, Node* rhs)> body) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
+
+ Node* lhs_value = TruncateTaggedToWord32(context, left);
+ Node* rhs_value = TruncateTaggedToWord32(context, right);
+ Node* value = body(lhs_value, rhs_value);
+ Node* result = signed_result == kSigned ? ChangeInt32ToTagged(value)
+ : ChangeUint32ToTagged(value);
+ Return(result);
+ }
+
+ template <Signedness signed_result = kSigned>
+ void BitwiseShiftOp(std::function<Node*(Node* lhs, Node* shift_count)> body) {
+ BitwiseOp<signed_result>([this, body](Node* lhs, Node* rhs) {
+ Node* shift_count = Word32And(rhs, Int32Constant(0x1f));
+ return body(lhs, shift_count);
+ });
+ }
+
+ void RelationalComparisonBuiltin(RelationalComparisonMode mode) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
+
+ Return(RelationalComparison(mode, lhs, rhs, context));
+ }
+};
+
// -----------------------------------------------------------------------------
// ES6 section 20.1 Number Objects
// ES6 section 20.1.2.2 Number.isFinite ( number )
-void Builtins::Generate_NumberIsFinite(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
+ Node* number = Parameter(1);
- Node* number = assembler->Parameter(1);
-
- Label return_true(assembler), return_false(assembler);
+ Label return_true(this), return_false(this);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
+ GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadMap(number),
- assembler->HeapNumberMapConstant()),
- &return_false);
+ GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
// Check if {number} contains a finite, non-NaN value.
- Node* number_value = assembler->LoadHeapNumberValue(number);
- assembler->BranchIfFloat64IsNaN(
- assembler->Float64Sub(number_value, number_value), &return_false,
- &return_true);
+ Node* number_value = LoadHeapNumberValue(number);
+ BranchIfFloat64IsNaN(Float64Sub(number_value, number_value), &return_false,
+ &return_true);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ Bind(&return_false);
+ Return(BooleanConstant(false));
}
// ES6 section 20.1.2.3 Number.isInteger ( number )
-void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
+ Node* number = Parameter(1);
- Node* number = assembler->Parameter(1);
-
- Label return_true(assembler), return_false(assembler);
+ Label return_true(this), return_false(this);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
+ GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadMap(number),
- assembler->HeapNumberMapConstant()),
- &return_false);
+ GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
// Load the actual value of {number}.
- Node* number_value = assembler->LoadHeapNumberValue(number);
+ Node* number_value = LoadHeapNumberValue(number);
// Truncate the value of {number} to an integer (or an infinity).
- Node* integer = assembler->Float64Trunc(number_value);
+ Node* integer = Float64Trunc(number_value);
// Check if {number}s value matches the integer (ruling out the infinities).
- assembler->Branch(
- assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
- assembler->Float64Constant(0.0)),
- &return_true, &return_false);
+ Branch(Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
+ &return_true, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ Bind(&return_false);
+ Return(BooleanConstant(false));
}
// ES6 section 20.1.2.4 Number.isNaN ( number )
-void Builtins::Generate_NumberIsNaN(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
+ Node* number = Parameter(1);
- Node* number = assembler->Parameter(1);
-
- Label return_true(assembler), return_false(assembler);
+ Label return_true(this), return_false(this);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(number), &return_false);
+ GotoIf(TaggedIsSmi(number), &return_false);
// Check if {number} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadMap(number),
- assembler->HeapNumberMapConstant()),
- &return_false);
+ GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
// Check if {number} contains a NaN value.
- Node* number_value = assembler->LoadHeapNumberValue(number);
- assembler->BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
+ Node* number_value = LoadHeapNumberValue(number);
+ BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ Bind(&return_false);
+ Return(BooleanConstant(false));
}
// ES6 section 20.1.2.5 Number.isSafeInteger ( number )
-void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* number = assembler->Parameter(1);
+TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
+ Node* number = Parameter(1);
- Label return_true(assembler), return_false(assembler);
+ Label return_true(this), return_false(this);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
+ GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadMap(number),
- assembler->HeapNumberMapConstant()),
- &return_false);
+ GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
// Load the actual value of {number}.
- Node* number_value = assembler->LoadHeapNumberValue(number);
+ Node* number_value = LoadHeapNumberValue(number);
// Truncate the value of {number} to an integer (or an infinity).
- Node* integer = assembler->Float64Trunc(number_value);
+ Node* integer = Float64Trunc(number_value);
// Check if {number}s value matches the integer (ruling out the infinities).
- assembler->GotoUnless(
- assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
- assembler->Float64Constant(0.0)),
+ GotoUnless(
+ Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
&return_false);
// Check if the {integer} value is in safe integer range.
- assembler->Branch(assembler->Float64LessThanOrEqual(
- assembler->Float64Abs(integer),
- assembler->Float64Constant(kMaxSafeInteger)),
- &return_true, &return_false);
+ Branch(Float64LessThanOrEqual(Float64Abs(integer),
+ Float64Constant(kMaxSafeInteger)),
+ &return_true, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ Bind(&return_false);
+ Return(BooleanConstant(false));
}
// ES6 section 20.1.2.12 Number.parseFloat ( string )
-void Builtins::Generate_NumberParseFloat(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(4);
+TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
+ Node* context = Parameter(4);
// We might need to loop once for ToString conversion.
- Variable var_input(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_input);
- var_input.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_input(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_input);
+ var_input.Bind(Parameter(1));
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {input} value.
Node* input = var_input.value();
// Check if the {input} is a HeapObject or a Smi.
- Label if_inputissmi(assembler), if_inputisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(input), &if_inputissmi,
- &if_inputisnotsmi);
+ Label if_inputissmi(this), if_inputisnotsmi(this);
+ Branch(TaggedIsSmi(input), &if_inputissmi, &if_inputisnotsmi);
- assembler->Bind(&if_inputissmi);
+ Bind(&if_inputissmi);
{
// The {input} is already a Number, no need to do anything.
- assembler->Return(input);
+ Return(input);
}
- assembler->Bind(&if_inputisnotsmi);
+ Bind(&if_inputisnotsmi);
{
// The {input} is a HeapObject, check if it's already a String.
- Label if_inputisstring(assembler), if_inputisnotstring(assembler);
- Node* input_map = assembler->LoadMap(input);
- Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
- assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
- &if_inputisstring, &if_inputisnotstring);
+ Label if_inputisstring(this), if_inputisnotstring(this);
+ Node* input_map = LoadMap(input);
+ Node* input_instance_type = LoadMapInstanceType(input_map);
+ Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
+ &if_inputisnotstring);
- assembler->Bind(&if_inputisstring);
+ Bind(&if_inputisstring);
{
// The {input} is already a String, check if {input} contains
// a cached array index.
- Label if_inputcached(assembler), if_inputnotcached(assembler);
- Node* input_hash = assembler->LoadNameHashField(input);
- Node* input_bit = assembler->Word32And(
- input_hash,
- assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
- assembler->Branch(
- assembler->Word32Equal(input_bit, assembler->Int32Constant(0)),
- &if_inputcached, &if_inputnotcached);
-
- assembler->Bind(&if_inputcached);
+ Label if_inputcached(this), if_inputnotcached(this);
+ Node* input_hash = LoadNameHashField(input);
+ Node* input_bit = Word32And(
+ input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
+ Branch(Word32Equal(input_bit, Int32Constant(0)), &if_inputcached,
+ &if_inputnotcached);
+
+ Bind(&if_inputcached);
{
// Just return the {input}s cached array index.
Node* input_array_index =
- assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
- input_hash);
- assembler->Return(assembler->SmiTag(input_array_index));
+ DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
+ Return(SmiTag(input_array_index));
}
- assembler->Bind(&if_inputnotcached);
+ Bind(&if_inputnotcached);
{
// Need to fall back to the runtime to convert {input} to double.
- assembler->Return(assembler->CallRuntime(Runtime::kStringParseFloat,
- context, input));
+ Return(CallRuntime(Runtime::kStringParseFloat, context, input));
}
}
- assembler->Bind(&if_inputisnotstring);
+ Bind(&if_inputisnotstring);
{
// The {input} is neither a String nor a Smi, check for HeapNumber.
- Label if_inputisnumber(assembler),
- if_inputisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
- &if_inputisnumber, &if_inputisnotnumber);
+ Label if_inputisnumber(this),
+ if_inputisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(input_map), &if_inputisnumber,
+ &if_inputisnotnumber);
- assembler->Bind(&if_inputisnumber);
+ Bind(&if_inputisnumber);
{
// The {input} is already a Number, take care of -0.
- Label if_inputiszero(assembler), if_inputisnotzero(assembler);
- Node* input_value = assembler->LoadHeapNumberValue(input);
- assembler->Branch(assembler->Float64Equal(
- input_value, assembler->Float64Constant(0.0)),
- &if_inputiszero, &if_inputisnotzero);
+ Label if_inputiszero(this), if_inputisnotzero(this);
+ Node* input_value = LoadHeapNumberValue(input);
+ Branch(Float64Equal(input_value, Float64Constant(0.0)),
+ &if_inputiszero, &if_inputisnotzero);
- assembler->Bind(&if_inputiszero);
- assembler->Return(assembler->SmiConstant(0));
+ Bind(&if_inputiszero);
+ Return(SmiConstant(0));
- assembler->Bind(&if_inputisnotzero);
- assembler->Return(input);
+ Bind(&if_inputisnotzero);
+ Return(input);
}
- assembler->Bind(&if_inputisnotnumber);
+ Bind(&if_inputisnotnumber);
{
// Need to convert the {input} to String first.
// TODO(bmeurer): This could be more efficient if necessary.
- Callable callable = CodeFactory::ToString(assembler->isolate());
- var_input.Bind(assembler->CallStub(callable, context, input));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::ToString(isolate());
+ var_input.Bind(CallStub(callable, context, input));
+ Goto(&loop);
}
}
}
@@ -259,106 +257,86 @@ void Builtins::Generate_NumberParseFloat(CodeStubAssembler* assembler) {
}
// ES6 section 20.1.2.13 Number.parseInt ( string, radix )
-void Builtins::Generate_NumberParseInt(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* input = assembler->Parameter(1);
- Node* radix = assembler->Parameter(2);
- Node* context = assembler->Parameter(5);
+TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
+ Node* input = Parameter(1);
+ Node* radix = Parameter(2);
+ Node* context = Parameter(5);
// Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
- Label if_radix10(assembler), if_generic(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->WordEqual(radix, assembler->UndefinedConstant()),
- &if_radix10);
- assembler->GotoIf(
- assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(10))),
- &if_radix10);
- assembler->GotoIf(
- assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(0))),
- &if_radix10);
- assembler->Goto(&if_generic);
-
- assembler->Bind(&if_radix10);
+ Label if_radix10(this), if_generic(this, Label::kDeferred);
+ GotoIf(WordEqual(radix, UndefinedConstant()), &if_radix10);
+ GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(10))), &if_radix10);
+ GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(0))), &if_radix10);
+ Goto(&if_generic);
+
+ Bind(&if_radix10);
{
// Check if we can avoid the ToString conversion on {input}.
- Label if_inputissmi(assembler), if_inputisheapnumber(assembler),
- if_inputisstring(assembler);
- assembler->GotoIf(assembler->TaggedIsSmi(input), &if_inputissmi);
- Node* input_map = assembler->LoadMap(input);
- assembler->GotoIf(
- assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
- &if_inputisheapnumber);
- Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
- assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
- &if_inputisstring, &if_generic);
-
- assembler->Bind(&if_inputissmi);
+ Label if_inputissmi(this), if_inputisheapnumber(this),
+ if_inputisstring(this);
+ GotoIf(TaggedIsSmi(input), &if_inputissmi);
+ Node* input_map = LoadMap(input);
+ GotoIf(IsHeapNumberMap(input_map), &if_inputisheapnumber);
+ Node* input_instance_type = LoadMapInstanceType(input_map);
+ Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
+ &if_generic);
+
+ Bind(&if_inputissmi);
{
// Just return the {input}.
- assembler->Return(input);
+ Return(input);
}
- assembler->Bind(&if_inputisheapnumber);
+ Bind(&if_inputisheapnumber);
{
// Check if the {input} value is in Signed32 range.
- Label if_inputissigned32(assembler);
- Node* input_value = assembler->LoadHeapNumberValue(input);
- Node* input_value32 = assembler->TruncateFloat64ToWord32(input_value);
- assembler->GotoIf(
- assembler->Float64Equal(
- input_value, assembler->ChangeInt32ToFloat64(input_value32)),
- &if_inputissigned32);
+ Label if_inputissigned32(this);
+ Node* input_value = LoadHeapNumberValue(input);
+ Node* input_value32 = TruncateFloat64ToWord32(input_value);
+ GotoIf(Float64Equal(input_value, ChangeInt32ToFloat64(input_value32)),
+ &if_inputissigned32);
// Check if the absolute {input} value is in the ]0.01,1e9[ range.
- Node* input_value_abs = assembler->Float64Abs(input_value);
+ Node* input_value_abs = Float64Abs(input_value);
- assembler->GotoUnless(
- assembler->Float64LessThan(input_value_abs,
- assembler->Float64Constant(1e9)),
- &if_generic);
- assembler->Branch(assembler->Float64LessThan(
- assembler->Float64Constant(0.01), input_value_abs),
- &if_inputissigned32, &if_generic);
+ GotoUnless(Float64LessThan(input_value_abs, Float64Constant(1e9)),
+ &if_generic);
+ Branch(Float64LessThan(Float64Constant(0.01), input_value_abs),
+ &if_inputissigned32, &if_generic);
// Return the truncated int32 value, and return the tagged result.
- assembler->Bind(&if_inputissigned32);
- Node* result = assembler->ChangeInt32ToTagged(input_value32);
- assembler->Return(result);
+ Bind(&if_inputissigned32);
+ Node* result = ChangeInt32ToTagged(input_value32);
+ Return(result);
}
- assembler->Bind(&if_inputisstring);
+ Bind(&if_inputisstring);
{
// Check if the String {input} has a cached array index.
- Node* input_hash = assembler->LoadNameHashField(input);
- Node* input_bit = assembler->Word32And(
- input_hash,
- assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
- assembler->GotoIf(
- assembler->Word32NotEqual(input_bit, assembler->Int32Constant(0)),
- &if_generic);
+ Node* input_hash = LoadNameHashField(input);
+ Node* input_bit = Word32And(
+ input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
+ GotoIf(Word32NotEqual(input_bit, Int32Constant(0)), &if_generic);
// Return the cached array index as result.
Node* input_index =
- assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
- input_hash);
- Node* result = assembler->SmiTag(input_index);
- assembler->Return(result);
+ DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
+ Node* result = SmiTag(input_index);
+ Return(result);
}
}
- assembler->Bind(&if_generic);
+ Bind(&if_generic);
{
- Node* result =
- assembler->CallRuntime(Runtime::kStringParseInt, context, input, radix);
- assembler->Return(result);
+ Node* result = CallRuntime(Runtime::kStringParseInt, context, input, radix);
+ Return(result);
}
}
// ES6 section 20.1.3.2 Number.prototype.toExponential ( fractionDigits )
BUILTIN(NumberPrototypeToExponential) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
@@ -401,7 +379,7 @@ BUILTIN(NumberPrototypeToExponential) {
// ES6 section 20.1.3.3 Number.prototype.toFixed ( fractionDigits )
BUILTIN(NumberPrototypeToFixed) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
@@ -444,7 +422,7 @@ BUILTIN(NumberPrototypeToFixed) {
// ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] )
BUILTIN(NumberPrototypeToLocaleString) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
// Unwrap the receiver {value}.
if (value->IsJSValue()) {
@@ -464,7 +442,7 @@ BUILTIN(NumberPrototypeToLocaleString) {
// ES6 section 20.1.3.5 Number.prototype.toPrecision ( precision )
BUILTIN(NumberPrototypeToPrecision) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Object> precision = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
@@ -508,7 +486,7 @@ BUILTIN(NumberPrototypeToPrecision) {
// ES6 section 20.1.3.6 Number.prototype.toString ( [ radix ] )
BUILTIN(NumberPrototypeToString) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Object> radix = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
@@ -543,7 +521,8 @@ BUILTIN(NumberPrototypeToString) {
}
// Fast case where the result is a one character string.
- if (IsUint32Double(value_number) && value_number < radix_number) {
+ if ((IsUint32Double(value_number) && value_number < radix_number) ||
+ value_number == -0.0) {
// Character array used for conversion.
static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
return *isolate->factory()->LookupSingleCharacterStringFromCode(
@@ -564,342 +543,315 @@ BUILTIN(NumberPrototypeToString) {
}
// ES6 section 20.1.3.7 Number.prototype.valueOf ( )
-void Builtins::Generate_NumberPrototypeValueOf(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
-
- Node* result = assembler->ToThisValue(
- context, receiver, PrimitiveType::kNumber, "Number.prototype.valueOf");
- assembler->Return(result);
+ Node* result = ToThisValue(context, receiver, PrimitiveType::kNumber,
+ "Number.prototype.valueOf");
+ Return(result);
}
-// static
-void Builtins::Generate_Add(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+TF_BUILTIN(Add, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
// Shared entry for floating point addition.
- Label do_fadd(assembler);
- Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
- var_fadd_rhs(assembler, MachineRepresentation::kFloat64);
+ Label do_fadd(this);
+ Variable var_fadd_lhs(this, MachineRepresentation::kFloat64),
+ var_fadd_rhs(this, MachineRepresentation::kFloat64);
// We might need to loop several times due to ToPrimitive, ToString and/or
// ToNumber conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars), end(assembler),
- string_add_convert_left(assembler, Label::kDeferred),
- string_add_convert_right(assembler, Label::kDeferred);
+ Label loop(this, 2, loop_vars), end(this),
+ string_add_convert_left(this, Label::kDeferred),
+ string_add_convert_right(this, Label::kDeferred);
var_lhs.Bind(left);
var_rhs.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {lhs} and {rhs} values.
Node* lhs = var_lhs.value();
Node* rhs = var_rhs.value();
// Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
- &if_lhsisnotsmi);
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
- assembler->Bind(&if_lhsissmi);
+ Bind(&if_lhsissmi);
{
// Check if the {rhs} is also a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- assembler->Bind(&if_rhsissmi);
+ Bind(&if_rhsissmi);
{
// Try fast Smi addition first.
- Node* pair = assembler->IntPtrAddWithOverflow(
- assembler->BitcastTaggedToWord(lhs),
- assembler->BitcastTaggedToWord(rhs));
- Node* overflow = assembler->Projection(1, pair);
+ Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(lhs),
+ BitcastTaggedToWord(rhs));
+ Node* overflow = Projection(1, pair);
// Check if the Smi additon overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+ Label if_overflow(this), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
- assembler->Bind(&if_overflow);
+ Bind(&if_overflow);
{
- var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fadd);
+ var_fadd_lhs.Bind(SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fadd);
}
- assembler->Bind(&if_notoverflow);
- var_result.Bind(assembler->BitcastWordToTaggedSigned(
- assembler->Projection(0, pair)));
- assembler->Goto(&end);
+ Bind(&if_notoverflow);
+ var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ Goto(&end);
}
- assembler->Bind(&if_rhsisnotsmi);
+ Bind(&if_rhsisnotsmi);
{
// Load the map of {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
- assembler->Bind(&if_rhsisnumber);
+ Bind(&if_rhsisnumber);
{
- var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fadd);
+ var_fadd_lhs.Bind(SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fadd);
}
- assembler->Bind(&if_rhsisnotnumber);
+ Bind(&if_rhsisnotnumber);
{
// Load the instance type of {rhs}.
- Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
// Check if the {rhs} is a String.
- Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this, Label::kDeferred);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
- assembler->Bind(&if_rhsisstring);
+ Bind(&if_rhsisstring);
{
var_lhs.Bind(lhs);
var_rhs.Bind(rhs);
- assembler->Goto(&string_add_convert_left);
+ Goto(&string_add_convert_left);
}
- assembler->Bind(&if_rhsisnotstring);
+ Bind(&if_rhsisnotstring);
{
// Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
- assembler->Bind(&if_rhsisreceiver);
+ Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
- assembler->Bind(&if_rhsisnotreceiver);
+ Bind(&if_rhsisnotreceiver);
{
// Convert {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
}
}
}
}
- assembler->Bind(&if_lhsisnotsmi);
+ Bind(&if_lhsisnotsmi);
{
// Load the map and instance type of {lhs}.
- Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+ Node* lhs_instance_type = LoadInstanceType(lhs);
// Check if {lhs} is a String.
- Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
- assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
- &if_lhsisstring, &if_lhsisnotstring);
+ Label if_lhsisstring(this), if_lhsisnotstring(this);
+ Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+ &if_lhsisnotstring);
- assembler->Bind(&if_lhsisstring);
+ Bind(&if_lhsisstring);
{
var_lhs.Bind(lhs);
var_rhs.Bind(rhs);
- assembler->Goto(&string_add_convert_right);
+ Goto(&string_add_convert_right);
}
- assembler->Bind(&if_lhsisnotstring);
+ Bind(&if_lhsisnotstring);
{
// Check if {rhs} is a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- assembler->Bind(&if_rhsissmi);
+ Bind(&if_rhsissmi);
{
// Check if {lhs} is a Number.
- Label if_lhsisnumber(assembler),
- if_lhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->Word32Equal(
- lhs_instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- assembler->Bind(&if_lhsisnumber);
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
+ Branch(
+ Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ Bind(&if_lhsisnumber);
{
// The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
- var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fadd);
+ var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fadd);
}
- assembler->Bind(&if_lhsisnotnumber);
+ Bind(&if_lhsisnotnumber);
{
// The {lhs} is neither a Number nor a String, and the {rhs} is a
// Smi.
- Label if_lhsisreceiver(assembler, Label::kDeferred),
- if_lhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
+ Label if_lhsisreceiver(this, Label::kDeferred),
+ if_lhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
- assembler->Bind(&if_lhsisreceiver);
+ Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing no hint.
Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
- assembler->Bind(&if_lhsisnotreceiver);
+ Bind(&if_lhsisnotreceiver);
{
// Convert {lhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&if_rhsisnotsmi);
+ Bind(&if_rhsisnotsmi);
{
// Load the instance type of {rhs}.
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+ Node* rhs_instance_type = LoadInstanceType(rhs);
// Check if {rhs} is a String.
- Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
- assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
+ Label if_rhsisstring(this), if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
- assembler->Bind(&if_rhsisstring);
+ Bind(&if_rhsisstring);
{
var_lhs.Bind(lhs);
var_rhs.Bind(rhs);
- assembler->Goto(&string_add_convert_left);
+ Goto(&string_add_convert_left);
}
- assembler->Bind(&if_rhsisnotstring);
+ Bind(&if_rhsisnotstring);
{
// Check if {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
- assembler->Branch(assembler->Word32Equal(
- lhs_instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this);
+ Branch(
+ Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_lhsisnumber, &if_lhsisnotnumber);
- assembler->Bind(&if_lhsisnumber);
+ Bind(&if_lhsisnumber);
{
// Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->Word32Equal(
- rhs_instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
+ Label if_rhsisnumber(this),
+ if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(Word32Equal(rhs_instance_type,
+ Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
{
// Perform a floating point addition.
- var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fadd);
+ var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fadd);
}
- assembler->Bind(&if_rhsisnotnumber);
+ Bind(&if_rhsisnotnumber);
{
// Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
- assembler->Bind(&if_rhsisreceiver);
+ Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
- assembler->Bind(&if_rhsisnotreceiver);
+ Bind(&if_rhsisnotreceiver);
{
// Convert {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&if_lhsisnotnumber);
+ Bind(&if_lhsisnotnumber);
{
// Check if {lhs} is a JSReceiver.
- Label if_lhsisreceiver(assembler, Label::kDeferred),
- if_lhsisnotreceiver(assembler);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
+ Label if_lhsisreceiver(this, Label::kDeferred),
+ if_lhsisnotreceiver(this);
+ Branch(IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
- assembler->Bind(&if_lhsisreceiver);
+ Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing no hint.
Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
- assembler->Bind(&if_lhsisnotreceiver);
+ Bind(&if_lhsisnotreceiver);
{
// Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
- assembler->Bind(&if_rhsisreceiver);
+ Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
- assembler->Bind(&if_rhsisnotreceiver);
+ Bind(&if_rhsisnotreceiver);
{
// Convert {lhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
}
}
@@ -908,910 +860,755 @@ void Builtins::Generate_Add(CodeStubAssembler* assembler) {
}
}
}
- assembler->Bind(&string_add_convert_left);
+ Bind(&string_add_convert_left);
{
// Convert {lhs}, which is a Smi, to a String and concatenate the
// resulting string with the String {rhs}.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
- var_rhs.value()));
- assembler->Goto(&end);
+ Callable callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+ var_result.Bind(
+ CallStub(callable, context, var_lhs.value(), var_rhs.value()));
+ Goto(&end);
}
- assembler->Bind(&string_add_convert_right);
+ Bind(&string_add_convert_right);
{
// Convert {lhs}, which is a Smi, to a String and concatenate the
// resulting string with the String {rhs}.
Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
- var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
- var_rhs.value()));
- assembler->Goto(&end);
+ isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+ var_result.Bind(
+ CallStub(callable, context, var_lhs.value(), var_rhs.value()));
+ Goto(&end);
}
- assembler->Bind(&do_fadd);
+ Bind(&do_fadd);
{
Node* lhs_value = var_fadd_lhs.value();
Node* rhs_value = var_fadd_rhs.value();
- Node* value = assembler->Float64Add(lhs_value, rhs_value);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
+ Node* value = Float64Add(lhs_value, rhs_value);
+ Node* result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
- assembler->Goto(&end);
+ Goto(&end);
}
- assembler->Bind(&end);
- assembler->Return(var_result.value());
+ Bind(&end);
+ Return(var_result.value());
}
-void Builtins::Generate_Subtract(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+TF_BUILTIN(Subtract, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
// Shared entry for floating point subtraction.
- Label do_fsub(assembler), end(assembler);
- Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
- var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
+ Label do_fsub(this), end(this);
+ Variable var_fsub_lhs(this, MachineRepresentation::kFloat64),
+ var_fsub_rhs(this, MachineRepresentation::kFloat64);
// We might need to loop several times due to ToPrimitive and/or ToNumber
// conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars);
+ Label loop(this, 2, loop_vars);
var_lhs.Bind(left);
var_rhs.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {lhs} and {rhs} values.
Node* lhs = var_lhs.value();
Node* rhs = var_rhs.value();
// Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
- &if_lhsisnotsmi);
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
- assembler->Bind(&if_lhsissmi);
+ Bind(&if_lhsissmi);
{
// Check if the {rhs} is also a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- assembler->Bind(&if_rhsissmi);
+ Bind(&if_rhsissmi);
{
// Try a fast Smi subtraction first.
- Node* pair = assembler->IntPtrSubWithOverflow(
- assembler->BitcastTaggedToWord(lhs),
- assembler->BitcastTaggedToWord(rhs));
- Node* overflow = assembler->Projection(1, pair);
+ Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(lhs),
+ BitcastTaggedToWord(rhs));
+ Node* overflow = Projection(1, pair);
// Check if the Smi subtraction overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+ Label if_overflow(this), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
- assembler->Bind(&if_overflow);
+ Bind(&if_overflow);
{
// The result doesn't fit into Smi range.
- var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fsub);
+ var_fsub_lhs.Bind(SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fsub);
}
- assembler->Bind(&if_notoverflow);
- var_result.Bind(assembler->BitcastWordToTaggedSigned(
- assembler->Projection(0, pair)));
- assembler->Goto(&end);
+ Bind(&if_notoverflow);
+ var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ Goto(&end);
}
- assembler->Bind(&if_rhsisnotsmi);
+ Bind(&if_rhsisnotsmi);
{
// Load the map of the {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
- assembler->Bind(&if_rhsisnumber);
+ Bind(&if_rhsisnumber);
{
// Perform a floating point subtraction.
- var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fsub);
+ var_fsub_lhs.Bind(SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fsub);
}
- assembler->Bind(&if_rhsisnotnumber);
+ Bind(&if_rhsisnotnumber);
{
// Convert the {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&if_lhsisnotsmi);
+ Bind(&if_lhsisnotsmi);
{
// Load the map of the {lhs}.
- Node* lhs_map = assembler->LoadMap(lhs);
+ Node* lhs_map = LoadMap(lhs);
// Check if the {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler),
- if_lhsisnotnumber(assembler, Label::kDeferred);
- Node* number_map = assembler->HeapNumberMapConstant();
- assembler->Branch(assembler->WordEqual(lhs_map, number_map),
- &if_lhsisnumber, &if_lhsisnotnumber);
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
- assembler->Bind(&if_lhsisnumber);
+ Bind(&if_lhsisnumber);
{
// Check if the {rhs} is a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- assembler->Bind(&if_rhsissmi);
+ Bind(&if_rhsissmi);
{
// Perform a floating point subtraction.
- var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fsub);
+ var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fsub);
}
- assembler->Bind(&if_rhsisnotsmi);
+ Bind(&if_rhsisnotsmi);
{
// Load the map of the {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
- assembler->Bind(&if_rhsisnumber);
+ Bind(&if_rhsisnumber);
{
// Perform a floating point subtraction.
- var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fsub);
+ var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fsub);
}
- assembler->Bind(&if_rhsisnotnumber);
+ Bind(&if_rhsisnotnumber);
{
// Convert the {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&if_lhsisnotnumber);
+ Bind(&if_lhsisnotnumber);
{
// Convert the {lhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_fsub);
+ Bind(&do_fsub);
{
Node* lhs_value = var_fsub_lhs.value();
Node* rhs_value = var_fsub_rhs.value();
- Node* value = assembler->Float64Sub(lhs_value, rhs_value);
- var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&end);
+ Node* value = Float64Sub(lhs_value, rhs_value);
+ var_result.Bind(AllocateHeapNumberWithValue(value));
+ Goto(&end);
}
- assembler->Bind(&end);
- assembler->Return(var_result.value());
+ Bind(&end);
+ Return(var_result.value());
}
-void Builtins::Generate_Multiply(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+TF_BUILTIN(Multiply, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
// Shared entry point for floating point multiplication.
- Label do_fmul(assembler), return_result(assembler);
- Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
- var_rhs_float64(assembler, MachineRepresentation::kFloat64);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ Label do_fmul(this), return_result(this);
+ Variable var_lhs_float64(this, MachineRepresentation::kFloat64),
+ var_rhs_float64(this, MachineRepresentation::kFloat64);
// We might need to loop one or two times due to ToNumber conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kTagged);
Variable* loop_variables[] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_variables);
+ Label loop(this, 2, loop_variables);
var_lhs.Bind(left);
var_rhs.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
Node* lhs = var_lhs.value();
Node* rhs = var_rhs.value();
- Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi,
- &lhs_is_not_smi);
+ Label lhs_is_smi(this), lhs_is_not_smi(this);
+ Branch(TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
- assembler->Bind(&lhs_is_smi);
+ Bind(&lhs_is_smi);
{
- Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
- &rhs_is_not_smi);
+ Label rhs_is_smi(this), rhs_is_not_smi(this);
+ Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
- assembler->Bind(&rhs_is_smi);
+ Bind(&rhs_is_smi);
{
// Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
// in case of overflow.
- var_result.Bind(assembler->SmiMul(lhs, rhs));
- assembler->Goto(&return_result);
+ var_result.Bind(SmiMul(lhs, rhs));
+ Goto(&return_result);
}
- assembler->Bind(&rhs_is_not_smi);
+ Bind(&rhs_is_not_smi);
{
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- Label rhs_is_number(assembler),
- rhs_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &rhs_is_number, &rhs_is_not_number);
+ Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
- assembler->Bind(&rhs_is_number);
+ Bind(&rhs_is_number);
{
// Convert {lhs} to a double and multiply it with the value of {rhs}.
- var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
- var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fmul);
+ var_lhs_float64.Bind(SmiToFloat64(lhs));
+ var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fmul);
}
- assembler->Bind(&rhs_is_not_number);
+ Bind(&rhs_is_not_number);
{
// Multiplication is commutative, swap {lhs} with {rhs} and loop.
var_lhs.Bind(rhs);
var_rhs.Bind(lhs);
- assembler->Goto(&loop);
+ Goto(&loop);
}
}
}
- assembler->Bind(&lhs_is_not_smi);
+ Bind(&lhs_is_not_smi);
{
- Node* lhs_map = assembler->LoadMap(lhs);
+ Node* lhs_map = LoadMap(lhs);
// Check if {lhs} is a HeapNumber.
- Label lhs_is_number(assembler),
- lhs_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(lhs_map, number_map),
- &lhs_is_number, &lhs_is_not_number);
+ Label lhs_is_number(this), lhs_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(lhs_map), &lhs_is_number, &lhs_is_not_number);
- assembler->Bind(&lhs_is_number);
+ Bind(&lhs_is_number);
{
// Check if {rhs} is a Smi.
- Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
- &rhs_is_not_smi);
+ Label rhs_is_smi(this), rhs_is_not_smi(this);
+ Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
- assembler->Bind(&rhs_is_smi);
+ Bind(&rhs_is_smi);
{
// Convert {rhs} to a double and multiply it with the value of {lhs}.
- var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fmul);
+ var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(SmiToFloat64(rhs));
+ Goto(&do_fmul);
}
- assembler->Bind(&rhs_is_not_smi);
+ Bind(&rhs_is_not_smi);
{
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- Label rhs_is_number(assembler),
- rhs_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &rhs_is_number, &rhs_is_not_number);
+ Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
- assembler->Bind(&rhs_is_number);
+ Bind(&rhs_is_number);
{
// Both {lhs} and {rhs} are HeapNumbers. Load their values and
// multiply them.
- var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fmul);
+ var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fmul);
}
- assembler->Bind(&rhs_is_not_number);
+ Bind(&rhs_is_not_number);
{
// Multiplication is commutative, swap {lhs} with {rhs} and loop.
var_lhs.Bind(rhs);
var_rhs.Bind(lhs);
- assembler->Goto(&loop);
+ Goto(&loop);
}
}
}
- assembler->Bind(&lhs_is_not_number);
+ Bind(&lhs_is_not_number);
{
// Convert {lhs} to a Number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_fmul);
+ Bind(&do_fmul);
{
- Node* value =
- assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = assembler->AllocateHeapNumberWithValue(value);
+ Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
+ Node* result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
- assembler->Goto(&return_result);
+ Goto(&return_result);
}
- assembler->Bind(&return_result);
- assembler->Return(var_result.value());
+ Bind(&return_result);
+ Return(var_result.value());
}
-void Builtins::Generate_Divide(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+TF_BUILTIN(Divide, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
// Shared entry point for floating point division.
- Label do_fdiv(assembler), end(assembler);
- Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
- var_divisor_float64(assembler, MachineRepresentation::kFloat64);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ Label do_fdiv(this), end(this);
+ Variable var_dividend_float64(this, MachineRepresentation::kFloat64),
+ var_divisor_float64(this, MachineRepresentation::kFloat64);
// We might need to loop one or two times due to ToNumber conversions.
- Variable var_dividend(assembler, MachineRepresentation::kTagged),
- var_divisor(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
+ Variable var_dividend(this, MachineRepresentation::kTagged),
+ var_divisor(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kTagged);
Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(assembler, 2, loop_variables);
+ Label loop(this, 2, loop_variables);
var_dividend.Bind(left);
var_divisor.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
Node* dividend = var_dividend.value();
Node* divisor = var_divisor.value();
- Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
- &dividend_is_not_smi);
+ Label dividend_is_smi(this), dividend_is_not_smi(this);
+ Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
- assembler->Bind(&dividend_is_smi);
+ Bind(&dividend_is_smi);
{
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
+ Label divisor_is_smi(this), divisor_is_not_smi(this);
+ Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
- assembler->Bind(&divisor_is_smi);
+ Bind(&divisor_is_smi);
{
- Label bailout(assembler);
+ Label bailout(this);
// Do floating point division if {divisor} is zero.
- assembler->GotoIf(
- assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
- &bailout);
+ GotoIf(SmiEqual(divisor, SmiConstant(0)), &bailout);
      // Do floating point division if {dividend} is zero and {divisor} is
      // negative.
- Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
- assembler->Branch(
- assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
- &dividend_is_zero, &dividend_is_not_zero);
+ Label dividend_is_zero(this), dividend_is_not_zero(this);
+ Branch(SmiEqual(dividend, SmiConstant(0)), &dividend_is_zero,
+ &dividend_is_not_zero);
- assembler->Bind(&dividend_is_zero);
+ Bind(&dividend_is_zero);
{
- assembler->GotoIf(
- assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
- &bailout);
- assembler->Goto(&dividend_is_not_zero);
+ GotoIf(SmiLessThan(divisor, SmiConstant(0)), &bailout);
+ Goto(&dividend_is_not_zero);
}
- assembler->Bind(&dividend_is_not_zero);
+ Bind(&dividend_is_not_zero);
- Node* untagged_divisor = assembler->SmiUntag(divisor);
- Node* untagged_dividend = assembler->SmiUntag(dividend);
+ Node* untagged_divisor = SmiToWord32(divisor);
+ Node* untagged_dividend = SmiToWord32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
- Label divisor_is_minus_one(assembler),
- divisor_is_not_minus_one(assembler);
- assembler->Branch(assembler->Word32Equal(untagged_divisor,
- assembler->Int32Constant(-1)),
- &divisor_is_minus_one, &divisor_is_not_minus_one);
+ Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
+ Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
+ &divisor_is_minus_one, &divisor_is_not_minus_one);
- assembler->Bind(&divisor_is_minus_one);
+ Bind(&divisor_is_minus_one);
{
- assembler->GotoIf(
- assembler->Word32Equal(
- untagged_dividend,
- assembler->Int32Constant(
- kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
+ GotoIf(
+ Word32Equal(untagged_dividend,
+ Int32Constant(kSmiValueSize == 32 ? kMinInt
+ : (kMinInt >> 1))),
&bailout);
- assembler->Goto(&divisor_is_not_minus_one);
+ Goto(&divisor_is_not_minus_one);
}
- assembler->Bind(&divisor_is_not_minus_one);
+ Bind(&divisor_is_not_minus_one);
// TODO(epertoso): consider adding a machine instruction that returns
// both the result and the remainder.
- Node* untagged_result =
- assembler->Int32Div(untagged_dividend, untagged_divisor);
- Node* truncated =
- assembler->Int32Mul(untagged_result, untagged_divisor);
+ Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
+ Node* truncated = Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
- assembler->GotoIf(
- assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
- var_result.Bind(assembler->SmiTag(untagged_result));
- assembler->Goto(&end);
+ GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
+ var_result.Bind(SmiFromWord32(untagged_result));
+ Goto(&end);
// Bailout: convert {dividend} and {divisor} to double and do double
// division.
- assembler->Bind(&bailout);
+ Bind(&bailout);
{
- var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
- var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
- assembler->Goto(&do_fdiv);
+ var_dividend_float64.Bind(SmiToFloat64(dividend));
+ var_divisor_float64.Bind(SmiToFloat64(divisor));
+ Goto(&do_fdiv);
}
}
- assembler->Bind(&divisor_is_not_smi);
+ Bind(&divisor_is_not_smi);
{
- Node* divisor_map = assembler->LoadMap(divisor);
+ Node* divisor_map = LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
+ Label divisor_is_number(this),
+ divisor_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+ &divisor_is_not_number);
- assembler->Bind(&divisor_is_number);
+ Bind(&divisor_is_number);
{
// Convert {dividend} to a double and divide it with the value of
// {divisor}.
- var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fdiv);
+ var_dividend_float64.Bind(SmiToFloat64(dividend));
+ var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+ Goto(&do_fdiv);
}
- assembler->Bind(&divisor_is_not_number);
+ Bind(&divisor_is_not_number);
{
// Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_divisor.Bind(CallStub(callable, context, divisor));
+ Goto(&loop);
}
}
}
- assembler->Bind(&dividend_is_not_smi);
+ Bind(&dividend_is_not_smi);
{
- Node* dividend_map = assembler->LoadMap(dividend);
+ Node* dividend_map = LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- Label dividend_is_number(assembler),
- dividend_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(dividend_map, number_map),
- &dividend_is_number, &dividend_is_not_number);
+ Label dividend_is_number(this),
+ dividend_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
+ &dividend_is_not_number);
- assembler->Bind(&dividend_is_number);
+ Bind(&dividend_is_number);
{
// Check if {divisor} is a Smi.
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
+ Label divisor_is_smi(this), divisor_is_not_smi(this);
+ Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
- assembler->Bind(&divisor_is_smi);
+ Bind(&divisor_is_smi);
{
// Convert {divisor} to a double and use it for a floating point
// division.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
- assembler->Goto(&do_fdiv);
+ var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(SmiToFloat64(divisor));
+ Goto(&do_fdiv);
}
- assembler->Bind(&divisor_is_not_smi);
+ Bind(&divisor_is_not_smi);
{
- Node* divisor_map = assembler->LoadMap(divisor);
+ Node* divisor_map = LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
+ Label divisor_is_number(this),
+ divisor_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+ &divisor_is_not_number);
- assembler->Bind(&divisor_is_number);
+ Bind(&divisor_is_number);
{
// Both {dividend} and {divisor} are HeapNumbers. Load their values
// and divide them.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fdiv);
+ var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+ Goto(&do_fdiv);
}
- assembler->Bind(&divisor_is_not_number);
+ Bind(&divisor_is_not_number);
{
// Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_divisor.Bind(CallStub(callable, context, divisor));
+ Goto(&loop);
}
}
}
- assembler->Bind(&dividend_is_not_number);
+ Bind(&dividend_is_not_number);
{
// Convert {dividend} to a Number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_dividend.Bind(assembler->CallStub(callable, context, dividend));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_dividend.Bind(CallStub(callable, context, dividend));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_fdiv);
+ Bind(&do_fdiv);
{
- Node* value = assembler->Float64Div(var_dividend_float64.value(),
- var_divisor_float64.value());
- var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&end);
+ Node* value =
+ Float64Div(var_dividend_float64.value(), var_divisor_float64.value());
+ var_result.Bind(AllocateHeapNumberWithValue(value));
+ Goto(&end);
}
- assembler->Bind(&end);
- assembler->Return(var_result.value());
+ Bind(&end);
+ Return(var_result.value());
}
-void Builtins::Generate_Modulus(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(Modulus, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
-
- Variable var_result(assembler, MachineRepresentation::kTagged);
- Label return_result(assembler, &var_result);
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label return_result(this, &var_result);
// Shared entry point for floating point modulus.
- Label do_fmod(assembler);
- Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
- var_divisor_float64(assembler, MachineRepresentation::kFloat64);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ Label do_fmod(this);
+ Variable var_dividend_float64(this, MachineRepresentation::kFloat64),
+ var_divisor_float64(this, MachineRepresentation::kFloat64);
// We might need to loop one or two times due to ToNumber conversions.
- Variable var_dividend(assembler, MachineRepresentation::kTagged),
- var_divisor(assembler, MachineRepresentation::kTagged);
+ Variable var_dividend(this, MachineRepresentation::kTagged),
+ var_divisor(this, MachineRepresentation::kTagged);
Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(assembler, 2, loop_variables);
+ Label loop(this, 2, loop_variables);
var_dividend.Bind(left);
var_divisor.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
Node* dividend = var_dividend.value();
Node* divisor = var_divisor.value();
- Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
- &dividend_is_not_smi);
+ Label dividend_is_smi(this), dividend_is_not_smi(this);
+ Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
- assembler->Bind(&dividend_is_smi);
+ Bind(&dividend_is_smi);
{
- Label dividend_is_not_zero(assembler);
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
+ Label dividend_is_not_zero(this);
+ Label divisor_is_smi(this), divisor_is_not_smi(this);
+ Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
- assembler->Bind(&divisor_is_smi);
+ Bind(&divisor_is_smi);
{
// Compute the modulus of two Smis.
- var_result.Bind(assembler->SmiMod(dividend, divisor));
- assembler->Goto(&return_result);
+ var_result.Bind(SmiMod(dividend, divisor));
+ Goto(&return_result);
}
- assembler->Bind(&divisor_is_not_smi);
+ Bind(&divisor_is_not_smi);
{
- Node* divisor_map = assembler->LoadMap(divisor);
+ Node* divisor_map = LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
+ Label divisor_is_number(this),
+ divisor_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+ &divisor_is_not_number);
- assembler->Bind(&divisor_is_number);
+ Bind(&divisor_is_number);
{
        // Convert {dividend} to a double and compute its modulus with the
        // value of {divisor}.
- var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fmod);
+ var_dividend_float64.Bind(SmiToFloat64(dividend));
+ var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+ Goto(&do_fmod);
}
- assembler->Bind(&divisor_is_not_number);
+ Bind(&divisor_is_not_number);
{
// Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_divisor.Bind(CallStub(callable, context, divisor));
+ Goto(&loop);
}
}
}
- assembler->Bind(&dividend_is_not_smi);
+ Bind(&dividend_is_not_smi);
{
- Node* dividend_map = assembler->LoadMap(dividend);
+ Node* dividend_map = LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- Label dividend_is_number(assembler),
- dividend_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(dividend_map, number_map),
- &dividend_is_number, &dividend_is_not_number);
+ Label dividend_is_number(this),
+ dividend_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
+ &dividend_is_not_number);
- assembler->Bind(&dividend_is_number);
+ Bind(&dividend_is_number);
{
// Check if {divisor} is a Smi.
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
+ Label divisor_is_smi(this), divisor_is_not_smi(this);
+ Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
- assembler->Bind(&divisor_is_smi);
+ Bind(&divisor_is_smi);
{
// Convert {divisor} to a double and compute {dividend}'s modulus with
// it.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
- assembler->Goto(&do_fmod);
+ var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(SmiToFloat64(divisor));
+ Goto(&do_fmod);
}
- assembler->Bind(&divisor_is_not_smi);
+ Bind(&divisor_is_not_smi);
{
- Node* divisor_map = assembler->LoadMap(divisor);
+ Node* divisor_map = LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
+ Label divisor_is_number(this),
+ divisor_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+ &divisor_is_not_number);
- assembler->Bind(&divisor_is_number);
+ Bind(&divisor_is_number);
{
// Both {dividend} and {divisor} are HeapNumbers. Load their values
// and compute their modulus.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fmod);
+ var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+ Goto(&do_fmod);
}
- assembler->Bind(&divisor_is_not_number);
+ Bind(&divisor_is_not_number);
{
// Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_divisor.Bind(CallStub(callable, context, divisor));
+ Goto(&loop);
}
}
}
- assembler->Bind(&dividend_is_not_number);
+ Bind(&dividend_is_not_number);
{
// Convert {dividend} to a Number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_dividend.Bind(assembler->CallStub(callable, context, dividend));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_dividend.Bind(CallStub(callable, context, dividend));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_fmod);
+ Bind(&do_fmod);
{
- Node* value = assembler->Float64Mod(var_dividend_float64.value(),
- var_divisor_float64.value());
- var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&return_result);
+ Node* value =
+ Float64Mod(var_dividend_float64.value(), var_divisor_float64.value());
+ var_result.Bind(AllocateHeapNumberWithValue(value));
+ Goto(&return_result);
}
- assembler->Bind(&return_result);
- assembler->Return(var_result.value());
+ Bind(&return_result);
+ Return(var_result.value());
}
-void Builtins::Generate_ShiftLeft(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* shift_count =
- assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
- Node* value = assembler->Word32Shl(lhs_value, shift_count);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(ShiftLeft, NumberBuiltinsAssembler) {
+ BitwiseShiftOp([this](Node* lhs, Node* shift_count) {
+ return Word32Shl(lhs, shift_count);
+ });
}
-void Builtins::Generate_ShiftRight(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* shift_count =
- assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
- Node* value = assembler->Word32Sar(lhs_value, shift_count);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(ShiftRight, NumberBuiltinsAssembler) {
+ BitwiseShiftOp([this](Node* lhs, Node* shift_count) {
+ return Word32Sar(lhs, shift_count);
+ });
}
-void Builtins::Generate_ShiftRightLogical(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* shift_count =
- assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
- Node* value = assembler->Word32Shr(lhs_value, shift_count);
- Node* result = assembler->ChangeUint32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(ShiftRightLogical, NumberBuiltinsAssembler) {
+ BitwiseShiftOp<kUnsigned>([this](Node* lhs, Node* shift_count) {
+ return Word32Shr(lhs, shift_count);
+ });
}
-void Builtins::Generate_BitwiseAnd(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* value = assembler->Word32And(lhs_value, rhs_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(BitwiseAnd, NumberBuiltinsAssembler) {
+ BitwiseOp([this](Node* lhs, Node* rhs) { return Word32And(lhs, rhs); });
}
-void Builtins::Generate_BitwiseOr(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* value = assembler->Word32Or(lhs_value, rhs_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(BitwiseOr, NumberBuiltinsAssembler) {
+ BitwiseOp([this](Node* lhs, Node* rhs) { return Word32Or(lhs, rhs); });
}
-void Builtins::Generate_BitwiseXor(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* value = assembler->Word32Xor(lhs_value, rhs_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(BitwiseXor, NumberBuiltinsAssembler) {
+ BitwiseOp([this](Node* lhs, Node* rhs) { return Word32Xor(lhs, rhs); });
}
-void Builtins::Generate_LessThan(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- assembler->Return(assembler->RelationalComparison(
- CodeStubAssembler::kLessThan, lhs, rhs, context));
+TF_BUILTIN(LessThan, NumberBuiltinsAssembler) {
+ RelationalComparisonBuiltin(kLessThan);
}
-void Builtins::Generate_LessThanOrEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- assembler->Return(assembler->RelationalComparison(
- CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context));
+TF_BUILTIN(LessThanOrEqual, NumberBuiltinsAssembler) {
+ RelationalComparisonBuiltin(kLessThanOrEqual);
}
-void Builtins::Generate_GreaterThan(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- assembler->Return(assembler->RelationalComparison(
- CodeStubAssembler::kGreaterThan, lhs, rhs, context));
+TF_BUILTIN(GreaterThan, NumberBuiltinsAssembler) {
+ RelationalComparisonBuiltin(kGreaterThan);
}
-void Builtins::Generate_GreaterThanOrEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- assembler->Return(assembler->RelationalComparison(
- CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context));
+TF_BUILTIN(GreaterThanOrEqual, NumberBuiltinsAssembler) {
+ RelationalComparisonBuiltin(kGreaterThanOrEqual);
}
-void Builtins::Generate_Equal(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(Equal, CodeStubAssembler) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- assembler->Return(assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs,
- rhs, context));
+ Return(Equal(kDontNegateResult, lhs, rhs, context));
}
-void Builtins::Generate_NotEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(NotEqual, CodeStubAssembler) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- assembler->Return(
- assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context));
+ Return(Equal(kNegateResult, lhs, rhs, context));
}
-void Builtins::Generate_StrictEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(StrictEqual, CodeStubAssembler) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- assembler->Return(assembler->StrictEqual(CodeStubAssembler::kDontNegateResult,
- lhs, rhs, context));
+ Return(StrictEqual(kDontNegateResult, lhs, rhs, context));
}
-void Builtins::Generate_StrictNotEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(StrictNotEqual, CodeStubAssembler) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- assembler->Return(assembler->StrictEqual(CodeStubAssembler::kNegateResult,
- lhs, rhs, context));
+ Return(StrictEqual(kNegateResult, lhs, rhs, context));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index abb5c47555..74e0a20832 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -2,149 +2,68 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/property-descriptor.h"
namespace v8 {
namespace internal {
+class ObjectBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ void IsString(Node* object, Label* if_string, Label* if_notstring);
+ void ReturnToStringFormat(Node* context, Node* string);
+};
+
// -----------------------------------------------------------------------------
// ES6 section 19.1 Object Objects
-void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(ObjectHasOwnProperty, ObjectBuiltinsAssembler) {
+ Node* object = Parameter(0);
+ Node* key = Parameter(1);
+ Node* context = Parameter(4);
- Node* object = assembler->Parameter(0);
- Node* key = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
-
- Label call_runtime(assembler), return_true(assembler),
- return_false(assembler);
+ Label call_runtime(this), return_true(this), return_false(this);
// Smi receivers do not have own properties.
- Label if_objectisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(object), &return_false,
- &if_objectisnotsmi);
- assembler->Bind(&if_objectisnotsmi);
+ Label if_objectisnotsmi(this);
+ Branch(TaggedIsSmi(object), &return_false, &if_objectisnotsmi);
+ Bind(&if_objectisnotsmi);
- Node* map = assembler->LoadMap(object);
- Node* instance_type = assembler->LoadMapInstanceType(map);
+ Node* map = LoadMap(object);
+ Node* instance_type = LoadMapInstanceType(map);
- Variable var_index(assembler, MachineType::PointerRepresentation());
+ Variable var_index(this, MachineType::PointerRepresentation());
- Label keyisindex(assembler), if_iskeyunique(assembler);
- assembler->TryToName(key, &keyisindex, &var_index, &if_iskeyunique,
- &call_runtime);
+ Label keyisindex(this), if_iskeyunique(this);
+ TryToName(key, &keyisindex, &var_index, &if_iskeyunique, &call_runtime);
- assembler->Bind(&if_iskeyunique);
- assembler->TryHasOwnProperty(object, map, instance_type, key, &return_true,
- &return_false, &call_runtime);
+ Bind(&if_iskeyunique);
+ TryHasOwnProperty(object, map, instance_type, key, &return_true,
+ &return_false, &call_runtime);
- assembler->Bind(&keyisindex);
+ Bind(&keyisindex);
// Handle negative keys in the runtime.
- assembler->GotoIf(assembler->IntPtrLessThan(var_index.value(),
- assembler->IntPtrConstant(0)),
- &call_runtime);
- assembler->TryLookupElement(object, map, instance_type, var_index.value(),
- &return_true, &return_false, &call_runtime);
-
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
-
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
-
- assembler->Bind(&call_runtime);
- assembler->Return(assembler->CallRuntime(Runtime::kObjectHasOwnProperty,
- context, object, key));
-}
-
-namespace {
-
-MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
- Handle<Object> next_source) {
- // Non-empty strings are the only non-JSReceivers that need to be handled
- // explicitly by Object.assign.
- if (!next_source->IsJSReceiver()) {
- return Just(!next_source->IsString() ||
- String::cast(*next_source)->length() == 0);
- }
-
- // If the target is deprecated, the object will be updated on first store. If
- // the source for that store equals the target, this will invalidate the
- // cached representation of the source. Preventively upgrade the target.
- // Do this on each iteration since any property load could cause deprecation.
- if (to->map()->is_deprecated()) {
- JSObject::MigrateInstance(Handle<JSObject>::cast(to));
- }
-
- Isolate* isolate = to->GetIsolate();
- Handle<Map> map(JSReceiver::cast(*next_source)->map(), isolate);
+ GotoIf(IntPtrLessThan(var_index.value(), IntPtrConstant(0)), &call_runtime);
+ TryLookupElement(object, map, instance_type, var_index.value(), &return_true,
+ &return_false, &call_runtime);
- if (!map->IsJSObjectMap()) return Just(false);
- if (!map->OnlyHasSimpleProperties()) return Just(false);
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- Handle<JSObject> from = Handle<JSObject>::cast(next_source);
- if (from->elements() != isolate->heap()->empty_fixed_array()) {
- return Just(false);
- }
+ Bind(&return_false);
+ Return(BooleanConstant(false));
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- int length = map->NumberOfOwnDescriptors();
-
- bool stable = true;
-
- for (int i = 0; i < length; i++) {
- Handle<Name> next_key(descriptors->GetKey(i), isolate);
- Handle<Object> prop_value;
- // Directly decode from the descriptor array if |from| did not change shape.
- if (stable) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (!details.IsEnumerable()) continue;
- if (details.kind() == kData) {
- if (details.location() == kDescriptor) {
- prop_value = handle(descriptors->GetValue(i), isolate);
- } else {
- Representation representation = details.representation();
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- prop_value = JSObject::FastPropertyAt(from, representation, index);
- }
- } else {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, JSReceiver::GetProperty(from, next_key),
- Nothing<bool>());
- stable = from->map() == *map;
- }
- } else {
- // If the map did change, do a slower lookup. We are still guaranteed that
- // the object has a simple shape, and that the key is a name.
- LookupIterator it(from, next_key, from,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (!it.IsFound()) continue;
- DCHECK(it.state() == LookupIterator::DATA ||
- it.state() == LookupIterator::ACCESSOR);
- if (!it.IsEnumerable()) continue;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
- }
- LookupIterator it(to, next_key, to);
- bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
- Maybe<bool> result = Object::SetProperty(
- &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
- if (result.IsNothing()) return result;
- if (stable && call_to_js) stable = from->map() == *map;
- }
-
- return Just(true);
+ Bind(&call_runtime);
+ Return(CallRuntime(Runtime::kObjectHasOwnProperty, context, object, key));
}
-} // namespace
-
// ES6 19.1.2.1 Object.assign
BUILTIN(ObjectAssign) {
HandleScope scope(isolate);
@@ -160,44 +79,10 @@ BUILTIN(ObjectAssign) {
// second argument.
// 4. For each element nextSource of sources, in ascending index order,
for (int i = 2; i < args.length(); ++i) {
- Handle<Object> next_source = args.at<Object>(i);
- Maybe<bool> fast_assign = FastAssign(to, next_source);
- if (fast_assign.IsNothing()) return isolate->heap()->exception();
- if (fast_assign.FromJust()) continue;
- // 4a. If nextSource is undefined or null, let keys be an empty List.
- // 4b. Else,
- // 4b i. Let from be ToObject(nextSource).
- // Only non-empty strings and JSReceivers have enumerable properties.
- Handle<JSReceiver> from =
- Object::ToObject(isolate, next_source).ToHandleChecked();
- // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
- Handle<FixedArray> keys;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys, KeyAccumulator::GetKeys(
- from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
- GetKeysConversion::kKeepNumbers));
- // 4c. Repeat for each element nextKey of keys in List order,
- for (int j = 0; j < keys->length(); ++j) {
- Handle<Object> next_key(keys->get(j), isolate);
- // 4c i. Let desc be ? from.[[GetOwnProperty]](nextKey).
- PropertyDescriptor desc;
- Maybe<bool> found =
- JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
- if (found.IsNothing()) return isolate->heap()->exception();
- // 4c ii. If desc is not undefined and desc.[[Enumerable]] is true, then
- if (found.FromJust() && desc.enumerable()) {
- // 4c ii 1. Let propValue be ? Get(from, nextKey).
- Handle<Object> prop_value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, prop_value,
- Runtime::GetObjectProperty(isolate, from, next_key));
- // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
- Handle<Object> status;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, status, Runtime::SetObjectProperty(isolate, to, next_key,
- prop_value, STRICT));
- }
- }
+ Handle<Object> next_source = args.at(i);
+ MAYBE_RETURN(
+ JSReceiver::SetOrCopyDataProperties(isolate, to, next_source, true),
+ isolate->heap()->exception());
}
// 5. Return to.
return *to;
@@ -219,134 +104,90 @@ BUILTIN(ObjectPrototypePropertyIsEnumerable) {
return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
}
-namespace { // anonymous namespace for ObjectProtoToString()
-
-void IsString(CodeStubAssembler* assembler, compiler::Node* object,
- CodeStubAssembler::Label* if_string,
- CodeStubAssembler::Label* if_notstring) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
+void ObjectBuiltinsAssembler::IsString(Node* object, Label* if_string,
+ Label* if_notstring) {
+ Label if_notsmi(this);
+ Branch(TaggedIsSmi(object), if_notstring, &if_notsmi);
- Label if_notsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(object), if_notstring, &if_notsmi);
-
- assembler->Bind(&if_notsmi);
+ Bind(&if_notsmi);
{
- Node* instance_type = assembler->LoadInstanceType(object);
+ Node* instance_type = LoadInstanceType(object);
- assembler->Branch(assembler->IsStringInstanceType(instance_type), if_string,
- if_notstring);
+ Branch(IsStringInstanceType(instance_type), if_string, if_notstring);
}
}
-void ReturnToStringFormat(CodeStubAssembler* assembler, compiler::Node* context,
- compiler::Node* string) {
- typedef compiler::Node Node;
-
- Node* lhs = assembler->HeapConstant(
- assembler->factory()->NewStringFromStaticChars("[object "));
- Node* rhs = assembler->HeapConstant(
- assembler->factory()->NewStringFromStaticChars("]"));
+void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
+ Node* string) {
+ Node* lhs = HeapConstant(factory()->NewStringFromStaticChars("[object "));
+ Node* rhs = HeapConstant(factory()->NewStringFromStaticChars("]"));
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ Callable callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
- assembler->Return(assembler->CallStub(
- callable, context, assembler->CallStub(callable, context, lhs, string),
- rhs));
+ Return(CallStub(callable, context, CallStub(callable, context, lhs, string),
+ rhs));
}
-void ReturnIfPrimitive(CodeStubAssembler* assembler,
- compiler::Node* instance_type,
- CodeStubAssembler::Label* return_string,
- CodeStubAssembler::Label* return_boolean,
- CodeStubAssembler::Label* return_number) {
- assembler->GotoIf(assembler->IsStringInstanceType(instance_type),
- return_string);
-
- assembler->GotoIf(assembler->Word32Equal(
- instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
- return_boolean);
-
- assembler->GotoIf(
- assembler->Word32Equal(instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- return_number);
-}
-
-} // namespace
-
// ES6 section 19.1.3.6 Object.prototype.toString
-void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- Label return_undefined(assembler, Label::kDeferred),
- return_null(assembler, Label::kDeferred),
- return_arguments(assembler, Label::kDeferred), return_array(assembler),
- return_api(assembler, Label::kDeferred), return_object(assembler),
- return_regexp(assembler), return_function(assembler),
- return_error(assembler), return_date(assembler), return_string(assembler),
- return_boolean(assembler), return_jsvalue(assembler),
- return_jsproxy(assembler, Label::kDeferred), return_number(assembler);
+TF_BUILTIN(ObjectProtoToString, ObjectBuiltinsAssembler) {
+ Label return_undefined(this, Label::kDeferred),
+ return_null(this, Label::kDeferred),
+ return_arguments(this, Label::kDeferred), return_array(this),
+ return_api(this, Label::kDeferred), return_object(this),
+ return_regexp(this), return_function(this), return_error(this),
+ return_date(this), return_jsvalue(this),
+ return_jsproxy(this, Label::kDeferred);
- Label if_isproxy(assembler, Label::kDeferred);
+ Label if_isproxy(this, Label::kDeferred);
- Label checkstringtag(assembler);
- Label if_tostringtag(assembler), if_notostringtag(assembler);
+ Label checkstringtag(this);
+ Label if_tostringtag(this), if_notostringtag(this);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
- assembler->GotoIf(
- assembler->WordEqual(receiver, assembler->UndefinedConstant()),
- &return_undefined);
+ GotoIf(WordEqual(receiver, UndefinedConstant()), &return_undefined);
- assembler->GotoIf(assembler->WordEqual(receiver, assembler->NullConstant()),
- &return_null);
+ GotoIf(WordEqual(receiver, NullConstant()), &return_null);
- assembler->GotoIf(assembler->TaggedIsSmi(receiver), &return_number);
+ Callable to_object = CodeFactory::ToObject(isolate());
+ receiver = CallStub(to_object, context, receiver);
- Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
- ReturnIfPrimitive(assembler, receiver_instance_type, &return_string,
- &return_boolean, &return_number);
+ Node* receiver_instance_type = LoadInstanceType(receiver);
// for proxies, check IsArray before getting @@toStringTag
- Variable var_proxy_is_array(assembler, MachineRepresentation::kTagged);
- var_proxy_is_array.Bind(assembler->BooleanConstant(false));
+ Variable var_proxy_is_array(this, MachineRepresentation::kTagged);
+ var_proxy_is_array.Bind(BooleanConstant(false));
- assembler->Branch(
- assembler->Word32Equal(receiver_instance_type,
- assembler->Int32Constant(JS_PROXY_TYPE)),
- &if_isproxy, &checkstringtag);
+ Branch(Word32Equal(receiver_instance_type, Int32Constant(JS_PROXY_TYPE)),
+ &if_isproxy, &checkstringtag);
- assembler->Bind(&if_isproxy);
+ Bind(&if_isproxy);
{
// This can throw
var_proxy_is_array.Bind(
- assembler->CallRuntime(Runtime::kArrayIsArray, context, receiver));
- assembler->Goto(&checkstringtag);
+ CallRuntime(Runtime::kArrayIsArray, context, receiver));
+ Goto(&checkstringtag);
}
- assembler->Bind(&checkstringtag);
+ Bind(&checkstringtag);
{
- Node* to_string_tag_symbol = assembler->HeapConstant(
- assembler->isolate()->factory()->to_string_tag_symbol());
+ Node* to_string_tag_symbol =
+ HeapConstant(isolate()->factory()->to_string_tag_symbol());
- GetPropertyStub stub(assembler->isolate());
+ GetPropertyStub stub(isolate());
Callable get_property =
Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
- Node* to_string_tag_value = assembler->CallStub(
- get_property, context, receiver, to_string_tag_symbol);
+ Node* to_string_tag_value =
+ CallStub(get_property, context, receiver, to_string_tag_symbol);
- IsString(assembler, to_string_tag_value, &if_tostringtag,
- &if_notostringtag);
+ IsString(to_string_tag_value, &if_tostringtag, &if_notostringtag);
- assembler->Bind(&if_tostringtag);
- ReturnToStringFormat(assembler, context, to_string_tag_value);
+ Bind(&if_tostringtag);
+ ReturnToStringFormat(context, to_string_tag_value);
}
- assembler->Bind(&if_notostringtag);
+ Bind(&if_notostringtag);
{
size_t const kNumCases = 11;
Label* case_labels[kNumCases];
@@ -374,178 +215,164 @@ void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
case_labels[10] = &return_jsproxy;
case_values[10] = JS_PROXY_TYPE;
- assembler->Switch(receiver_instance_type, &return_object, case_values,
- case_labels, arraysize(case_values));
+ Switch(receiver_instance_type, &return_object, case_values, case_labels,
+ arraysize(case_values));
- assembler->Bind(&return_undefined);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->undefined_to_string()));
+ Bind(&return_undefined);
+ Return(HeapConstant(isolate()->factory()->undefined_to_string()));
- assembler->Bind(&return_null);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->null_to_string()));
+ Bind(&return_null);
+ Return(HeapConstant(isolate()->factory()->null_to_string()));
- assembler->Bind(&return_number);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->number_to_string()));
+ Bind(&return_arguments);
+ Return(HeapConstant(isolate()->factory()->arguments_to_string()));
- assembler->Bind(&return_string);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->string_to_string()));
+ Bind(&return_array);
+ Return(HeapConstant(isolate()->factory()->array_to_string()));
- assembler->Bind(&return_boolean);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->boolean_to_string()));
+ Bind(&return_function);
+ Return(HeapConstant(isolate()->factory()->function_to_string()));
- assembler->Bind(&return_arguments);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->arguments_to_string()));
+ Bind(&return_error);
+ Return(HeapConstant(isolate()->factory()->error_to_string()));
- assembler->Bind(&return_array);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->array_to_string()));
+ Bind(&return_date);
+ Return(HeapConstant(isolate()->factory()->date_to_string()));
- assembler->Bind(&return_function);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->function_to_string()));
+ Bind(&return_regexp);
+ Return(HeapConstant(isolate()->factory()->regexp_to_string()));
- assembler->Bind(&return_error);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->error_to_string()));
-
- assembler->Bind(&return_date);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->date_to_string()));
-
- assembler->Bind(&return_regexp);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->regexp_to_string()));
-
- assembler->Bind(&return_api);
+ Bind(&return_api);
{
- Node* class_name =
- assembler->CallRuntime(Runtime::kClassOf, context, receiver);
- ReturnToStringFormat(assembler, context, class_name);
+ Node* class_name = CallRuntime(Runtime::kClassOf, context, receiver);
+ ReturnToStringFormat(context, class_name);
}
- assembler->Bind(&return_jsvalue);
+ Bind(&return_jsvalue);
{
- Node* value = assembler->LoadJSValueValue(receiver);
- assembler->GotoIf(assembler->TaggedIsSmi(value), &return_number);
+ Label return_boolean(this), return_number(this), return_string(this);
+
+ Node* value = LoadJSValueValue(receiver);
+ GotoIf(TaggedIsSmi(value), &return_number);
+ Node* instance_type = LoadInstanceType(value);
- ReturnIfPrimitive(assembler, assembler->LoadInstanceType(value),
- &return_string, &return_boolean, &return_number);
- assembler->Goto(&return_object);
+ GotoIf(IsStringInstanceType(instance_type), &return_string);
+ GotoIf(Word32Equal(instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ &return_number);
+ GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)),
+ &return_boolean);
+
+ CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
+ Goto(&return_object);
+
+ Bind(&return_string);
+ Return(HeapConstant(isolate()->factory()->string_to_string()));
+
+ Bind(&return_number);
+ Return(HeapConstant(isolate()->factory()->number_to_string()));
+
+ Bind(&return_boolean);
+ Return(HeapConstant(isolate()->factory()->boolean_to_string()));
}
- assembler->Bind(&return_jsproxy);
+ Bind(&return_jsproxy);
{
- assembler->GotoIf(assembler->WordEqual(var_proxy_is_array.value(),
- assembler->BooleanConstant(true)),
- &return_array);
+ GotoIf(WordEqual(var_proxy_is_array.value(), BooleanConstant(true)),
+ &return_array);
- Node* map = assembler->LoadMap(receiver);
+ Node* map = LoadMap(receiver);
// Return object if the proxy {receiver} is not callable.
- assembler->Branch(assembler->IsCallableMap(map), &return_function,
- &return_object);
+ Branch(IsCallableMap(map), &return_function, &return_object);
}
// Default
- assembler->Bind(&return_object);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->object_to_string()));
+ Bind(&return_object);
+ Return(HeapConstant(isolate()->factory()->object_to_string()));
}
}
-void Builtins::Generate_ObjectCreate(CodeStubAssembler* a) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
+ Node* prototype = Parameter(1);
+ Node* properties = Parameter(2);
+ Node* context = Parameter(3 + 2);
- Node* prototype = a->Parameter(1);
- Node* properties = a->Parameter(2);
- Node* context = a->Parameter(3 + 2);
-
- Label call_runtime(a, Label::kDeferred), prototype_valid(a), no_properties(a);
+ Label call_runtime(this, Label::kDeferred), prototype_valid(this),
+ no_properties(this);
{
- a->Comment("Argument 1 check: prototype");
- a->GotoIf(a->WordEqual(prototype, a->NullConstant()), &prototype_valid);
- a->BranchIfJSReceiver(prototype, &prototype_valid, &call_runtime);
+ Comment("Argument 1 check: prototype");
+ GotoIf(WordEqual(prototype, NullConstant()), &prototype_valid);
+ BranchIfJSReceiver(prototype, &prototype_valid, &call_runtime);
}
- a->Bind(&prototype_valid);
+ Bind(&prototype_valid);
{
- a->Comment("Argument 2 check: properties");
+ Comment("Argument 2 check: properties");
// Check that we have a simple object
- a->GotoIf(a->TaggedIsSmi(properties), &call_runtime);
+ GotoIf(TaggedIsSmi(properties), &call_runtime);
// Undefined implies no properties.
- a->GotoIf(a->WordEqual(properties, a->UndefinedConstant()), &no_properties);
- Node* properties_map = a->LoadMap(properties);
- a->GotoIf(a->IsSpecialReceiverMap(properties_map), &call_runtime);
+ GotoIf(WordEqual(properties, UndefinedConstant()), &no_properties);
+ Node* properties_map = LoadMap(properties);
+ GotoIf(IsSpecialReceiverMap(properties_map), &call_runtime);
// Stay on the fast path only if there are no elements.
- a->GotoUnless(a->WordEqual(a->LoadElements(properties),
- a->LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
- &call_runtime);
+ GotoUnless(WordEqual(LoadElements(properties),
+ LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
+ &call_runtime);
// Handle dictionary objects or fast objects with properties in runtime.
- Node* bit_field3 = a->LoadMapBitField3(properties_map);
- a->GotoIf(a->IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
- a->Branch(a->IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
- &call_runtime, &no_properties);
+ Node* bit_field3 = LoadMapBitField3(properties_map);
+ GotoIf(IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
+ Branch(IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
+ &call_runtime, &no_properties);
}
// Create a new object with the given prototype.
- a->Bind(&no_properties);
+ Bind(&no_properties);
{
- Variable map(a, MachineRepresentation::kTagged);
- Variable properties(a, MachineRepresentation::kTagged);
- Label non_null_proto(a), instantiate_map(a), good(a);
+ Variable map(this, MachineRepresentation::kTagged);
+ Variable properties(this, MachineRepresentation::kTagged);
+ Label non_null_proto(this), instantiate_map(this), good(this);
- a->Branch(a->WordEqual(prototype, a->NullConstant()), &good,
- &non_null_proto);
+ Branch(WordEqual(prototype, NullConstant()), &good, &non_null_proto);
- a->Bind(&good);
+ Bind(&good);
{
- map.Bind(a->LoadContextElement(
+ map.Bind(LoadContextElement(
context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
- properties.Bind(
- a->AllocateNameDictionary(NameDictionary::kInitialCapacity));
- a->Goto(&instantiate_map);
+ properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity));
+ Goto(&instantiate_map);
}
- a->Bind(&non_null_proto);
+ Bind(&non_null_proto);
{
- properties.Bind(a->EmptyFixedArrayConstant());
+ properties.Bind(EmptyFixedArrayConstant());
Node* object_function =
- a->LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX);
- Node* object_function_map = a->LoadObjectField(
+ LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX);
+ Node* object_function_map = LoadObjectField(
object_function, JSFunction::kPrototypeOrInitialMapOffset);
map.Bind(object_function_map);
- a->GotoIf(a->WordEqual(prototype, a->LoadMapPrototype(map.value())),
- &instantiate_map);
+ GotoIf(WordEqual(prototype, LoadMapPrototype(map.value())),
+ &instantiate_map);
// Try loading the prototype info.
Node* prototype_info =
- a->LoadMapPrototypeInfo(a->LoadMap(prototype), &call_runtime);
- a->Comment("Load ObjectCreateMap from PrototypeInfo");
+ LoadMapPrototypeInfo(LoadMap(prototype), &call_runtime);
+ Comment("Load ObjectCreateMap from PrototypeInfo");
Node* weak_cell =
- a->LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
- a->GotoIf(a->WordEqual(weak_cell, a->UndefinedConstant()), &call_runtime);
- map.Bind(a->LoadWeakCellValue(weak_cell, &call_runtime));
- a->Goto(&instantiate_map);
+ LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
+ GotoIf(WordEqual(weak_cell, UndefinedConstant()), &call_runtime);
+ map.Bind(LoadWeakCellValue(weak_cell, &call_runtime));
+ Goto(&instantiate_map);
}
- a->Bind(&instantiate_map);
+ Bind(&instantiate_map);
{
- Node* instance =
- a->AllocateJSObjectFromMap(map.value(), properties.value());
- a->Return(instance);
+ Node* instance = AllocateJSObjectFromMap(map.value(), properties.value());
+ Return(instance);
}
}
- a->Bind(&call_runtime);
+ Bind(&call_runtime);
{
- a->Return(
- a->CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
+ Return(CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
}
}
@@ -553,8 +380,8 @@ void Builtins::Generate_ObjectCreate(CodeStubAssembler* a) {
BUILTIN(ObjectDefineProperties) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> properties = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> properties = args.at(2);
RETURN_RESULT_OR_FAILURE(
isolate, JSReceiver::DefineProperties(isolate, target, properties));
@@ -564,9 +391,9 @@ BUILTIN(ObjectDefineProperties) {
BUILTIN(ObjectDefineProperty) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
- Handle<Object> attributes = args.at<Object>(3);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
+ Handle<Object> attributes = args.at(3);
return JSReceiver::DefineProperty(isolate, target, key, attributes);
}
@@ -640,13 +467,33 @@ Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
- case LookupIterator::JSPROXY:
- return isolate->heap()->undefined_value();
+ case LookupIterator::JSPROXY: {
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
+ isolate, it.GetHolder<JSProxy>(), it.GetName(), &desc);
+ MAYBE_RETURN(found, isolate->heap()->exception());
+ if (found.FromJust()) {
+ if (component == ACCESSOR_GETTER && desc.has_get()) {
+ return *desc.get();
+ }
+ if (component == ACCESSOR_SETTER && desc.has_set()) {
+ return *desc.set();
+ }
+ return isolate->heap()->undefined_value();
+ }
+ Handle<Object> prototype;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, prototype, JSProxy::GetPrototype(it.GetHolder<JSProxy>()));
+ if (prototype->IsNull(isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+ return ObjectLookupAccessor(isolate, prototype, key, component);
+ }
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return isolate->heap()->undefined_value();
case LookupIterator::DATA:
- continue;
+ return isolate->heap()->undefined_value();
+
case LookupIterator::ACCESSOR: {
Handle<Object> maybe_pair = it.GetAccessors();
if (maybe_pair->IsAccessorPair()) {
@@ -666,9 +513,9 @@ Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
// https://tc39.github.io/ecma262/#sec-object.prototype.__defineGetter__
BUILTIN(ObjectDefineGetter) {
HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0); // Receiver.
- Handle<Object> name = args.at<Object>(1);
- Handle<Object> getter = args.at<Object>(2);
+ Handle<Object> object = args.at(0); // Receiver.
+ Handle<Object> name = args.at(1);
+ Handle<Object> getter = args.at(2);
return ObjectDefineAccessor<ACCESSOR_GETTER>(isolate, object, name, getter);
}
@@ -676,9 +523,9 @@ BUILTIN(ObjectDefineGetter) {
// https://tc39.github.io/ecma262/#sec-object.prototype.__defineSetter__
BUILTIN(ObjectDefineSetter) {
HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0); // Receiver.
- Handle<Object> name = args.at<Object>(1);
- Handle<Object> setter = args.at<Object>(2);
+ Handle<Object> object = args.at(0); // Receiver.
+ Handle<Object> name = args.at(1);
+ Handle<Object> setter = args.at(2);
return ObjectDefineAccessor<ACCESSOR_SETTER>(isolate, object, name, setter);
}
@@ -686,8 +533,8 @@ BUILTIN(ObjectDefineSetter) {
// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupGetter__
BUILTIN(ObjectLookupGetter) {
HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> name = args.at<Object>(1);
+ Handle<Object> object = args.at(0);
+ Handle<Object> name = args.at(1);
return ObjectLookupAccessor(isolate, object, name, ACCESSOR_GETTER);
}
@@ -695,8 +542,8 @@ BUILTIN(ObjectLookupGetter) {
// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupSetter__
BUILTIN(ObjectLookupSetter) {
HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> name = args.at<Object>(1);
+ Handle<Object> object = args.at(0);
+ Handle<Object> name = args.at(1);
return ObjectLookupAccessor(isolate, object, name, ACCESSOR_SETTER);
}
@@ -731,7 +578,7 @@ BUILTIN(ObjectSetPrototypeOf) {
// 1. Let O be ? RequireObjectCoercible(O).
Handle<Object> object = args.atOrUndefined(isolate, 1);
- if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+ if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
@@ -777,7 +624,7 @@ BUILTIN(ObjectPrototypeSetProto) {
HandleScope scope(isolate);
// 1. Let O be ? RequireObjectCoercible(this value).
Handle<Object> object = args.receiver();
- if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+ if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
@@ -785,7 +632,7 @@ BUILTIN(ObjectPrototypeSetProto) {
}
// 2. If Type(proto) is neither Object nor Null, return undefined.
- Handle<Object> proto = args.at<Object>(1);
+ Handle<Object> proto = args.at(1);
if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
return isolate->heap()->undefined_value();
}
@@ -860,8 +707,8 @@ BUILTIN(ObjectGetOwnPropertySymbols) {
BUILTIN(ObjectIs) {
SealHandleScope shs(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> value1 = args.at<Object>(1);
- Handle<Object> value2 = args.at<Object>(2);
+ Handle<Object> value1 = args.at(1);
+ Handle<Object> value2 = args.at(2);
return isolate->heap()->ToBoolean(value1->SameValue(*value2));
}
@@ -1022,50 +869,73 @@ BUILTIN(ObjectSeal) {
return *object;
}
-void Builtins::Generate_HasProperty(CodeStubAssembler* assembler) {
+TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
+ typedef CreateIterResultObjectDescriptor Descriptor;
+
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const done = Parameter(Descriptor::kDone);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ Node* const result = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value);
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done);
+
+ Return(result);
+}
+
+TF_BUILTIN(HasProperty, ObjectBuiltinsAssembler) {
typedef HasPropertyDescriptor Descriptor;
- typedef compiler::Node Node;
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* key = Parameter(Descriptor::kKey);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
- assembler->Return(
- assembler->HasProperty(object, key, context, Runtime::kHasProperty));
+ Return(HasProperty(object, key, context, Runtime::kHasProperty));
}
-void Builtins::Generate_ForInFilter(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(ForInFilter, ObjectBuiltinsAssembler) {
typedef ForInFilterDescriptor Descriptor;
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* key = Parameter(Descriptor::kKey);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
- assembler->Return(assembler->ForInFilter(key, object, context));
+ Return(ForInFilter(key, object, context));
}
-void Builtins::Generate_InstanceOf(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) {
typedef CompareDescriptor Descriptor;
- Node* object = assembler->Parameter(Descriptor::kLeft);
- Node* callable = assembler->Parameter(Descriptor::kRight);
- Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->Return(assembler->InstanceOf(object, callable, context));
+ Node* object = Parameter(Descriptor::kLeft);
+ Node* callable = Parameter(Descriptor::kRight);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Return(InstanceOf(object, callable, context));
}
// ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
-void Builtins::Generate_OrdinaryHasInstance(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(OrdinaryHasInstance, ObjectBuiltinsAssembler) {
typedef CompareDescriptor Descriptor;
- Node* constructor = assembler->Parameter(Descriptor::kLeft);
- Node* object = assembler->Parameter(Descriptor::kRight);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* constructor = Parameter(Descriptor::kLeft);
+ Node* object = Parameter(Descriptor::kRight);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Return(OrdinaryHasInstance(context, constructor, object));
+}
+
+TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) {
+ typedef TypeofDescriptor Descriptor;
+
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
- assembler->Return(
- assembler->OrdinaryHasInstance(context, constructor, object));
+ Return(GetSuperConstructor(object, context));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
index 9f5d7c88d7..8a2eab06fc 100644
--- a/deps/v8/src/builtins/builtins-promise.cc
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -2,82 +2,1554 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-promise.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-
-#include "src/promise-utils.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
-// ES#sec-promise-resolve-functions
-// Promise Resolve Functions
-BUILTIN(PromiseResolveClosure) {
- HandleScope scope(isolate);
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const initial_map =
+ LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const instance = AllocateJSObjectFromMap(initial_map);
+ return instance;
+}
+
+void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
+ StoreObjectField(promise, JSPromise::kStatusOffset,
+ SmiConstant(v8::Promise::kPending));
+ StoreObjectField(promise, JSPromise::kFlagsOffset, SmiConstant(0));
+}
+
+Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context) {
+ return AllocateAndInitJSPromise(context, UndefinedConstant());
+}
+
+Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
+ Node* parent) {
+ Node* const instance = AllocateJSPromise(context);
+ PromiseInit(instance);
+
+ Label out(this);
+ GotoUnless(IsPromiseHookEnabled(), &out);
+ CallRuntime(Runtime::kPromiseHookInit, context, instance, parent);
+ Goto(&out);
- Handle<Context> context(isolate->context(), isolate);
+ Bind(&out);
+ return instance;
+}
+
+Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(Node* context,
+ Node* status,
+ Node* result) {
+ CSA_ASSERT(this, TaggedIsSmi(status));
+
+ Node* const instance = AllocateJSPromise(context);
- if (PromiseUtils::HasAlreadyVisited(context)) {
- return isolate->heap()->undefined_value();
+ StoreObjectFieldNoWriteBarrier(instance, JSPromise::kStatusOffset, status);
+ StoreObjectFieldNoWriteBarrier(instance, JSPromise::kResultOffset, result);
+ StoreObjectFieldNoWriteBarrier(instance, JSPromise::kFlagsOffset,
+ SmiConstant(0));
+
+ Label out(this);
+ GotoUnless(IsPromiseHookEnabled(), &out);
+ CallRuntime(Runtime::kPromiseHookInit, context, instance,
+ UndefinedConstant());
+ Goto(&out);
+
+ Bind(&out);
+ return instance;
+}
+
+std::pair<Node*, Node*>
+PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions(
+ Node* promise, Node* debug_event, Node* native_context) {
+ Node* const promise_context = CreatePromiseResolvingFunctionsContext(
+ promise, debug_event, native_context);
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const resolve_info =
+ LoadContextElement(native_context, Context::PROMISE_RESOLVE_SHARED_FUN);
+ Node* const resolve =
+ AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
+ Node* const reject_info =
+ LoadContextElement(native_context, Context::PROMISE_REJECT_SHARED_FUN);
+ Node* const reject =
+ AllocateFunctionWithMapAndContext(map, reject_info, promise_context);
+ return std::make_pair(resolve, reject);
+}
+
+Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
+ Node* constructor,
+ Node* debug_event) {
+ if (debug_event == nullptr) {
+ debug_event = TrueConstant();
}
- PromiseUtils::SetAlreadyVisited(context);
- Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Node* native_context = LoadNativeContext(context);
+
+ Node* map = LoadRoot(Heap::kJSPromiseCapabilityMapRootIndex);
+ Node* capability = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(
+ capability, JSPromiseCapability::kPromiseOffset, UndefinedConstant());
+ StoreObjectFieldNoWriteBarrier(
+ capability, JSPromiseCapability::kResolveOffset, UndefinedConstant());
+ StoreObjectFieldNoWriteBarrier(capability, JSPromiseCapability::kRejectOffset,
+ UndefinedConstant());
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ var_result.Bind(capability);
+
+ Label if_builtin_promise(this), if_custom_promise(this, Label::kDeferred),
+ out(this);
+ Branch(WordEqual(constructor,
+ LoadContextElement(native_context,
+ Context::PROMISE_FUNCTION_INDEX)),
+ &if_builtin_promise, &if_custom_promise);
+
+ Bind(&if_builtin_promise);
+ {
+ Node* promise = AllocateJSPromise(context);
+ PromiseInit(promise);
+ StoreObjectFieldNoWriteBarrier(
+ capability, JSPromiseCapability::kPromiseOffset, promise);
+
+ Node* resolve = nullptr;
+ Node* reject = nullptr;
+
+ std::tie(resolve, reject) =
+ CreatePromiseResolvingFunctions(promise, debug_event, native_context);
+ StoreObjectField(capability, JSPromiseCapability::kResolveOffset, resolve);
+ StoreObjectField(capability, JSPromiseCapability::kRejectOffset, reject);
+
+ GotoUnless(IsPromiseHookEnabled(), &out);
+ CallRuntime(Runtime::kPromiseHookInit, context, promise,
+ UndefinedConstant());
+ Goto(&out);
+ }
+
+ Bind(&if_custom_promise);
+ {
+ Label if_notcallable(this, Label::kDeferred);
+ Node* executor_context =
+ CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
+ Node* executor_info = LoadContextElement(
+ native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN);
+ Node* function_map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* executor = AllocateFunctionWithMapAndContext(
+ function_map, executor_info, executor_context);
+
+ Node* promise = ConstructJS(CodeFactory::Construct(isolate()), context,
+ constructor, executor);
+
+ Node* resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ GotoIf(TaggedIsSmi(resolve), &if_notcallable);
+ GotoUnless(IsCallableMap(LoadMap(resolve)), &if_notcallable);
+
+ Node* reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ GotoIf(TaggedIsSmi(reject), &if_notcallable);
+ GotoUnless(IsCallableMap(LoadMap(reject)), &if_notcallable);
+
+ StoreObjectField(capability, JSPromiseCapability::kPromiseOffset, promise);
+
+ Goto(&out);
+
+ Bind(&if_notcallable);
+ Node* message = SmiConstant(MessageTemplate::kPromiseNonCallable);
+ StoreObjectField(capability, JSPromiseCapability::kPromiseOffset,
+ UndefinedConstant());
+ StoreObjectField(capability, JSPromiseCapability::kResolveOffset,
+ UndefinedConstant());
+ StoreObjectField(capability, JSPromiseCapability::kRejectOffset,
+ UndefinedConstant());
+ CallRuntime(Runtime::kThrowTypeError, context, message);
+ var_result.Bind(UndefinedConstant());
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
+ int slots) {
+ DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
+
+ Node* const context = Allocate(FixedArray::SizeFor(slots));
+ StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
+ SmiConstant(slots));
+
+ Node* const empty_fn =
+ LoadContextElement(native_context, Context::CLOSURE_INDEX);
+ StoreContextElementNoWriteBarrier(context, Context::CLOSURE_INDEX, empty_fn);
+ StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX,
+ UndefinedConstant());
+ StoreContextElementNoWriteBarrier(context, Context::EXTENSION_INDEX,
+ TheHoleConstant());
+ StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX,
+ native_context);
+ return context;
+}
- MaybeHandle<Object> maybe_result;
- Handle<Object> argv[] = {promise, value};
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Execution::Call(isolate, isolate->promise_resolve(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv));
- return isolate->heap()->undefined_value();
+Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
+ Node* promise, Node* debug_event, Node* native_context) {
+ Node* const context =
+ CreatePromiseContext(native_context, kPromiseContextLength);
+ StoreContextElementNoWriteBarrier(context, kAlreadyVisitedSlot,
+ SmiConstant(0));
+ StoreContextElementNoWriteBarrier(context, kPromiseSlot, promise);
+ StoreContextElementNoWriteBarrier(context, kDebugEventSlot, debug_event);
+ return context;
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseGetCapabilitiesExecutorContext(
+ Node* promise_capability, Node* native_context) {
+ int kContextLength = kCapabilitiesContextLength;
+ Node* context = CreatePromiseContext(native_context, kContextLength);
+ StoreContextElementNoWriteBarrier(context, kCapabilitySlot,
+ promise_capability);
+ return context;
+}
+
+Node* PromiseBuiltinsAssembler::ThrowIfNotJSReceiver(
+ Node* context, Node* value, MessageTemplate::Template msg_template,
+ const char* method_name) {
+ Label out(this), throw_exception(this, Label::kDeferred);
+ Variable var_value_map(this, MachineRepresentation::kTagged);
+
+ GotoIf(TaggedIsSmi(value), &throw_exception);
+
+ // Load the instance type of the {value}.
+ var_value_map.Bind(LoadMap(value));
+ Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
+
+ Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
+
+ // The {value} is not a compatible receiver for this method.
+ Bind(&throw_exception);
+ {
+ Node* const method =
+ method_name == nullptr
+ ? UndefinedConstant()
+ : HeapConstant(
+ isolate()->factory()->NewStringFromAsciiChecked(method_name));
+ Node* const message_id = SmiConstant(msg_template);
+ CallRuntime(Runtime::kThrowTypeError, context, message_id, method);
+ var_value_map.Bind(UndefinedConstant());
+ Goto(&out); // Never reached.
+ }
+
+ Bind(&out);
+ return var_value_map.value();
+}
+
+Node* PromiseBuiltinsAssembler::PromiseHasHandler(Node* promise) {
+ Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
+ return IsSetWord(SmiUntag(flags), 1 << JSPromise::kHasHandlerBit);
+}
+
+void PromiseBuiltinsAssembler::PromiseSetHasHandler(Node* promise) {
+ Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
+ Node* const new_flags =
+ SmiOr(flags, SmiConstant(1 << JSPromise::kHasHandlerBit));
+ StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
+}
+
+Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
+ Node* default_constructor) {
+ Isolate* isolate = this->isolate();
+ Variable var_result(this, MachineRepresentation::kTagged);
+ var_result.Bind(default_constructor);
+
+ // 2. Let C be ? Get(O, "constructor").
+ Node* const constructor_str =
+ HeapConstant(isolate->factory()->constructor_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const constructor =
+ CallStub(getproperty_callable, context, object, constructor_str);
+
+ // 3. If C is undefined, return defaultConstructor.
+ Label out(this);
+ GotoIf(IsUndefined(constructor), &out);
+
+ // 4. If Type(C) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, constructor,
+ MessageTemplate::kConstructorNotReceiver);
+
+ // 5. Let S be ? Get(C, @@species).
+ Node* const species_symbol =
+ HeapConstant(isolate->factory()->species_symbol());
+ Node* const species =
+ CallStub(getproperty_callable, context, constructor, species_symbol);
+
+ // 6. If S is either undefined or null, return defaultConstructor.
+ GotoIf(IsUndefined(species), &out);
+ GotoIf(WordEqual(species, NullConstant()), &out);
+
+ // 7. If IsConstructor(S) is true, return S.
+ Label throw_error(this);
+ Node* species_bitfield = LoadMapBitField(LoadMap(species));
+ GotoUnless(Word32Equal(Word32And(species_bitfield,
+ Int32Constant((1 << Map::kIsConstructor))),
+ Int32Constant(1 << Map::kIsConstructor)),
+ &throw_error);
+ var_result.Bind(species);
+ Goto(&out);
+
+ // 8. Throw a TypeError exception.
+ Bind(&throw_error);
+ {
+ Node* const message_id =
+ SmiConstant(MessageTemplate::kSpeciesNotConstructor);
+ CallRuntime(Runtime::kThrowTypeError, context, message_id);
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
+}
+
+void PromiseBuiltinsAssembler::AppendPromiseCallback(int offset, Node* promise,
+ Node* value) {
+ Node* elements = LoadObjectField(promise, offset);
+ Node* length = LoadFixedArrayBaseLength(elements);
+ CodeStubAssembler::ParameterMode mode = OptimalParameterMode();
+ length = TaggedToParameter(length, mode);
+
+ Node* delta = IntPtrOrSmiConstant(1, mode);
+ Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
+
+ const ElementsKind kind = FAST_ELEMENTS;
+ const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
+ const CodeStubAssembler::AllocationFlags flags =
+ CodeStubAssembler::kAllowLargeObjectAllocation;
+ int additional_offset = 0;
+
+ Node* new_elements = AllocateFixedArray(kind, new_capacity, mode, flags);
+
+ CopyFixedArrayElements(kind, elements, new_elements, length, barrier_mode,
+ mode);
+ StoreFixedArrayElement(new_elements, length, value, barrier_mode,
+ additional_offset, mode);
+
+ StoreObjectField(promise, offset, new_elements);
+}
+
+Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
+ Node* promise,
+ Node* on_resolve,
+ Node* on_reject) {
+ Isolate* isolate = this->isolate();
+
+ // 2. If IsPromise(promise) is false, throw a TypeError exception.
+ ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
+ "Promise.prototype.then");
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+
+ // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
+ Node* constructor = SpeciesConstructor(context, promise, promise_fun);
+
+ // 4. Let resultCapability be ? NewPromiseCapability(C).
+ Callable call_callable = CodeFactory::Call(isolate);
+ Label fast_promise_capability(this), promise_capability(this),
+ perform_promise_then(this);
+ Variable var_deferred_promise(this, MachineRepresentation::kTagged),
+ var_deferred_on_resolve(this, MachineRepresentation::kTagged),
+ var_deferred_on_reject(this, MachineRepresentation::kTagged);
+
+ Branch(WordEqual(promise_fun, constructor), &fast_promise_capability,
+ &promise_capability);
+
+ Bind(&fast_promise_capability);
+ {
+ Node* const deferred_promise = AllocateAndInitJSPromise(context, promise);
+ var_deferred_promise.Bind(deferred_promise);
+ var_deferred_on_resolve.Bind(UndefinedConstant());
+ var_deferred_on_reject.Bind(UndefinedConstant());
+ Goto(&perform_promise_then);
+ }
+
+ Bind(&promise_capability);
+ {
+ Node* const capability = NewPromiseCapability(context, constructor);
+ var_deferred_promise.Bind(
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset));
+ var_deferred_on_resolve.Bind(
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset));
+ var_deferred_on_reject.Bind(
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+ Goto(&perform_promise_then);
+ }
+
+ // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
+ // resultCapability).
+ Bind(&perform_promise_then);
+ Node* const result = InternalPerformPromiseThen(
+ context, promise, on_resolve, on_reject, var_deferred_promise.value(),
+ var_deferred_on_resolve.value(), var_deferred_on_reject.value());
+ return result;
+}
+
+Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
+ Node* context, Node* promise, Node* on_resolve, Node* on_reject,
+ Node* deferred_promise, Node* deferred_on_resolve,
+ Node* deferred_on_reject) {
+ Node* const native_context = LoadNativeContext(context);
+
+ Variable var_on_resolve(this, MachineRepresentation::kTagged),
+ var_on_reject(this, MachineRepresentation::kTagged);
+
+ var_on_resolve.Bind(on_resolve);
+ var_on_reject.Bind(on_reject);
+
+ Label out(this), if_onresolvenotcallable(this), onrejectcheck(this),
+ append_callbacks(this);
+ GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable);
+
+ Node* const on_resolve_map = LoadMap(on_resolve);
+ Branch(IsCallableMap(on_resolve_map), &onrejectcheck,
+ &if_onresolvenotcallable);
+
+ Bind(&if_onresolvenotcallable);
+ {
+ var_on_resolve.Bind(LoadContextElement(
+ native_context, Context::PROMISE_ID_RESOLVE_HANDLER_INDEX));
+ Goto(&onrejectcheck);
+ }
+
+ Bind(&onrejectcheck);
+ {
+ Label if_onrejectnotcallable(this);
+ GotoIf(TaggedIsSmi(on_reject), &if_onrejectnotcallable);
+
+ Node* const on_reject_map = LoadMap(on_reject);
+ Branch(IsCallableMap(on_reject_map), &append_callbacks,
+ &if_onrejectnotcallable);
+
+ Bind(&if_onrejectnotcallable);
+ {
+ var_on_reject.Bind(LoadContextElement(
+ native_context, Context::PROMISE_ID_REJECT_HANDLER_INDEX));
+ Goto(&append_callbacks);
+ }
+ }
+
+ Bind(&append_callbacks);
+ {
+ Label fulfilled_check(this);
+ Node* const status = LoadObjectField(promise, JSPromise::kStatusOffset);
+ GotoUnless(SmiEqual(status, SmiConstant(v8::Promise::kPending)),
+ &fulfilled_check);
+
+ Node* const existing_deferred_promise =
+ LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
+
+ Label if_noexistingcallbacks(this), if_existingcallbacks(this);
+ Branch(IsUndefined(existing_deferred_promise), &if_noexistingcallbacks,
+ &if_existingcallbacks);
+
+ Bind(&if_noexistingcallbacks);
+ {
+ // Store callbacks directly in the slots.
+ StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
+ deferred_promise);
+ StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
+ deferred_on_resolve);
+ StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
+ deferred_on_reject);
+ StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
+ var_on_resolve.value());
+ StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
+ var_on_reject.value());
+ Goto(&out);
+ }
+
+ Bind(&if_existingcallbacks);
+ {
+ Label if_singlecallback(this), if_multiplecallbacks(this);
+ BranchIfJSObject(existing_deferred_promise, &if_singlecallback,
+ &if_multiplecallbacks);
+
+ Bind(&if_singlecallback);
+ {
+ // Create new FixedArrays to store callbacks, and migrate
+ // existing callbacks.
+ Node* const deferred_promise_arr =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(deferred_promise_arr, 0,
+ existing_deferred_promise);
+ StoreFixedArrayElement(deferred_promise_arr, 1, deferred_promise);
+
+ Node* const deferred_on_resolve_arr =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(
+ deferred_on_resolve_arr, 0,
+ LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset));
+ StoreFixedArrayElement(deferred_on_resolve_arr, 1, deferred_on_resolve);
+
+ Node* const deferred_on_reject_arr =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(
+ deferred_on_reject_arr, 0,
+ LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset));
+ StoreFixedArrayElement(deferred_on_reject_arr, 1, deferred_on_reject);
+
+ Node* const fulfill_reactions =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(
+ fulfill_reactions, 0,
+ LoadObjectField(promise, JSPromise::kFulfillReactionsOffset));
+ StoreFixedArrayElement(fulfill_reactions, 1, var_on_resolve.value());
+
+ Node* const reject_reactions =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(
+ reject_reactions, 0,
+ LoadObjectField(promise, JSPromise::kRejectReactionsOffset));
+ StoreFixedArrayElement(reject_reactions, 1, var_on_reject.value());
+
+ // Store new FixedArrays in promise.
+ StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
+ deferred_promise_arr);
+ StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
+ deferred_on_resolve_arr);
+ StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
+ deferred_on_reject_arr);
+ StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
+ fulfill_reactions);
+ StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
+ reject_reactions);
+ Goto(&out);
+ }
+
+ Bind(&if_multiplecallbacks);
+ {
+ AppendPromiseCallback(JSPromise::kDeferredPromiseOffset, promise,
+ deferred_promise);
+ AppendPromiseCallback(JSPromise::kDeferredOnResolveOffset, promise,
+ deferred_on_resolve);
+ AppendPromiseCallback(JSPromise::kDeferredOnRejectOffset, promise,
+ deferred_on_reject);
+ AppendPromiseCallback(JSPromise::kFulfillReactionsOffset, promise,
+ var_on_resolve.value());
+ AppendPromiseCallback(JSPromise::kRejectReactionsOffset, promise,
+ var_on_reject.value());
+ Goto(&out);
+ }
+ }
+
+ Bind(&fulfilled_check);
+ {
+ Label reject(this);
+ Node* const result = LoadObjectField(promise, JSPromise::kResultOffset);
+ GotoUnless(WordEqual(status, SmiConstant(v8::Promise::kFulfilled)),
+ &reject);
+
+ Node* info = AllocatePromiseReactionJobInfo(
+ result, var_on_resolve.value(), deferred_promise, deferred_on_resolve,
+ deferred_on_reject, context);
+ // TODO(gsathya): Move this to TF
+ CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, promise, info,
+ SmiConstant(v8::Promise::kFulfilled));
+ Goto(&out);
+
+ Bind(&reject);
+ {
+ Node* const has_handler = PromiseHasHandler(promise);
+ Label enqueue(this);
+
+ // TODO(gsathya): Fold these runtime calls and move to TF.
+ GotoIf(has_handler, &enqueue);
+ CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
+ Goto(&enqueue);
+
+ Bind(&enqueue);
+ {
+ Node* info = AllocatePromiseReactionJobInfo(
+ result, var_on_reject.value(), deferred_promise,
+ deferred_on_resolve, deferred_on_reject, context);
+ // TODO(gsathya): Move this to TF
+ CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, promise,
+ info, SmiConstant(v8::Promise::kRejected));
+ Goto(&out);
+ }
+ }
+ }
+ }
+
+ Bind(&out);
+ PromiseSetHasHandler(promise);
+ return deferred_promise;
+}
+
+// Promise fast path implementations rely on unmodified JSPromise instances.
+// We use a fairly coarse granularity for this and simply check whether both
+// the promise itself is unmodified (i.e. its map has not changed) and its
+// prototype is unmodified.
+// TODO(gsathya): Refactor this out to prevent code dupe with builtins-regexp
+void PromiseBuiltinsAssembler::BranchIfFastPath(Node* context, Node* promise,
+ Label* if_isunmodified,
+ Label* if_ismodified) {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ BranchIfFastPath(native_context, promise_fun, promise, if_isunmodified,
+ if_ismodified);
+}
+
+void PromiseBuiltinsAssembler::BranchIfFastPath(Node* native_context,
+ Node* promise_fun,
+ Node* promise,
+ Label* if_isunmodified,
+ Label* if_ismodified) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_ASSERT(this,
+ WordEqual(promise_fun,
+ LoadContextElement(native_context,
+ Context::PROMISE_FUNCTION_INDEX)));
+
+ Node* const map = LoadMap(promise);
+ Node* const initial_map =
+ LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const has_initialmap = WordEqual(map, initial_map);
+
+ GotoUnless(has_initialmap, if_ismodified);
+
+ Node* const initial_proto_initial_map =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_MAP_INDEX);
+ Node* const proto_map = LoadMap(LoadMapPrototype(map));
+ Node* const proto_has_initialmap =
+ WordEqual(proto_map, initial_proto_initial_map);
+
+ Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+}
+
+Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobInfo(
+ Node* thenable, Node* then, Node* resolve, Node* reject, Node* context) {
+ Node* const info = Allocate(PromiseResolveThenableJobInfo::kSize);
+ StoreMapNoWriteBarrier(info,
+ Heap::kPromiseResolveThenableJobInfoMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kThenableOffset, thenable);
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kThenOffset, then);
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kResolveOffset, resolve);
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kRejectOffset, reject);
+ StoreObjectFieldNoWriteBarrier(info,
+ PromiseResolveThenableJobInfo::kDebugIdOffset,
+ SmiConstant(kDebugPromiseNoID));
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kContextOffset, context);
+ return info;
+}
+
+void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
+ Node* promise,
+ Node* result) {
+ Isolate* isolate = this->isolate();
+
+ Variable var_reason(this, MachineRepresentation::kTagged),
+ var_then(this, MachineRepresentation::kTagged);
+
+ Label do_enqueue(this), fulfill(this), if_cycle(this, Label::kDeferred),
+ if_rejectpromise(this, Label::kDeferred), out(this);
+
+ Label cycle_check(this);
+ GotoUnless(IsPromiseHookEnabled(), &cycle_check);
+ CallRuntime(Runtime::kPromiseHookResolve, context, promise);
+ Goto(&cycle_check);
+
+ Bind(&cycle_check);
+ // 6. If SameValue(resolution, promise) is true, then
+ GotoIf(SameValue(promise, result, context), &if_cycle);
+
+ // 7. If Type(resolution) is not Object, then
+ GotoIf(TaggedIsSmi(result), &fulfill);
+ GotoUnless(IsJSReceiver(result), &fulfill);
+
+ Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ BranchIfFastPath(native_context, promise_fun, result, &if_nativepromise,
+ &if_notnativepromise);
+
+ // Resolution is a native promise and if it's already resolved or
+ // rejected, shortcircuit the resolution procedure by directly
+ // reusing the value from the promise.
+ Bind(&if_nativepromise);
+ {
+ Node* const thenable_status =
+ LoadObjectField(result, JSPromise::kStatusOffset);
+ Node* const thenable_value =
+ LoadObjectField(result, JSPromise::kResultOffset);
+
+ Label if_isnotpending(this);
+ GotoUnless(SmiEqual(SmiConstant(v8::Promise::kPending), thenable_status),
+ &if_isnotpending);
+
+ // TODO(gsathya): Use a marker here instead of the actual then
+ // callback, and check for the marker in PromiseResolveThenableJob
+ // and perform PromiseThen.
+ Node* const then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+
+ Bind(&if_isnotpending);
+ {
+ Label if_fulfilled(this), if_rejected(this);
+ Branch(SmiEqual(SmiConstant(v8::Promise::kFulfilled), thenable_status),
+ &if_fulfilled, &if_rejected);
+
+ Bind(&if_fulfilled);
+ {
+ PromiseFulfill(context, promise, thenable_value,
+ v8::Promise::kFulfilled);
+ PromiseSetHasHandler(promise);
+ Goto(&out);
+ }
+
+ Bind(&if_rejected);
+ {
+ Label reject(this);
+ Node* const has_handler = PromiseHasHandler(result);
+
+ // Promise has already been rejected, but had no handler.
+ // Revoke previously triggered reject event.
+ GotoIf(has_handler, &reject);
+ CallRuntime(Runtime::kPromiseRevokeReject, context, result);
+ Goto(&reject);
+
+ Bind(&reject);
+ // Don't cause a debug event as this case is forwarding a rejection
+ InternalPromiseReject(context, promise, thenable_value, false);
+ PromiseSetHasHandler(result);
+ Goto(&out);
+ }
+ }
+ }
+
+ Bind(&if_notnativepromise);
+ {
+ // 8. Let then be Get(resolution, "then").
+ Node* const then_str = HeapConstant(isolate->factory()->then_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const then =
+ CallStub(getproperty_callable, context, result, then_str);
+
+ // 9. If then is an abrupt completion, then
+ GotoIfException(then, &if_rejectpromise, &var_reason);
+
+ // 11. If IsCallable(thenAction) is false, then
+ GotoIf(TaggedIsSmi(then), &fulfill);
+ Node* const then_map = LoadMap(then);
+ GotoUnless(IsCallableMap(then_map), &fulfill);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+ }
+
+ Bind(&do_enqueue);
+ {
+ // TODO(gsathya): Add fast path for native promises with unmodified
+ // PromiseThen (which don't need these resolving functions, but
+ // instead can just call resolve/reject directly).
+ Node* resolve = nullptr;
+ Node* reject = nullptr;
+ std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
+ promise, FalseConstant(), native_context);
+
+ Node* const info = AllocatePromiseResolveThenableJobInfo(
+ result, var_then.value(), resolve, reject, context);
+
+ Label enqueue(this);
+ GotoUnless(IsDebugActive(), &enqueue);
+
+ Node* const debug_id =
+ CallRuntime(Runtime::kDebugNextAsyncTaskId, context, promise);
+ StoreObjectField(info, PromiseResolveThenableJobInfo::kDebugIdOffset,
+ debug_id);
+
+ GotoIf(TaggedIsSmi(result), &enqueue);
+ GotoUnless(HasInstanceType(result, JS_PROMISE_TYPE), &enqueue);
+
+ // Mark the dependency of the new promise on the resolution
+ Node* const key =
+ HeapConstant(isolate->factory()->promise_handled_by_symbol());
+ CallRuntime(Runtime::kSetProperty, context, result, key, promise,
+ SmiConstant(STRICT));
+ Goto(&enqueue);
+
+ // 12. Perform EnqueueJob("PromiseJobs",
+ // PromiseResolveThenableJob, « promise, resolution, thenAction»).
+ Bind(&enqueue);
+ // TODO(gsathya): Move this to TF
+ CallRuntime(Runtime::kEnqueuePromiseResolveThenableJob, context, info);
+ Goto(&out);
+ }
+
+ // 7.b Return FulfillPromise(promise, resolution).
+ Bind(&fulfill);
+ {
+ PromiseFulfill(context, promise, result, v8::Promise::kFulfilled);
+ Goto(&out);
+ }
+
+ Bind(&if_cycle);
+ {
+ // 6.a Let selfResolutionError be a newly created TypeError object.
+ Node* const message_id = SmiConstant(MessageTemplate::kPromiseCyclic);
+ Node* const error =
+ CallRuntime(Runtime::kNewTypeError, context, message_id, result);
+ var_reason.Bind(error);
+
+ // 6.b Return RejectPromise(promise, selfResolutionError).
+ Goto(&if_rejectpromise);
+ }
+
+ // 9.a Return RejectPromise(promise, then.[[Value]]).
+ Bind(&if_rejectpromise);
+ {
+ InternalPromiseReject(context, promise, var_reason.value(), true);
+ Goto(&out);
+ }
+
+ Bind(&out);
+}
+
+void PromiseBuiltinsAssembler::PromiseFulfill(
+ Node* context, Node* promise, Node* result,
+ v8::Promise::PromiseState status) {
+ Label do_promisereset(this), debug_async_event_enqueue_recurring(this);
+
+ Node* const status_smi = SmiConstant(static_cast<int>(status));
+ Node* const deferred_promise =
+ LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
+
+ GotoIf(IsUndefined(deferred_promise), &debug_async_event_enqueue_recurring);
+
+ Node* const tasks =
+ status == v8::Promise::kFulfilled
+ ? LoadObjectField(promise, JSPromise::kFulfillReactionsOffset)
+ : LoadObjectField(promise, JSPromise::kRejectReactionsOffset);
+
+ Node* const deferred_on_resolve =
+ LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset);
+ Node* const deferred_on_reject =
+ LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset);
+
+ Node* const info = AllocatePromiseReactionJobInfo(
+ result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject,
+ context);
+
+ CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, promise, info,
+ status_smi);
+ Goto(&debug_async_event_enqueue_recurring);
+
+ Bind(&debug_async_event_enqueue_recurring);
+ {
+ GotoUnless(IsDebugActive(), &do_promisereset);
+ CallRuntime(Runtime::kDebugAsyncEventEnqueueRecurring, context, promise,
+ status_smi);
+ Goto(&do_promisereset);
+ }
+
+ Bind(&do_promisereset);
+ {
+ StoreObjectField(promise, JSPromise::kStatusOffset, status_smi);
+ StoreObjectField(promise, JSPromise::kResultOffset, result);
+ StoreObjectFieldRoot(promise, JSPromise::kDeferredPromiseOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kDeferredOnResolveOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kDeferredOnRejectOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kFulfillReactionsOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kRejectReactionsOffset,
+ Heap::kUndefinedValueRootIndex);
+ }
+}
+
+void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
+ Node* context, Node* native_context, Node* promise_constructor,
+ Node* executor, Label* if_noaccess) {
+ Variable var_executor(this, MachineRepresentation::kTagged);
+ var_executor.Bind(executor);
+ Label has_access(this), call_runtime(this, Label::kDeferred);
+
+ // If executor is a bound function, load the bound function until we've
+ // reached an actual function.
+ Label found_function(this), loop_over_bound_function(this, &var_executor);
+ Goto(&loop_over_bound_function);
+ Bind(&loop_over_bound_function);
+ {
+ Node* executor_type = LoadInstanceType(var_executor.value());
+ GotoIf(InstanceTypeEqual(executor_type, JS_FUNCTION_TYPE), &found_function);
+ GotoUnless(InstanceTypeEqual(executor_type, JS_BOUND_FUNCTION_TYPE),
+ &call_runtime);
+ var_executor.Bind(LoadObjectField(
+ var_executor.value(), JSBoundFunction::kBoundTargetFunctionOffset));
+ Goto(&loop_over_bound_function);
+ }
+
+ // Load the context from the function and compare it to the Promise
+ // constructor's context. If they match, everything is fine, otherwise, bail
+ // out to the runtime.
+ Bind(&found_function);
+ {
+ Node* function_context =
+ LoadObjectField(var_executor.value(), JSFunction::kContextOffset);
+ Node* native_function_context = LoadNativeContext(function_context);
+ Branch(WordEqual(native_context, native_function_context), &has_access,
+ &call_runtime);
+ }
+
+ Bind(&call_runtime);
+ {
+ Branch(WordEqual(CallRuntime(Runtime::kAllowDynamicFunction, context,
+ promise_constructor),
+ BooleanConstant(true)),
+ &has_access, if_noaccess);
+ }
+
+ Bind(&has_access);
+}
+
+void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
+ Node* promise, Node* value,
+ Node* debug_event) {
+ Label out(this);
+ GotoUnless(IsDebugActive(), &out);
+ GotoUnless(WordEqual(TrueConstant(), debug_event), &out);
+ CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
+ Goto(&out);
+
+ Bind(&out);
+ InternalPromiseReject(context, promise, value, false);
+}
+
+// This duplicates a lot of logic from PromiseRejectEvent in
+// runtime-promise.cc
+void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
+ Node* promise, Node* value,
+ bool debug_event) {
+ Label fulfill(this), report_unhandledpromise(this), run_promise_hook(this);
+
+ if (debug_event) {
+ GotoUnless(IsDebugActive(), &run_promise_hook);
+ CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
+ Goto(&run_promise_hook);
+ } else {
+ Goto(&run_promise_hook);
+ }
+
+ Bind(&run_promise_hook);
+ {
+ GotoUnless(IsPromiseHookEnabled(), &report_unhandledpromise);
+ CallRuntime(Runtime::kPromiseHookResolve, context, promise);
+ Goto(&report_unhandledpromise);
+ }
+
+ Bind(&report_unhandledpromise);
+ {
+ GotoIf(PromiseHasHandler(promise), &fulfill);
+ CallRuntime(Runtime::kReportPromiseReject, context, promise, value);
+ Goto(&fulfill);
+ }
+
+ Bind(&fulfill);
+ PromiseFulfill(context, promise, value, v8::Promise::kRejected);
}
// ES#sec-promise-reject-functions
// Promise Reject Functions
-BUILTIN(PromiseRejectClosure) {
- HandleScope scope(isolate);
+TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Label out(this);
+
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ int has_already_visited_slot = kAlreadyVisitedSlot;
+
+ Node* const has_already_visited =
+ LoadContextElement(context, has_already_visited_slot);
+
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ GotoIf(SmiEqual(has_already_visited, SmiConstant(1)), &out);
+
+ // 5.Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, has_already_visited_slot,
+ SmiConstant(1));
+
+ // 2. Let promise be F.[[Promise]].
+ Node* const promise =
+ LoadContextElement(context, IntPtrConstant(kPromiseSlot));
+ Node* const debug_event =
+ LoadContextElement(context, IntPtrConstant(kDebugEventSlot));
+
+ InternalPromiseReject(context, promise, value, debug_event);
+ Return(UndefinedConstant());
+
+ Bind(&out);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
+ Node* const executor = Parameter(1);
+ Node* const new_target = Parameter(2);
+ Node* const context = Parameter(4);
+ Isolate* isolate = this->isolate();
+
+ Label if_targetisundefined(this, Label::kDeferred);
+
+ GotoIf(IsUndefined(new_target), &if_targetisundefined);
+
+ Label if_notcallable(this, Label::kDeferred);
+
+ GotoIf(TaggedIsSmi(executor), &if_notcallable);
+
+ Node* const executor_map = LoadMap(executor);
+ GotoUnless(IsCallableMap(executor_map), &if_notcallable);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const is_debug_active = IsDebugActive();
+ Label if_targetisnotmodified(this),
+ if_targetismodified(this, Label::kDeferred), run_executor(this),
+ debug_push(this), if_noaccess(this, Label::kDeferred);
+
+ BranchIfAccessCheckFailed(context, native_context, promise_fun, executor,
+ &if_noaccess);
+
+ Branch(WordEqual(promise_fun, new_target), &if_targetisnotmodified,
+ &if_targetismodified);
+
+ Variable var_result(this, MachineRepresentation::kTagged),
+ var_reject_call(this, MachineRepresentation::kTagged),
+ var_reason(this, MachineRepresentation::kTagged);
+
+ Bind(&if_targetisnotmodified);
+ {
+ Node* const instance = AllocateAndInitJSPromise(context);
+ var_result.Bind(instance);
+ Goto(&debug_push);
+ }
+
+ Bind(&if_targetismodified);
+ {
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ Node* const instance = constructor_assembler.EmitFastNewObject(
+ context, promise_fun, new_target);
+ PromiseInit(instance);
+ var_result.Bind(instance);
+
+ GotoUnless(IsPromiseHookEnabled(), &debug_push);
+ CallRuntime(Runtime::kPromiseHookInit, context, instance,
+ UndefinedConstant());
+ Goto(&debug_push);
+ }
+
+ Bind(&debug_push);
+ {
+ GotoUnless(is_debug_active, &run_executor);
+ CallRuntime(Runtime::kDebugPushPromise, context, var_result.value());
+ Goto(&run_executor);
+ }
+
+ Bind(&run_executor);
+ {
+ Label out(this), if_rejectpromise(this), debug_pop(this, Label::kDeferred);
+
+ Node *resolve, *reject;
+ std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
+ var_result.value(), TrueConstant(), native_context);
+ Callable call_callable = CodeFactory::Call(isolate);
+
+ Node* const maybe_exception = CallJS(call_callable, context, executor,
+ UndefinedConstant(), resolve, reject);
+
+ GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
+ Branch(is_debug_active, &debug_pop, &out);
+
+ Bind(&if_rejectpromise);
+ {
+ Callable call_callable = CodeFactory::Call(isolate);
+ CallJS(call_callable, context, reject, UndefinedConstant(),
+ var_reason.value());
+ Branch(is_debug_active, &debug_pop, &out);
+ }
+
+ Bind(&debug_pop);
+ {
+ CallRuntime(Runtime::kDebugPopPromise, context);
+ Goto(&out);
+ }
+ Bind(&out);
+ Return(var_result.value());
+ }
+
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ Bind(&if_targetisundefined);
+ {
+ Node* const message_id = SmiConstant(MessageTemplate::kNotAPromise);
+ CallRuntime(Runtime::kThrowTypeError, context, message_id, new_target);
+ Return(UndefinedConstant()); // Never reached.
+ }
+
+ // 2. If IsCallable(executor) is false, throw a TypeError exception.
+ Bind(&if_notcallable);
+ {
+ Node* const message_id =
+ SmiConstant(MessageTemplate::kResolverNotAFunction);
+ CallRuntime(Runtime::kThrowTypeError, context, message_id, executor);
+ Return(UndefinedConstant()); // Never reached.
+ }
+
+ // Silently fail if the stack looks fishy.
+ Bind(&if_noaccess);
+ {
+ Node* const counter_id =
+ SmiConstant(v8::Isolate::kPromiseConstructorReturnedUndefined);
+ CallRuntime(Runtime::kIncrementUseCounter, context, counter_id);
+ Return(UndefinedConstant());
+ }
+}
+
+TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
+ Node* const parent = Parameter(1);
+ Node* const context = Parameter(4);
+ Return(AllocateAndInitJSPromise(context, parent));
+}
+
+TF_BUILTIN(IsPromise, PromiseBuiltinsAssembler) {
+ Node* const maybe_promise = Parameter(1);
+ Label if_notpromise(this, Label::kDeferred);
+
+ GotoIf(TaggedIsSmi(maybe_promise), &if_notpromise);
+
+ Node* const result =
+ SelectBooleanConstant(HasInstanceType(maybe_promise, JS_PROMISE_TYPE));
+ Return(result);
+
+ Bind(&if_notpromise);
+ Return(FalseConstant());
+}
+
+TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(1);
+ Node* const on_resolve = Parameter(2);
+ Node* const on_reject = Parameter(3);
+ Node* const deferred_promise = Parameter(4);
+ Node* const context = Parameter(7);
+
+ // No deferred_on_resolve/deferred_on_reject because this is just an
+ // internal promise created by async-await.
+ Node* const result = InternalPerformPromiseThen(
+ context, promise, on_resolve, on_reject, deferred_promise,
+ UndefinedConstant(), UndefinedConstant());
+
+ // TODO(gsathya): This is unused, but value is returned according to spec.
+ Return(result);
+}
+
+// ES#sec-promise.prototype.then
+// Promise.prototype.catch ( onFulfilled, onRejected )
+TF_BUILTIN(PromiseThen, PromiseBuiltinsAssembler) {
+ // 1. Let promise be the this value.
+ Node* const promise = Parameter(0);
+ Node* const on_resolve = Parameter(1);
+ Node* const on_reject = Parameter(2);
+ Node* const context = Parameter(5);
+
+ Node* const result =
+ InternalPromiseThen(context, promise, on_resolve, on_reject);
+ Return(result);
+}
+
+// ES#sec-promise-resolve-functions
+// Promise Resolve Functions
+TF_BUILTIN(PromiseResolveClosure, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Label out(this);
+
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ int has_already_visited_slot = kAlreadyVisitedSlot;
+
+ Node* const has_already_visited =
+ LoadContextElement(context, has_already_visited_slot);
+
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ GotoIf(SmiEqual(has_already_visited, SmiConstant(1)), &out);
+
+ // 5.Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, has_already_visited_slot,
+ SmiConstant(1));
+
+ // 2. Let promise be F.[[Promise]].
+ Node* const promise =
+ LoadContextElement(context, IntPtrConstant(kPromiseSlot));
+
+ InternalResolvePromise(context, promise, value);
+ Return(UndefinedConstant());
+
+ Bind(&out);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(1);
+ Node* const result = Parameter(2);
+ Node* const context = Parameter(5);
+
+ InternalResolvePromise(context, promise, result);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
+ typedef PromiseHandleRejectDescriptor Descriptor;
+
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const on_reject = Parameter(Descriptor::kOnReject);
+ Node* const exception = Parameter(Descriptor::kException);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Callable call_callable = CodeFactory::Call(isolate());
+ Variable var_unused(this, MachineRepresentation::kTagged);
+
+ Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
+ Branch(IsUndefined(on_reject), &if_internalhandler, &if_customhandler);
+
+ Bind(&if_internalhandler);
+ {
+ InternalPromiseReject(context, promise, exception, false);
+ Return(UndefinedConstant());
+ }
+
+ Bind(&if_customhandler);
+ {
+ CallJS(call_callable, context, on_reject, UndefinedConstant(), exception);
+ Return(UndefinedConstant());
+ }
+}
+
+TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(1);
+ Node* const handler = Parameter(2);
+ Node* const deferred_promise = Parameter(3);
+ Node* const deferred_on_resolve = Parameter(4);
+ Node* const deferred_on_reject = Parameter(5);
+ Node* const context = Parameter(8);
+ Isolate* isolate = this->isolate();
+
+ Variable var_reason(this, MachineRepresentation::kTagged);
+
+ Node* const is_debug_active = IsDebugActive();
+ Label run_handler(this), if_rejectpromise(this), promisehook_before(this),
+ promisehook_after(this), debug_pop(this);
+
+ GotoUnless(is_debug_active, &promisehook_before);
+ CallRuntime(Runtime::kDebugPushPromise, context, deferred_promise);
+ Goto(&promisehook_before);
+
+ Bind(&promisehook_before);
+ {
+ GotoUnless(IsPromiseHookEnabled(), &run_handler);
+ CallRuntime(Runtime::kPromiseHookBefore, context, deferred_promise);
+ Goto(&run_handler);
+ }
+
+ Bind(&run_handler);
+ {
+ Callable call_callable = CodeFactory::Call(isolate);
+ Node* const result =
+ CallJS(call_callable, context, handler, UndefinedConstant(), value);
+
+ GotoIfException(result, &if_rejectpromise, &var_reason);
+
+ Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
+ Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
+ &if_customhandler);
+
+ Bind(&if_internalhandler);
+ InternalResolvePromise(context, deferred_promise, result);
+ Goto(&promisehook_after);
+
+ Bind(&if_customhandler);
+ {
+ Node* const maybe_exception =
+ CallJS(call_callable, context, deferred_on_resolve,
+ UndefinedConstant(), result);
+ GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
+ Goto(&promisehook_after);
+ }
+ }
+
+ Bind(&if_rejectpromise);
+ {
+ Callable promise_handle_reject = CodeFactory::PromiseHandleReject(isolate);
+ CallStub(promise_handle_reject, context, deferred_promise,
+ deferred_on_reject, var_reason.value());
+ Goto(&promisehook_after);
+ }
+
+ Bind(&promisehook_after);
+ {
+ GotoUnless(IsPromiseHookEnabled(), &debug_pop);
+ CallRuntime(Runtime::kPromiseHookAfter, context, deferred_promise);
+ Goto(&debug_pop);
+ }
+
+ Bind(&debug_pop);
+ {
+ Label out(this);
+
+ GotoUnless(is_debug_active, &out);
+ CallRuntime(Runtime::kDebugPopPromise, context);
+ Goto(&out);
+
+ Bind(&out);
+ Return(UndefinedConstant());
+ }
+}
+
+// ES#sec-promise.prototype.catch
+// Promise.prototype.catch ( onRejected )
+TF_BUILTIN(PromiseCatch, PromiseBuiltinsAssembler) {
+ // 1. Let promise be the this value.
+ Node* const promise = Parameter(0);
+ Node* const on_resolve = UndefinedConstant();
+ Node* const on_reject = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Label if_internalthen(this), if_customthen(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(promise), &if_customthen);
+ BranchIfFastPath(context, promise, &if_internalthen, &if_customthen);
+
+ Bind(&if_internalthen);
+ {
+ Node* const result =
+ InternalPromiseThen(context, promise, on_resolve, on_reject);
+ Return(result);
+ }
+
+ Bind(&if_customthen);
+ {
+ Isolate* isolate = this->isolate();
+ Node* const then_str = HeapConstant(isolate->factory()->then_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const then =
+ CallStub(getproperty_callable, context, promise, then_str);
+ Callable call_callable = CodeFactory::Call(isolate);
+ Node* const result =
+ CallJS(call_callable, context, then, promise, on_resolve, on_reject);
+ Return(result);
+ }
+}
+
+TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
+ // 1. Let C be the this value.
+ Node* receiver = Parameter(0);
+ Node* value = Parameter(1);
+ Node* context = Parameter(4);
+ Isolate* isolate = this->isolate();
+
+ // 2. If Type(C) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+ "PromiseResolve");
+
+ Label if_valueisnativepromise(this), if_valueisnotnativepromise(this),
+ if_valueisnotpromise(this);
+
+ // 3.If IsPromise(x) is true, then
+ GotoIf(TaggedIsSmi(value), &if_valueisnotpromise);
+
+ // This shortcircuits the constructor lookups.
+ GotoUnless(HasInstanceType(value, JS_PROMISE_TYPE), &if_valueisnotpromise);
+
+ // This adds a fast path as non-subclassed native promises don't have
+ // an observable constructor lookup.
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ BranchIfFastPath(native_context, promise_fun, value, &if_valueisnativepromise,
+ &if_valueisnotnativepromise);
+
+ Bind(&if_valueisnativepromise);
+ {
+ GotoUnless(WordEqual(promise_fun, receiver), &if_valueisnotnativepromise);
+ Return(value);
+ }
+
+ // At this point, value or/and receiver are not native promises, but
+ // they could be of the same subclass.
+ Bind(&if_valueisnotnativepromise);
+ {
+ // 3.a Let xConstructor be ? Get(x, "constructor").
+ // The constructor lookup is observable.
+ Node* const constructor_str =
+ HeapConstant(isolate->factory()->constructor_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const constructor =
+ CallStub(getproperty_callable, context, value, constructor_str);
+
+ // 3.b If SameValue(xConstructor, C) is true, return x.
+ GotoUnless(SameValue(constructor, receiver, context),
+ &if_valueisnotpromise);
+
+ Return(value);
+ }
- Handle<Context> context(isolate->context(), isolate);
+ Bind(&if_valueisnotpromise);
+ {
+ Label if_nativepromise(this), if_notnativepromise(this);
+ BranchIfFastPath(context, receiver, &if_nativepromise,
+ &if_notnativepromise);
- if (PromiseUtils::HasAlreadyVisited(context)) {
- return isolate->heap()->undefined_value();
+ // This adds a fast path for native promises that don't need to
+ // create NewPromiseCapability.
+ Bind(&if_nativepromise);
+ {
+ Label do_resolve(this);
+
+ Node* const result = AllocateAndInitJSPromise(context);
+ InternalResolvePromise(context, result, value);
+ Return(result);
+ }
+
+ Bind(&if_notnativepromise);
+ {
+ // 4. Let promiseCapability be ? NewPromiseCapability(C).
+ Node* const capability = NewPromiseCapability(context, receiver);
+
+ // 5. Perform ? Call(promiseCapability.[[Resolve]], undefined, « x »).
+ Callable call_callable = CodeFactory::Call(isolate);
+ Node* const resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ CallJS(call_callable, context, resolve, UndefinedConstant(), value);
+
+ // 6. Return promiseCapability.[[Promise]].
+ Node* const result =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ Return(result);
+ }
}
+}
+
+TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
+ Node* const resolve = Parameter(1);
+ Node* const reject = Parameter(2);
+ Node* const context = Parameter(5);
- PromiseUtils::SetAlreadyVisited(context);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
- Handle<Object> debug_event =
- handle(PromiseUtils::GetDebugEvent(context), isolate);
- MaybeHandle<Object> maybe_result;
- Handle<Object> argv[] = {promise, value, debug_event};
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Execution::Call(isolate, isolate->promise_internal_reject(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv));
- return isolate->heap()->undefined_value();
+ Node* const capability = LoadContextElement(context, kCapabilitySlot);
+
+ Label if_alreadyinvoked(this, Label::kDeferred);
+ GotoIf(WordNotEqual(
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset),
+ UndefinedConstant()),
+ &if_alreadyinvoked);
+ GotoIf(WordNotEqual(
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset),
+ UndefinedConstant()),
+ &if_alreadyinvoked);
+
+ StoreObjectField(capability, JSPromiseCapability::kResolveOffset, resolve);
+ StoreObjectField(capability, JSPromiseCapability::kRejectOffset, reject);
+
+ Return(UndefinedConstant());
+
+ Bind(&if_alreadyinvoked);
+ Node* message = SmiConstant(MessageTemplate::kPromiseExecutorAlreadyInvoked);
+ Return(CallRuntime(Runtime::kThrowTypeError, context, message));
}
-// ES#sec-createresolvingfunctions
-// CreateResolvingFunctions ( promise )
-BUILTIN(CreateResolvingFunctions) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
+ Node* constructor = Parameter(1);
+ Node* debug_event = Parameter(2);
+ Node* context = Parameter(5);
- Handle<JSObject> promise = args.at<JSObject>(1);
- Handle<Object> debug_event = args.at<Object>(2);
- Handle<JSFunction> resolve, reject;
+ CSA_ASSERT_JS_ARGC_EQ(this, 2);
- PromiseUtils::CreateResolvingFunctions(isolate, promise, debug_event,
- &resolve, &reject);
+ Return(NewPromiseCapability(context, constructor, debug_event));
+}
+
+TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
+ // 1. Let C be the this value.
+ Node* const receiver = Parameter(0);
+ Node* const reason = Parameter(1);
+ Node* const context = Parameter(4);
+
+ // 2. If Type(C) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+ "PromiseReject");
+
+ Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Branch(WordEqual(promise_fun, receiver), &if_nativepromise,
+ &if_custompromise);
+
+ Bind(&if_nativepromise);
+ {
+ Node* const promise = AllocateAndSetJSPromise(
+ context, SmiConstant(v8::Promise::kRejected), reason);
+ CallRuntime(Runtime::kPromiseRejectEventFromStack, context, promise,
+ reason);
+ Return(promise);
+ }
+
+ Bind(&if_custompromise);
+ {
+ // 3. Let promiseCapability be ? NewPromiseCapability(C).
+ Node* const capability = NewPromiseCapability(context, receiver);
+
+ // 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
+ Node* const reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ Callable call_callable = CodeFactory::Call(isolate());
+ CallJS(call_callable, context, reject, UndefinedConstant(), reason);
+
+ // 5. Return promiseCapability.[[Promise]].
+ Node* const promise =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ Return(promise);
+ }
+}
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(2);
- result->set(0, *resolve);
- result->set(1, *reject);
+TF_BUILTIN(InternalPromiseReject, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(1);
+ Node* const reason = Parameter(2);
+ Node* const debug_event = Parameter(3);
+ Node* const context = Parameter(6);
- return *isolate->factory()->NewJSArrayWithElements(result, FAST_ELEMENTS, 2,
- NOT_TENURED);
+ InternalPromiseReject(context, promise, reason, debug_event);
+ Return(UndefinedConstant());
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-promise.h b/deps/v8/src/builtins/builtins-promise.h
new file mode 100644
index 0000000000..dee9a075a2
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-promise.h
@@ -0,0 +1,120 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+#include "src/contexts.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+class PromiseBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ enum PromiseResolvingFunctionContextSlot {
+ // Whether the resolve/reject callback was already called.
+ kAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
+
+ // The promise which resolve/reject callbacks fulfill.
+ kPromiseSlot,
+
+ // Whether to trigger a debug event or not. Used in catch
+ // prediction.
+ kDebugEventSlot,
+ kPromiseContextLength,
+ };
+
+ enum FunctionContextSlot {
+ kCapabilitySlot = Context::MIN_CONTEXT_SLOTS,
+
+ kCapabilitiesContextLength,
+ };
+
+ explicit PromiseBuiltinsAssembler(CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+ // These allocate and initialize a promise with pending state and
+ // undefined fields.
+ //
+ // This uses undefined as the parent promise for the promise init
+ // hook.
+ Node* AllocateAndInitJSPromise(Node* context);
+ // This uses the given parent as the parent promise for the promise
+ // init hook.
+ Node* AllocateAndInitJSPromise(Node* context, Node* parent);
+
+ // This allocates and initializes a promise with the given state and
+ // fields.
+ Node* AllocateAndSetJSPromise(Node* context, Node* status, Node* result);
+
+ Node* AllocatePromiseResolveThenableJobInfo(Node* result, Node* then,
+ Node* resolve, Node* reject,
+ Node* context);
+
+ std::pair<Node*, Node*> CreatePromiseResolvingFunctions(
+ Node* promise, Node* native_context, Node* promise_context);
+
+ Node* PromiseHasHandler(Node* promise);
+
+ Node* CreatePromiseResolvingFunctionsContext(Node* promise, Node* debug_event,
+ Node* native_context);
+
+ Node* CreatePromiseGetCapabilitiesExecutorContext(Node* native_context,
+ Node* promise_capability);
+
+ Node* NewPromiseCapability(Node* context, Node* constructor,
+ Node* debug_event = nullptr);
+
+ protected:
+ void PromiseInit(Node* promise);
+
+ Node* ThrowIfNotJSReceiver(Node* context, Node* value,
+ MessageTemplate::Template msg_template,
+ const char* method_name = nullptr);
+
+ Node* SpeciesConstructor(Node* context, Node* object,
+ Node* default_constructor);
+
+ void PromiseSetHasHandler(Node* promise);
+
+ void AppendPromiseCallback(int offset, compiler::Node* promise,
+ compiler::Node* value);
+
+ Node* InternalPromiseThen(Node* context, Node* promise, Node* on_resolve,
+ Node* on_reject);
+
+ Node* InternalPerformPromiseThen(Node* context, Node* promise,
+ Node* on_resolve, Node* on_reject,
+ Node* deferred_promise,
+ Node* deferred_on_resolve,
+ Node* deferred_on_reject);
+
+ void InternalResolvePromise(Node* context, Node* promise, Node* result);
+
+ void BranchIfFastPath(Node* context, Node* promise, Label* if_isunmodified,
+ Label* if_ismodified);
+
+ void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
+ Label* if_isunmodified, Label* if_ismodified);
+
+ Node* CreatePromiseContext(Node* native_context, int slots);
+ void PromiseFulfill(Node* context, Node* promise, Node* result,
+ v8::Promise::PromiseState status);
+
+ void BranchIfAccessCheckFailed(Node* context, Node* native_context,
+ Node* promise_constructor, Node* executor,
+ Label* if_noaccess);
+
+ void InternalPromiseReject(Node* context, Node* promise, Node* value,
+ bool debug_event);
+ void InternalPromiseReject(Node* context, Node* promise, Node* value,
+ Node* debug_event);
+
+ private:
+ Node* AllocateJSPromise(Node* context);
+};
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index b4d16c4a7b..64947b1f77 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -17,9 +17,9 @@ namespace internal {
BUILTIN(ReflectDefineProperty) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
- Handle<Object> attributes = args.at<Object>(3);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
+ Handle<Object> attributes = args.at(3);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -48,8 +48,8 @@ BUILTIN(ReflectDefineProperty) {
BUILTIN(ReflectDeleteProperty) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -73,7 +73,7 @@ BUILTIN(ReflectGet) {
HandleScope scope(isolate);
Handle<Object> target = args.atOrUndefined(isolate, 1);
Handle<Object> key = args.atOrUndefined(isolate, 2);
- Handle<Object> receiver = args.length() > 3 ? args.at<Object>(3) : target;
+ Handle<Object> receiver = args.length() > 3 ? args.at(3) : target;
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -95,8 +95,8 @@ BUILTIN(ReflectGet) {
BUILTIN(ReflectGetOwnPropertyDescriptor) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -121,7 +121,7 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) {
BUILTIN(ReflectGetPrototypeOf) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
+ Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -138,8 +138,8 @@ BUILTIN(ReflectGetPrototypeOf) {
BUILTIN(ReflectHas) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -162,7 +162,7 @@ BUILTIN(ReflectHas) {
BUILTIN(ReflectIsExtensible) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
+ Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -181,7 +181,7 @@ BUILTIN(ReflectIsExtensible) {
BUILTIN(ReflectOwnKeys) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
+ Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -203,7 +203,7 @@ BUILTIN(ReflectOwnKeys) {
BUILTIN(ReflectPreventExtensions) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
+ Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -224,7 +224,7 @@ BUILTIN(ReflectSet) {
Handle<Object> target = args.atOrUndefined(isolate, 1);
Handle<Object> key = args.atOrUndefined(isolate, 2);
Handle<Object> value = args.atOrUndefined(isolate, 3);
- Handle<Object> receiver = args.length() > 4 ? args.at<Object>(4) : target;
+ Handle<Object> receiver = args.length() > 4 ? args.at(4) : target;
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -249,8 +249,8 @@ BUILTIN(ReflectSet) {
BUILTIN(ReflectSetPrototypeOf) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> proto = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> proto = args.at(2);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
index 5f8d18be43..2191268441 100644
--- a/deps/v8/src/builtins/builtins-regexp.cc
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-utils.h"
#include "src/string-builder.h"
@@ -13,541 +14,400 @@
namespace v8 {
namespace internal {
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+class RegExpBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit RegExpBuiltinsAssembler(CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ Node* FastLoadLastIndex(Node* regexp);
+ Node* SlowLoadLastIndex(Node* context, Node* regexp);
+ Node* LoadLastIndex(Node* context, Node* regexp, bool is_fastpath);
+
+ void FastStoreLastIndex(Node* regexp, Node* value);
+ void SlowStoreLastIndex(Node* context, Node* regexp, Node* value);
+ void StoreLastIndex(Node* context, Node* regexp, Node* value,
+ bool is_fastpath);
+
+ Node* ConstructNewResultFromMatchInfo(Node* context, Node* match_info,
+ Node* string);
+
+ Node* RegExpPrototypeExecBodyWithoutResult(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ Label* if_didnotmatch,
+ const bool is_fastpath);
+ Node* RegExpPrototypeExecBody(Node* const context, Node* const regexp,
+ Node* const string, const bool is_fastpath);
+
+ Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
+ MessageTemplate::Template msg_template,
+ char const* method_name);
+
+ Node* IsInitialRegExpMap(Node* context, Node* map);
+ void BranchIfFastRegExp(Node* context, Node* map, Label* if_isunmodified,
+ Label* if_ismodified);
+ void BranchIfFastRegExpResult(Node* context, Node* map,
+ Label* if_isunmodified, Label* if_ismodified);
+
+ Node* FlagsGetter(Node* const context, Node* const regexp, bool is_fastpath);
+
+ Node* FastFlagGetter(Node* const regexp, JSRegExp::Flag flag);
+ Node* SlowFlagGetter(Node* const context, Node* const regexp,
+ JSRegExp::Flag flag);
+ Node* FlagGetter(Node* const context, Node* const regexp, JSRegExp::Flag flag,
+ bool is_fastpath);
+ void FlagGetter(JSRegExp::Flag flag, v8::Isolate::UseCounterFeature counter,
+ const char* method_name);
+
+ Node* IsRegExp(Node* const context, Node* const maybe_receiver);
+ Node* RegExpInitialize(Node* const context, Node* const regexp,
+ Node* const maybe_pattern, Node* const maybe_flags);
+
+ Node* RegExpExec(Node* context, Node* regexp, Node* string);
+
+ Node* AdvanceStringIndex(Node* const string, Node* const index,
+ Node* const is_unicode);
+
+ void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
+ Node* const string, const bool is_fastpath);
+
+ void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
+ Node* const string);
+ void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp,
+ Node* const string);
+
+ void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
+ Node* const string, Node* const limit);
+
+ Node* ReplaceGlobalCallableFastPath(Node* context, Node* regexp, Node* string,
+ Node* replace_callable);
+ Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp, Node* string,
+ Node* replace_string);
+};
+
// -----------------------------------------------------------------------------
// ES6 section 21.2 RegExp Objects
-namespace {
-
-Handle<String> PatternFlags(Isolate* isolate, Handle<JSRegExp> regexp) {
- static const int kMaxFlagsLength = 5 + 1; // 5 flags and '\0';
- char flags_string[kMaxFlagsLength];
- int i = 0;
-
- const JSRegExp::Flags flags = regexp->GetFlags();
-
- if ((flags & JSRegExp::kGlobal) != 0) flags_string[i++] = 'g';
- if ((flags & JSRegExp::kIgnoreCase) != 0) flags_string[i++] = 'i';
- if ((flags & JSRegExp::kMultiline) != 0) flags_string[i++] = 'm';
- if ((flags & JSRegExp::kUnicode) != 0) flags_string[i++] = 'u';
- if ((flags & JSRegExp::kSticky) != 0) flags_string[i++] = 'y';
-
- DCHECK_LT(i, kMaxFlagsLength);
- memset(&flags_string[i], '\0', kMaxFlagsLength - i);
-
- return isolate->factory()->NewStringFromAsciiChecked(flags_string);
-}
-
-// ES#sec-regexpinitialize
-// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
-MUST_USE_RESULT MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
- Handle<JSRegExp> regexp,
- Handle<Object> pattern,
- Handle<Object> flags) {
- Handle<String> pattern_string;
- if (pattern->IsUndefined(isolate)) {
- pattern_string = isolate->factory()->empty_string();
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, pattern_string,
- Object::ToString(isolate, pattern), JSRegExp);
- }
-
- Handle<String> flags_string;
- if (flags->IsUndefined(isolate)) {
- flags_string = isolate->factory()->empty_string();
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, flags_string,
- Object::ToString(isolate, flags), JSRegExp);
- }
-
- // TODO(jgruber): We could avoid the flags back and forth conversions.
- return JSRegExp::Initialize(regexp, pattern_string, flags_string);
-}
-
-} // namespace
-
-// ES#sec-regexp-pattern-flags
-// RegExp ( pattern, flags )
-BUILTIN(RegExpConstructor) {
- HandleScope scope(isolate);
-
- Handle<HeapObject> new_target = args.new_target();
- Handle<Object> pattern = args.atOrUndefined(isolate, 1);
- Handle<Object> flags = args.atOrUndefined(isolate, 2);
-
- Handle<JSFunction> target = isolate->regexp_function();
-
- bool pattern_is_regexp;
- {
- Maybe<bool> maybe_pattern_is_regexp =
- RegExpUtils::IsRegExp(isolate, pattern);
- if (maybe_pattern_is_regexp.IsNothing()) {
- DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
- }
- pattern_is_regexp = maybe_pattern_is_regexp.FromJust();
- }
-
- if (new_target->IsUndefined(isolate)) {
- new_target = target;
-
- // ES6 section 21.2.3.1 step 3.b
- if (pattern_is_regexp && flags->IsUndefined(isolate)) {
- Handle<Object> pattern_constructor;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, pattern_constructor,
- Object::GetProperty(pattern,
- isolate->factory()->constructor_string()));
-
- if (pattern_constructor.is_identical_to(new_target)) {
- return *pattern;
- }
- }
- }
-
- if (pattern->IsJSRegExp()) {
- Handle<JSRegExp> regexp_pattern = Handle<JSRegExp>::cast(pattern);
-
- if (flags->IsUndefined(isolate)) {
- flags = PatternFlags(isolate, regexp_pattern);
- }
- pattern = handle(regexp_pattern->source(), isolate);
- } else if (pattern_is_regexp) {
- Handle<Object> pattern_source;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, pattern_source,
- Object::GetProperty(pattern, isolate->factory()->source_string()));
-
- if (flags->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, flags,
- Object::GetProperty(pattern, isolate->factory()->flags_string()));
- }
- pattern = pattern_source;
- }
-
- Handle<JSReceiver> new_target_receiver = Handle<JSReceiver>::cast(new_target);
-
- // TODO(jgruber): Fast-path for target == new_target == unmodified JSRegExp.
-
- Handle<JSObject> object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, object, JSObject::New(target, new_target_receiver));
- Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
-
- RETURN_RESULT_OR_FAILURE(isolate,
- RegExpInitialize(isolate, regexp, pattern, flags));
-}
-
-BUILTIN(RegExpPrototypeCompile) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSRegExp, regexp, "RegExp.prototype.compile");
-
- Handle<Object> pattern = args.atOrUndefined(isolate, 1);
- Handle<Object> flags = args.atOrUndefined(isolate, 2);
-
- if (pattern->IsJSRegExp()) {
- Handle<JSRegExp> pattern_regexp = Handle<JSRegExp>::cast(pattern);
-
- if (!flags->IsUndefined(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kRegExpFlags));
- }
-
- flags = PatternFlags(isolate, pattern_regexp);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, pattern,
- Object::GetProperty(pattern, isolate->factory()->source_string()));
- }
-
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, regexp, RegExpInitialize(isolate, regexp, pattern, flags));
-
- // Return undefined for compatibility with JSC.
- // See http://crbug.com/585775 for web compat details.
-
- return isolate->heap()->undefined_value();
-}
-
-namespace {
-
-compiler::Node* FastLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* regexp) {
+Node* RegExpBuiltinsAssembler::FastLoadLastIndex(Node* regexp) {
// Load the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
- return a->LoadObjectField(regexp, field_offset);
+ return LoadObjectField(regexp, field_offset);
}
-compiler::Node* SlowLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* regexp) {
+Node* RegExpBuiltinsAssembler::SlowLoadLastIndex(Node* context, Node* regexp) {
// Load through the GetProperty stub.
- typedef compiler::Node Node;
-
- Node* const name =
- a->HeapConstant(a->isolate()->factory()->lastIndex_string());
- Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
- return a->CallStub(getproperty_callable, context, regexp, name);
+ Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+ return CallStub(getproperty_callable, context, regexp, name);
}
-compiler::Node* LoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* has_initialmap,
- compiler::Node* regexp) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
-
- Variable var_value(a, MachineRepresentation::kTagged);
-
- Label out(a), if_unmodified(a), if_modified(a);
- a->Branch(has_initialmap, &if_unmodified, &if_modified);
-
- a->Bind(&if_unmodified);
- {
- var_value.Bind(FastLoadLastIndex(a, context, regexp));
- a->Goto(&out);
- }
-
- a->Bind(&if_modified);
- {
- var_value.Bind(SlowLoadLastIndex(a, context, regexp));
- a->Goto(&out);
- }
-
- a->Bind(&out);
- return var_value.value();
+Node* RegExpBuiltinsAssembler::LoadLastIndex(Node* context, Node* regexp,
+ bool is_fastpath) {
+ return is_fastpath ? FastLoadLastIndex(regexp)
+ : SlowLoadLastIndex(context, regexp);
}
// The fast-path of StoreLastIndex when regexp is guaranteed to be an unmodified
// JSRegExp instance.
-void FastStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* regexp, compiler::Node* value) {
+void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) {
// Store the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
- a->StoreObjectField(regexp, field_offset, value);
+ StoreObjectField(regexp, field_offset, value);
}
-void SlowStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* regexp, compiler::Node* value) {
+void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp,
+ Node* value) {
// Store through runtime.
// TODO(ishell): Use SetPropertyStub here once available.
- typedef compiler::Node Node;
-
- Node* const name =
- a->HeapConstant(a->isolate()->factory()->lastIndex_string());
- Node* const language_mode = a->SmiConstant(Smi::FromInt(STRICT));
- a->CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
- language_mode);
+ Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
+ Node* const language_mode = SmiConstant(Smi::FromInt(STRICT));
+ CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
+ language_mode);
}
-void StoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* has_initialmap, compiler::Node* regexp,
- compiler::Node* value) {
- typedef CodeStubAssembler::Label Label;
-
- Label out(a), if_unmodified(a), if_modified(a);
- a->Branch(has_initialmap, &if_unmodified, &if_modified);
-
- a->Bind(&if_unmodified);
- {
- FastStoreLastIndex(a, context, regexp, value);
- a->Goto(&out);
- }
-
- a->Bind(&if_modified);
- {
- SlowStoreLastIndex(a, context, regexp, value);
- a->Goto(&out);
+void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
+ Node* value, bool is_fastpath) {
+ if (is_fastpath) {
+ FastStoreLastIndex(regexp, value);
+ } else {
+ SlowStoreLastIndex(context, regexp, value);
}
-
- a->Bind(&out);
}
-compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
- CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* match_info,
- compiler::Node* string) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Label out(a);
-
- CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
- Node* const num_indices = a->SmiUntag(a->LoadFixedArrayElement(
- match_info, a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0,
- mode));
- Node* const num_results = a->SmiTag(a->WordShr(num_indices, 1));
- Node* const start = a->LoadFixedArrayElement(
- match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), 0,
- mode);
- Node* const end = a->LoadFixedArrayElement(
- match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0,
- mode);
+Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(Node* context,
+ Node* match_info,
+ Node* string) {
+ Label out(this);
+
+ Node* const num_indices = SmiUntag(LoadFixedArrayElement(
+ match_info, RegExpMatchInfo::kNumberOfCapturesIndex));
+ Node* const num_results = SmiTag(WordShr(num_indices, 1));
+ Node* const start =
+ LoadFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex);
+ Node* const end = LoadFixedArrayElement(
+ match_info, RegExpMatchInfo::kFirstCaptureIndex + 1);
// Calculate the substring of the first match before creating the result array
// to avoid an unnecessary write barrier storing the first result.
- Node* const first = a->SubString(context, string, start, end);
+ Node* const first = SubString(context, string, start, end);
Node* const result =
- a->AllocateRegExpResult(context, num_results, start, string);
- Node* const result_elements = a->LoadElements(result);
+ AllocateRegExpResult(context, num_results, start, string);
+ Node* const result_elements = LoadElements(result);
- a->StoreFixedArrayElement(result_elements, a->IntPtrConstant(0), first,
- SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result_elements, 0, first, SKIP_WRITE_BARRIER);
- a->GotoIf(a->SmiEqual(num_results, a->SmiConstant(Smi::FromInt(1))), &out);
+ GotoIf(SmiEqual(num_results, SmiConstant(Smi::FromInt(1))), &out);
// Store all remaining captures.
- Node* const limit = a->IntPtrAdd(
- a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
+ Node* const limit = IntPtrAdd(
+ IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
- Variable var_from_cursor(a, MachineType::PointerRepresentation());
- Variable var_to_cursor(a, MachineType::PointerRepresentation());
+ Variable var_from_cursor(this, MachineType::PointerRepresentation());
+ Variable var_to_cursor(this, MachineType::PointerRepresentation());
- var_from_cursor.Bind(
- a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
- var_to_cursor.Bind(a->IntPtrConstant(1));
+ var_from_cursor.Bind(IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
+ var_to_cursor.Bind(IntPtrConstant(1));
Variable* vars[] = {&var_from_cursor, &var_to_cursor};
- Label loop(a, 2, vars);
+ Label loop(this, 2, vars);
- a->Goto(&loop);
- a->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
Node* const from_cursor = var_from_cursor.value();
Node* const to_cursor = var_to_cursor.value();
- Node* const start = a->LoadFixedArrayElement(match_info, from_cursor);
+ Node* const start = LoadFixedArrayElement(match_info, from_cursor);
- Label next_iter(a);
- a->GotoIf(a->SmiEqual(start, a->SmiConstant(Smi::FromInt(-1))), &next_iter);
+ Label next_iter(this);
+ GotoIf(SmiEqual(start, SmiConstant(Smi::FromInt(-1))), &next_iter);
- Node* const from_cursor_plus1 =
- a->IntPtrAdd(from_cursor, a->IntPtrConstant(1));
- Node* const end = a->LoadFixedArrayElement(match_info, from_cursor_plus1);
+ Node* const from_cursor_plus1 = IntPtrAdd(from_cursor, IntPtrConstant(1));
+ Node* const end = LoadFixedArrayElement(match_info, from_cursor_plus1);
- Node* const capture = a->SubString(context, string, start, end);
- a->StoreFixedArrayElement(result_elements, to_cursor, capture);
- a->Goto(&next_iter);
+ Node* const capture = SubString(context, string, start, end);
+ StoreFixedArrayElement(result_elements, to_cursor, capture);
+ Goto(&next_iter);
- a->Bind(&next_iter);
- var_from_cursor.Bind(a->IntPtrAdd(from_cursor, a->IntPtrConstant(2)));
- var_to_cursor.Bind(a->IntPtrAdd(to_cursor, a->IntPtrConstant(1)));
- a->Branch(a->UintPtrLessThan(var_from_cursor.value(), limit), &loop, &out);
+ Bind(&next_iter);
+ var_from_cursor.Bind(IntPtrAdd(from_cursor, IntPtrConstant(2)));
+ var_to_cursor.Bind(IntPtrAdd(to_cursor, IntPtrConstant(1)));
+ Branch(UintPtrLessThan(var_from_cursor.value(), limit), &loop, &out);
}
- a->Bind(&out);
+ Bind(&out);
return result;
}
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
-compiler::Node* RegExpPrototypeExecInternal(CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* maybe_receiver,
- compiler::Node* maybe_string) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+// Implements the core of RegExp.prototype.exec but without actually
+// constructing the JSRegExpResult. Returns either null (if the RegExp did not
+// match) or a fixed array containing match indices as returned by
+// RegExpExecStub.
+Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
+ Node* const context, Node* const regexp, Node* const string,
+ Label* if_didnotmatch, const bool is_fastpath) {
+ Isolate* const isolate = this->isolate();
+
+ Node* const null = NullConstant();
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
+
+ if (!is_fastpath) {
+ ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
+ "RegExp.prototype.exec");
+ }
- Isolate* const isolate = a->isolate();
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
+ CSA_ASSERT(this, HasInstanceType(regexp, JS_REGEXP_TYPE));
- Node* const null = a->NullConstant();
- Node* const int_zero = a->IntPtrConstant(0);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label out(this);
- Variable var_result(a, MachineRepresentation::kTagged);
- Label out(a);
-
- // Ensure {maybe_receiver} is a JSRegExp.
- Node* const regexp_map = a->ThrowIfNotInstanceType(
- context, maybe_receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
- Node* const regexp = maybe_receiver;
-
- // Check whether the regexp instance is unmodified.
- Node* const native_context = a->LoadNativeContext(context);
- Node* const regexp_fun =
- a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
- Node* const initial_map =
- a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const has_initialmap = a->WordEqual(regexp_map, initial_map);
-
- // Convert {maybe_string} to a string.
- Callable tostring_callable = CodeFactory::ToString(isolate);
- Node* const string = a->CallStub(tostring_callable, context, maybe_string);
- Node* const string_length = a->LoadStringLength(string);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const string_length = LoadStringLength(string);
// Check whether the regexp is global or sticky, which determines whether we
// update last index later on.
- Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
- Node* const is_global_or_sticky =
- a->WordAnd(a->SmiUntag(flags),
- a->IntPtrConstant(JSRegExp::kGlobal | JSRegExp::kSticky));
+ Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Node* const is_global_or_sticky = WordAnd(
+ SmiUntag(flags), IntPtrConstant(JSRegExp::kGlobal | JSRegExp::kSticky));
Node* const should_update_last_index =
- a->WordNotEqual(is_global_or_sticky, int_zero);
+ WordNotEqual(is_global_or_sticky, int_zero);
// Grab and possibly update last index.
- Label run_exec(a);
- Variable var_lastindex(a, MachineRepresentation::kTagged);
+ Label run_exec(this);
+ Variable var_lastindex(this, MachineRepresentation::kTagged);
{
- Label if_doupdate(a), if_dontupdate(a);
- a->Branch(should_update_last_index, &if_doupdate, &if_dontupdate);
+ Label if_doupdate(this), if_dontupdate(this);
+ Branch(should_update_last_index, &if_doupdate, &if_dontupdate);
- a->Bind(&if_doupdate);
+ Bind(&if_doupdate);
{
Node* const regexp_lastindex =
- LoadLastIndex(a, context, has_initialmap, regexp);
+ LoadLastIndex(context, regexp, is_fastpath);
+ var_lastindex.Bind(regexp_lastindex);
+
+ // Omit ToLength if lastindex is a non-negative smi.
+ {
+ Label call_tolength(this, Label::kDeferred), next(this);
+ Branch(TaggedIsPositiveSmi(regexp_lastindex), &next, &call_tolength);
- Callable tolength_callable = CodeFactory::ToLength(isolate);
- Node* const lastindex =
- a->CallStub(tolength_callable, context, regexp_lastindex);
- var_lastindex.Bind(lastindex);
+ Bind(&call_tolength);
+ {
+ Callable tolength_callable = CodeFactory::ToLength(isolate);
+ var_lastindex.Bind(
+ CallStub(tolength_callable, context, regexp_lastindex));
+ Goto(&next);
+ }
+
+ Bind(&next);
+ }
- Label if_isoob(a, Label::kDeferred);
- a->GotoUnless(a->TaggedIsSmi(lastindex), &if_isoob);
- a->GotoUnless(a->SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
- a->Goto(&run_exec);
+ Node* const lastindex = var_lastindex.value();
- a->Bind(&if_isoob);
+ Label if_isoob(this, Label::kDeferred);
+ GotoUnless(TaggedIsSmi(lastindex), &if_isoob);
+ GotoUnless(SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
+ Goto(&run_exec);
+
+ Bind(&if_isoob);
{
- StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
+ StoreLastIndex(context, regexp, smi_zero, is_fastpath);
var_result.Bind(null);
- a->Goto(&out);
+ Goto(if_didnotmatch);
}
}
- a->Bind(&if_dontupdate);
+ Bind(&if_dontupdate);
{
var_lastindex.Bind(smi_zero);
- a->Goto(&run_exec);
+ Goto(&run_exec);
}
}
Node* match_indices;
- Label successful_match(a);
- a->Bind(&run_exec);
+ Label successful_match(this);
+ Bind(&run_exec);
{
// Get last match info from the context.
- Node* const last_match_info = a->LoadContextElement(
+ Node* const last_match_info = LoadContextElement(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
// Call the exec stub.
Callable exec_callable = CodeFactory::RegExpExec(isolate);
- match_indices = a->CallStub(exec_callable, context, regexp, string,
- var_lastindex.value(), last_match_info);
+ match_indices = CallStub(exec_callable, context, regexp, string,
+ var_lastindex.value(), last_match_info);
+ var_result.Bind(match_indices);
// {match_indices} is either null or the RegExpMatchInfo array.
// Return early if exec failed, possibly updating last index.
- a->GotoUnless(a->WordEqual(match_indices, null), &successful_match);
-
- Label return_null(a);
- a->GotoUnless(should_update_last_index, &return_null);
+ GotoUnless(WordEqual(match_indices, null), &successful_match);
- StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
- a->Goto(&return_null);
+ GotoUnless(should_update_last_index, if_didnotmatch);
- a->Bind(&return_null);
- var_result.Bind(null);
- a->Goto(&out);
+ StoreLastIndex(context, regexp, smi_zero, is_fastpath);
+ Goto(if_didnotmatch);
}
- Label construct_result(a);
- a->Bind(&successful_match);
+ Bind(&successful_match);
{
- a->GotoUnless(should_update_last_index, &construct_result);
+ GotoUnless(should_update_last_index, &out);
// Update the new last index from {match_indices}.
- Node* const new_lastindex = a->LoadFixedArrayElement(
- match_indices,
- a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1));
-
- StoreLastIndex(a, context, has_initialmap, regexp, new_lastindex);
- a->Goto(&construct_result);
+ Node* const new_lastindex = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
- a->Bind(&construct_result);
- {
- Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
- match_indices, string);
- var_result.Bind(result);
- a->Goto(&out);
- }
+ StoreLastIndex(context, regexp, new_lastindex, is_fastpath);
+ Goto(&out);
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
-} // namespace
-
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
-void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
- typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ const bool is_fastpath) {
+ Node* const null = NullConstant();
- Node* const maybe_receiver = a->Parameter(0);
- Node* const maybe_string = a->Parameter(1);
- Node* const context = a->Parameter(4);
+ Variable var_result(this, MachineRepresentation::kTagged);
- Node* const result =
- RegExpPrototypeExecInternal(a, context, maybe_receiver, maybe_string);
- a->Return(result);
-}
+ Label if_didnotmatch(this), out(this);
+ Node* const indices_or_null = RegExpPrototypeExecBodyWithoutResult(
+ context, regexp, string, &if_didnotmatch, is_fastpath);
-namespace {
+ // Successful match.
+ {
+ Node* const match_indices = indices_or_null;
+ Node* const result =
+ ConstructNewResultFromMatchInfo(context, match_indices, string);
+ var_result.Bind(result);
+ Goto(&out);
+ }
-compiler::Node* ThrowIfNotJSReceiver(CodeStubAssembler* a, Isolate* isolate,
- compiler::Node* context,
- compiler::Node* value,
- MessageTemplate::Template msg_template,
- char const* method_name) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
+ Bind(&if_didnotmatch);
+ {
+ var_result.Bind(null);
+ Goto(&out);
+ }
- Label out(a), throw_exception(a, Label::kDeferred);
- Variable var_value_map(a, MachineRepresentation::kTagged);
+ Bind(&out);
+ return var_result.value();
+}
- a->GotoIf(a->TaggedIsSmi(value), &throw_exception);
+Node* RegExpBuiltinsAssembler::ThrowIfNotJSReceiver(
+ Node* context, Node* maybe_receiver, MessageTemplate::Template msg_template,
+ char const* method_name) {
+ Label out(this), throw_exception(this, Label::kDeferred);
+ Variable var_value_map(this, MachineRepresentation::kTagged);
+
+ GotoIf(TaggedIsSmi(maybe_receiver), &throw_exception);
// Load the instance type of the {value}.
- var_value_map.Bind(a->LoadMap(value));
- Node* const value_instance_type =
- a->LoadMapInstanceType(var_value_map.value());
+ var_value_map.Bind(LoadMap(maybe_receiver));
+ Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
- a->Branch(a->IsJSReceiverInstanceType(value_instance_type), &out,
- &throw_exception);
+ Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
// The {value} is not a compatible receiver for this method.
- a->Bind(&throw_exception);
+ Bind(&throw_exception);
{
- Node* const message_id = a->SmiConstant(Smi::FromInt(msg_template));
- Node* const method_name_str = a->HeapConstant(
- isolate->factory()->NewStringFromAsciiChecked(method_name, TENURED));
+ Node* const message_id = SmiConstant(Smi::FromInt(msg_template));
+ Node* const method_name_str = HeapConstant(
+ isolate()->factory()->NewStringFromAsciiChecked(method_name, TENURED));
- Callable callable = CodeFactory::ToString(isolate);
- Node* const value_str = a->CallStub(callable, context, value);
+ Callable callable = CodeFactory::ToString(isolate());
+ Node* const value_str = CallStub(callable, context, maybe_receiver);
- a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str, value_str);
- var_value_map.Bind(a->UndefinedConstant());
- a->Goto(&out); // Never reached.
+ CallRuntime(Runtime::kThrowTypeError, context, message_id, method_name_str,
+ value_str);
+ var_value_map.Bind(UndefinedConstant());
+ Goto(&out); // Never reached.
}
- a->Bind(&out);
+ Bind(&out);
return var_value_map.value();
}
-compiler::Node* IsInitialRegExpMap(CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* map) {
- typedef compiler::Node Node;
-
- Node* const native_context = a->LoadNativeContext(context);
+Node* RegExpBuiltinsAssembler::IsInitialRegExpMap(Node* context, Node* map) {
+ Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
- a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
Node* const initial_map =
- a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const has_initialmap = a->WordEqual(map, initial_map);
+ LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const has_initialmap = WordEqual(map, initial_map);
return has_initialmap;
}
@@ -556,192 +416,499 @@ compiler::Node* IsInitialRegExpMap(CodeStubAssembler* a,
// We use a fairly coarse granularity for this and simply check whether both
// the regexp itself is unmodified (i.e. its map has not changed) and its
// prototype is unmodified.
-void BranchIfFastPath(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* map,
- CodeStubAssembler::Label* if_isunmodified,
- CodeStubAssembler::Label* if_ismodified) {
- typedef compiler::Node Node;
-
- Node* const native_context = a->LoadNativeContext(context);
+void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* context, Node* map,
+ Label* if_isunmodified,
+ Label* if_ismodified) {
+ Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
- a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
Node* const initial_map =
- a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const has_initialmap = a->WordEqual(map, initial_map);
+ LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const has_initialmap = WordEqual(map, initial_map);
- a->GotoUnless(has_initialmap, if_ismodified);
+ GotoUnless(has_initialmap, if_ismodified);
- Node* const initial_proto_initial_map = a->LoadContextElement(
- native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
- Node* const proto_map = a->LoadMap(a->LoadMapPrototype(map));
+ Node* const initial_proto_initial_map =
+ LoadContextElement(native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
+ Node* const proto_map = LoadMap(LoadMapPrototype(map));
Node* const proto_has_initialmap =
- a->WordEqual(proto_map, initial_proto_initial_map);
+ WordEqual(proto_map, initial_proto_initial_map);
// TODO(ishell): Update this check once map changes for constant field
// tracking are landing.
- a->Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+ Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
}
-} // namespace
+void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* context, Node* map,
+ Label* if_isunmodified,
+ Label* if_ismodified) {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const initial_regexp_result_map =
+ LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
-void Builtins::Generate_RegExpPrototypeFlagsGetter(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+ Branch(WordEqual(map, initial_regexp_result_map), if_isunmodified,
+ if_ismodified);
+}
- Node* const receiver = a->Parameter(0);
- Node* const context = a->Parameter(3);
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const context = Parameter(4);
- Isolate* isolate = a->isolate();
- Node* const int_zero = a->IntPtrConstant(0);
- Node* const int_one = a->IntPtrConstant(1);
+ // Ensure {maybe_receiver} is a JSRegExp.
+ Node* const regexp_map = ThrowIfNotInstanceType(
+ context, maybe_receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
+ Node* const receiver = maybe_receiver;
- Node* const map = ThrowIfNotJSReceiver(a, isolate, context, receiver,
- MessageTemplate::kRegExpNonObject,
- "RegExp.prototype.flags");
+ // Convert {maybe_string} to a String.
+ Node* const string = ToString(context, maybe_string);
- Variable var_length(a, MachineType::PointerRepresentation());
- Variable var_flags(a, MachineType::PointerRepresentation());
+ Label if_isfastpath(this), if_isslowpath(this);
+ Branch(IsInitialRegExpMap(context, regexp_map), &if_isfastpath,
+ &if_isslowpath);
+
+ Bind(&if_isfastpath);
+ {
+ Node* const result =
+ RegExpPrototypeExecBody(context, receiver, string, true);
+ Return(result);
+ }
+
+ Bind(&if_isslowpath);
+ {
+ Node* const result =
+ RegExpPrototypeExecBody(context, receiver, string, false);
+ Return(result);
+ }
+}
+
+Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
+ Node* const regexp,
+ bool is_fastpath) {
+ Isolate* isolate = this->isolate();
+
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const int_one = IntPtrConstant(1);
+ Variable var_length(this, MachineType::PointerRepresentation());
+ Variable var_flags(this, MachineType::PointerRepresentation());
// First, count the number of characters we will need and check which flags
// are set.
var_length.Bind(int_zero);
- Label if_isunmodifiedjsregexp(a),
- if_isnotunmodifiedjsregexp(a, Label::kDeferred);
- a->Branch(IsInitialRegExpMap(a, context, map), &if_isunmodifiedjsregexp,
- &if_isnotunmodifiedjsregexp);
-
- Label construct_string(a);
- a->Bind(&if_isunmodifiedjsregexp);
- {
+ if (is_fastpath) {
// Refer to JSRegExp's flag property on the fast-path.
- Node* const flags_smi =
- a->LoadObjectField(receiver, JSRegExp::kFlagsOffset);
- Node* const flags_intptr = a->SmiUntag(flags_smi);
+ Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Node* const flags_intptr = SmiUntag(flags_smi);
var_flags.Bind(flags_intptr);
- Label label_global(a), label_ignorecase(a), label_multiline(a),
- label_unicode(a), label_sticky(a);
-
-#define CASE_FOR_FLAG(FLAG, LABEL, NEXT_LABEL) \
- do { \
- a->Bind(&LABEL); \
- Node* const mask = a->IntPtrConstant(FLAG); \
- a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
- &NEXT_LABEL); \
- var_length.Bind(a->IntPtrAdd(var_length.value(), int_one)); \
- a->Goto(&NEXT_LABEL); \
+#define CASE_FOR_FLAG(FLAG) \
+ do { \
+ Label next(this); \
+ GotoUnless(IsSetWord(flags_intptr, FLAG), &next); \
+ var_length.Bind(IntPtrAdd(var_length.value(), int_one)); \
+ Goto(&next); \
+ Bind(&next); \
} while (false)
- a->Goto(&label_global);
- CASE_FOR_FLAG(JSRegExp::kGlobal, label_global, label_ignorecase);
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase, label_ignorecase, label_multiline);
- CASE_FOR_FLAG(JSRegExp::kMultiline, label_multiline, label_unicode);
- CASE_FOR_FLAG(JSRegExp::kUnicode, label_unicode, label_sticky);
- CASE_FOR_FLAG(JSRegExp::kSticky, label_sticky, construct_string);
+ CASE_FOR_FLAG(JSRegExp::kGlobal);
+ CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
+ CASE_FOR_FLAG(JSRegExp::kMultiline);
+ CASE_FOR_FLAG(JSRegExp::kUnicode);
+ CASE_FOR_FLAG(JSRegExp::kSticky);
#undef CASE_FOR_FLAG
- }
+ } else {
+ DCHECK(!is_fastpath);
- a->Bind(&if_isnotunmodifiedjsregexp);
- {
// Fall back to GetProperty stub on the slow-path.
var_flags.Bind(int_zero);
- Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
- Label label_global(a), label_ignorecase(a), label_multiline(a),
- label_unicode(a), label_sticky(a);
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
-#define CASE_FOR_FLAG(NAME, FLAG, LABEL, NEXT_LABEL) \
+#define CASE_FOR_FLAG(NAME, FLAG) \
do { \
- a->Bind(&LABEL); \
+ Label next(this); \
Node* const name = \
- a->HeapConstant(isolate->factory()->NewStringFromAsciiChecked(NAME)); \
- Node* const flag = \
- a->CallStub(getproperty_callable, context, receiver, name); \
- Label if_isflagset(a); \
- a->BranchIfToBooleanIsTrue(flag, &if_isflagset, &NEXT_LABEL); \
- a->Bind(&if_isflagset); \
- var_length.Bind(a->IntPtrAdd(var_length.value(), int_one)); \
- var_flags.Bind(a->WordOr(var_flags.value(), a->IntPtrConstant(FLAG))); \
- a->Goto(&NEXT_LABEL); \
+ HeapConstant(isolate->factory()->InternalizeUtf8String(NAME)); \
+ Node* const flag = CallStub(getproperty_callable, context, regexp, name); \
+ Label if_isflagset(this); \
+ BranchIfToBooleanIsTrue(flag, &if_isflagset, &next); \
+ Bind(&if_isflagset); \
+ var_length.Bind(IntPtrAdd(var_length.value(), int_one)); \
+ var_flags.Bind(WordOr(var_flags.value(), IntPtrConstant(FLAG))); \
+ Goto(&next); \
+ Bind(&next); \
} while (false)
- a->Goto(&label_global);
- CASE_FOR_FLAG("global", JSRegExp::kGlobal, label_global, label_ignorecase);
- CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase, label_ignorecase,
- label_multiline);
- CASE_FOR_FLAG("multiline", JSRegExp::kMultiline, label_multiline,
- label_unicode);
- CASE_FOR_FLAG("unicode", JSRegExp::kUnicode, label_unicode, label_sticky);
- CASE_FOR_FLAG("sticky", JSRegExp::kSticky, label_sticky, construct_string);
+ CASE_FOR_FLAG("global", JSRegExp::kGlobal);
+ CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase);
+ CASE_FOR_FLAG("multiline", JSRegExp::kMultiline);
+ CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
+ CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
#undef CASE_FOR_FLAG
}
// Allocate a string of the required length and fill it with the corresponding
// char for each set flag.
- a->Bind(&construct_string);
{
- Node* const result =
- a->AllocateSeqOneByteString(context, var_length.value());
+ Node* const result = AllocateSeqOneByteString(context, var_length.value());
Node* const flags_intptr = var_flags.value();
- Variable var_offset(a, MachineType::PointerRepresentation());
+ Variable var_offset(this, MachineType::PointerRepresentation());
var_offset.Bind(
- a->IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- Label label_global(a), label_ignorecase(a), label_multiline(a),
- label_unicode(a), label_sticky(a), out(a);
-
-#define CASE_FOR_FLAG(FLAG, CHAR, LABEL, NEXT_LABEL) \
- do { \
- a->Bind(&LABEL); \
- Node* const mask = a->IntPtrConstant(FLAG); \
- a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
- &NEXT_LABEL); \
- Node* const value = a->IntPtrConstant(CHAR); \
- a->StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
- var_offset.value(), value); \
- var_offset.Bind(a->IntPtrAdd(var_offset.value(), int_one)); \
- a->Goto(&NEXT_LABEL); \
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+#define CASE_FOR_FLAG(FLAG, CHAR) \
+ do { \
+ Label next(this); \
+ GotoUnless(IsSetWord(flags_intptr, FLAG), &next); \
+ Node* const value = Int32Constant(CHAR); \
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
+ var_offset.value(), value); \
+ var_offset.Bind(IntPtrAdd(var_offset.value(), int_one)); \
+ Goto(&next); \
+ Bind(&next); \
} while (false)
- a->Goto(&label_global);
- CASE_FOR_FLAG(JSRegExp::kGlobal, 'g', label_global, label_ignorecase);
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i', label_ignorecase,
- label_multiline);
- CASE_FOR_FLAG(JSRegExp::kMultiline, 'm', label_multiline, label_unicode);
- CASE_FOR_FLAG(JSRegExp::kUnicode, 'u', label_unicode, label_sticky);
- CASE_FOR_FLAG(JSRegExp::kSticky, 'y', label_sticky, out);
+ CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
+ CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
+ CASE_FOR_FLAG(JSRegExp::kMultiline, 'm');
+ CASE_FOR_FLAG(JSRegExp::kUnicode, 'u');
+ CASE_FOR_FLAG(JSRegExp::kSticky, 'y');
#undef CASE_FOR_FLAG
- a->Bind(&out);
- a->Return(result);
+ return result;
}
}
-// ES6 21.2.5.10.
-BUILTIN(RegExpPrototypeSourceGetter) {
- HandleScope scope(isolate);
+// ES#sec-isregexp IsRegExp ( argument )
+Node* RegExpBuiltinsAssembler::IsRegExp(Node* const context,
+ Node* const maybe_receiver) {
+ Label out(this), if_isregexp(this);
+
+ Variable var_result(this, MachineRepresentation::kWord32);
+ var_result.Bind(Int32Constant(0));
+
+ GotoIf(TaggedIsSmi(maybe_receiver), &out);
+ GotoUnless(IsJSReceiver(maybe_receiver), &out);
+
+ Node* const receiver = maybe_receiver;
+
+ // Check @@match.
+ {
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+ Node* const name = HeapConstant(isolate()->factory()->match_symbol());
+ Node* const value = CallStub(getproperty_callable, context, receiver, name);
+
+ Label match_isundefined(this), match_isnotundefined(this);
+ Branch(IsUndefined(value), &match_isundefined, &match_isnotundefined);
+
+ Bind(&match_isundefined);
+ Branch(HasInstanceType(receiver, JS_REGEXP_TYPE), &if_isregexp, &out);
+
+ Bind(&match_isnotundefined);
+ BranchIfToBooleanIsTrue(value, &if_isregexp, &out);
+ }
+
+ Bind(&if_isregexp);
+ var_result.Bind(Int32Constant(1));
+ Goto(&out);
+
+ Bind(&out);
+ return var_result.value();
+}
+
+// ES#sec-regexpinitialize
+// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
+Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
+ Node* const regexp,
+ Node* const maybe_pattern,
+ Node* const maybe_flags) {
+ // Normalize pattern.
+ Node* const pattern =
+ Select(IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); },
+ [=] { return ToString(context, maybe_pattern); },
+ MachineRepresentation::kTagged);
+
+ // Normalize flags.
+ Node* const flags =
+ Select(IsUndefined(maybe_flags), [=] { return EmptyStringConstant(); },
+ [=] { return ToString(context, maybe_flags); },
+ MachineRepresentation::kTagged);
+
+ // Initialize.
+
+ return CallRuntime(Runtime::kRegExpInitializeAndCompile, context, regexp,
+ pattern, flags);
+}
+
+TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const context = Parameter(3);
+
+ Node* const map = ThrowIfNotJSReceiver(context, maybe_receiver,
+ MessageTemplate::kRegExpNonObject,
+ "RegExp.prototype.flags");
+ Node* const receiver = maybe_receiver;
+
+ Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
+ Branch(IsInitialRegExpMap(context, map), &if_isfastpath, &if_isslowpath);
+
+ Bind(&if_isfastpath);
+ Return(FlagsGetter(context, receiver, true));
+
+ Bind(&if_isslowpath);
+ Return(FlagsGetter(context, receiver, false));
+}
+
+// ES#sec-regexp-pattern-flags
+// RegExp ( pattern, flags )
+TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
+ Node* const pattern = Parameter(1);
+ Node* const flags = Parameter(2);
+ Node* const new_target = Parameter(3);
+ Node* const context = Parameter(5);
+
+ Isolate* isolate = this->isolate();
+
+ Variable var_flags(this, MachineRepresentation::kTagged);
+ Variable var_pattern(this, MachineRepresentation::kTagged);
+ Variable var_new_target(this, MachineRepresentation::kTagged);
+
+ var_flags.Bind(flags);
+ var_pattern.Bind(pattern);
+ var_new_target.Bind(new_target);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const regexp_function =
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+
+ Node* const pattern_is_regexp = IsRegExp(context, pattern);
+
+ {
+ Label next(this);
+
+ GotoUnless(IsUndefined(new_target), &next);
+ var_new_target.Bind(regexp_function);
+
+ GotoUnless(pattern_is_regexp, &next);
+ GotoUnless(IsUndefined(flags), &next);
+
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const name = HeapConstant(isolate->factory()->constructor_string());
+ Node* const value = CallStub(getproperty_callable, context, pattern, name);
+
+ GotoUnless(WordEqual(value, regexp_function), &next);
+ Return(pattern);
+
+ Bind(&next);
+ }
+
+ {
+ Label next(this), if_patternisfastregexp(this),
+ if_patternisslowregexp(this);
+ GotoIf(TaggedIsSmi(pattern), &next);
+
+ GotoIf(HasInstanceType(pattern, JS_REGEXP_TYPE), &if_patternisfastregexp);
+
+ Branch(pattern_is_regexp, &if_patternisslowregexp, &next);
+
+ Bind(&if_patternisfastregexp);
+ {
+ Node* const source = LoadObjectField(pattern, JSRegExp::kSourceOffset);
+ var_pattern.Bind(source);
+
+ {
+ Label inner_next(this);
+ GotoUnless(IsUndefined(flags), &inner_next);
+
+ Node* const value = FlagsGetter(context, pattern, true);
+ var_flags.Bind(value);
+ Goto(&inner_next);
+
+ Bind(&inner_next);
+ }
+
+ Goto(&next);
+ }
+
+ Bind(&if_patternisslowregexp);
+ {
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+
+ {
+ Node* const name = HeapConstant(isolate->factory()->source_string());
+ Node* const value =
+ CallStub(getproperty_callable, context, pattern, name);
+ var_pattern.Bind(value);
+ }
+
+ {
+ Label inner_next(this);
+ GotoUnless(IsUndefined(flags), &inner_next);
+
+ Node* const name = HeapConstant(isolate->factory()->flags_string());
+ Node* const value =
+ CallStub(getproperty_callable, context, pattern, name);
+ var_flags.Bind(value);
+ Goto(&inner_next);
+
+ Bind(&inner_next);
+ }
+
+ Goto(&next);
+ }
+
+ Bind(&next);
+ }
+
+ // Allocate.
+
+ Variable var_regexp(this, MachineRepresentation::kTagged);
+ {
+ Label allocate_jsregexp(this), allocate_generic(this, Label::kDeferred),
+ next(this);
+ Branch(WordEqual(var_new_target.value(), regexp_function),
+ &allocate_jsregexp, &allocate_generic);
+
+ Bind(&allocate_jsregexp);
+ {
+ Node* const initial_map = LoadObjectField(
+ regexp_function, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const regexp = AllocateJSObjectFromMap(initial_map);
+ var_regexp.Bind(regexp);
+ Goto(&next);
+ }
+
+ Bind(&allocate_generic);
+ {
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ Node* const regexp = constructor_assembler.EmitFastNewObject(
+ context, regexp_function, var_new_target.value());
+ var_regexp.Bind(regexp);
+ Goto(&next);
+ }
+
+ Bind(&next);
+ }
+
+ Node* const result = RegExpInitialize(context, var_regexp.value(),
+ var_pattern.value(), var_flags.value());
+ Return(result);
+}
+
+// ES#sec-regexp.prototype.compile
+// RegExp.prototype.compile ( pattern, flags )
+TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_pattern = Parameter(1);
+ Node* const maybe_flags = Parameter(2);
+ Node* const context = Parameter(5);
+
+ ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE,
+ "RegExp.prototype.compile");
+ Node* const receiver = maybe_receiver;
+
+ Variable var_flags(this, MachineRepresentation::kTagged);
+ Variable var_pattern(this, MachineRepresentation::kTagged);
+
+ var_flags.Bind(maybe_flags);
+ var_pattern.Bind(maybe_pattern);
+
+ // Handle a JSRegExp pattern.
+ {
+ Label next(this);
+
+ GotoIf(TaggedIsSmi(maybe_pattern), &next);
+ GotoUnless(HasInstanceType(maybe_pattern, JS_REGEXP_TYPE), &next);
+
+ Node* const pattern = maybe_pattern;
+
+ // {maybe_flags} must be undefined in this case, otherwise throw.
+ {
+ Label next(this);
+ GotoIf(IsUndefined(maybe_flags), &next);
- Handle<Object> recv = args.receiver();
- if (!recv->IsJSRegExp()) {
- Handle<JSFunction> regexp_fun = isolate->regexp_function();
- if (*recv == regexp_fun->prototype()) {
- isolate->CountUsage(v8::Isolate::kRegExpPrototypeSourceGetter);
- return *isolate->factory()->NewStringFromAsciiChecked("(?:)");
+ Node* const message_id = SmiConstant(MessageTemplate::kRegExpFlags);
+ TailCallRuntime(Runtime::kThrowTypeError, context, message_id);
+
+ Bind(&next);
}
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kRegExpNonRegExp,
- isolate->factory()->NewStringFromAsciiChecked(
- "RegExp.prototype.source")));
+
+ Node* const new_flags = FlagsGetter(context, pattern, true);
+ Node* const new_pattern = LoadObjectField(pattern, JSRegExp::kSourceOffset);
+
+ var_flags.Bind(new_flags);
+ var_pattern.Bind(new_pattern);
+
+ Goto(&next);
+ Bind(&next);
}
- Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(recv);
- return regexp->source();
+ Node* const result = RegExpInitialize(context, receiver, var_pattern.value(),
+ var_flags.value());
+ Return(result);
+}
+
+// ES6 21.2.5.10.
+TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
+ Node* const receiver = Parameter(0);
+ Node* const context = Parameter(3);
+
+ // Check whether we have an unmodified regexp instance.
+ Label if_isjsregexp(this), if_isnotjsregexp(this, Label::kDeferred);
+
+ GotoIf(TaggedIsSmi(receiver), &if_isnotjsregexp);
+ Branch(HasInstanceType(receiver, JS_REGEXP_TYPE), &if_isjsregexp,
+ &if_isnotjsregexp);
+
+ Bind(&if_isjsregexp);
+ {
+ Node* const source = LoadObjectField(receiver, JSRegExp::kSourceOffset);
+ Return(source);
+ }
+
+ Bind(&if_isnotjsregexp);
+ {
+ Isolate* isolate = this->isolate();
+ Node* const native_context = LoadNativeContext(context);
+ Node* const regexp_fun =
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map =
+ LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const initial_prototype = LoadMapPrototype(initial_map);
+
+ Label if_isprototype(this), if_isnotprototype(this);
+ Branch(WordEqual(receiver, initial_prototype), &if_isprototype,
+ &if_isnotprototype);
+
+ Bind(&if_isprototype);
+ {
+ const int counter = v8::Isolate::kRegExpPrototypeSourceGetter;
+ Node* const counter_smi = SmiConstant(counter);
+ CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
+
+ Node* const result =
+ HeapConstant(isolate->factory()->NewStringFromAsciiChecked("(?:)"));
+ Return(result);
+ }
+
+ Bind(&if_isnotprototype);
+ {
+ Node* const message_id =
+ SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+ Node* const method_name_str =
+ HeapConstant(isolate->factory()->NewStringFromAsciiChecked(
+ "RegExp.prototype.source"));
+ TailCallRuntime(Runtime::kThrowTypeError, context, message_id,
+ method_name_str);
+ }
+ }
}
BUILTIN(RegExpPrototypeToString) {
@@ -781,126 +948,166 @@ BUILTIN(RegExpPrototypeToString) {
RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
}
-// ES6 21.2.4.2.
-BUILTIN(RegExpPrototypeSpeciesGetter) {
- HandleScope scope(isolate);
- return *args.receiver();
+// Fast-path implementation for flag checks on an unmodified JSRegExp instance.
+Node* RegExpBuiltinsAssembler::FastFlagGetter(Node* const regexp,
+ JSRegExp::Flag flag) {
+ Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Node* const mask = SmiConstant(Smi::FromInt(flag));
+ Node* const is_flag_set = WordNotEqual(SmiAnd(flags, mask), smi_zero);
+
+ return is_flag_set;
}
-namespace {
+// Load through the GetProperty stub.
+Node* RegExpBuiltinsAssembler::SlowFlagGetter(Node* const context,
+ Node* const regexp,
+ JSRegExp::Flag flag) {
+ Factory* factory = isolate()->factory();
-// Fast-path implementation for flag checks on an unmodified JSRegExp instance.
-compiler::Node* FastFlagGetter(CodeStubAssembler* a,
- compiler::Node* const regexp,
- JSRegExp::Flag flag) {
- typedef compiler::Node Node;
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kWord32);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
- Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
- Node* const mask = a->SmiConstant(Smi::FromInt(flag));
- Node* const is_flag_set = a->WordNotEqual(a->WordAnd(flags, mask), smi_zero);
+ Node* name;
- return is_flag_set;
+ switch (flag) {
+ case JSRegExp::kGlobal:
+ name = HeapConstant(factory->global_string());
+ break;
+ case JSRegExp::kIgnoreCase:
+ name = HeapConstant(factory->ignoreCase_string());
+ break;
+ case JSRegExp::kMultiline:
+ name = HeapConstant(factory->multiline_string());
+ break;
+ case JSRegExp::kSticky:
+ name = HeapConstant(factory->sticky_string());
+ break;
+ case JSRegExp::kUnicode:
+ name = HeapConstant(factory->unicode_string());
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+ Node* const value = CallStub(getproperty_callable, context, regexp, name);
+
+ Label if_true(this), if_false(this);
+ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+
+ Bind(&if_true);
+ {
+ var_result.Bind(Int32Constant(1));
+ Goto(&out);
+ }
+
+ Bind(&if_false);
+ {
+ var_result.Bind(Int32Constant(0));
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
}
-void Generate_FlagGetter(CodeStubAssembler* a, JSRegExp::Flag flag,
- v8::Isolate::UseCounterFeature counter,
- const char* method_name) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::FlagGetter(Node* const context,
+ Node* const regexp,
+ JSRegExp::Flag flag,
+ bool is_fastpath) {
+ return is_fastpath ? FastFlagGetter(regexp, flag)
+ : SlowFlagGetter(context, regexp, flag);
+}
- Node* const receiver = a->Parameter(0);
- Node* const context = a->Parameter(3);
+void RegExpBuiltinsAssembler::FlagGetter(JSRegExp::Flag flag,
+ v8::Isolate::UseCounterFeature counter,
+ const char* method_name) {
+ Node* const receiver = Parameter(0);
+ Node* const context = Parameter(3);
- Isolate* isolate = a->isolate();
+ Isolate* isolate = this->isolate();
// Check whether we have an unmodified regexp instance.
- Label if_isunmodifiedjsregexp(a),
- if_isnotunmodifiedjsregexp(a, Label::kDeferred);
+ Label if_isunmodifiedjsregexp(this),
+ if_isnotunmodifiedjsregexp(this, Label::kDeferred);
- a->GotoIf(a->TaggedIsSmi(receiver), &if_isnotunmodifiedjsregexp);
+ GotoIf(TaggedIsSmi(receiver), &if_isnotunmodifiedjsregexp);
- Node* const receiver_map = a->LoadMap(receiver);
- Node* const instance_type = a->LoadMapInstanceType(receiver_map);
+ Node* const receiver_map = LoadMap(receiver);
+ Node* const instance_type = LoadMapInstanceType(receiver_map);
- a->Branch(a->Word32Equal(instance_type, a->Int32Constant(JS_REGEXP_TYPE)),
- &if_isunmodifiedjsregexp, &if_isnotunmodifiedjsregexp);
+ Branch(Word32Equal(instance_type, Int32Constant(JS_REGEXP_TYPE)),
+ &if_isunmodifiedjsregexp, &if_isnotunmodifiedjsregexp);
- a->Bind(&if_isunmodifiedjsregexp);
+ Bind(&if_isunmodifiedjsregexp);
{
// Refer to JSRegExp's flag property on the fast-path.
- Node* const is_flag_set = FastFlagGetter(a, receiver, flag);
- a->Return(a->Select(is_flag_set, a->TrueConstant(), a->FalseConstant()));
+ Node* const is_flag_set = FastFlagGetter(receiver, flag);
+ Return(SelectBooleanConstant(is_flag_set));
}
- a->Bind(&if_isnotunmodifiedjsregexp);
+ Bind(&if_isnotunmodifiedjsregexp);
{
- Node* const native_context = a->LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
- a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
- Node* const initial_map = a->LoadObjectField(
- regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const initial_prototype = a->LoadMapPrototype(initial_map);
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map =
+ LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const initial_prototype = LoadMapPrototype(initial_map);
- Label if_isprototype(a), if_isnotprototype(a);
- a->Branch(a->WordEqual(receiver, initial_prototype), &if_isprototype,
- &if_isnotprototype);
+ Label if_isprototype(this), if_isnotprototype(this);
+ Branch(WordEqual(receiver, initial_prototype), &if_isprototype,
+ &if_isnotprototype);
- a->Bind(&if_isprototype);
+ Bind(&if_isprototype);
{
- Node* const counter_smi = a->SmiConstant(Smi::FromInt(counter));
- a->CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
- a->Return(a->UndefinedConstant());
+ Node* const counter_smi = SmiConstant(Smi::FromInt(counter));
+ CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
+ Return(UndefinedConstant());
}
- a->Bind(&if_isnotprototype);
+ Bind(&if_isnotprototype);
{
Node* const message_id =
- a->SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
- Node* const method_name_str = a->HeapConstant(
+ SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+ Node* const method_name_str = HeapConstant(
isolate->factory()->NewStringFromAsciiChecked(method_name));
- a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str);
- a->Return(a->UndefinedConstant()); // Never reached.
+ CallRuntime(Runtime::kThrowTypeError, context, message_id,
+ method_name_str);
+ Return(UndefinedConstant()); // Never reached.
}
}
}
-} // namespace
-
// ES6 21.2.5.4.
-void Builtins::Generate_RegExpPrototypeGlobalGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kGlobal,
- v8::Isolate::kRegExpPrototypeOldFlagGetter,
- "RegExp.prototype.global");
+TF_BUILTIN(RegExpPrototypeGlobalGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kGlobal, v8::Isolate::kRegExpPrototypeOldFlagGetter,
+ "RegExp.prototype.global");
}
// ES6 21.2.5.5.
-void Builtins::Generate_RegExpPrototypeIgnoreCaseGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kIgnoreCase,
- v8::Isolate::kRegExpPrototypeOldFlagGetter,
- "RegExp.prototype.ignoreCase");
+TF_BUILTIN(RegExpPrototypeIgnoreCaseGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kIgnoreCase, v8::Isolate::kRegExpPrototypeOldFlagGetter,
+ "RegExp.prototype.ignoreCase");
}
// ES6 21.2.5.7.
-void Builtins::Generate_RegExpPrototypeMultilineGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kMultiline,
- v8::Isolate::kRegExpPrototypeOldFlagGetter,
- "RegExp.prototype.multiline");
+TF_BUILTIN(RegExpPrototypeMultilineGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kMultiline, v8::Isolate::kRegExpPrototypeOldFlagGetter,
+ "RegExp.prototype.multiline");
}
// ES6 21.2.5.12.
-void Builtins::Generate_RegExpPrototypeStickyGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kSticky,
- v8::Isolate::kRegExpPrototypeStickyGetter,
- "RegExp.prototype.sticky");
+TF_BUILTIN(RegExpPrototypeStickyGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kSticky, v8::Isolate::kRegExpPrototypeStickyGetter,
+ "RegExp.prototype.sticky");
}
// ES6 21.2.5.15.
-void Builtins::Generate_RegExpPrototypeUnicodeGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kUnicode,
- v8::Isolate::kRegExpPrototypeUnicodeGetter,
- "RegExp.prototype.unicode");
+TF_BUILTIN(RegExpPrototypeUnicodeGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kUnicode, v8::Isolate::kRegExpPrototypeUnicodeGetter,
+ "RegExp.prototype.unicode");
}
// The properties $1..$9 are the first nine capturing substrings of the last
@@ -986,722 +1193,977 @@ BUILTIN(RegExpRightContextGetter) {
return *isolate->factory()->NewSubString(last_subject, start_index, len);
}
-namespace {
-
// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
-compiler::Node* RegExpExec(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* recv, compiler::Node* string) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
+ Node* string) {
+ Isolate* isolate = this->isolate();
- Isolate* isolate = a->isolate();
+ Node* const null = NullConstant();
- Node* const null = a->NullConstant();
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label out(this), if_isfastpath(this), if_isslowpath(this);
- Variable var_result(a, MachineRepresentation::kTagged);
- Label out(a), call_builtin_exec(a), slow_path(a, Label::kDeferred);
+ Node* const map = LoadMap(regexp);
+ BranchIfFastRegExp(context, map, &if_isfastpath, &if_isslowpath);
- Node* const map = a->LoadMap(recv);
- BranchIfFastPath(a, context, map, &call_builtin_exec, &slow_path);
-
- a->Bind(&call_builtin_exec);
+ Bind(&if_isfastpath);
{
- Node* const result = RegExpPrototypeExecInternal(a, context, recv, string);
+ Node* const result = RegExpPrototypeExecBody(context, regexp, string, true);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&slow_path);
+ Bind(&if_isslowpath);
{
// Take the slow path of fetching the exec property, calling it, and
// verifying its return value.
// Get the exec property.
- Node* const name = a->HeapConstant(isolate->factory()->exec_string());
- Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
- Node* const exec = a->CallStub(getproperty_callable, context, recv, name);
+ Node* const name = HeapConstant(isolate->factory()->exec_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const exec = CallStub(getproperty_callable, context, regexp, name);
// Is {exec} callable?
- Label if_iscallable(a), if_isnotcallable(a);
+ Label if_iscallable(this), if_isnotcallable(this);
- a->GotoIf(a->TaggedIsSmi(exec), &if_isnotcallable);
+ GotoIf(TaggedIsSmi(exec), &if_isnotcallable);
- Node* const exec_map = a->LoadMap(exec);
- a->Branch(a->IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
+ Node* const exec_map = LoadMap(exec);
+ Branch(IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
- a->Bind(&if_iscallable);
+ Bind(&if_iscallable);
{
Callable call_callable = CodeFactory::Call(isolate);
- Node* const result =
- a->CallJS(call_callable, context, exec, recv, string);
+ Node* const result = CallJS(call_callable, context, exec, regexp, string);
var_result.Bind(result);
- a->GotoIf(a->WordEqual(result, null), &out);
+ GotoIf(WordEqual(result, null), &out);
- ThrowIfNotJSReceiver(a, isolate, context, result,
+ ThrowIfNotJSReceiver(context, result,
MessageTemplate::kInvalidRegExpExecResult, "unused");
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&if_isnotcallable);
+ Bind(&if_isnotcallable);
{
- a->ThrowIfNotInstanceType(context, recv, JS_REGEXP_TYPE,
- "RegExp.prototype.exec");
- a->Goto(&call_builtin_exec);
+ ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
+ "RegExp.prototype.exec");
+
+ Node* const result =
+ RegExpPrototypeExecBody(context, regexp, string, false);
+ var_result.Bind(result);
+ Goto(&out);
}
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
-} // namespace
-
// ES#sec-regexp.prototype.test
// RegExp.prototype.test ( S )
-void Builtins::Generate_RegExpPrototypeTest(CodeStubAssembler* a) {
- typedef compiler::Node Node;
-
- Isolate* const isolate = a->isolate();
-
- Node* const maybe_receiver = a->Parameter(0);
- Node* const maybe_string = a->Parameter(1);
- Node* const context = a->Parameter(4);
+TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const context = Parameter(4);
// Ensure {maybe_receiver} is a JSReceiver.
- ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.test");
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.test");
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = a->ToString(context, maybe_string);
+ Node* const string = ToString(context, maybe_string);
- // Call exec.
- Node* const match_indices = RegExpExec(a, context, receiver, string);
-
- // Return true iff exec matched successfully.
- Node* const result = a->Select(a->WordEqual(match_indices, a->NullConstant()),
- a->FalseConstant(), a->TrueConstant());
- a->Return(result);
-}
-
-// ES#sec-regexp.prototype-@@match
-// RegExp.prototype [ @@match ] ( string )
-BUILTIN(RegExpPrototypeMatch) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@match");
+ Label fast_path(this), slow_path(this);
+ BranchIfFastRegExp(context, map, &fast_path, &slow_path);
- Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
+ Bind(&fast_path);
+ {
+ Label if_didnotmatch(this);
+ RegExpPrototypeExecBodyWithoutResult(context, receiver, string,
+ &if_didnotmatch, true);
+ Return(TrueConstant());
- Handle<String> string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
- Object::ToString(isolate, string_obj));
+ Bind(&if_didnotmatch);
+ Return(FalseConstant());
+ }
- Handle<Object> global_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, global_obj,
- JSReceiver::GetProperty(recv, isolate->factory()->global_string()));
- const bool global = global_obj->BooleanValue();
+ Bind(&slow_path);
+ {
+ // Call exec.
+ Node* const match_indices = RegExpExec(context, receiver, string);
- if (!global) {
- RETURN_RESULT_OR_FAILURE(
- isolate,
- RegExpUtils::RegExpExec(isolate, recv, string,
- isolate->factory()->undefined_value()));
+ // Return true iff exec matched successfully.
+ Node* const result =
+ SelectBooleanConstant(WordNotEqual(match_indices, NullConstant()));
+ Return(result);
}
+}
- Handle<Object> unicode_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, unicode_obj,
- JSReceiver::GetProperty(recv, isolate->factory()->unicode_string()));
- const bool unicode = unicode_obj->BooleanValue();
-
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- RegExpUtils::SetLastIndex(isolate, recv, 0));
+Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
+ Node* const index,
+ Node* const is_unicode) {
+ Variable var_result(this, MachineRepresentation::kTagged);
- static const int kInitialArraySize = 8;
- Handle<FixedArray> elems =
- isolate->factory()->NewFixedArrayWithHoles(kInitialArraySize);
+ // Default to last_index + 1.
+ Node* const index_plus_one = SmiAdd(index, SmiConstant(1));
+ var_result.Bind(index_plus_one);
- int n = 0;
- for (;; n++) {
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- RegExpUtils::RegExpExec(isolate, recv, string,
- isolate->factory()->undefined_value()));
+ Label if_isunicode(this), out(this);
+ Branch(is_unicode, &if_isunicode, &out);
- if (result->IsNull(isolate)) {
- if (n == 0) return isolate->heap()->null_value();
- break;
- }
+ Bind(&if_isunicode);
+ {
+ Node* const string_length = LoadStringLength(string);
+ GotoUnless(SmiLessThan(index_plus_one, string_length), &out);
- Handle<Object> match_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
- Object::GetElement(isolate, result, 0));
+ Node* const lead = StringCharCodeAt(string, index);
+ GotoUnless(Word32Equal(Word32And(lead, Int32Constant(0xFC00)),
+ Int32Constant(0xD800)),
+ &out);
- Handle<String> match;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match,
- Object::ToString(isolate, match_obj));
+ Node* const trail = StringCharCodeAt(string, index_plus_one);
+ GotoUnless(Word32Equal(Word32And(trail, Int32Constant(0xFC00)),
+ Int32Constant(0xDC00)),
+ &out);
- elems = FixedArray::SetAndGrow(elems, n, match);
+ // At a surrogate pair, return index + 2.
+ Node* const index_plus_two = SmiAdd(index, SmiConstant(2));
+ var_result.Bind(index_plus_two);
- if (match->length() == 0) {
- RETURN_FAILURE_ON_EXCEPTION(isolate, RegExpUtils::SetAdvancedStringIndex(
- isolate, recv, string, unicode));
- }
+ Goto(&out);
}
- elems->Shrink(n);
- return *isolate->factory()->NewJSArrayWithElements(elems);
+ Bind(&out);
+ return var_result.value();
}
namespace {
-void Generate_RegExpPrototypeSearchBody(CodeStubAssembler* a,
- compiler::Node* const receiver,
- compiler::Node* const string,
- compiler::Node* const context,
- bool is_fastpath) {
+// Utility class implementing a growable fixed array through CSA.
+class GrowableFixedArray {
typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
- Isolate* const isolate = a->isolate();
+ public:
+ explicit GrowableFixedArray(CodeStubAssembler* a)
+ : assembler_(a),
+ var_array_(a, MachineRepresentation::kTagged),
+ var_length_(a, MachineType::PointerRepresentation()),
+ var_capacity_(a, MachineType::PointerRepresentation()) {
+ Initialize();
+ }
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
+ Node* length() const { return var_length_.value(); }
- // Grab the initial value of last index.
- Node* const previous_last_index =
- is_fastpath ? FastLoadLastIndex(a, context, receiver)
- : SlowLoadLastIndex(a, context, receiver);
+ Variable* var_array() { return &var_array_; }
+ Variable* var_length() { return &var_length_; }
+ Variable* var_capacity() { return &var_capacity_; }
- // Ensure last index is 0.
- if (is_fastpath) {
- FastStoreLastIndex(a, context, receiver, smi_zero);
- } else {
- Label next(a);
- a->GotoIf(a->SameValue(previous_last_index, smi_zero, context), &next);
+ void Push(Node* const value) {
+ CodeStubAssembler* a = assembler_;
- SlowStoreLastIndex(a, context, receiver, smi_zero);
- a->Goto(&next);
- a->Bind(&next);
- }
+ Node* const length = var_length_.value();
+ Node* const capacity = var_capacity_.value();
- // Call exec.
- Node* const match_indices =
- is_fastpath ? RegExpPrototypeExecInternal(a, context, receiver, string)
- : RegExpExec(a, context, receiver, string);
+ Label grow(a), store(a);
+ a->Branch(a->IntPtrEqual(capacity, length), &grow, &store);
- // Reset last index if necessary.
- if (is_fastpath) {
- FastStoreLastIndex(a, context, receiver, previous_last_index);
- } else {
- Label next(a);
- Node* const current_last_index = SlowLoadLastIndex(a, context, receiver);
+ a->Bind(&grow);
+ {
+ Node* const new_capacity = NewCapacity(a, capacity);
+ Node* const new_array = ResizeFixedArray(length, new_capacity);
- a->GotoIf(a->SameValue(current_last_index, previous_last_index, context),
- &next);
+ var_capacity_.Bind(new_capacity);
+ var_array_.Bind(new_array);
+ a->Goto(&store);
+ }
- SlowStoreLastIndex(a, context, receiver, previous_last_index);
- a->Goto(&next);
- a->Bind(&next);
- }
+ a->Bind(&store);
+ {
+ Node* const array = var_array_.value();
+ a->StoreFixedArrayElement(array, length, value);
- // Return -1 if no match was found.
- {
- Label next(a);
- a->GotoUnless(a->WordEqual(match_indices, a->NullConstant()), &next);
- a->Return(a->SmiConstant(-1));
- a->Bind(&next);
+ Node* const new_length = a->IntPtrAdd(length, a->IntPtrConstant(1));
+ var_length_.Bind(new_length);
+ }
}
- // Return the index of the match.
- {
- Label fast_result(a), slow_result(a, Label::kDeferred);
+ Node* ToJSArray(Node* const context) {
+ CodeStubAssembler* a = assembler_;
- Node* const native_context = a->LoadNativeContext(context);
- Node* const initial_regexp_result_map =
- a->LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
- Node* const match_indices_map = a->LoadMap(match_indices);
+ const ElementsKind kind = FAST_ELEMENTS;
- a->Branch(a->WordEqual(match_indices_map, initial_regexp_result_map),
- &fast_result, &slow_result);
+ Node* const native_context = a->LoadNativeContext(context);
+ Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
- a->Bind(&fast_result);
+ // Shrink to fit if necessary.
{
- Node* const index =
- a->LoadObjectField(match_indices, JSRegExpResult::kIndexOffset,
- MachineType::AnyTagged());
- a->Return(index);
- }
+ Label next(a);
- a->Bind(&slow_result);
- {
- Node* const name = a->HeapConstant(isolate->factory()->index_string());
- Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
- Node* const index =
- a->CallStub(getproperty_callable, context, match_indices, name);
- a->Return(index);
+ Node* const length = var_length_.value();
+ Node* const capacity = var_capacity_.value();
+
+ a->GotoIf(a->WordEqual(length, capacity), &next);
+
+ Node* const array = ResizeFixedArray(length, length);
+ var_array_.Bind(array);
+ var_capacity_.Bind(length);
+ a->Goto(&next);
+
+ a->Bind(&next);
}
- }
-}
-} // namespace
+ Node* const result_length = a->SmiTag(length());
+ Node* const result = a->AllocateUninitializedJSArrayWithoutElements(
+ kind, array_map, result_length, nullptr);
-// ES#sec-regexp.prototype-@@search
-// RegExp.prototype [ @@search ] ( string )
-void Builtins::Generate_RegExpPrototypeSearch(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+ // Note: We do not currently shrink the fixed array.
- Isolate* const isolate = a->isolate();
+ a->StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
- Node* const maybe_receiver = a->Parameter(0);
- Node* const maybe_string = a->Parameter(1);
- Node* const context = a->Parameter(4);
+ return result;
+ }
- // Ensure {maybe_receiver} is a JSReceiver.
- Node* const map =
- ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@search");
- Node* const receiver = maybe_receiver;
+ private:
+ void Initialize() {
+ CodeStubAssembler* a = assembler_;
- // Convert {maybe_string} to a String.
- Node* const string = a->ToString(context, maybe_string);
+ const ElementsKind kind = FAST_ELEMENTS;
- Label fast_path(a), slow_path(a);
- BranchIfFastPath(a, context, map, &fast_path, &slow_path);
+ static const int kInitialArraySize = 8;
+ Node* const capacity = a->IntPtrConstant(kInitialArraySize);
+ Node* const array = a->AllocateFixedArray(kind, capacity);
- a->Bind(&fast_path);
- Generate_RegExpPrototypeSearchBody(a, receiver, string, context, true);
+ a->FillFixedArrayWithValue(kind, array, a->IntPtrConstant(0), capacity,
+ Heap::kTheHoleValueRootIndex);
- a->Bind(&slow_path);
- Generate_RegExpPrototypeSearchBody(a, receiver, string, context, false);
-}
+ var_array_.Bind(array);
+ var_capacity_.Bind(capacity);
+ var_length_.Bind(a->IntPtrConstant(0));
+ }
-namespace {
+ Node* NewCapacity(CodeStubAssembler* a, Node* const current_capacity) {
+ CSA_ASSERT(a, a->IntPtrGreaterThan(current_capacity, a->IntPtrConstant(0)));
+
+ // Growth rate is analog to JSObject::NewElementsCapacity:
+ // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
-MUST_USE_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
- Handle<Object> object,
- uint32_t* out) {
- if (object->IsUndefined(isolate)) {
- *out = kMaxUInt32;
- return object;
+ Node* const new_capacity = a->IntPtrAdd(
+ a->IntPtrAdd(current_capacity, a->WordShr(current_capacity, 1)),
+ a->IntPtrConstant(16));
+
+ return new_capacity;
}
- Handle<Object> number;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(object), Object);
- *out = NumberToUint32(*number);
- return object;
-}
+ // Creates a new array with {new_capacity} and copies the first
+ // {element_count} elements from the current array.
+ Node* ResizeFixedArray(Node* const element_count, Node* const new_capacity) {
+ CodeStubAssembler* a = assembler_;
-bool AtSurrogatePair(Isolate* isolate, Handle<String> string, int index) {
- if (index + 1 >= string->length()) return false;
- const uint16_t first = string->Get(index);
- if (first < 0xD800 || first > 0xDBFF) return false;
- const uint16_t second = string->Get(index + 1);
- return (second >= 0xDC00 && second <= 0xDFFF);
-}
+ CSA_ASSERT(a, a->IntPtrGreaterThan(element_count, a->IntPtrConstant(0)));
+ CSA_ASSERT(a, a->IntPtrGreaterThan(new_capacity, a->IntPtrConstant(0)));
+ CSA_ASSERT(a, a->IntPtrGreaterThanOrEqual(new_capacity, element_count));
-Handle<JSArray> NewJSArrayWithElements(Isolate* isolate,
- Handle<FixedArray> elems,
- int num_elems) {
- elems->Shrink(num_elems);
- return isolate->factory()->NewJSArrayWithElements(elems);
-}
+ const ElementsKind kind = FAST_ELEMENTS;
+ const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
+ const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+ const CodeStubAssembler::AllocationFlags flags =
+ CodeStubAssembler::kAllowLargeObjectAllocation;
-MaybeHandle<JSArray> RegExpSplit(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> string,
- Handle<Object> limit_obj) {
- Factory* factory = isolate->factory();
+ Node* const from_array = var_array_.value();
+ Node* const to_array =
+ a->AllocateFixedArray(kind, new_capacity, mode, flags);
+ a->CopyFixedArrayElements(kind, from_array, kind, to_array, element_count,
+ new_capacity, barrier_mode, mode);
- uint32_t limit;
- RETURN_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit), JSArray);
+ return to_array;
+ }
- const int length = string->length();
+ private:
+ CodeStubAssembler* const assembler_;
+ Variable var_array_;
+ Variable var_length_;
+ Variable var_capacity_;
+};
- if (limit == 0) return factory->NewJSArray(0);
+} // namespace
+
+void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ const bool is_fastpath) {
+ Isolate* const isolate = this->isolate();
- Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
+ Node* const null = NullConstant();
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
- if (length == 0) {
- Handle<Object> match_indices;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, match_indices,
- RegExpImpl::Exec(regexp, string, 0, last_match_info), JSArray);
+ Node* const is_global =
+ FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
- if (!match_indices->IsNull(isolate)) return factory->NewJSArray(0);
+ Label if_isglobal(this), if_isnotglobal(this);
+ Branch(is_global, &if_isglobal, &if_isnotglobal);
- Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
- elems->set(0, *string);
- return factory->NewJSArrayWithElements(elems);
+ Bind(&if_isnotglobal);
+ {
+ Node* const result =
+ is_fastpath ? RegExpPrototypeExecBody(context, regexp, string, true)
+ : RegExpExec(context, regexp, string);
+ Return(result);
}
- int current_index = 0;
- int start_index = 0;
- int start_match = 0;
+ Bind(&if_isglobal);
+ {
+ Node* const is_unicode =
+ FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath);
- static const int kInitialArraySize = 8;
- Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
- int num_elems = 0;
+ StoreLastIndex(context, regexp, smi_zero, is_fastpath);
- while (true) {
- if (start_index == length) {
- Handle<String> substr =
- factory->NewSubString(string, current_index, length);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- break;
- }
+ // Allocate an array to store the resulting match strings.
- Handle<Object> match_indices_obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, match_indices_obj,
- RegExpImpl::Exec(regexp, string, start_index,
- isolate->regexp_last_match_info()),
- JSArray);
-
- if (match_indices_obj->IsNull(isolate)) {
- Handle<String> substr =
- factory->NewSubString(string, current_index, length);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- break;
- }
+ GrowableFixedArray array(this);
- auto match_indices = Handle<RegExpMatchInfo>::cast(match_indices_obj);
+ // Loop preparations. Within the loop, collect results from RegExpExec
+ // and store match strings in the array.
- start_match = match_indices->Capture(0);
+ Variable* vars[] = {array.var_array(), array.var_length(),
+ array.var_capacity()};
+ Label loop(this, 3, vars), out(this);
+ Goto(&loop);
- if (start_match == length) {
- Handle<String> substr =
- factory->NewSubString(string, current_index, length);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- break;
- }
+ Bind(&loop);
+ {
+ Variable var_match(this, MachineRepresentation::kTagged);
- const int end_index = match_indices->Capture(1);
+ Label if_didmatch(this), if_didnotmatch(this);
+ if (is_fastpath) {
+ // On the fast path, grab the matching string from the raw match index
+ // array.
+ Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
+ context, regexp, string, &if_didnotmatch, true);
- if (start_index == end_index && end_index == current_index) {
- const bool unicode = (regexp->GetFlags() & JSRegExp::kUnicode) != 0;
- if (unicode && AtSurrogatePair(isolate, string, start_index)) {
- start_index += 2;
+ Node* const match_from = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+ Node* const match_to = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+
+ Node* match = SubString(context, string, match_from, match_to);
+ var_match.Bind(match);
+
+ Goto(&if_didmatch);
} else {
- start_index += 1;
+ DCHECK(!is_fastpath);
+ Node* const result = RegExpExec(context, regexp, string);
+
+ Label load_match(this);
+ Branch(WordEqual(result, null), &if_didnotmatch, &load_match);
+
+ Bind(&load_match);
+ {
+ Label fast_result(this), slow_result(this);
+ BranchIfFastRegExpResult(context, LoadMap(result), &fast_result,
+ &slow_result);
+
+ Bind(&fast_result);
+ {
+ Node* const result_fixed_array = LoadElements(result);
+ Node* const match = LoadFixedArrayElement(result_fixed_array, 0);
+
+ // The match is guaranteed to be a string on the fast path.
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(match)));
+
+ var_match.Bind(match);
+ Goto(&if_didmatch);
+ }
+
+ Bind(&slow_result);
+ {
+ // TODO(ishell): Use GetElement stub once it's available.
+ Node* const name = smi_zero;
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const match =
+ CallStub(getproperty_callable, context, result, name);
+
+ var_match.Bind(ToString(context, match));
+ Goto(&if_didmatch);
+ }
+ }
+ }
+
+ Bind(&if_didnotmatch);
+ {
+ // Return null if there were no matches, otherwise just exit the loop.
+ GotoUnless(IntPtrEqual(array.length(), int_zero), &out);
+ Return(null);
+ }
+
+ Bind(&if_didmatch);
+ {
+ Node* match = var_match.value();
+
+ // Store the match, growing the fixed array if needed.
+
+ array.Push(match);
+
+ // Advance last index if the match is the empty string.
+
+ Node* const match_length = LoadStringLength(match);
+ GotoUnless(SmiEqual(match_length, smi_zero), &loop);
+
+ Node* last_index = LoadLastIndex(context, regexp, is_fastpath);
+
+ Callable tolength_callable = CodeFactory::ToLength(isolate);
+ last_index = CallStub(tolength_callable, context, last_index);
+
+ Node* const new_last_index =
+ AdvanceStringIndex(string, last_index, is_unicode);
+
+ StoreLastIndex(context, regexp, new_last_index, is_fastpath);
+
+ Goto(&loop);
}
- continue;
}
+ Bind(&out);
{
- Handle<String> substr =
- factory->NewSubString(string, current_index, start_match);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ // Wrap the match in a JSArray.
+
+ Node* const result = array.ToJSArray(context);
+ Return(result);
}
+ }
+}
+
+// ES#sec-regexp.prototype-@@match
+// RegExp.prototype [ @@match ] ( string )
+TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const context = Parameter(4);
+
+ // Ensure {maybe_receiver} is a JSReceiver.
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@match");
+ Node* const receiver = maybe_receiver;
- if (static_cast<uint32_t>(num_elems) == limit) break;
+ // Convert {maybe_string} to a String.
+ Node* const string = ToString(context, maybe_string);
- for (int i = 2; i < match_indices->NumberOfCaptureRegisters(); i += 2) {
- const int start = match_indices->Capture(i);
- const int end = match_indices->Capture(i + 1);
+ Label fast_path(this), slow_path(this);
+ BranchIfFastRegExp(context, map, &fast_path, &slow_path);
- if (end != -1) {
- Handle<String> substr = factory->NewSubString(string, start, end);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- } else {
- elems = FixedArray::SetAndGrow(elems, num_elems++,
- factory->undefined_value());
- }
+ Bind(&fast_path);
+ RegExpPrototypeMatchBody(context, receiver, string, true);
- if (static_cast<uint32_t>(num_elems) == limit) {
- return NewJSArrayWithElements(isolate, elems, num_elems);
- }
- }
+ Bind(&slow_path);
+ RegExpPrototypeMatchBody(context, receiver, string, false);
+}
- start_index = current_index = end_index;
+void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
+ Node* const context, Node* const regexp, Node* const string) {
+ // Grab the initial value of last index.
+ Node* const previous_last_index = FastLoadLastIndex(regexp);
+
+ // Ensure last index is 0.
+ FastStoreLastIndex(regexp, SmiConstant(Smi::kZero));
+
+ // Call exec.
+ Label if_didnotmatch(this);
+ Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
+ context, regexp, string, &if_didnotmatch, true);
+
+ // Successful match.
+ {
+ // Reset last index.
+ FastStoreLastIndex(regexp, previous_last_index);
+
+ // Return the index of the match.
+ Node* const index = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+ Return(index);
}
- return NewJSArrayWithElements(isolate, elems, num_elems);
+ Bind(&if_didnotmatch);
+ {
+ // Reset last index and return -1.
+ FastStoreLastIndex(regexp, previous_last_index);
+ Return(SmiConstant(-1));
+ }
}
-// ES##sec-speciesconstructor
-// SpeciesConstructor ( O, defaultConstructor )
-MUST_USE_RESULT MaybeHandle<Object> SpeciesConstructor(
- Isolate* isolate, Handle<JSReceiver> recv,
- Handle<JSFunction> default_ctor) {
- Handle<Object> ctor_obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, ctor_obj,
- JSObject::GetProperty(recv, isolate->factory()->constructor_string()),
- Object);
+void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
+ Node* const context, Node* const regexp, Node* const string) {
+ Isolate* const isolate = this->isolate();
- if (ctor_obj->IsUndefined(isolate)) return default_ctor;
+ Node* const smi_zero = SmiConstant(Smi::kZero);
- if (!ctor_obj->IsJSReceiver()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kConstructorNotReceiver),
- Object);
+ // Grab the initial value of last index.
+ Node* const previous_last_index = SlowLoadLastIndex(context, regexp);
+
+ // Ensure last index is 0.
+ {
+ Label next(this);
+ GotoIf(SameValue(previous_last_index, smi_zero, context), &next);
+
+ SlowStoreLastIndex(context, regexp, smi_zero);
+ Goto(&next);
+ Bind(&next);
}
- Handle<JSReceiver> ctor = Handle<JSReceiver>::cast(ctor_obj);
+ // Call exec.
+ Node* const exec_result = RegExpExec(context, regexp, string);
+
+ // Reset last index if necessary.
+ {
+ Label next(this);
+ Node* const current_last_index = SlowLoadLastIndex(context, regexp);
+
+ GotoIf(SameValue(current_last_index, previous_last_index, context), &next);
- Handle<Object> species;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, species,
- JSObject::GetProperty(ctor, isolate->factory()->species_symbol()),
- Object);
+ SlowStoreLastIndex(context, regexp, previous_last_index);
+ Goto(&next);
- if (species->IsNull(isolate) || species->IsUndefined(isolate)) {
- return default_ctor;
+ Bind(&next);
}
- if (species->IsConstructor()) return species;
+ // Return -1 if no match was found.
+ {
+ Label next(this);
+ GotoUnless(WordEqual(exec_result, NullConstant()), &next);
+ Return(SmiConstant(-1));
+ Bind(&next);
+ }
+
+ // Return the index of the match.
+ {
+ Label fast_result(this), slow_result(this, Label::kDeferred);
+ BranchIfFastRegExpResult(context, LoadMap(exec_result), &fast_result,
+ &slow_result);
+
+ Bind(&fast_result);
+ {
+ Node* const index =
+ LoadObjectField(exec_result, JSRegExpResult::kIndexOffset);
+ Return(index);
+ }
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kSpeciesNotConstructor), Object);
+ Bind(&slow_result);
+ {
+ Node* const name = HeapConstant(isolate->factory()->index_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const index =
+ CallStub(getproperty_callable, context, exec_result, name);
+ Return(index);
+ }
+ }
}
-} // namespace
+// ES#sec-regexp.prototype-@@search
+// RegExp.prototype [ @@search ] ( string )
+TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const context = Parameter(4);
-// ES#sec-regexp.prototype-@@split
-// RegExp.prototype [ @@split ] ( string, limit )
-BUILTIN(RegExpPrototypeSplit) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@split");
+ // Ensure {maybe_receiver} is a JSReceiver.
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@search");
+ Node* const receiver = maybe_receiver;
- Factory* factory = isolate->factory();
+ // Convert {maybe_string} to a String.
+ Node* const string = ToString(context, maybe_string);
- Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
- Handle<Object> limit_obj = args.atOrUndefined(isolate, 2);
+ Label fast_path(this), slow_path(this);
+ BranchIfFastRegExp(context, map, &fast_path, &slow_path);
- Handle<String> string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
- Object::ToString(isolate, string_obj));
+ Bind(&fast_path);
+ RegExpPrototypeSearchBodyFast(context, receiver, string);
- if (RegExpUtils::IsUnmodifiedRegExp(isolate, recv)) {
- RETURN_RESULT_OR_FAILURE(
- isolate,
- RegExpSplit(isolate, Handle<JSRegExp>::cast(recv), string, limit_obj));
- }
+ Bind(&slow_path);
+ RegExpPrototypeSearchBodySlow(context, receiver, string);
+}
- Handle<JSFunction> regexp_fun = isolate->regexp_function();
- Handle<Object> ctor;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ctor, SpeciesConstructor(isolate, recv, regexp_fun));
+// Generates the fast path for @@split. {regexp} is an unmodified JSRegExp,
+// {string} is a String, and {limit} is a Smi.
+void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ Node* const limit) {
+ Isolate* isolate = this->isolate();
- Handle<Object> flags_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, flags_obj, JSObject::GetProperty(recv, factory->flags_string()));
+ Node* const null = NullConstant();
+ Node* const smi_zero = SmiConstant(0);
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const int_limit = SmiUntag(limit);
- Handle<String> flags;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags,
- Object::ToString(isolate, flags_obj));
+ const ElementsKind kind = FAST_ELEMENTS;
+ const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
- Handle<String> u_str = factory->LookupSingleCharacterStringFromCode('u');
- const bool unicode = (String::IndexOf(isolate, flags, u_str, 0) >= 0);
+ Node* const allocation_site = nullptr;
+ Node* const native_context = LoadNativeContext(context);
+ Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
- Handle<String> y_str = factory->LookupSingleCharacterStringFromCode('y');
- const bool sticky = (String::IndexOf(isolate, flags, y_str, 0) >= 0);
+ Label return_empty_array(this, Label::kDeferred);
- Handle<String> new_flags = flags;
- if (!sticky) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_flags,
- factory->NewConsString(flags, y_str));
+ // If limit is zero, return an empty array.
+ {
+ Label next(this), if_limitiszero(this, Label::kDeferred);
+ Branch(SmiEqual(limit, smi_zero), &return_empty_array, &next);
+ Bind(&next);
}
- Handle<JSReceiver> splitter;
+ Node* const string_length = LoadStringLength(string);
+
+ // If passed the empty {string}, return either an empty array or a singleton
+ // array depending on whether the {regexp} matches.
{
- const int argc = 2;
+ Label next(this), if_stringisempty(this, Label::kDeferred);
+ Branch(SmiEqual(string_length, smi_zero), &if_stringisempty, &next);
- ScopedVector<Handle<Object>> argv(argc);
- argv[0] = recv;
- argv[1] = new_flags;
+ Bind(&if_stringisempty);
+ {
+ Node* const last_match_info = LoadContextElement(
+ native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Handle<JSFunction> ctor_fun = Handle<JSFunction>::cast(ctor);
- Handle<Object> splitter_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, splitter_obj, Execution::New(ctor_fun, argc, argv.start()));
+ Callable exec_callable = CodeFactory::RegExpExec(isolate);
+ Node* const match_indices = CallStub(exec_callable, context, regexp,
+ string, smi_zero, last_match_info);
+
+ Label return_singleton_array(this);
+ Branch(WordEqual(match_indices, null), &return_singleton_array,
+ &return_empty_array);
+
+ Bind(&return_singleton_array);
+ {
+ Node* const length = SmiConstant(1);
+ Node* const capacity = IntPtrConstant(1);
+ Node* const result = AllocateJSArray(kind, array_map, capacity, length,
+ allocation_site, mode);
+
+ Node* const fixed_array = LoadElements(result);
+ StoreFixedArrayElement(fixed_array, 0, string);
- splitter = Handle<JSReceiver>::cast(splitter_obj);
+ Return(result);
+ }
+ }
+
+ Bind(&next);
}
- uint32_t limit;
- RETURN_FAILURE_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit));
+ // Loop preparations.
- const int length = string->length();
+ GrowableFixedArray array(this);
- if (limit == 0) return *factory->NewJSArray(0);
+ Variable var_last_matched_until(this, MachineRepresentation::kTagged);
+ Variable var_next_search_from(this, MachineRepresentation::kTagged);
- if (length == 0) {
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
- factory->undefined_value()));
+ var_last_matched_until.Bind(smi_zero);
+ var_next_search_from.Bind(smi_zero);
- if (!result->IsNull(isolate)) return *factory->NewJSArray(0);
+ Variable* vars[] = {array.var_array(), array.var_length(),
+ array.var_capacity(), &var_last_matched_until,
+ &var_next_search_from};
+ const int vars_count = sizeof(vars) / sizeof(vars[0]);
+ Label loop(this, vars_count, vars), push_suffix_and_out(this), out(this);
+ Goto(&loop);
- Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
- elems->set(0, *string);
- return *factory->NewJSArrayWithElements(elems);
- }
+ Bind(&loop);
+ {
+ Node* const next_search_from = var_next_search_from.value();
+ Node* const last_matched_until = var_last_matched_until.value();
- // TODO(jgruber): Wrap this in a helper class.
- static const int kInitialArraySize = 8;
- Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
- int num_elems = 0;
+ // We're done if we've reached the end of the string.
+ {
+ Label next(this);
+ Branch(SmiEqual(next_search_from, string_length), &push_suffix_and_out,
+ &next);
+ Bind(&next);
+ }
- int string_index = 0;
- int prev_string_index = 0;
- while (string_index < length) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, RegExpUtils::SetLastIndex(isolate, splitter, string_index));
+ // Search for the given {regexp}.
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
- factory->undefined_value()));
+ Node* const last_match_info = LoadContextElement(
+ native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+
+ Callable exec_callable = CodeFactory::RegExpExec(isolate);
+ Node* const match_indices = CallStub(exec_callable, context, regexp, string,
+ next_search_from, last_match_info);
- if (result->IsNull(isolate)) {
- string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
- string_index, unicode);
- continue;
+ // We're done if no match was found.
+ {
+ Label next(this);
+ Branch(WordEqual(match_indices, null), &push_suffix_and_out, &next);
+ Bind(&next);
}
- // TODO(jgruber): Extract toLength of some property into function.
- Handle<Object> last_index_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, last_index_obj, RegExpUtils::GetLastIndex(isolate, splitter));
+ Node* const match_from = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, last_index_obj, Object::ToLength(isolate, last_index_obj));
- const int last_index = Handle<Smi>::cast(last_index_obj)->value();
-
- const int end = std::min(last_index, length);
- if (end == prev_string_index) {
- string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
- string_index, unicode);
- continue;
+ // We're done if the match starts beyond the string.
+ {
+ Label next(this);
+ Branch(WordEqual(match_from, string_length), &push_suffix_and_out, &next);
+ Bind(&next);
}
+ Node* const match_to = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+
+ // Advance index and continue if the match is empty.
{
- Handle<String> substr =
- factory->NewSubString(string, prev_string_index, string_index);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- if (static_cast<uint32_t>(num_elems) == limit) {
- return *NewJSArrayWithElements(isolate, elems, num_elems);
- }
+ Label next(this);
+
+ GotoUnless(SmiEqual(match_to, next_search_from), &next);
+ GotoUnless(SmiEqual(match_to, last_matched_until), &next);
+
+ Node* const is_unicode = FastFlagGetter(regexp, JSRegExp::kUnicode);
+ Node* const new_next_search_from =
+ AdvanceStringIndex(string, next_search_from, is_unicode);
+ var_next_search_from.Bind(new_next_search_from);
+ Goto(&loop);
+
+ Bind(&next);
+ }
+
+ // A valid match was found, add the new substring to the array.
+ {
+ Node* const from = last_matched_until;
+ Node* const to = match_from;
+
+ Node* const substr = SubString(context, string, from, to);
+ array.Push(substr);
+
+ GotoIf(WordEqual(array.length(), int_limit), &out);
}
- prev_string_index = end;
+ // Add all captures to the array.
+ {
+ Node* const num_registers = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kNumberOfCapturesIndex);
+ Node* const int_num_registers = SmiUntag(num_registers);
+
+ Variable var_reg(this, MachineType::PointerRepresentation());
+ var_reg.Bind(IntPtrConstant(2));
- Handle<Object> num_captures_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, num_captures_obj,
- Object::GetProperty(result, isolate->factory()->length_string()));
+ Variable* vars[] = {array.var_array(), array.var_length(),
+ array.var_capacity(), &var_reg};
+ const int vars_count = sizeof(vars) / sizeof(vars[0]);
+ Label nested_loop(this, vars_count, vars), nested_loop_out(this);
+ Branch(IntPtrLessThan(var_reg.value(), int_num_registers), &nested_loop,
+ &nested_loop_out);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, num_captures_obj, Object::ToLength(isolate, num_captures_obj));
- const int num_captures =
- std::max(Handle<Smi>::cast(num_captures_obj)->value(), 0);
-
- for (int i = 1; i < num_captures; i++) {
- Handle<Object> capture;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, capture, Object::GetElement(isolate, result, i));
- elems = FixedArray::SetAndGrow(elems, num_elems++, capture);
- if (static_cast<uint32_t>(num_elems) == limit) {
- return *NewJSArrayWithElements(isolate, elems, num_elems);
+ Bind(&nested_loop);
+ {
+ Node* const reg = var_reg.value();
+ Node* const from = LoadFixedArrayElement(
+ match_indices, reg,
+ RegExpMatchInfo::kFirstCaptureIndex * kPointerSize, mode);
+ Node* const to = LoadFixedArrayElement(
+ match_indices, reg,
+ (RegExpMatchInfo::kFirstCaptureIndex + 1) * kPointerSize, mode);
+
+ Label select_capture(this), select_undefined(this), store_value(this);
+ Variable var_value(this, MachineRepresentation::kTagged);
+ Branch(SmiEqual(to, SmiConstant(-1)), &select_undefined,
+ &select_capture);
+
+ Bind(&select_capture);
+ {
+ Node* const substr = SubString(context, string, from, to);
+ var_value.Bind(substr);
+ Goto(&store_value);
+ }
+
+ Bind(&select_undefined);
+ {
+ Node* const undefined = UndefinedConstant();
+ var_value.Bind(undefined);
+ Goto(&store_value);
+ }
+
+ Bind(&store_value);
+ {
+ array.Push(var_value.value());
+ GotoIf(WordEqual(array.length(), int_limit), &out);
+
+ Node* const new_reg = IntPtrAdd(reg, IntPtrConstant(2));
+ var_reg.Bind(new_reg);
+
+ Branch(IntPtrLessThan(new_reg, int_num_registers), &nested_loop,
+ &nested_loop_out);
+ }
}
+
+ Bind(&nested_loop_out);
}
- string_index = prev_string_index;
+ var_last_matched_until.Bind(match_to);
+ var_next_search_from.Bind(match_to);
+ Goto(&loop);
}
+ Bind(&push_suffix_and_out);
{
- Handle<String> substr =
- factory->NewSubString(string, prev_string_index, length);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ Node* const from = var_last_matched_until.value();
+ Node* const to = string_length;
+
+ Node* const substr = SubString(context, string, from, to);
+ array.Push(substr);
+
+ Goto(&out);
}
- return *NewJSArrayWithElements(isolate, elems, num_elems);
+ Bind(&out);
+ {
+ Node* const result = array.ToJSArray(context);
+ Return(result);
+ }
+
+ Bind(&return_empty_array);
+ {
+ Node* const length = smi_zero;
+ Node* const capacity = int_zero;
+ Node* const result = AllocateJSArray(kind, array_map, capacity, length,
+ allocation_site, mode);
+ Return(result);
+ }
}
-namespace {
+// ES#sec-regexp.prototype-@@split
+// RegExp.prototype [ @@split ] ( string, limit )
+TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const maybe_limit = Parameter(2);
+ Node* const context = Parameter(5);
+
+ // Ensure {maybe_receiver} is a JSReceiver.
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@split");
+ Node* const receiver = maybe_receiver;
+
+ // Convert {maybe_string} to a String.
+ Node* const string = ToString(context, maybe_string);
+
+ Label fast_path(this), slow_path(this);
+ BranchIfFastRegExp(context, map, &fast_path, &slow_path);
-compiler::Node* ReplaceGlobalCallableFastPath(
- CodeStubAssembler* a, compiler::Node* context, compiler::Node* regexp,
- compiler::Node* subject_string, compiler::Node* replace_callable) {
+ Bind(&fast_path);
+ {
+ // TODO(jgruber): Even if map checks send us to the fast path, we still need
+ // to verify the constructor property and jump to the slow path if it has
+ // been changed.
+
+ // Convert {maybe_limit} to a uint32, capping at the maximal smi value.
+ Variable var_limit(this, MachineRepresentation::kTagged);
+ Label if_limitissmimax(this), limit_done(this);
+
+ GotoIf(IsUndefined(maybe_limit), &if_limitissmimax);
+
+ {
+ Node* const limit = ToUint32(context, maybe_limit);
+ GotoUnless(TaggedIsSmi(limit), &if_limitissmimax);
+
+ var_limit.Bind(limit);
+ Goto(&limit_done);
+ }
+
+ Bind(&if_limitissmimax);
+ {
+      // TODO(jgruber): In this case, we can probably avoid generation of limit
+      // checks in Generate_RegExpPrototypeSplitBody.
+ Node* const smi_max = SmiConstant(Smi::kMaxValue);
+ var_limit.Bind(smi_max);
+ Goto(&limit_done);
+ }
+
+ Bind(&limit_done);
+ {
+ Node* const limit = var_limit.value();
+ RegExpPrototypeSplitBody(context, receiver, string, limit);
+ }
+ }
+
+ Bind(&slow_path);
+ {
+ Node* const result = CallRuntime(Runtime::kRegExpSplit, context, receiver,
+ string, maybe_limit);
+ Return(result);
+ }
+}
+
+Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
+ Node* context, Node* regexp, Node* string, Node* replace_callable) {
// The fast path is reached only if {receiver} is a global unmodified
// JSRegExp instance and {replace_callable} is callable.
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Isolate* const isolate = a->isolate();
+ Isolate* const isolate = this->isolate();
- Node* const null = a->NullConstant();
- Node* const undefined = a->UndefinedConstant();
- Node* const int_zero = a->IntPtrConstant(0);
- Node* const int_one = a->IntPtrConstant(1);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
+ Node* const null = NullConstant();
+ Node* const undefined = UndefinedConstant();
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const int_one = IntPtrConstant(1);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
- Node* const native_context = a->LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
- Label out(a);
- Variable var_result(a, MachineRepresentation::kTagged);
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
// Set last index to 0.
- FastStoreLastIndex(a, context, regexp, smi_zero);
+ FastStoreLastIndex(regexp, smi_zero);
// Allocate {result_array}.
Node* result_array;
{
ElementsKind kind = FAST_ELEMENTS;
- Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
- Node* const capacity = a->IntPtrConstant(16);
+ Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+ Node* const capacity = IntPtrConstant(16);
Node* const length = smi_zero;
Node* const allocation_site = nullptr;
- CodeStubAssembler::ParameterMode capacity_mode =
- CodeStubAssembler::INTPTR_PARAMETERS;
+ ParameterMode capacity_mode = CodeStubAssembler::INTPTR_PARAMETERS;
- result_array = a->AllocateJSArray(kind, array_map, capacity, length,
- allocation_site, capacity_mode);
+ result_array = AllocateJSArray(kind, array_map, capacity, length,
+ allocation_site, capacity_mode);
}
// Call into runtime for RegExpExecMultiple.
- Node* last_match_info = a->LoadContextElement(
- native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Node* const res =
- a->CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
- subject_string, last_match_info, result_array);
+ Node* last_match_info =
+ LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ Node* const res = CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
+ string, last_match_info, result_array);
// Reset last index to 0.
- FastStoreLastIndex(a, context, regexp, smi_zero);
+ FastStoreLastIndex(regexp, smi_zero);
// If no matches, return the subject string.
- var_result.Bind(subject_string);
- a->GotoIf(a->WordEqual(res, null), &out);
+ var_result.Bind(string);
+ GotoIf(WordEqual(res, null), &out);
// Reload last match info since it might have changed.
- last_match_info = a->LoadContextElement(
- native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ last_match_info =
+ LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Node* const res_length = a->LoadJSArrayLength(res);
- Node* const res_elems = a->LoadElements(res);
- CSA_ASSERT(a, a->HasInstanceType(res_elems, FIXED_ARRAY_TYPE));
+ Node* const res_length = LoadJSArrayLength(res);
+ Node* const res_elems = LoadElements(res);
+ CSA_ASSERT(this, HasInstanceType(res_elems, FIXED_ARRAY_TYPE));
- CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
- Node* const num_capture_registers = a->LoadFixedArrayElement(
- last_match_info,
- a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0, mode);
+ Node* const num_capture_registers = LoadFixedArrayElement(
+ last_match_info, RegExpMatchInfo::kNumberOfCapturesIndex);
- Label if_hasexplicitcaptures(a), if_noexplicitcaptures(a), create_result(a);
- a->Branch(a->SmiEqual(num_capture_registers, a->SmiConstant(Smi::FromInt(2))),
- &if_noexplicitcaptures, &if_hasexplicitcaptures);
+ Label if_hasexplicitcaptures(this), if_noexplicitcaptures(this),
+ create_result(this);
+ Branch(SmiEqual(num_capture_registers, SmiConstant(Smi::FromInt(2))),
+ &if_noexplicitcaptures, &if_hasexplicitcaptures);
- a->Bind(&if_noexplicitcaptures);
+ Bind(&if_noexplicitcaptures);
{
// If the number of captures is two then there are no explicit captures in
// the regexp, just the implicit capture that captures the whole match. In
@@ -1710,394 +2172,358 @@ compiler::Node* ReplaceGlobalCallableFastPath(
// input string and some replacements that were returned from the replace
// function.
- Variable var_match_start(a, MachineRepresentation::kTagged);
+ Variable var_match_start(this, MachineRepresentation::kTagged);
var_match_start.Bind(smi_zero);
- Node* const end = a->SmiUntag(res_length);
- Variable var_i(a, MachineType::PointerRepresentation());
+ Node* const end = SmiUntag(res_length);
+ Variable var_i(this, MachineType::PointerRepresentation());
var_i.Bind(int_zero);
Variable* vars[] = {&var_i, &var_match_start};
- Label loop(a, 2, vars);
- a->Goto(&loop);
- a->Bind(&loop);
+ Label loop(this, 2, vars);
+ Goto(&loop);
+ Bind(&loop);
{
Node* const i = var_i.value();
- a->GotoUnless(a->IntPtrLessThan(i, end), &create_result);
+ GotoUnless(IntPtrLessThan(i, end), &create_result);
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS;
- Node* const elem = a->LoadFixedArrayElement(res_elems, i, 0, mode);
+ Node* const elem = LoadFixedArrayElement(res_elems, i);
- Label if_issmi(a), if_isstring(a), loop_epilogue(a);
- a->Branch(a->TaggedIsSmi(elem), &if_issmi, &if_isstring);
+ Label if_issmi(this), if_isstring(this), loop_epilogue(this);
+ Branch(TaggedIsSmi(elem), &if_issmi, &if_isstring);
- a->Bind(&if_issmi);
+ Bind(&if_issmi);
{
// Integers represent slices of the original string.
- Label if_isnegativeorzero(a), if_ispositive(a);
- a->BranchIfSmiLessThanOrEqual(elem, smi_zero, &if_isnegativeorzero,
- &if_ispositive);
+ Label if_isnegativeorzero(this), if_ispositive(this);
+ BranchIfSmiLessThanOrEqual(elem, smi_zero, &if_isnegativeorzero,
+ &if_ispositive);
- a->Bind(&if_ispositive);
+ Bind(&if_ispositive);
{
- Node* const int_elem = a->SmiUntag(elem);
+ Node* const int_elem = SmiUntag(elem);
Node* const new_match_start =
- a->IntPtrAdd(a->WordShr(int_elem, a->IntPtrConstant(11)),
- a->WordAnd(int_elem, a->IntPtrConstant(0x7ff)));
- var_match_start.Bind(a->SmiTag(new_match_start));
- a->Goto(&loop_epilogue);
+ IntPtrAdd(WordShr(int_elem, IntPtrConstant(11)),
+ WordAnd(int_elem, IntPtrConstant(0x7ff)));
+ var_match_start.Bind(SmiTag(new_match_start));
+ Goto(&loop_epilogue);
}
- a->Bind(&if_isnegativeorzero);
+ Bind(&if_isnegativeorzero);
{
- Node* const next_i = a->IntPtrAdd(i, int_one);
+ Node* const next_i = IntPtrAdd(i, int_one);
var_i.Bind(next_i);
- Node* const next_elem =
- a->LoadFixedArrayElement(res_elems, next_i, 0, mode);
+ Node* const next_elem = LoadFixedArrayElement(res_elems, next_i);
- Node* const new_match_start = a->SmiSub(next_elem, elem);
+ Node* const new_match_start = SmiSub(next_elem, elem);
var_match_start.Bind(new_match_start);
- a->Goto(&loop_epilogue);
+ Goto(&loop_epilogue);
}
}
- a->Bind(&if_isstring);
+ Bind(&if_isstring);
{
- CSA_ASSERT(a, a->IsStringInstanceType(a->LoadInstanceType(elem)));
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(elem)));
Callable call_callable = CodeFactory::Call(isolate);
Node* const replacement_obj =
- a->CallJS(call_callable, context, replace_callable, undefined, elem,
- var_match_start.value(), subject_string);
+ CallJS(call_callable, context, replace_callable, undefined, elem,
+ var_match_start.value(), string);
- Node* const replacement_str = a->ToString(context, replacement_obj);
- a->StoreFixedArrayElement(res_elems, i, replacement_str);
+ Node* const replacement_str = ToString(context, replacement_obj);
+ StoreFixedArrayElement(res_elems, i, replacement_str);
- Node* const elem_length = a->LoadStringLength(elem);
+ Node* const elem_length = LoadStringLength(elem);
Node* const new_match_start =
- a->SmiAdd(var_match_start.value(), elem_length);
+ SmiAdd(var_match_start.value(), elem_length);
var_match_start.Bind(new_match_start);
- a->Goto(&loop_epilogue);
+ Goto(&loop_epilogue);
}
- a->Bind(&loop_epilogue);
+ Bind(&loop_epilogue);
{
- var_i.Bind(a->IntPtrAdd(var_i.value(), int_one));
- a->Goto(&loop);
+ var_i.Bind(IntPtrAdd(var_i.value(), int_one));
+ Goto(&loop);
}
}
}
- a->Bind(&if_hasexplicitcaptures);
+ Bind(&if_hasexplicitcaptures);
{
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS;
-
Node* const from = int_zero;
- Node* const to = a->SmiUntag(res_length);
+ Node* const to = SmiUntag(res_length);
const int increment = 1;
- a->BuildFastLoop(
+ BuildFastLoop(
MachineType::PointerRepresentation(), from, to,
- [res_elems, isolate, native_context, context, undefined,
- replace_callable, mode](CodeStubAssembler* a, Node* index) {
- Node* const elem =
- a->LoadFixedArrayElement(res_elems, index, 0, mode);
+ [this, res_elems, isolate, native_context, context, undefined,
+ replace_callable](Node* index) {
+ Node* const elem = LoadFixedArrayElement(res_elems, index);
- Label do_continue(a);
- a->GotoIf(a->TaggedIsSmi(elem), &do_continue);
+ Label do_continue(this);
+ GotoIf(TaggedIsSmi(elem), &do_continue);
// elem must be an Array.
// Use the apply argument as backing for global RegExp properties.
- CSA_ASSERT(a, a->HasInstanceType(elem, JS_ARRAY_TYPE));
+ CSA_ASSERT(this, HasInstanceType(elem, JS_ARRAY_TYPE));
// TODO(jgruber): Remove indirection through Call->ReflectApply.
Callable call_callable = CodeFactory::Call(isolate);
- Node* const reflect_apply = a->LoadContextElement(
- native_context, Context::REFLECT_APPLY_INDEX);
+ Node* const reflect_apply =
+ LoadContextElement(native_context, Context::REFLECT_APPLY_INDEX);
Node* const replacement_obj =
- a->CallJS(call_callable, context, reflect_apply, undefined,
- replace_callable, undefined, elem);
+ CallJS(call_callable, context, reflect_apply, undefined,
+ replace_callable, undefined, elem);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
- Node* const replacement_str = a->ToString(context, replacement_obj);
- a->StoreFixedArrayElement(res_elems, index, replacement_str,
- UPDATE_WRITE_BARRIER, mode);
+ Node* const replacement_str = ToString(context, replacement_obj);
+ StoreFixedArrayElement(res_elems, index, replacement_str);
- a->Goto(&do_continue);
- a->Bind(&do_continue);
+ Goto(&do_continue);
+ Bind(&do_continue);
},
increment, CodeStubAssembler::IndexAdvanceMode::kPost);
- a->Goto(&create_result);
+ Goto(&create_result);
}
- a->Bind(&create_result);
+ Bind(&create_result);
{
- Node* const result = a->CallRuntime(Runtime::kStringBuilderConcat, context,
- res, res_length, subject_string);
+ Node* const result = CallRuntime(Runtime::kStringBuilderConcat, context,
+ res, res_length, string);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
-compiler::Node* ReplaceSimpleStringFastPath(CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* regexp,
- compiler::Node* subject_string,
- compiler::Node* replace_string) {
+Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
+ Node* context, Node* regexp, Node* string, Node* replace_string) {
// The fast path is reached only if {receiver} is an unmodified
// JSRegExp instance, {replace_value} is non-callable, and
// ToString({replace_value}) does not contain '$', i.e. we're doing a simple
// string replacement.
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+ Isolate* const isolate = this->isolate();
- Isolate* const isolate = a->isolate();
+ Node* const null = NullConstant();
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
- Node* const null = a->NullConstant();
- Node* const int_zero = a->IntPtrConstant(0);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
-
- Label out(a);
- Variable var_result(a, MachineRepresentation::kTagged);
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
// Load the last match info.
- Node* const native_context = a->LoadNativeContext(context);
- Node* const last_match_info = a->LoadContextElement(
- native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const last_match_info =
+ LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
// Is {regexp} global?
- Label if_isglobal(a), if_isnonglobal(a);
- Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Label if_isglobal(this), if_isnonglobal(this);
+ Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
Node* const is_global =
- a->WordAnd(a->SmiUntag(flags), a->IntPtrConstant(JSRegExp::kGlobal));
- a->Branch(a->WordEqual(is_global, int_zero), &if_isnonglobal, &if_isglobal);
+ WordAnd(SmiUntag(flags), IntPtrConstant(JSRegExp::kGlobal));
+ Branch(WordEqual(is_global, int_zero), &if_isnonglobal, &if_isglobal);
- a->Bind(&if_isglobal);
+ Bind(&if_isglobal);
{
// Hand off global regexps to runtime.
- FastStoreLastIndex(a, context, regexp, smi_zero);
+ FastStoreLastIndex(regexp, smi_zero);
Node* const result =
- a->CallRuntime(Runtime::kStringReplaceGlobalRegExpWithString, context,
- subject_string, regexp, replace_string, last_match_info);
+ CallRuntime(Runtime::kStringReplaceGlobalRegExpWithString, context,
+ string, regexp, replace_string, last_match_info);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&if_isnonglobal);
+ Bind(&if_isnonglobal);
{
// Run exec, then manually construct the resulting string.
Callable exec_callable = CodeFactory::RegExpExec(isolate);
- Node* const match_indices =
- a->CallStub(exec_callable, context, regexp, subject_string, smi_zero,
- last_match_info);
+ Node* const match_indices = CallStub(exec_callable, context, regexp, string,
+ smi_zero, last_match_info);
- Label if_matched(a), if_didnotmatch(a);
- a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
+ Label if_matched(this), if_didnotmatch(this);
+ Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
- a->Bind(&if_didnotmatch);
+ Bind(&if_didnotmatch);
{
- FastStoreLastIndex(a, context, regexp, smi_zero);
- var_result.Bind(subject_string);
- a->Goto(&out);
+ FastStoreLastIndex(regexp, smi_zero);
+ var_result.Bind(string);
+ Goto(&out);
}
- a->Bind(&if_matched);
+ Bind(&if_matched);
{
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS;
-
Node* const subject_start = smi_zero;
- Node* const match_start = a->LoadFixedArrayElement(
- match_indices, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex),
- 0, mode);
- Node* const match_end = a->LoadFixedArrayElement(
- match_indices,
- a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0, mode);
- Node* const subject_end = a->LoadStringLength(subject_string);
-
- Label if_replaceisempty(a), if_replaceisnotempty(a);
- Node* const replace_length = a->LoadStringLength(replace_string);
- a->Branch(a->SmiEqual(replace_length, smi_zero), &if_replaceisempty,
- &if_replaceisnotempty);
-
- a->Bind(&if_replaceisempty);
+ Node* const match_start = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+ Node* const match_end = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+ Node* const subject_end = LoadStringLength(string);
+
+ Label if_replaceisempty(this), if_replaceisnotempty(this);
+ Node* const replace_length = LoadStringLength(replace_string);
+ Branch(SmiEqual(replace_length, smi_zero), &if_replaceisempty,
+ &if_replaceisnotempty);
+
+ Bind(&if_replaceisempty);
{
// TODO(jgruber): We could skip many of the checks that using SubString
// here entails.
Node* const first_part =
- a->SubString(context, subject_string, subject_start, match_start);
+ SubString(context, string, subject_start, match_start);
Node* const second_part =
- a->SubString(context, subject_string, match_end, subject_end);
+ SubString(context, string, match_end, subject_end);
- Node* const result = a->StringAdd(context, first_part, second_part);
+ Node* const result = StringAdd(context, first_part, second_part);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&if_replaceisnotempty);
+ Bind(&if_replaceisnotempty);
{
Node* const first_part =
- a->SubString(context, subject_string, subject_start, match_start);
+ SubString(context, string, subject_start, match_start);
Node* const second_part = replace_string;
Node* const third_part =
- a->SubString(context, subject_string, match_end, subject_end);
+ SubString(context, string, match_end, subject_end);
- Node* result = a->StringAdd(context, first_part, second_part);
- result = a->StringAdd(context, result, third_part);
+ Node* result = StringAdd(context, first_part, second_part);
+ result = StringAdd(context, result, third_part);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
}
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
-} // namespace
-
// ES#sec-regexp.prototype-@@replace
// RegExp.prototype [ @@replace ] ( string, replaceValue )
-void Builtins::Generate_RegExpPrototypeReplace(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Isolate* const isolate = a->isolate();
-
- Node* const maybe_receiver = a->Parameter(0);
- Node* const maybe_string = a->Parameter(1);
- Node* const replace_value = a->Parameter(2);
- Node* const context = a->Parameter(5);
-
- Node* const int_zero = a->IntPtrConstant(0);
+TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const replace_value = Parameter(2);
+ Node* const context = Parameter(5);
// Ensure {maybe_receiver} is a JSReceiver.
- Node* const map =
- ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@replace");
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@replace");
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Callable tostring_callable = CodeFactory::ToString(isolate);
- Node* const string = a->CallStub(tostring_callable, context, maybe_string);
+ Callable tostring_callable = CodeFactory::ToString(isolate());
+ Node* const string = CallStub(tostring_callable, context, maybe_string);
// Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
- Label checkreplacecallable(a), runtime(a, Label::kDeferred), fastpath(a);
- BranchIfFastPath(a, context, map, &checkreplacecallable, &runtime);
+ Label checkreplacecallable(this), runtime(this, Label::kDeferred),
+ fastpath(this);
+ BranchIfFastRegExp(context, map, &checkreplacecallable, &runtime);
- a->Bind(&checkreplacecallable);
+ Bind(&checkreplacecallable);
Node* const regexp = receiver;
// 2. Is {replace_value} callable?
- Label checkreplacestring(a), if_iscallable(a);
- a->GotoIf(a->TaggedIsSmi(replace_value), &checkreplacestring);
+ Label checkreplacestring(this), if_iscallable(this);
+ GotoIf(TaggedIsSmi(replace_value), &checkreplacestring);
- Node* const replace_value_map = a->LoadMap(replace_value);
- a->Branch(a->IsCallableMap(replace_value_map), &if_iscallable,
- &checkreplacestring);
+ Node* const replace_value_map = LoadMap(replace_value);
+ Branch(IsCallableMap(replace_value_map), &if_iscallable, &checkreplacestring);
// 3. Does ToString({replace_value}) contain '$'?
- a->Bind(&checkreplacestring);
+ Bind(&checkreplacestring);
{
Node* const replace_string =
- a->CallStub(tostring_callable, context, replace_value);
+ CallStub(tostring_callable, context, replace_value);
- Node* const dollar_char = a->IntPtrConstant('$');
- Node* const smi_minusone = a->SmiConstant(Smi::FromInt(-1));
- a->GotoUnless(a->SmiEqual(a->StringIndexOfChar(context, replace_string,
- dollar_char, int_zero),
- smi_minusone),
- &runtime);
+ Node* const dollar_char = Int32Constant('$');
+ Node* const smi_minusone = SmiConstant(Smi::FromInt(-1));
+ GotoUnless(SmiEqual(StringIndexOfChar(context, replace_string, dollar_char,
+ SmiConstant(0)),
+ smi_minusone),
+ &runtime);
- a->Return(ReplaceSimpleStringFastPath(a, context, regexp, string,
- replace_string));
+ Return(
+ ReplaceSimpleStringFastPath(context, regexp, string, replace_string));
}
// {regexp} is unmodified and {replace_value} is callable.
- a->Bind(&if_iscallable);
+ Bind(&if_iscallable);
{
Node* const replace_callable = replace_value;
// Check if the {regexp} is global.
- Label if_isglobal(a), if_isnotglobal(a);
- Node* const is_global = FastFlagGetter(a, regexp, JSRegExp::kGlobal);
- a->Branch(is_global, &if_isglobal, &if_isnotglobal);
+ Label if_isglobal(this), if_isnotglobal(this);
+ Node* const is_global = FastFlagGetter(regexp, JSRegExp::kGlobal);
+ Branch(is_global, &if_isglobal, &if_isnotglobal);
- a->Bind(&if_isglobal);
+ Bind(&if_isglobal);
{
Node* const result = ReplaceGlobalCallableFastPath(
- a, context, regexp, string, replace_callable);
- a->Return(result);
+ context, regexp, string, replace_callable);
+ Return(result);
}
- a->Bind(&if_isnotglobal);
+ Bind(&if_isnotglobal);
{
Node* const result =
- a->CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
- context, string, regexp, replace_callable);
- a->Return(result);
+ CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
+ context, string, regexp, replace_callable);
+ Return(result);
}
}
- a->Bind(&runtime);
+ Bind(&runtime);
{
- Node* const result = a->CallRuntime(Runtime::kRegExpReplace, context,
- receiver, string, replace_value);
- a->Return(result);
+ Node* const result = CallRuntime(Runtime::kRegExpReplace, context, receiver,
+ string, replace_value);
+ Return(result);
}
}
// Simple string matching functionality for internal use which does not modify
// the last match info.
-void Builtins::Generate_RegExpInternalMatch(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Isolate* const isolate = a->isolate();
-
- Node* const regexp = a->Parameter(1);
- Node* const string = a->Parameter(2);
- Node* const context = a->Parameter(5);
+TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
+ Node* const regexp = Parameter(1);
+ Node* const string = Parameter(2);
+ Node* const context = Parameter(5);
- Node* const null = a->NullConstant();
- Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+ Node* const null = NullConstant();
+ Node* const smi_zero = SmiConstant(Smi::FromInt(0));
- Node* const native_context = a->LoadNativeContext(context);
- Node* const internal_match_info = a->LoadContextElement(
+ Node* const native_context = LoadNativeContext(context);
+ Node* const internal_match_info = LoadContextElement(
native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
- Callable exec_callable = CodeFactory::RegExpExec(isolate);
- Node* const match_indices = a->CallStub(
- exec_callable, context, regexp, string, smi_zero, internal_match_info);
+ Callable exec_callable = CodeFactory::RegExpExec(isolate());
+ Node* const match_indices = CallStub(exec_callable, context, regexp, string,
+ smi_zero, internal_match_info);
- Label if_matched(a), if_didnotmatch(a);
- a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
+ Label if_matched(this), if_didnotmatch(this);
+ Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
- a->Bind(&if_didnotmatch);
- a->Return(null);
+ Bind(&if_didnotmatch);
+ Return(null);
- a->Bind(&if_matched);
+ Bind(&if_matched);
{
- Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
- match_indices, string);
- a->Return(result);
+ Node* result =
+ ConstructNewResultFromMatchInfo(context, match_indices, string);
+ Return(result);
}
}
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 2b5bf498a5..53caf1fe21 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -31,7 +31,7 @@ void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
compiler::Node* context,
compiler::Node** out_instance_type,
compiler::Node** out_backing_store) {
- using namespace compiler;
+ using compiler::Node;
CodeStubAssembler::Label is_smi(a), not_smi(a), is_typed_array(a),
not_typed_array(a), is_shared(a), not_shared(a), is_float_or_clamped(a),
not_float_or_clamped(a), invalid(a);
@@ -43,8 +43,8 @@ void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
// Fail if the array's instance type is not JSTypedArray.
a->Bind(&not_smi);
- a->Branch(a->WordEqual(a->LoadInstanceType(tagged),
- a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ a->Branch(a->Word32Equal(a->LoadInstanceType(tagged),
+ a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
&is_typed_array, &not_typed_array);
a->Bind(&not_typed_array);
a->Goto(&invalid);
@@ -88,14 +88,15 @@ void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
Node* byte_offset = a->ChangeUint32ToWord(a->TruncateTaggedToWord32(
context,
a->LoadObjectField(tagged, JSArrayBufferView::kByteOffsetOffset)));
- *out_backing_store = a->IntPtrAdd(backing_store, byte_offset);
+ *out_backing_store =
+ a->IntPtrAdd(a->BitcastTaggedToWord(backing_store), byte_offset);
}
// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
compiler::Node* tagged,
compiler::Node* context) {
- using namespace compiler;
+ using compiler::Node;
CodeStubAssembler::Variable var_result(a, MachineRepresentation::kWord32);
Callable to_number = CodeFactory::ToNumber(a->isolate());
@@ -139,13 +140,13 @@ compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
compiler::Node* array_length_word,
compiler::Node* context) {
- using namespace compiler;
+ using compiler::Node;
// Check if the index is in bounds. If not, throw RangeError.
CodeStubAssembler::Label if_inbounds(a), if_notinbounds(a);
// TODO(jkummerow): Use unsigned comparison instead of "i<0 || i>length".
a->Branch(
- a->WordOr(a->Int32LessThan(index_word, a->Int32Constant(0)),
- a->Int32GreaterThanOrEqual(index_word, array_length_word)),
+ a->Word32Or(a->Int32LessThan(index_word, a->Int32Constant(0)),
+ a->Int32GreaterThanOrEqual(index_word, array_length_word)),
&if_notinbounds, &if_inbounds);
a->Bind(&if_notinbounds);
a->Return(
@@ -155,24 +156,25 @@ void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
} // anonymous namespace
-void Builtins::Generate_AtomicsLoad(CodeStubAssembler* a) {
- using namespace compiler;
- Node* array = a->Parameter(1);
- Node* index = a->Parameter(2);
- Node* context = a->Parameter(3 + 2);
+void Builtins::Generate_AtomicsLoad(compiler::CodeAssemblerState* state) {
+ using compiler::Node;
+ CodeStubAssembler a(state);
+ Node* array = a.Parameter(1);
+ Node* index = a.Parameter(2);
+ Node* context = a.Parameter(3 + 2);
Node* instance_type;
Node* backing_store;
- ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+ ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store);
- Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
- Node* array_length_word32 = a->TruncateTaggedToWord32(
- context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
- ValidateAtomicIndex(a, index_word32, array_length_word32, context);
- Node* index_word = a->ChangeUint32ToWord(index_word32);
+ Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context);
+ Node* array_length_word32 = a.TruncateTaggedToWord32(
+ context, a.LoadObjectField(array, JSTypedArray::kLengthOffset));
+ ValidateAtomicIndex(&a, index_word32, array_length_word32, context);
+ Node* index_word = a.ChangeUint32ToWord(index_word32);
- CodeStubAssembler::Label i8(a), u8(a), i16(a), u16(a), i32(a), u32(a),
- other(a);
+ CodeStubAssembler::Label i8(&a), u8(&a), i16(&a), u16(&a), i32(&a), u32(&a),
+ other(&a);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
@@ -180,59 +182,60 @@ void Builtins::Generate_AtomicsLoad(CodeStubAssembler* a) {
CodeStubAssembler::Label* case_labels[] = {
&i8, &u8, &i16, &u16, &i32, &u32,
};
- a->Switch(instance_type, &other, case_values, case_labels,
- arraysize(case_labels));
+ a.Switch(instance_type, &other, case_values, case_labels,
+ arraysize(case_labels));
- a->Bind(&i8);
- a->Return(
- a->SmiTag(a->AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+ a.Bind(&i8);
+ a.Return(a.SmiFromWord32(
+ a.AtomicLoad(MachineType::Int8(), backing_store, index_word)));
- a->Bind(&u8);
- a->Return(a->SmiTag(
- a->AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
+ a.Bind(&u8);
+ a.Return(a.SmiFromWord32(
+ a.AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
- a->Bind(&i16);
- a->Return(a->SmiTag(a->AtomicLoad(MachineType::Int16(), backing_store,
- a->WordShl(index_word, 1))));
+ a.Bind(&i16);
+ a.Return(a.SmiFromWord32(a.AtomicLoad(MachineType::Int16(), backing_store,
+ a.WordShl(index_word, 1))));
- a->Bind(&u16);
- a->Return(a->SmiTag(a->AtomicLoad(MachineType::Uint16(), backing_store,
- a->WordShl(index_word, 1))));
+ a.Bind(&u16);
+ a.Return(a.SmiFromWord32(a.AtomicLoad(MachineType::Uint16(), backing_store,
+ a.WordShl(index_word, 1))));
- a->Bind(&i32);
- a->Return(a->ChangeInt32ToTagged(a->AtomicLoad(
- MachineType::Int32(), backing_store, a->WordShl(index_word, 2))));
+ a.Bind(&i32);
+ a.Return(a.ChangeInt32ToTagged(a.AtomicLoad(
+ MachineType::Int32(), backing_store, a.WordShl(index_word, 2))));
- a->Bind(&u32);
- a->Return(a->ChangeUint32ToTagged(a->AtomicLoad(
- MachineType::Uint32(), backing_store, a->WordShl(index_word, 2))));
+ a.Bind(&u32);
+ a.Return(a.ChangeUint32ToTagged(a.AtomicLoad(
+ MachineType::Uint32(), backing_store, a.WordShl(index_word, 2))));
// This shouldn't happen, we've already validated the type.
- a->Bind(&other);
- a->Return(a->Int32Constant(0));
+ a.Bind(&other);
+ a.Return(a.SmiConstant(0));
}
-void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
- using namespace compiler;
- Node* array = a->Parameter(1);
- Node* index = a->Parameter(2);
- Node* value = a->Parameter(3);
- Node* context = a->Parameter(4 + 2);
+void Builtins::Generate_AtomicsStore(compiler::CodeAssemblerState* state) {
+ using compiler::Node;
+ CodeStubAssembler a(state);
+ Node* array = a.Parameter(1);
+ Node* index = a.Parameter(2);
+ Node* value = a.Parameter(3);
+ Node* context = a.Parameter(4 + 2);
Node* instance_type;
Node* backing_store;
- ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+ ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store);
- Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
- Node* array_length_word32 = a->TruncateTaggedToWord32(
- context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
- ValidateAtomicIndex(a, index_word32, array_length_word32, context);
- Node* index_word = a->ChangeUint32ToWord(index_word32);
+ Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context);
+ Node* array_length_word32 = a.TruncateTaggedToWord32(
+ context, a.LoadObjectField(array, JSTypedArray::kLengthOffset));
+ ValidateAtomicIndex(&a, index_word32, array_length_word32, context);
+ Node* index_word = a.ChangeUint32ToWord(index_word32);
- Node* value_integer = a->ToInteger(context, value);
- Node* value_word32 = a->TruncateTaggedToWord32(context, value_integer);
+ Node* value_integer = a.ToInteger(context, value);
+ Node* value_word32 = a.TruncateTaggedToWord32(context, value_integer);
- CodeStubAssembler::Label u8(a), u16(a), u32(a), other(a);
+ CodeStubAssembler::Label u8(&a), u16(&a), u32(&a), other(&a);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
@@ -240,27 +243,27 @@ void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
CodeStubAssembler::Label* case_labels[] = {
&u8, &u8, &u16, &u16, &u32, &u32,
};
- a->Switch(instance_type, &other, case_values, case_labels,
- arraysize(case_labels));
+ a.Switch(instance_type, &other, case_values, case_labels,
+ arraysize(case_labels));
- a->Bind(&u8);
- a->AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
- value_word32);
- a->Return(value_integer);
+ a.Bind(&u8);
+ a.AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
+ value_word32);
+ a.Return(value_integer);
- a->Bind(&u16);
- a->AtomicStore(MachineRepresentation::kWord16, backing_store,
- a->WordShl(index_word, 1), value_word32);
- a->Return(value_integer);
+ a.Bind(&u16);
+ a.AtomicStore(MachineRepresentation::kWord16, backing_store,
+ a.WordShl(index_word, 1), value_word32);
+ a.Return(value_integer);
- a->Bind(&u32);
- a->AtomicStore(MachineRepresentation::kWord32, backing_store,
- a->WordShl(index_word, 2), value_word32);
- a->Return(value_integer);
+ a.Bind(&u32);
+ a.AtomicStore(MachineRepresentation::kWord32, backing_store,
+ a.WordShl(index_word, 2), value_word32);
+ a.Return(value_integer);
// This shouldn't happen, we've already validated the type.
- a->Bind(&other);
- a->Return(a->Int32Constant(0));
+ a.Bind(&other);
+ a.Return(a.SmiConstant(0));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 4ccccbc859..3259d0021a 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/regexp/regexp-utils.h"
namespace v8 {
@@ -14,9 +14,55 @@ namespace internal {
typedef CodeStubAssembler::ResultMode ResultMode;
typedef CodeStubAssembler::RelationalComparisonMode RelationalComparisonMode;
-namespace {
+class StringBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit StringBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ Node* LoadOneByteChar(Node* string, Node* index) {
+ return Load(MachineType::Uint8(), string, OneByteCharOffset(index));
+ }
+
+ Node* OneByteCharAddress(Node* string, Node* index) {
+ Node* offset = OneByteCharOffset(index);
+ return IntPtrAdd(BitcastTaggedToWord(string), offset);
+ }
+
+ Node* OneByteCharOffset(Node* index) {
+ return CharOffset(String::ONE_BYTE_ENCODING, index);
+ }
+
+ Node* CharOffset(String::Encoding encoding, Node* index) {
+ const int header = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+ Node* offset = index;
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset = IntPtrAdd(offset, offset);
+ }
+ offset = IntPtrAdd(offset, IntPtrConstant(header));
+ return offset;
+ }
+
+ void BranchIfSimpleOneByteStringInstanceType(Node* instance_type,
+ Label* if_true,
+ Label* if_false) {
+ const int kMask = kStringRepresentationMask | kStringEncodingMask;
+ const int kType = kOneByteStringTag | kSeqStringTag;
+ Branch(Word32Equal(Word32And(instance_type, Int32Constant(kMask)),
+ Int32Constant(kType)),
+ if_true, if_false);
+ }
+
+ void GenerateStringEqual(ResultMode mode);
+ void GenerateStringRelationalComparison(RelationalComparisonMode mode);
+
+ Node* ToSmiBetweenZeroAnd(Node* context, Node* value, Node* limit);
-void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
+ Node* LoadSurrogatePairAt(Node* string, Node* length, Node* index,
+ UnicodeEncoding encoding);
+};
+
+void StringBuiltinsAssembler::GenerateStringEqual(ResultMode mode) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@@ -33,140 +79,89 @@ void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
// }
// return %StringEqual(lhs, rhs);
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- Label if_equal(assembler), if_notequal(assembler);
+ Label if_equal(this), if_notequal(this);
// Fast check to see if {lhs} and {rhs} refer to the same String object.
- Label if_same(assembler), if_notsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
-
- assembler->Bind(&if_same);
- assembler->Goto(&if_equal);
-
- assembler->Bind(&if_notsame);
+ GotoIf(WordEqual(lhs, rhs), &if_equal);
+
+ // Load the length of {lhs} and {rhs}.
+ Node* lhs_length = LoadStringLength(lhs);
+ Node* rhs_length = LoadStringLength(rhs);
+
+ // Strings with different lengths cannot be equal.
+ GotoIf(WordNotEqual(lhs_length, rhs_length), &if_notequal);
+
+ // Load instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = LoadInstanceType(lhs);
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
+ // Combine the instance types into a single 16-bit value, so we can check
+ // both of them at once.
+ Node* both_instance_types = Word32Or(
+ lhs_instance_type, Word32Shl(rhs_instance_type, Int32Constant(8)));
+
+ // Check if both {lhs} and {rhs} are internalized. Since we already know
+ // that they're not the same object, they're not equal in that case.
+ int const kBothInternalizedMask =
+ kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
+ int const kBothInternalizedTag = kInternalizedTag | (kInternalizedTag << 8);
+ GotoIf(Word32Equal(Word32And(both_instance_types,
+ Int32Constant(kBothInternalizedMask)),
+ Int32Constant(kBothInternalizedTag)),
+ &if_notequal);
+
+ // Check that both {lhs} and {rhs} are flat one-byte strings.
+ int const kBothSeqOneByteStringMask =
+ kStringEncodingMask | kStringRepresentationMask |
+ ((kStringEncodingMask | kStringRepresentationMask) << 8);
+ int const kBothSeqOneByteStringTag =
+ kOneByteStringTag | kSeqStringTag |
+ ((kOneByteStringTag | kSeqStringTag) << 8);
+ Label if_bothonebyteseqstrings(this), if_notbothonebyteseqstrings(this);
+ Branch(Word32Equal(Word32And(both_instance_types,
+ Int32Constant(kBothSeqOneByteStringMask)),
+ Int32Constant(kBothSeqOneByteStringTag)),
+ &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+ Bind(&if_bothonebyteseqstrings);
{
- // The {lhs} and {rhs} don't refer to the exact same String object.
+ // Compute the effective offset of the first character.
+ Node* begin =
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ // Compute the first offset after the string from the length.
+ Node* end = IntPtrAdd(begin, SmiUntag(lhs_length));
+
+ // Loop over the {lhs} and {rhs} strings to see if they are equal.
+ Variable var_offset(this, MachineType::PointerRepresentation());
+ Label loop(this, &var_offset);
+ var_offset.Bind(begin);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // If {offset} equals {end}, no difference was found, so the
+ // strings are equal.
+ Node* offset = var_offset.value();
+ GotoIf(WordEqual(offset, end), &if_equal);
- // Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadStringLength(lhs);
- Node* rhs_length = assembler->LoadStringLength(rhs);
+ // Load the next characters from {lhs} and {rhs}.
+ Node* lhs_value = Load(MachineType::Uint8(), lhs, offset);
+ Node* rhs_value = Load(MachineType::Uint8(), rhs, offset);
- // Check if the lengths of {lhs} and {rhs} are equal.
- Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
- assembler->Branch(assembler->WordEqual(lhs_length, rhs_length),
- &if_lengthisequal, &if_lengthisnotequal);
+ // Check if the characters match.
+ GotoIf(Word32NotEqual(lhs_value, rhs_value), &if_notequal);
- assembler->Bind(&if_lengthisequal);
- {
- // Load instance types of {lhs} and {rhs}.
- Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
- // Combine the instance types into a single 16-bit value, so we can check
- // both of them at once.
- Node* both_instance_types = assembler->Word32Or(
- lhs_instance_type,
- assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
-
- // Check if both {lhs} and {rhs} are internalized.
- int const kBothInternalizedMask =
- kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
- int const kBothInternalizedTag =
- kInternalizedTag | (kInternalizedTag << 8);
- Label if_bothinternalized(assembler), if_notbothinternalized(assembler);
- assembler->Branch(assembler->Word32Equal(
- assembler->Word32And(both_instance_types,
- assembler->Int32Constant(
- kBothInternalizedMask)),
- assembler->Int32Constant(kBothInternalizedTag)),
- &if_bothinternalized, &if_notbothinternalized);
-
- assembler->Bind(&if_bothinternalized);
- {
- // Fast negative check for internalized-to-internalized equality.
- assembler->Goto(&if_notequal);
- }
-
- assembler->Bind(&if_notbothinternalized);
- {
- // Check that both {lhs} and {rhs} are flat one-byte strings.
- int const kBothSeqOneByteStringMask =
- kStringEncodingMask | kStringRepresentationMask |
- ((kStringEncodingMask | kStringRepresentationMask) << 8);
- int const kBothSeqOneByteStringTag =
- kOneByteStringTag | kSeqStringTag |
- ((kOneByteStringTag | kSeqStringTag) << 8);
- Label if_bothonebyteseqstrings(assembler),
- if_notbothonebyteseqstrings(assembler);
- assembler->Branch(
- assembler->Word32Equal(
- assembler->Word32And(
- both_instance_types,
- assembler->Int32Constant(kBothSeqOneByteStringMask)),
- assembler->Int32Constant(kBothSeqOneByteStringTag)),
- &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
-
- assembler->Bind(&if_bothonebyteseqstrings);
- {
- // Compute the effective offset of the first character.
- Node* begin = assembler->IntPtrConstant(
- SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- // Compute the first offset after the string from the length.
- Node* end =
- assembler->IntPtrAdd(begin, assembler->SmiUntag(lhs_length));
-
- // Loop over the {lhs} and {rhs} strings to see if they are equal.
- Variable var_offset(assembler, MachineType::PointerRepresentation());
- Label loop(assembler, &var_offset);
- var_offset.Bind(begin);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Check if {offset} equals {end}.
- Node* offset = var_offset.value();
- Label if_done(assembler), if_notdone(assembler);
- assembler->Branch(assembler->WordEqual(offset, end), &if_done,
- &if_notdone);
-
- assembler->Bind(&if_notdone);
- {
- // Load the next characters from {lhs} and {rhs}.
- Node* lhs_value =
- assembler->Load(MachineType::Uint8(), lhs, offset);
- Node* rhs_value =
- assembler->Load(MachineType::Uint8(), rhs, offset);
-
- // Check if the characters match.
- Label if_valueissame(assembler), if_valueisnotsame(assembler);
- assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
- &if_valueissame, &if_valueisnotsame);
-
- assembler->Bind(&if_valueissame);
- {
- // Advance to next character.
- var_offset.Bind(
- assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
- }
- assembler->Goto(&loop);
-
- assembler->Bind(&if_valueisnotsame);
- assembler->Goto(&if_notequal);
- }
-
- assembler->Bind(&if_done);
- assembler->Goto(&if_equal);
- }
+ // Advance to next character.
+ var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
+ Goto(&loop);
+ }
}
- assembler->Bind(&if_notbothonebyteseqstrings);
+ Bind(&if_notbothonebyteseqstrings);
{
// TODO(bmeurer): Add fast case support for flattened cons strings;
// also add support for two byte string equality checks.
@@ -174,363 +169,328 @@ void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
(mode == ResultMode::kDontNegateResult)
? Runtime::kStringEqual
: Runtime::kStringNotEqual;
- assembler->TailCallRuntime(function_id, context, lhs, rhs);
+ TailCallRuntime(function_id, context, lhs, rhs);
}
- }
- }
- assembler->Bind(&if_lengthisnotequal);
- {
- // Mismatch in length of {lhs} and {rhs}, cannot be equal.
- assembler->Goto(&if_notequal);
- }
- }
-
- assembler->Bind(&if_equal);
- assembler->Return(
- assembler->BooleanConstant(mode == ResultMode::kDontNegateResult));
+ Bind(&if_equal);
+ Return(BooleanConstant(mode == ResultMode::kDontNegateResult));
- assembler->Bind(&if_notequal);
- assembler->Return(
- assembler->BooleanConstant(mode == ResultMode::kNegateResult));
+ Bind(&if_notequal);
+ Return(BooleanConstant(mode == ResultMode::kNegateResult));
}
+void StringBuiltinsAssembler::GenerateStringRelationalComparison(
+ RelationalComparisonMode mode) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
-void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
- RelationalComparisonMode mode) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+ Label if_less(this), if_equal(this), if_greater(this);
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+ // Fast check to see if {lhs} and {rhs} refer to the same String object.
+ GotoIf(WordEqual(lhs, rhs), &if_equal);
+
+ // Load instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = LoadInstanceType(lhs);
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
+ // Combine the instance types into a single 16-bit value, so we can check
+ // both of them at once.
+ Node* both_instance_types = Word32Or(
+ lhs_instance_type, Word32Shl(rhs_instance_type, Int32Constant(8)));
+
+ // Check that both {lhs} and {rhs} are flat one-byte strings.
+ int const kBothSeqOneByteStringMask =
+ kStringEncodingMask | kStringRepresentationMask |
+ ((kStringEncodingMask | kStringRepresentationMask) << 8);
+ int const kBothSeqOneByteStringTag =
+ kOneByteStringTag | kSeqStringTag |
+ ((kOneByteStringTag | kSeqStringTag) << 8);
+ Label if_bothonebyteseqstrings(this), if_notbothonebyteseqstrings(this);
+ Branch(Word32Equal(Word32And(both_instance_types,
+ Int32Constant(kBothSeqOneByteStringMask)),
+ Int32Constant(kBothSeqOneByteStringTag)),
+ &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+ Bind(&if_bothonebyteseqstrings);
+ {
+ // Load the length of {lhs} and {rhs}.
+ Node* lhs_length = LoadStringLength(lhs);
+ Node* rhs_length = LoadStringLength(rhs);
- Label if_less(assembler), if_equal(assembler), if_greater(assembler);
+ // Determine the minimum length.
+ Node* length = SmiMin(lhs_length, rhs_length);
- // Fast check to see if {lhs} and {rhs} refer to the same String object.
- Label if_same(assembler), if_notsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+ // Compute the effective offset of the first character.
+ Node* begin =
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag);
- assembler->Bind(&if_same);
- assembler->Goto(&if_equal);
+ // Compute the first offset after the string from the length.
+ Node* end = IntPtrAdd(begin, SmiUntag(length));
- assembler->Bind(&if_notsame);
- {
- // Load instance types of {lhs} and {rhs}.
- Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
- // Combine the instance types into a single 16-bit value, so we can check
- // both of them at once.
- Node* both_instance_types = assembler->Word32Or(
- lhs_instance_type,
- assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
-
- // Check that both {lhs} and {rhs} are flat one-byte strings.
- int const kBothSeqOneByteStringMask =
- kStringEncodingMask | kStringRepresentationMask |
- ((kStringEncodingMask | kStringRepresentationMask) << 8);
- int const kBothSeqOneByteStringTag =
- kOneByteStringTag | kSeqStringTag |
- ((kOneByteStringTag | kSeqStringTag) << 8);
- Label if_bothonebyteseqstrings(assembler),
- if_notbothonebyteseqstrings(assembler);
- assembler->Branch(assembler->Word32Equal(
- assembler->Word32And(both_instance_types,
- assembler->Int32Constant(
- kBothSeqOneByteStringMask)),
- assembler->Int32Constant(kBothSeqOneByteStringTag)),
- &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
-
- assembler->Bind(&if_bothonebyteseqstrings);
+ // Loop over the {lhs} and {rhs} strings to see if they are equal.
+ Variable var_offset(this, MachineType::PointerRepresentation());
+ Label loop(this, &var_offset);
+ var_offset.Bind(begin);
+ Goto(&loop);
+ Bind(&loop);
{
- // Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadStringLength(lhs);
- Node* rhs_length = assembler->LoadStringLength(rhs);
-
- // Determine the minimum length.
- Node* length = assembler->SmiMin(lhs_length, rhs_length);
-
- // Compute the effective offset of the first character.
- Node* begin = assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
- kHeapObjectTag);
-
- // Compute the first offset after the string from the length.
- Node* end = assembler->IntPtrAdd(begin, assembler->SmiUntag(length));
-
- // Loop over the {lhs} and {rhs} strings to see if they are equal.
- Variable var_offset(assembler, MachineType::PointerRepresentation());
- Label loop(assembler, &var_offset);
- var_offset.Bind(begin);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ // Check if {offset} equals {end}.
+ Node* offset = var_offset.value();
+ Label if_done(this), if_notdone(this);
+ Branch(WordEqual(offset, end), &if_done, &if_notdone);
+
+ Bind(&if_notdone);
{
- // Check if {offset} equals {end}.
- Node* offset = var_offset.value();
- Label if_done(assembler), if_notdone(assembler);
- assembler->Branch(assembler->WordEqual(offset, end), &if_done,
- &if_notdone);
+ // Load the next characters from {lhs} and {rhs}.
+ Node* lhs_value = Load(MachineType::Uint8(), lhs, offset);
+ Node* rhs_value = Load(MachineType::Uint8(), rhs, offset);
- assembler->Bind(&if_notdone);
- {
- // Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = assembler->Load(MachineType::Uint8(), lhs, offset);
- Node* rhs_value = assembler->Load(MachineType::Uint8(), rhs, offset);
-
- // Check if the characters match.
- Label if_valueissame(assembler), if_valueisnotsame(assembler);
- assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
- &if_valueissame, &if_valueisnotsame);
-
- assembler->Bind(&if_valueissame);
- {
- // Advance to next character.
- var_offset.Bind(
- assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
- }
- assembler->Goto(&loop);
-
- assembler->Bind(&if_valueisnotsame);
- assembler->Branch(assembler->Uint32LessThan(lhs_value, rhs_value),
- &if_less, &if_greater);
- }
+ // Check if the characters match.
+ Label if_valueissame(this), if_valueisnotsame(this);
+ Branch(Word32Equal(lhs_value, rhs_value), &if_valueissame,
+ &if_valueisnotsame);
- assembler->Bind(&if_done);
+ Bind(&if_valueissame);
{
- // All characters up to the min length are equal, decide based on
- // string length.
- Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
- assembler->Branch(assembler->SmiEqual(lhs_length, rhs_length),
- &if_lengthisequal, &if_lengthisnotequal);
-
- assembler->Bind(&if_lengthisequal);
- assembler->Goto(&if_equal);
-
- assembler->Bind(&if_lengthisnotequal);
- assembler->BranchIfSmiLessThan(lhs_length, rhs_length, &if_less,
- &if_greater);
+ // Advance to next character.
+ var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
}
+ Goto(&loop);
+
+ Bind(&if_valueisnotsame);
+ Branch(Uint32LessThan(lhs_value, rhs_value), &if_less, &if_greater);
+ }
+
+ Bind(&if_done);
+ {
+ // All characters up to the min length are equal, decide based on
+ // string length.
+ GotoIf(SmiEqual(lhs_length, rhs_length), &if_equal);
+ BranchIfSmiLessThan(lhs_length, rhs_length, &if_less, &if_greater);
}
}
+ }
- assembler->Bind(&if_notbothonebyteseqstrings);
+ Bind(&if_notbothonebyteseqstrings);
{
// TODO(bmeurer): Add fast case support for flattened cons strings;
// also add support for two byte string relational comparisons.
switch (mode) {
case RelationalComparisonMode::kLessThan:
- assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
- rhs);
+ TailCallRuntime(Runtime::kStringLessThan, context, lhs, rhs);
break;
case RelationalComparisonMode::kLessThanOrEqual:
- assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
- lhs, rhs);
+ TailCallRuntime(Runtime::kStringLessThanOrEqual, context, lhs, rhs);
break;
case RelationalComparisonMode::kGreaterThan:
- assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
- rhs);
+ TailCallRuntime(Runtime::kStringGreaterThan, context, lhs, rhs);
break;
case RelationalComparisonMode::kGreaterThanOrEqual:
- assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
- context, lhs, rhs);
+ TailCallRuntime(Runtime::kStringGreaterThanOrEqual, context, lhs,
+ rhs);
break;
}
}
- }
- assembler->Bind(&if_less);
- switch (mode) {
- case RelationalComparisonMode::kLessThan:
- case RelationalComparisonMode::kLessThanOrEqual:
- assembler->Return(assembler->BooleanConstant(true));
- break;
+ Bind(&if_less);
+ switch (mode) {
+ case RelationalComparisonMode::kLessThan:
+ case RelationalComparisonMode::kLessThanOrEqual:
+ Return(BooleanConstant(true));
+ break;
- case RelationalComparisonMode::kGreaterThan:
- case RelationalComparisonMode::kGreaterThanOrEqual:
- assembler->Return(assembler->BooleanConstant(false));
- break;
+ case RelationalComparisonMode::kGreaterThan:
+ case RelationalComparisonMode::kGreaterThanOrEqual:
+ Return(BooleanConstant(false));
+ break;
}
- assembler->Bind(&if_equal);
+ Bind(&if_equal);
switch (mode) {
case RelationalComparisonMode::kLessThan:
case RelationalComparisonMode::kGreaterThan:
- assembler->Return(assembler->BooleanConstant(false));
+ Return(BooleanConstant(false));
break;
case RelationalComparisonMode::kLessThanOrEqual:
case RelationalComparisonMode::kGreaterThanOrEqual:
- assembler->Return(assembler->BooleanConstant(true));
+ Return(BooleanConstant(true));
break;
}
- assembler->Bind(&if_greater);
+ Bind(&if_greater);
switch (mode) {
case RelationalComparisonMode::kLessThan:
case RelationalComparisonMode::kLessThanOrEqual:
- assembler->Return(assembler->BooleanConstant(false));
+ Return(BooleanConstant(false));
break;
case RelationalComparisonMode::kGreaterThan:
case RelationalComparisonMode::kGreaterThanOrEqual:
- assembler->Return(assembler->BooleanConstant(true));
+ Return(BooleanConstant(true));
break;
}
}
-} // namespace
-
-// static
-void Builtins::Generate_StringEqual(CodeStubAssembler* assembler) {
- GenerateStringEqual(assembler, ResultMode::kDontNegateResult);
+TF_BUILTIN(StringEqual, StringBuiltinsAssembler) {
+ GenerateStringEqual(ResultMode::kDontNegateResult);
}
-// static
-void Builtins::Generate_StringNotEqual(CodeStubAssembler* assembler) {
- GenerateStringEqual(assembler, ResultMode::kNegateResult);
+TF_BUILTIN(StringNotEqual, StringBuiltinsAssembler) {
+ GenerateStringEqual(ResultMode::kNegateResult);
}
-// static
-void Builtins::Generate_StringLessThan(CodeStubAssembler* assembler) {
- GenerateStringRelationalComparison(assembler,
- RelationalComparisonMode::kLessThan);
+TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) {
+ GenerateStringRelationalComparison(RelationalComparisonMode::kLessThan);
}
-// static
-void Builtins::Generate_StringLessThanOrEqual(CodeStubAssembler* assembler) {
+TF_BUILTIN(StringLessThanOrEqual, StringBuiltinsAssembler) {
GenerateStringRelationalComparison(
- assembler, RelationalComparisonMode::kLessThanOrEqual);
+ RelationalComparisonMode::kLessThanOrEqual);
}
-// static
-void Builtins::Generate_StringGreaterThan(CodeStubAssembler* assembler) {
- GenerateStringRelationalComparison(assembler,
- RelationalComparisonMode::kGreaterThan);
+TF_BUILTIN(StringGreaterThan, StringBuiltinsAssembler) {
+ GenerateStringRelationalComparison(RelationalComparisonMode::kGreaterThan);
}
-// static
-void Builtins::Generate_StringGreaterThanOrEqual(CodeStubAssembler* assembler) {
+TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
GenerateStringRelationalComparison(
- assembler, RelationalComparisonMode::kGreaterThanOrEqual);
+ RelationalComparisonMode::kGreaterThanOrEqual);
+}
+
+TF_BUILTIN(StringCharAt, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* position = Parameter(1);
+
+ // Load the character code at the {position} from the {receiver}.
+ Node* code = StringCharCodeAt(receiver, position,
+ CodeStubAssembler::INTPTR_PARAMETERS);
+
+ // And return the single character string with only that {code}
+ Node* result = StringFromCharCode(code);
+ Return(result);
+}
+
+TF_BUILTIN(StringCharCodeAt, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* position = Parameter(1);
+
+ // Load the character code at the {position} from the {receiver}.
+ Node* code = StringCharCodeAt(receiver, position,
+ CodeStubAssembler::INTPTR_PARAMETERS);
+
+ // And return it as TaggedSigned value.
+ // TODO(turbofan): Allow builtins to return values untagged.
+ Node* result = SmiFromWord32(code);
+ Return(result);
}
// -----------------------------------------------------------------------------
// ES6 section 21.1 String Objects
// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
-void Builtins::Generate_StringFromCharCode(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
+ Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* argc = assembler->ChangeInt32ToIntPtr(
- assembler->Parameter(BuiltinDescriptor::kArgumentsCount));
- Node* context = assembler->Parameter(BuiltinDescriptor::kContext);
-
- CodeStubArguments arguments(assembler, argc);
+ CodeStubArguments arguments(this, argc);
+ // From now on use word-size argc value.
+ argc = arguments.GetLength();
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
- Label if_oneargument(assembler), if_notoneargument(assembler);
- assembler->Branch(assembler->WordEqual(argc, assembler->IntPtrConstant(1)),
- &if_oneargument, &if_notoneargument);
+ Label if_oneargument(this), if_notoneargument(this);
+ Branch(WordEqual(argc, IntPtrConstant(1)), &if_oneargument,
+ &if_notoneargument);
- assembler->Bind(&if_oneargument);
+ Bind(&if_oneargument);
{
// Single argument case, perform fast single character string cache lookup
// for one-byte code units, or fall back to creating a single character
// string on the fly otherwise.
Node* code = arguments.AtIndex(0);
- Node* code32 = assembler->TruncateTaggedToWord32(context, code);
- Node* code16 = assembler->Word32And(
- code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
- Node* result = assembler->StringFromCharCode(code16);
+ Node* code32 = TruncateTaggedToWord32(context, code);
+ Node* code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
+ Node* result = StringFromCharCode(code16);
arguments.PopAndReturn(result);
}
Node* code16 = nullptr;
- assembler->Bind(&if_notoneargument);
+ Bind(&if_notoneargument);
{
- Label two_byte(assembler);
+ Label two_byte(this);
// Assume that the resulting string contains only one-byte characters.
- Node* one_byte_result = assembler->AllocateSeqOneByteString(context, argc);
+ Node* one_byte_result = AllocateSeqOneByteString(context, argc);
- Variable max_index(assembler, MachineType::PointerRepresentation());
- max_index.Bind(assembler->IntPtrConstant(0));
+ Variable max_index(this, MachineType::PointerRepresentation());
+ max_index.Bind(IntPtrConstant(0));
// Iterate over the incoming arguments, converting them to 8-bit character
// codes. Stop if any of the conversions generates a code that doesn't fit
// in 8 bits.
- CodeStubAssembler::VariableList vars({&max_index}, assembler->zone());
- arguments.ForEach(vars, [context, &two_byte, &max_index, &code16,
- one_byte_result](CodeStubAssembler* assembler,
- Node* arg) {
- Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
- code16 = assembler->Word32And(
- code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
-
- assembler->GotoIf(
- assembler->Int32GreaterThan(
- code16, assembler->Int32Constant(String::kMaxOneByteCharCode)),
+ CodeStubAssembler::VariableList vars({&max_index}, zone());
+ arguments.ForEach(vars, [this, context, &two_byte, &max_index, &code16,
+ one_byte_result](Node* arg) {
+ Node* code32 = TruncateTaggedToWord32(context, arg);
+ code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
+
+ GotoIf(
+ Int32GreaterThan(code16, Int32Constant(String::kMaxOneByteCharCode)),
&two_byte);
// The {code16} fits into the SeqOneByteString {one_byte_result}.
- Node* offset = assembler->ElementOffsetFromIndex(
+ Node* offset = ElementOffsetFromIndex(
max_index.value(), UINT8_ELEMENTS,
CodeStubAssembler::INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize - kHeapObjectTag);
- assembler->StoreNoWriteBarrier(MachineRepresentation::kWord8,
- one_byte_result, offset, code16);
- max_index.Bind(assembler->IntPtrAdd(max_index.value(),
- assembler->IntPtrConstant(1)));
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result,
+ offset, code16);
+ max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
});
arguments.PopAndReturn(one_byte_result);
- assembler->Bind(&two_byte);
+ Bind(&two_byte);
// At least one of the characters in the string requires a 16-bit
// representation. Allocate a SeqTwoByteString to hold the resulting
// string.
- Node* two_byte_result = assembler->AllocateSeqTwoByteString(context, argc);
+ Node* two_byte_result = AllocateSeqTwoByteString(context, argc);
// Copy the characters that have already been put in the 8-bit string into
// their corresponding positions in the new 16-bit string.
- Node* zero = assembler->IntPtrConstant(0);
- assembler->CopyStringCharacters(
- one_byte_result, two_byte_result, zero, zero, max_index.value(),
- String::ONE_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
- CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* zero = IntPtrConstant(0);
+ CopyStringCharacters(one_byte_result, two_byte_result, zero, zero,
+ max_index.value(), String::ONE_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING,
+ CodeStubAssembler::INTPTR_PARAMETERS);
// Write the character that caused the 8-bit to 16-bit fault.
- Node* max_index_offset = assembler->ElementOffsetFromIndex(
- max_index.value(), UINT16_ELEMENTS,
- CodeStubAssembler::INTPTR_PARAMETERS,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
- two_byte_result, max_index_offset, code16);
- max_index.Bind(
- assembler->IntPtrAdd(max_index.value(), assembler->IntPtrConstant(1)));
+ Node* max_index_offset =
+ ElementOffsetFromIndex(max_index.value(), UINT16_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
+ max_index_offset, code16);
+ max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
// Resume copying the passed-in arguments from the same place where the
// 8-bit copy stopped, but this time copying over all of the characters
// using a 16-bit representation.
arguments.ForEach(
vars,
- [context, two_byte_result, &max_index](CodeStubAssembler* assembler,
- Node* arg) {
- Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
- Node* code16 = assembler->Word32And(
- code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+ [this, context, two_byte_result, &max_index](Node* arg) {
+ Node* code32 = TruncateTaggedToWord32(context, arg);
+ Node* code16 =
+ Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
- Node* offset = assembler->ElementOffsetFromIndex(
+ Node* offset = ElementOffsetFromIndex(
max_index.value(), UINT16_ELEMENTS,
CodeStubAssembler::INTPTR_PARAMETERS,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
- two_byte_result, offset, code16);
- max_index.Bind(assembler->IntPtrAdd(max_index.value(),
- assembler->IntPtrConstant(1)));
+ StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
+ offset, code16);
+ max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
},
max_index.value());
@@ -558,7 +518,7 @@ bool IsValidCodePoint(Isolate* isolate, Handle<Object> value) {
}
uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) {
- Handle<Object> value = args.at<Object>(1 + index);
+ Handle<Object> value = args.at(1 + index);
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::ToNumber(value), -1);
if (!IsValidCodePoint(isolate, value)) {
isolate->Throw(*isolate->factory()->NewRangeError(
@@ -632,91 +592,79 @@ BUILTIN(StringFromCodePoint) {
}
// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
-void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(0);
- Node* position = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
+TF_BUILTIN(StringPrototypeCharAt, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* position = Parameter(1);
+ Node* context = Parameter(4);
// Check that {receiver} is coercible to Object and convert it to a String.
- receiver =
- assembler->ToThisString(context, receiver, "String.prototype.charAt");
+ receiver = ToThisString(context, receiver, "String.prototype.charAt");
// Convert the {position} to a Smi and check that it's in bounds of the
// {receiver}.
{
- Label return_emptystring(assembler, Label::kDeferred);
- position = assembler->ToInteger(context, position,
- CodeStubAssembler::kTruncateMinusZero);
- assembler->GotoUnless(assembler->TaggedIsSmi(position),
- &return_emptystring);
+ Label return_emptystring(this, Label::kDeferred);
+ position =
+ ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
+ GotoUnless(TaggedIsSmi(position), &return_emptystring);
// Determine the actual length of the {receiver} String.
- Node* receiver_length =
- assembler->LoadObjectField(receiver, String::kLengthOffset);
+ Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
// Return "" if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(assembler);
- assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
- &return_emptystring, &if_positioninbounds);
+ Label if_positioninbounds(this);
+ Branch(SmiAboveOrEqual(position, receiver_length), &return_emptystring,
+ &if_positioninbounds);
- assembler->Bind(&return_emptystring);
- assembler->Return(assembler->EmptyStringConstant());
+ Bind(&return_emptystring);
+ Return(EmptyStringConstant());
- assembler->Bind(&if_positioninbounds);
+ Bind(&if_positioninbounds);
}
// Load the character code at the {position} from the {receiver}.
- Node* code = assembler->StringCharCodeAt(receiver, position);
+ Node* code = StringCharCodeAt(receiver, position);
// And return the single character string with only that {code}.
- Node* result = assembler->StringFromCharCode(code);
- assembler->Return(result);
+ Node* result = StringFromCharCode(code);
+ Return(result);
}
// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
-void Builtins::Generate_StringPrototypeCharCodeAt(
- CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(0);
- Node* position = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
+TF_BUILTIN(StringPrototypeCharCodeAt, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* position = Parameter(1);
+ Node* context = Parameter(4);
// Check that {receiver} is coercible to Object and convert it to a String.
- receiver =
- assembler->ToThisString(context, receiver, "String.prototype.charCodeAt");
+ receiver = ToThisString(context, receiver, "String.prototype.charCodeAt");
// Convert the {position} to a Smi and check that it's in bounds of the
// {receiver}.
{
- Label return_nan(assembler, Label::kDeferred);
- position = assembler->ToInteger(context, position,
- CodeStubAssembler::kTruncateMinusZero);
- assembler->GotoUnless(assembler->TaggedIsSmi(position), &return_nan);
+ Label return_nan(this, Label::kDeferred);
+ position =
+ ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
+ GotoUnless(TaggedIsSmi(position), &return_nan);
// Determine the actual length of the {receiver} String.
- Node* receiver_length =
- assembler->LoadObjectField(receiver, String::kLengthOffset);
+ Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
// Return NaN if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(assembler);
- assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
- &return_nan, &if_positioninbounds);
+ Label if_positioninbounds(this);
+ Branch(SmiAboveOrEqual(position, receiver_length), &return_nan,
+ &if_positioninbounds);
- assembler->Bind(&return_nan);
- assembler->Return(assembler->NaNConstant());
+ Bind(&return_nan);
+ Return(NaNConstant());
- assembler->Bind(&if_positioninbounds);
+ Bind(&if_positioninbounds);
}
// Load the character at the {position} from the {receiver}.
- Node* value = assembler->StringCharCodeAt(receiver, position);
- Node* result = assembler->SmiFromWord32(value);
- assembler->Return(result);
+ Node* value = StringCharCodeAt(receiver, position);
+ Node* result = SmiFromWord32(value);
+ Return(result);
}
// ES6 section 21.1.3.6
@@ -750,16 +698,30 @@ BUILTIN(StringPrototypeEndsWith) {
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
Object::ToInteger(isolate, position));
- double index = std::max(position->Number(), 0.0);
- index = std::min(index, static_cast<double>(str->length()));
- end = static_cast<uint32_t>(index);
+ end = str->ToValidIndex(*position);
}
int start = end - search_string->length();
if (start < 0) return isolate->heap()->false_value();
- FlatStringReader str_reader(isolate, String::Flatten(str));
- FlatStringReader search_reader(isolate, String::Flatten(search_string));
+ str = String::Flatten(str);
+ search_string = String::Flatten(search_string);
+
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+ String::FlatContent str_content = str->GetFlatContent();
+ String::FlatContent search_content = search_string->GetFlatContent();
+
+ if (str_content.IsOneByte() && search_content.IsOneByte()) {
+ Vector<const uint8_t> str_vector = str_content.ToOneByteVector();
+ Vector<const uint8_t> search_vector = search_content.ToOneByteVector();
+
+ return isolate->heap()->ToBoolean(memcmp(str_vector.start() + start,
+ search_vector.start(),
+ search_string->length()) == 0);
+ }
+
+ FlatStringReader str_reader(isolate, str);
+ FlatStringReader search_reader(isolate, search_string);
for (int i = 0; i < search_string->length(); i++) {
if (str_reader.Get(start + i) != search_reader.Get(i)) {
@@ -796,21 +758,137 @@ BUILTIN(StringPrototypeIncludes) {
isolate, position,
Object::ToInteger(isolate, args.atOrUndefined(isolate, 2)));
- double index = std::max(position->Number(), 0.0);
- index = std::min(index, static_cast<double>(str->length()));
-
- int index_in_str = String::IndexOf(isolate, str, search_string,
- static_cast<uint32_t>(index));
+ uint32_t index = str->ToValidIndex(*position);
+ int index_in_str = String::IndexOf(isolate, str, search_string, index);
return *isolate->factory()->ToBoolean(index_in_str != -1);
}
-// ES6 section 21.1.3.8 String.prototype.indexOf ( searchString [ , position ] )
-BUILTIN(StringPrototypeIndexOf) {
- HandleScope handle_scope(isolate);
+// ES6 #sec-string.prototype.indexof
+TF_BUILTIN(StringPrototypeIndexOf, StringBuiltinsAssembler) {
+ Variable search_string(this, MachineRepresentation::kTagged),
+ position(this, MachineRepresentation::kTagged);
+ Label call_runtime(this), call_runtime_unchecked(this), argc_0(this),
+ no_argc_0(this), argc_1(this), no_argc_1(this), argc_2(this),
+ fast_path(this), return_minus_1(this);
+
+ Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ CodeStubArguments arguments(this, argc);
+ Node* receiver = arguments.GetReceiver();
+ // From now on use word-size argc value.
+ argc = arguments.GetLength();
+
+ GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &argc_0);
+ GotoIf(IntPtrEqual(argc, IntPtrConstant(1)), &argc_1);
+ Goto(&argc_2);
+ Bind(&argc_0);
+ {
+ Comment("0 Argument case");
+ Node* undefined = UndefinedConstant();
+ search_string.Bind(undefined);
+ position.Bind(undefined);
+ Goto(&call_runtime);
+ }
+ Bind(&argc_1);
+ {
+ Comment("1 Argument case");
+ search_string.Bind(arguments.AtIndex(0));
+ position.Bind(SmiConstant(0));
+ Goto(&fast_path);
+ }
+ Bind(&argc_2);
+ {
+ Comment("2 Argument case");
+ search_string.Bind(arguments.AtIndex(0));
+ position.Bind(arguments.AtIndex(1));
+ GotoUnless(TaggedIsSmi(position.value()), &call_runtime);
+ position.Bind(SmiMax(position.value(), SmiConstant(0)));
+ Goto(&fast_path);
+ }
- return String::IndexOf(isolate, args.receiver(),
- args.atOrUndefined(isolate, 1),
- args.atOrUndefined(isolate, 2));
+ Bind(&fast_path);
+ {
+ Comment("Fast Path");
+ Label zero_length_needle(this);
+ GotoIf(TaggedIsSmi(receiver), &call_runtime);
+ Node* needle = search_string.value();
+ GotoIf(TaggedIsSmi(needle), &call_runtime);
+ Node* instance_type = LoadInstanceType(receiver);
+ GotoUnless(IsStringInstanceType(instance_type), &call_runtime);
+
+ Node* needle_instance_type = LoadInstanceType(needle);
+ GotoUnless(IsStringInstanceType(needle_instance_type), &call_runtime);
+
+ // At this point we know that the receiver and the needle are Strings and
+ // that position is a Smi.
+
+ Node* needle_length = SmiUntag(LoadStringLength(needle));
+ // Use possibly faster runtime fallback for long search strings.
+ GotoIf(IntPtrLessThan(IntPtrConstant(1), needle_length),
+ &call_runtime_unchecked);
+ Node* string_length = SmiUntag(LoadStringLength(receiver));
+ Node* start_position = SmiUntag(position.value());
+
+ GotoIf(IntPtrEqual(IntPtrConstant(0), needle_length), &zero_length_needle);
+ // Check that the needle fits in the start position.
+ GotoUnless(IntPtrLessThanOrEqual(needle_length,
+ IntPtrSub(string_length, start_position)),
+ &return_minus_1);
+ // Only support one-byte strings on the fast path.
+ Label check_needle(this), continue_fast_path(this);
+ BranchIfSimpleOneByteStringInstanceType(instance_type, &check_needle,
+ &call_runtime_unchecked);
+ Bind(&check_needle);
+ BranchIfSimpleOneByteStringInstanceType(
+ needle_instance_type, &continue_fast_path, &call_runtime_unchecked);
+ Bind(&continue_fast_path);
+ {
+ Node* needle_byte =
+ ChangeInt32ToIntPtr(LoadOneByteChar(needle, IntPtrConstant(0)));
+ Node* start_address = OneByteCharAddress(receiver, start_position);
+ Node* search_length = IntPtrSub(string_length, start_position);
+ // Call out to the highly optimized memchr to perform the actual byte
+ // search.
+ Node* memchr =
+ ExternalConstant(ExternalReference::libc_memchr_function(isolate()));
+ Node* result_address =
+ CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::UintPtr(), memchr,
+ start_address, needle_byte, search_length);
+ GotoIf(WordEqual(result_address, IntPtrConstant(0)), &return_minus_1);
+ Node* result_index =
+ IntPtrAdd(IntPtrSub(result_address, start_address), start_position);
+ arguments.PopAndReturn(SmiTag(result_index));
+ }
+ Bind(&zero_length_needle);
+ {
+ Comment("0-length needle");
+ arguments.PopAndReturn(SmiTag(IntPtrMin(string_length, start_position)));
+ }
+ }
+
+ Bind(&return_minus_1);
+ { arguments.PopAndReturn(SmiConstant(-1)); }
+
+ Bind(&call_runtime);
+ {
+ Comment("Call Runtime");
+ Node* result = CallRuntime(Runtime::kStringIndexOf, context, receiver,
+ search_string.value(), position.value());
+ arguments.PopAndReturn(result);
+ }
+
+ Bind(&call_runtime_unchecked);
+ {
+ // Simplified version of the runtime call where the types of the arguments
+ // are already known due to type checks in this stub.
+ Comment("Call Runtime Unchecked");
+ Node* result =
+ CallRuntime(Runtime::kStringIndexOfUnchecked, context, receiver,
+ search_string.value(), position.value());
+ arguments.PopAndReturn(result);
+ }
}
// ES6 section 21.1.3.9
@@ -834,8 +912,8 @@ BUILTIN(StringPrototypeLocaleCompare) {
TO_THIS_STRING(str1, "String.prototype.localeCompare");
Handle<String> str2;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, str2, Object::ToString(isolate, args.at<Object>(1)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str2,
+ Object::ToString(isolate, args.at(1)));
if (str1.is_identical_to(str2)) return Smi::kZero; // Equal.
int str1_length = str1->length();
@@ -908,236 +986,220 @@ BUILTIN(StringPrototypeNormalize) {
}
// ES6 section B.2.3.1 String.prototype.substr ( start, length )
-void Builtins::Generate_StringPrototypeSubstr(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringPrototypeSubstr, CodeStubAssembler) {
+ Label out(this), handle_length(this);
- Label out(a), handle_length(a);
+ Variable var_start(this, MachineRepresentation::kTagged);
+ Variable var_length(this, MachineRepresentation::kTagged);
- Variable var_start(a, MachineRepresentation::kTagged);
- Variable var_length(a, MachineRepresentation::kTagged);
+ Node* const receiver = Parameter(0);
+ Node* const start = Parameter(1);
+ Node* const length = Parameter(2);
+ Node* const context = Parameter(5);
- Node* const receiver = a->Parameter(0);
- Node* const start = a->Parameter(1);
- Node* const length = a->Parameter(2);
- Node* const context = a->Parameter(5);
-
- Node* const zero = a->SmiConstant(Smi::kZero);
+ Node* const zero = SmiConstant(Smi::kZero);
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
- a->ToThisString(context, receiver, "String.prototype.substr");
+ ToThisString(context, receiver, "String.prototype.substr");
- Node* const string_length = a->LoadStringLength(string);
+ Node* const string_length = LoadStringLength(string);
// Conversions and bounds-checks for {start}.
{
Node* const start_int =
- a->ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
+ ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
- Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
- a->Branch(a->TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
+ Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
+ Branch(TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
- a->Bind(&if_issmi);
+ Bind(&if_issmi);
{
- Node* const length_plus_start = a->SmiAdd(string_length, start_int);
- var_start.Bind(a->Select(a->SmiLessThan(start_int, zero),
- a->SmiMax(length_plus_start, zero), start_int));
- a->Goto(&handle_length);
+ Node* const length_plus_start = SmiAdd(string_length, start_int);
+ var_start.Bind(Select(SmiLessThan(start_int, zero),
+ [&] { return SmiMax(length_plus_start, zero); },
+ [&] { return start_int; },
+ MachineRepresentation::kTagged));
+ Goto(&handle_length);
}
- a->Bind(&if_isheapnumber);
+ Bind(&if_isheapnumber);
{
// If {start} is a heap number, it is definitely out of bounds. If it is
// negative, {start} = max({string_length} + {start}),0) = 0'. If it is
// positive, set {start} to {string_length} which ultimately results in
// returning an empty string.
- Node* const float_zero = a->Float64Constant(0.);
- Node* const start_float = a->LoadHeapNumberValue(start_int);
- var_start.Bind(a->Select(a->Float64LessThan(start_float, float_zero),
- zero, string_length));
- a->Goto(&handle_length);
+ Node* const float_zero = Float64Constant(0.);
+ Node* const start_float = LoadHeapNumberValue(start_int);
+ var_start.Bind(SelectTaggedConstant(
+ Float64LessThan(start_float, float_zero), zero, string_length));
+ Goto(&handle_length);
}
}
// Conversions and bounds-checks for {length}.
- a->Bind(&handle_length);
+ Bind(&handle_length);
{
- Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
+ Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
// Default to {string_length} if {length} is undefined.
{
- Label if_isundefined(a, Label::kDeferred), if_isnotundefined(a);
- a->Branch(a->WordEqual(length, a->UndefinedConstant()), &if_isundefined,
- &if_isnotundefined);
+ Label if_isundefined(this, Label::kDeferred), if_isnotundefined(this);
+ Branch(WordEqual(length, UndefinedConstant()), &if_isundefined,
+ &if_isnotundefined);
- a->Bind(&if_isundefined);
+ Bind(&if_isundefined);
var_length.Bind(string_length);
- a->Goto(&if_issmi);
+ Goto(&if_issmi);
- a->Bind(&if_isnotundefined);
+ Bind(&if_isnotundefined);
var_length.Bind(
- a->ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
+ ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
}
- a->Branch(a->TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
+ Branch(TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
// Set {length} to min(max({length}, 0), {string_length} - {start}
- a->Bind(&if_issmi);
+ Bind(&if_issmi);
{
- Node* const positive_length = a->SmiMax(var_length.value(), zero);
+ Node* const positive_length = SmiMax(var_length.value(), zero);
- Node* const minimal_length = a->SmiSub(string_length, var_start.value());
- var_length.Bind(a->SmiMin(positive_length, minimal_length));
+ Node* const minimal_length = SmiSub(string_length, var_start.value());
+ var_length.Bind(SmiMin(positive_length, minimal_length));
- a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
- a->Return(a->EmptyStringConstant());
+ GotoUnless(SmiLessThanOrEqual(var_length.value(), zero), &out);
+ Return(EmptyStringConstant());
}
- a->Bind(&if_isheapnumber);
+ Bind(&if_isheapnumber);
{
// If {length} is a heap number, it is definitely out of bounds. There are
// two cases according to the spec: if it is negative, "" is returned; if
// it is positive, then length is set to {string_length} - {start}.
- CSA_ASSERT(a, a->WordEqual(a->LoadMap(var_length.value()),
- a->HeapNumberMapConstant()));
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(var_length.value())));
- Label if_isnegative(a), if_ispositive(a);
- Node* const float_zero = a->Float64Constant(0.);
- Node* const length_float = a->LoadHeapNumberValue(var_length.value());
- a->Branch(a->Float64LessThan(length_float, float_zero), &if_isnegative,
- &if_ispositive);
+ Label if_isnegative(this), if_ispositive(this);
+ Node* const float_zero = Float64Constant(0.);
+ Node* const length_float = LoadHeapNumberValue(var_length.value());
+ Branch(Float64LessThan(length_float, float_zero), &if_isnegative,
+ &if_ispositive);
- a->Bind(&if_isnegative);
- a->Return(a->EmptyStringConstant());
+ Bind(&if_isnegative);
+ Return(EmptyStringConstant());
- a->Bind(&if_ispositive);
+ Bind(&if_ispositive);
{
- var_length.Bind(a->SmiSub(string_length, var_start.value()));
- a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
- a->Return(a->EmptyStringConstant());
+ var_length.Bind(SmiSub(string_length, var_start.value()));
+ GotoUnless(SmiLessThanOrEqual(var_length.value(), zero), &out);
+ Return(EmptyStringConstant());
}
}
}
- a->Bind(&out);
+ Bind(&out);
{
- Node* const end = a->SmiAdd(var_start.value(), var_length.value());
- Node* const result = a->SubString(context, string, var_start.value(), end);
- a->Return(result);
+ Node* const end = SmiAdd(var_start.value(), var_length.value());
+ Node* const result = SubString(context, string, var_start.value(), end);
+ Return(result);
}
}
-namespace {
-
-compiler::Node* ToSmiBetweenZeroAnd(CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* value,
- compiler::Node* limit) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Label out(a);
- Variable var_result(a, MachineRepresentation::kTagged);
+compiler::Node* StringBuiltinsAssembler::ToSmiBetweenZeroAnd(Node* context,
+ Node* value,
+ Node* limit) {
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
Node* const value_int =
- a->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
+ this->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
- Label if_issmi(a), if_isnotsmi(a, Label::kDeferred);
- a->Branch(a->TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
+ Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
+ Branch(TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
- a->Bind(&if_issmi);
+ Bind(&if_issmi);
{
- Label if_isinbounds(a), if_isoutofbounds(a, Label::kDeferred);
- a->Branch(a->SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
+ Label if_isinbounds(this), if_isoutofbounds(this, Label::kDeferred);
+ Branch(SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
- a->Bind(&if_isinbounds);
+ Bind(&if_isinbounds);
{
var_result.Bind(value_int);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&if_isoutofbounds);
+ Bind(&if_isoutofbounds);
{
- Node* const zero = a->SmiConstant(Smi::kZero);
- var_result.Bind(a->Select(a->SmiLessThan(value_int, zero), zero, limit));
- a->Goto(&out);
+ Node* const zero = SmiConstant(Smi::kZero);
+ var_result.Bind(
+ SelectTaggedConstant(SmiLessThan(value_int, zero), zero, limit));
+ Goto(&out);
}
}
- a->Bind(&if_isnotsmi);
+ Bind(&if_isnotsmi);
{
// {value} is a heap number - in this case, it is definitely out of bounds.
- CSA_ASSERT(a,
- a->WordEqual(a->LoadMap(value_int), a->HeapNumberMapConstant()));
-
- Node* const float_zero = a->Float64Constant(0.);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
- Node* const value_float = a->LoadHeapNumberValue(value_int);
- var_result.Bind(a->Select(a->Float64LessThan(value_float, float_zero),
- smi_zero, limit));
- a->Goto(&out);
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value_int)));
+
+ Node* const float_zero = Float64Constant(0.);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const value_float = LoadHeapNumberValue(value_int);
+ var_result.Bind(SelectTaggedConstant(
+ Float64LessThan(value_float, float_zero), smi_zero, limit));
+ Goto(&out);
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
-} // namespace
-
// ES6 section 21.1.3.19 String.prototype.substring ( start, end )
-void Builtins::Generate_StringPrototypeSubstring(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
+ Label out(this);
- Label out(a);
+ Variable var_start(this, MachineRepresentation::kTagged);
+ Variable var_end(this, MachineRepresentation::kTagged);
- Variable var_start(a, MachineRepresentation::kTagged);
- Variable var_end(a, MachineRepresentation::kTagged);
-
- Node* const receiver = a->Parameter(0);
- Node* const start = a->Parameter(1);
- Node* const end = a->Parameter(2);
- Node* const context = a->Parameter(5);
+ Node* const receiver = Parameter(0);
+ Node* const start = Parameter(1);
+ Node* const end = Parameter(2);
+ Node* const context = Parameter(5);
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
- a->ToThisString(context, receiver, "String.prototype.substring");
+ ToThisString(context, receiver, "String.prototype.substring");
- Node* const length = a->LoadStringLength(string);
+ Node* const length = LoadStringLength(string);
// Conversion and bounds-checks for {start}.
- var_start.Bind(ToSmiBetweenZeroAnd(a, context, start, length));
+ var_start.Bind(ToSmiBetweenZeroAnd(context, start, length));
// Conversion and bounds-checks for {end}.
{
var_end.Bind(length);
- a->GotoIf(a->WordEqual(end, a->UndefinedConstant()), &out);
+ GotoIf(WordEqual(end, UndefinedConstant()), &out);
- var_end.Bind(ToSmiBetweenZeroAnd(a, context, end, length));
+ var_end.Bind(ToSmiBetweenZeroAnd(context, end, length));
- Label if_endislessthanstart(a);
- a->Branch(a->SmiLessThan(var_end.value(), var_start.value()),
- &if_endislessthanstart, &out);
+ Label if_endislessthanstart(this);
+ Branch(SmiLessThan(var_end.value(), var_start.value()),
+ &if_endislessthanstart, &out);
- a->Bind(&if_endislessthanstart);
+ Bind(&if_endislessthanstart);
{
Node* const tmp = var_end.value();
var_end.Bind(var_start.value());
var_start.Bind(tmp);
- a->Goto(&out);
+ Goto(&out);
}
}
- a->Bind(&out);
+ Bind(&out);
{
Node* result =
- a->SubString(context, string, var_start.value(), var_end.value());
- a->Return(result);
+ SubString(context, string, var_start.value(), var_end.value());
+ Return(result);
}
}
@@ -1170,9 +1232,7 @@ BUILTIN(StringPrototypeStartsWith) {
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
Object::ToInteger(isolate, position));
- double index = std::max(position->Number(), 0.0);
- index = std::min(index, static_cast<double>(str->length()));
- start = static_cast<uint32_t>(index);
+ start = str->ToValidIndex(*position);
}
if (start + search_string->length() > str->length()) {
@@ -1191,15 +1251,13 @@ BUILTIN(StringPrototypeStartsWith) {
}
// ES6 section 21.1.3.25 String.prototype.toString ()
-void Builtins::Generate_StringPrototypeToString(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+TF_BUILTIN(StringPrototypeToString, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
- Node* result = assembler->ToThisValue(
- context, receiver, PrimitiveType::kString, "String.prototype.toString");
- assembler->Return(result);
+ Node* result = ToThisValue(context, receiver, PrimitiveType::kString,
+ "String.prototype.toString");
+ Return(result);
}
// ES6 section 21.1.3.27 String.prototype.trim ()
@@ -1224,103 +1282,82 @@ BUILTIN(StringPrototypeTrimRight) {
}
// ES6 section 21.1.3.28 String.prototype.valueOf ( )
-void Builtins::Generate_StringPrototypeValueOf(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(StringPrototypeValueOf, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
-
- Node* result = assembler->ToThisValue(
- context, receiver, PrimitiveType::kString, "String.prototype.valueOf");
- assembler->Return(result);
+ Node* result = ToThisValue(context, receiver, PrimitiveType::kString,
+ "String.prototype.valueOf");
+ Return(result);
}
-void Builtins::Generate_StringPrototypeIterator(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
-
- Node* string = assembler->ToThisString(context, receiver,
- "String.prototype[Symbol.iterator]");
-
- Node* native_context = assembler->LoadNativeContext(context);
- Node* map = assembler->LoadFixedArrayElement(
- native_context,
- assembler->IntPtrConstant(Context::STRING_ITERATOR_MAP_INDEX), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- Node* iterator = assembler->Allocate(JSStringIterator::kSize);
- assembler->StoreMapNoWriteBarrier(iterator, map);
- assembler->StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(
- iterator, JSStringIterator::kStringOffset, string);
- Node* index = assembler->SmiConstant(Smi::kZero);
- assembler->StoreObjectFieldNoWriteBarrier(
- iterator, JSStringIterator::kNextIndexOffset, index);
- assembler->Return(iterator);
-}
+TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
-namespace {
+ Node* string =
+ ToThisString(context, receiver, "String.prototype[Symbol.iterator]");
+
+ Node* native_context = LoadNativeContext(context);
+ Node* map =
+ LoadContextElement(native_context, Context::STRING_ITERATOR_MAP_INDEX);
+ Node* iterator = Allocate(JSStringIterator::kSize);
+ StoreMapNoWriteBarrier(iterator, map);
+ StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kStringOffset,
+ string);
+ Node* index = SmiConstant(Smi::kZero);
+ StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
+ index);
+ Return(iterator);
+}
// Return the |word32| codepoint at {index}. Supports SeqStrings and
// ExternalStrings.
-compiler::Node* LoadSurrogatePairInternal(CodeStubAssembler* assembler,
- compiler::Node* string,
- compiler::Node* length,
- compiler::Node* index,
- UnicodeEncoding encoding) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
- Label handle_surrogate_pair(assembler), return_result(assembler);
- Variable var_result(assembler, MachineRepresentation::kWord32);
- Variable var_trail(assembler, MachineRepresentation::kWord16);
- var_result.Bind(assembler->StringCharCodeAt(string, index));
- var_trail.Bind(assembler->Int32Constant(0));
-
- assembler->GotoIf(assembler->Word32NotEqual(
- assembler->Word32And(var_result.value(),
- assembler->Int32Constant(0xFC00)),
- assembler->Int32Constant(0xD800)),
- &return_result);
- Node* next_index =
- assembler->SmiAdd(index, assembler->SmiConstant(Smi::FromInt(1)));
-
- assembler->GotoUnless(assembler->SmiLessThan(next_index, length),
- &return_result);
- var_trail.Bind(assembler->StringCharCodeAt(string, next_index));
- assembler->Branch(assembler->Word32Equal(
- assembler->Word32And(var_trail.value(),
- assembler->Int32Constant(0xFC00)),
- assembler->Int32Constant(0xDC00)),
- &handle_surrogate_pair, &return_result);
-
- assembler->Bind(&handle_surrogate_pair);
+compiler::Node* StringBuiltinsAssembler::LoadSurrogatePairAt(
+ compiler::Node* string, compiler::Node* length, compiler::Node* index,
+ UnicodeEncoding encoding) {
+ Label handle_surrogate_pair(this), return_result(this);
+ Variable var_result(this, MachineRepresentation::kWord32);
+ Variable var_trail(this, MachineRepresentation::kWord32);
+ var_result.Bind(StringCharCodeAt(string, index));
+ var_trail.Bind(Int32Constant(0));
+
+ GotoIf(Word32NotEqual(Word32And(var_result.value(), Int32Constant(0xFC00)),
+ Int32Constant(0xD800)),
+ &return_result);
+ Node* next_index = SmiAdd(index, SmiConstant(Smi::FromInt(1)));
+
+ GotoUnless(SmiLessThan(next_index, length), &return_result);
+ var_trail.Bind(StringCharCodeAt(string, next_index));
+ Branch(Word32Equal(Word32And(var_trail.value(), Int32Constant(0xFC00)),
+ Int32Constant(0xDC00)),
+ &handle_surrogate_pair, &return_result);
+
+ Bind(&handle_surrogate_pair);
{
Node* lead = var_result.value();
Node* trail = var_trail.value();
// Check that this path is only taken if a surrogate pair is found
- CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
- lead, assembler->Int32Constant(0xD800)));
- CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
- lead, assembler->Int32Constant(0xDC00)));
- CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
- trail, assembler->Int32Constant(0xDC00)));
- CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
- trail, assembler->Int32Constant(0xE000)));
+ CSA_SLOW_ASSERT(this,
+ Uint32GreaterThanOrEqual(lead, Int32Constant(0xD800)));
+ CSA_SLOW_ASSERT(this, Uint32LessThan(lead, Int32Constant(0xDC00)));
+ CSA_SLOW_ASSERT(this,
+ Uint32GreaterThanOrEqual(trail, Int32Constant(0xDC00)));
+ CSA_SLOW_ASSERT(this, Uint32LessThan(trail, Int32Constant(0xE000)));
switch (encoding) {
case UnicodeEncoding::UTF16:
- var_result.Bind(assembler->WordOr(
+ var_result.Bind(Word32Or(
// Need to swap the order for big-endian platforms
#if V8_TARGET_BIG_ENDIAN
- assembler->WordShl(lead, assembler->Int32Constant(16)), trail));
+ Word32Shl(lead, Int32Constant(16)), trail));
#else
- assembler->WordShl(trail, assembler->Int32Constant(16)), lead));
+ Word32Shl(trail, Int32Constant(16)), lead));
#endif
break;
@@ -1328,107 +1365,85 @@ compiler::Node* LoadSurrogatePairInternal(CodeStubAssembler* assembler,
// Convert UTF16 surrogate pair into |word32| code point, encoded as
// UTF32.
Node* surrogate_offset =
- assembler->Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
+ Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
// (lead << 10) + trail + SURROGATE_OFFSET
- var_result.Bind(assembler->Int32Add(
- assembler->WordShl(lead, assembler->Int32Constant(10)),
- assembler->Int32Add(trail, surrogate_offset)));
+ var_result.Bind(Int32Add(WordShl(lead, Int32Constant(10)),
+ Int32Add(trail, surrogate_offset)));
break;
}
}
- assembler->Goto(&return_result);
+ Goto(&return_result);
}
- assembler->Bind(&return_result);
+ Bind(&return_result);
return var_result.value();
}
-compiler::Node* LoadSurrogatePairAt(CodeStubAssembler* assembler,
- compiler::Node* string,
- compiler::Node* length,
- compiler::Node* index) {
- return LoadSurrogatePairInternal(assembler, string, length, index,
- UnicodeEncoding::UTF16);
-}
-
-} // namespace
-
-void Builtins::Generate_StringIteratorPrototypeNext(
- CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
+ Variable var_value(this, MachineRepresentation::kTagged);
+ Variable var_done(this, MachineRepresentation::kTagged);
- Variable var_value(assembler, MachineRepresentation::kTagged);
- Variable var_done(assembler, MachineRepresentation::kTagged);
+ var_value.Bind(UndefinedConstant());
+ var_done.Bind(BooleanConstant(true));
- var_value.Bind(assembler->UndefinedConstant());
- var_done.Bind(assembler->BooleanConstant(true));
+ Label throw_bad_receiver(this), next_codepoint(this), return_result(this);
- Label throw_bad_receiver(assembler), next_codepoint(assembler),
- return_result(assembler);
+ Node* iterator = Parameter(0);
+ Node* context = Parameter(3);
- Node* iterator = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
+ GotoUnless(Word32Equal(LoadInstanceType(iterator),
+ Int32Constant(JS_STRING_ITERATOR_TYPE)),
+ &throw_bad_receiver);
- assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadInstanceType(iterator),
- assembler->Int32Constant(JS_STRING_ITERATOR_TYPE)),
- &throw_bad_receiver);
-
- Node* string =
- assembler->LoadObjectField(iterator, JSStringIterator::kStringOffset);
+ Node* string = LoadObjectField(iterator, JSStringIterator::kStringOffset);
Node* position =
- assembler->LoadObjectField(iterator, JSStringIterator::kNextIndexOffset);
- Node* length = assembler->LoadObjectField(string, String::kLengthOffset);
+ LoadObjectField(iterator, JSStringIterator::kNextIndexOffset);
+ Node* length = LoadObjectField(string, String::kLengthOffset);
- assembler->Branch(assembler->SmiLessThan(position, length), &next_codepoint,
- &return_result);
+ Branch(SmiLessThan(position, length), &next_codepoint, &return_result);
- assembler->Bind(&next_codepoint);
+ Bind(&next_codepoint);
{
- Node* ch = LoadSurrogatePairAt(assembler, string, length, position);
- Node* value = assembler->StringFromCodePoint(ch, UnicodeEncoding::UTF16);
+ UnicodeEncoding encoding = UnicodeEncoding::UTF16;
+ Node* ch = LoadSurrogatePairAt(string, length, position, encoding);
+ Node* value = StringFromCodePoint(ch, encoding);
var_value.Bind(value);
- Node* length = assembler->LoadObjectField(value, String::kLengthOffset);
- assembler->StoreObjectFieldNoWriteBarrier(
- iterator, JSStringIterator::kNextIndexOffset,
- assembler->SmiAdd(position, length));
- var_done.Bind(assembler->BooleanConstant(false));
- assembler->Goto(&return_result);
+ Node* length = LoadObjectField(value, String::kLengthOffset);
+ StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
+ SmiAdd(position, length));
+ var_done.Bind(BooleanConstant(false));
+ Goto(&return_result);
}
- assembler->Bind(&return_result);
+ Bind(&return_result);
{
- Node* native_context = assembler->LoadNativeContext(context);
- Node* map = assembler->LoadFixedArrayElement(
- native_context,
- assembler->IntPtrConstant(Context::ITERATOR_RESULT_MAP_INDEX), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- Node* result = assembler->Allocate(JSIteratorResult::kSize);
- assembler->StoreMapNoWriteBarrier(result, map);
- assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSIteratorResult::kValueOffset, var_value.value());
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSIteratorResult::kDoneOffset, var_done.value());
- assembler->Return(result);
+ Node* native_context = LoadNativeContext(context);
+ Node* map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ Node* result = Allocate(JSIteratorResult::kSize);
+ StoreMapNoWriteBarrier(result, map);
+ StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset,
+ var_value.value());
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset,
+ var_done.value());
+ Return(result);
}
- assembler->Bind(&throw_bad_receiver);
+ Bind(&throw_bad_receiver);
{
// The {receiver} is not a valid JSGeneratorObject.
- Node* result = assembler->CallRuntime(
- Runtime::kThrowIncompatibleMethodReceiver, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
- "String Iterator.prototype.next", TENURED)),
- iterator);
- assembler->Return(result); // Never reached.
+ Node* result =
+ CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+ HeapConstant(factory()->NewStringFromAsciiChecked(
+ "String Iterator.prototype.next", TENURED)),
+ iterator);
+ Return(result); // Never reached.
}
}
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 8dd8a1fa27..6067edba6d 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -32,44 +33,81 @@ BUILTIN(SymbolConstructor_ConstructStub) {
isolate->factory()->Symbol_string()));
}
+// ES6 section 19.4.2.1 Symbol.for.
+BUILTIN(SymbolFor) {
+ HandleScope scope(isolate);
+ Handle<Object> key_obj = args.atOrUndefined(isolate, 1);
+ Handle<String> key;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToString(isolate, key_obj));
+ return *isolate->SymbolFor(Heap::kPublicSymbolTableRootIndex, key, false);
+}
+
+// ES6 section 19.4.2.5 Symbol.keyFor.
+BUILTIN(SymbolKeyFor) {
+ HandleScope scope(isolate);
+ Handle<Object> obj = args.atOrUndefined(isolate, 1);
+ if (!obj->IsSymbol()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kSymbolKeyFor, obj));
+ }
+ Handle<Symbol> symbol = Handle<Symbol>::cast(obj);
+ DisallowHeapAllocation no_gc;
+ Object* result;
+ if (symbol->is_public()) {
+ result = symbol->name();
+ DCHECK(result->IsString());
+ } else {
+ result = isolate->heap()->undefined_value();
+ }
+ DCHECK_EQ(isolate->heap()->public_symbol_table()->SlowReverseLookup(*symbol),
+ result);
+ return result;
+}
+
// ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint )
void Builtins::Generate_SymbolPrototypeToPrimitive(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(4);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(4);
Node* result =
- assembler->ToThisValue(context, receiver, PrimitiveType::kSymbol,
- "Symbol.prototype [ @@toPrimitive ]");
- assembler->Return(result);
+ assembler.ToThisValue(context, receiver, PrimitiveType::kSymbol,
+ "Symbol.prototype [ @@toPrimitive ]");
+ assembler.Return(result);
}
// ES6 section 19.4.3.2 Symbol.prototype.toString ( )
-void Builtins::Generate_SymbolPrototypeToString(CodeStubAssembler* assembler) {
+void Builtins::Generate_SymbolPrototypeToString(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Node* value = assembler->ToThisValue(
- context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.toString");
+ Node* value = assembler.ToThisValue(context, receiver, PrimitiveType::kSymbol,
+ "Symbol.prototype.toString");
Node* result =
- assembler->CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
- assembler->Return(result);
+ assembler.CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
+ assembler.Return(result);
}
// ES6 section 19.4.3.3 Symbol.prototype.valueOf ( )
-void Builtins::Generate_SymbolPrototypeValueOf(CodeStubAssembler* assembler) {
+void Builtins::Generate_SymbolPrototypeValueOf(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Node* result = assembler->ToThisValue(
+ Node* result = assembler.ToThisValue(
context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.valueOf");
- assembler->Return(result);
+ assembler.Return(result);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index 94173fa613..ab1ebbc69e 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -20,48 +21,48 @@ BUILTIN(TypedArrayPrototypeBuffer) {
namespace {
-void Generate_TypedArrayProtoypeGetter(CodeStubAssembler* assembler,
- const char* method_name,
- int object_offset) {
+void Generate_TypedArrayPrototypeGetter(compiler::CodeAssemblerState* state,
+ const char* method_name,
+ int object_offset) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
// Check if the {receiver} is actually a JSTypedArray.
- Label if_receiverisincompatible(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->TaggedIsSmi(receiver),
- &if_receiverisincompatible);
- Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
- assembler->GotoUnless(
- assembler->Word32Equal(receiver_instance_type,
- assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ Label if_receiverisincompatible(&assembler, Label::kDeferred);
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &if_receiverisincompatible);
+ Node* receiver_instance_type = assembler.LoadInstanceType(receiver);
+ assembler.GotoUnless(
+ assembler.Word32Equal(receiver_instance_type,
+ assembler.Int32Constant(JS_TYPED_ARRAY_TYPE)),
&if_receiverisincompatible);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
- assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
- Label if_receiverisneutered(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
- &if_receiverisneutered);
- assembler->Return(assembler->LoadObjectField(receiver, object_offset));
+ assembler.LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+ Label if_receiverisneutered(&assembler, Label::kDeferred);
+ assembler.GotoIf(assembler.IsDetachedBuffer(receiver_buffer),
+ &if_receiverisneutered);
+ assembler.Return(assembler.LoadObjectField(receiver, object_offset));
- assembler->Bind(&if_receiverisneutered);
+ assembler.Bind(&if_receiverisneutered);
{
// The {receiver}s buffer was neutered, default to zero.
- assembler->Return(assembler->SmiConstant(0));
+ assembler.Return(assembler.SmiConstant(0));
}
- assembler->Bind(&if_receiverisincompatible);
+ assembler.Bind(&if_receiverisincompatible);
{
// The {receiver} is not a valid JSGeneratorObject.
- Node* result = assembler->CallRuntime(
+ Node* result = assembler.CallRuntime(
Runtime::kThrowIncompatibleMethodReceiver, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+ assembler.HeapConstant(assembler.factory()->NewStringFromAsciiChecked(
method_name, TENURED)),
receiver);
- assembler->Return(result); // Never reached.
+ assembler.Return(result); // Never reached.
}
}
@@ -69,100 +70,101 @@ void Generate_TypedArrayProtoypeGetter(CodeStubAssembler* assembler,
// ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength
void Builtins::Generate_TypedArrayPrototypeByteLength(
- CodeStubAssembler* assembler) {
- Generate_TypedArrayProtoypeGetter(assembler,
- "get TypedArray.prototype.byteLength",
- JSTypedArray::kByteLengthOffset);
+ compiler::CodeAssemblerState* state) {
+ Generate_TypedArrayPrototypeGetter(state,
+ "get TypedArray.prototype.byteLength",
+ JSTypedArray::kByteLengthOffset);
}
// ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset
void Builtins::Generate_TypedArrayPrototypeByteOffset(
- CodeStubAssembler* assembler) {
- Generate_TypedArrayProtoypeGetter(assembler,
- "get TypedArray.prototype.byteOffset",
- JSTypedArray::kByteOffsetOffset);
+ compiler::CodeAssemblerState* state) {
+ Generate_TypedArrayPrototypeGetter(state,
+ "get TypedArray.prototype.byteOffset",
+ JSTypedArray::kByteOffsetOffset);
}
// ES6 section 22.2.3.18 get %TypedArray%.prototype.length
void Builtins::Generate_TypedArrayPrototypeLength(
- CodeStubAssembler* assembler) {
- Generate_TypedArrayProtoypeGetter(assembler,
- "get TypedArray.prototype.length",
- JSTypedArray::kLengthOffset);
+ compiler::CodeAssemblerState* state) {
+ Generate_TypedArrayPrototypeGetter(state, "get TypedArray.prototype.length",
+ JSTypedArray::kLengthOffset);
}
namespace {
template <IterationKind kIterationKind>
-void Generate_TypedArrayPrototypeIterationMethod(CodeStubAssembler* assembler,
- const char* method_name) {
+void Generate_TypedArrayPrototypeIterationMethod(
+ compiler::CodeAssemblerState* state, const char* method_name) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Label throw_bad_receiver(assembler, Label::kDeferred);
- Label throw_typeerror(assembler, Label::kDeferred);
+ Label throw_bad_receiver(&assembler, Label::kDeferred);
+ Label throw_typeerror(&assembler, Label::kDeferred);
- assembler->GotoIf(assembler->TaggedIsSmi(receiver), &throw_bad_receiver);
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &throw_bad_receiver);
- Node* map = assembler->LoadMap(receiver);
- Node* instance_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(
- assembler->Word32NotEqual(instance_type,
- assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ Node* map = assembler.LoadMap(receiver);
+ Node* instance_type = assembler.LoadMapInstanceType(map);
+ assembler.GotoIf(
+ assembler.Word32NotEqual(instance_type,
+ assembler.Int32Constant(JS_TYPED_ARRAY_TYPE)),
&throw_bad_receiver);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
- assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
- Label if_receiverisneutered(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
- &if_receiverisneutered);
+ assembler.LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+ Label if_receiverisneutered(&assembler, Label::kDeferred);
+ assembler.GotoIf(assembler.IsDetachedBuffer(receiver_buffer),
+ &if_receiverisneutered);
- assembler->Return(assembler->CreateArrayIterator(receiver, map, instance_type,
- context, kIterationKind));
+ assembler.Return(assembler.CreateArrayIterator(receiver, map, instance_type,
+ context, kIterationKind));
- Variable var_message(assembler, MachineRepresentation::kTagged);
- assembler->Bind(&throw_bad_receiver);
+ Variable var_message(&assembler, MachineRepresentation::kTagged);
+ assembler.Bind(&throw_bad_receiver);
var_message.Bind(
- assembler->SmiConstant(Smi::FromInt(MessageTemplate::kNotTypedArray)));
- assembler->Goto(&throw_typeerror);
+ assembler.SmiConstant(Smi::FromInt(MessageTemplate::kNotTypedArray)));
+ assembler.Goto(&throw_typeerror);
- assembler->Bind(&if_receiverisneutered);
- var_message.Bind(assembler->SmiConstant(
- Smi::FromInt(MessageTemplate::kDetachedOperation)));
- assembler->Goto(&throw_typeerror);
+ assembler.Bind(&if_receiverisneutered);
+ var_message.Bind(
+ assembler.SmiConstant(Smi::FromInt(MessageTemplate::kDetachedOperation)));
+ assembler.Goto(&throw_typeerror);
- assembler->Bind(&throw_typeerror);
+ assembler.Bind(&throw_typeerror);
{
- Node* arg1 = assembler->HeapConstant(
- assembler->isolate()->factory()->NewStringFromAsciiChecked(method_name,
- TENURED));
- Node* result = assembler->CallRuntime(Runtime::kThrowTypeError, context,
- var_message.value(), arg1);
- assembler->Return(result);
+ Node* arg1 = assembler.HeapConstant(
+ assembler.isolate()->factory()->NewStringFromAsciiChecked(method_name,
+ TENURED));
+ Node* result = assembler.CallRuntime(Runtime::kThrowTypeError, context,
+ var_message.value(), arg1);
+ assembler.Return(result);
}
}
} // namespace
void Builtins::Generate_TypedArrayPrototypeValues(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
Generate_TypedArrayPrototypeIterationMethod<IterationKind::kValues>(
- assembler, "%TypedArray%.prototype.values()");
+ state, "%TypedArray%.prototype.values()");
}
void Builtins::Generate_TypedArrayPrototypeEntries(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
Generate_TypedArrayPrototypeIterationMethod<IterationKind::kEntries>(
- assembler, "%TypedArray%.prototype.entries()");
+ state, "%TypedArray%.prototype.entries()");
}
-void Builtins::Generate_TypedArrayPrototypeKeys(CodeStubAssembler* assembler) {
+void Builtins::Generate_TypedArrayPrototypeKeys(
+ compiler::CodeAssemblerState* state) {
Generate_TypedArrayPrototypeIterationMethod<IterationKind::kKeys>(
- assembler, "%TypedArray%.prototype.keys()");
+ state, "%TypedArray%.prototype.keys()");
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 6378fdfad5..be689ac038 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -8,11 +8,14 @@
#include "src/arguments.h"
#include "src/base/logging.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
+namespace compiler {
+class CodeAssemblerState;
+}
+
// Arguments object passed to C++ builtins.
class BuiltinArguments : public Arguments {
public:
@@ -27,7 +30,7 @@ class BuiltinArguments : public Arguments {
return Arguments::operator[](index);
}
- template <class S>
+ template <class S = Object>
Handle<S> at(int index) {
DCHECK_LT(index, length());
return Arguments::at<S>(index);
@@ -102,6 +105,31 @@ class BuiltinArguments : public Arguments {
Isolate* isolate)
// ----------------------------------------------------------------------------
+// Support macro for defining builtins with Turbofan.
+// ----------------------------------------------------------------------------
+//
+// A builtin function is defined by writing:
+//
+// TF_BUILTIN(name, code_assember_base_class) {
+// ...
+// }
+//
+// In the body of the builtin function the arguments can be accessed
+// as "Parameter(n)".
+#define TF_BUILTIN(Name, AssemblerBase) \
+ class Name##Assembler : public AssemblerBase { \
+ public: \
+ explicit Name##Assembler(compiler::CodeAssemblerState* state) \
+ : AssemblerBase(state) {} \
+ void Generate##NameImpl(); \
+ }; \
+ void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
+ Name##Assembler assembler(state); \
+ assembler.Generate##NameImpl(); \
+ } \
+ void Name##Assembler::Generate##NameImpl()
+
+// ----------------------------------------------------------------------------
#define CHECK_RECEIVER(Type, name, method) \
if (!args.receiver()->Is##Type()) { \
@@ -117,8 +145,7 @@ class BuiltinArguments : public Arguments {
// or converts the receiver to a String otherwise and assigns it to a new var
// with the given {name}.
#define TO_THIS_STRING(name, method) \
- if (args.receiver()->IsNull(isolate) || \
- args.receiver()->IsUndefined(isolate)) { \
+ if (args.receiver()->IsNullOrUndefined(isolate)) { \
THROW_NEW_ERROR_RETURN_FAILURE( \
isolate, \
NewTypeError(MessageTemplate::kCalledOnNullOrUndefined, \
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index ec981fe01e..5997eb3550 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins.h"
#include "src/code-events.h"
-#include "src/code-stub-assembler.h"
+#include "src/compiler/code-assembler.h"
#include "src/ic/ic-state.h"
#include "src/interface-descriptors.h"
#include "src/isolate.h"
@@ -42,7 +42,7 @@ void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
}
typedef void (*MacroAssemblerGenerator)(MacroAssembler*);
-typedef void (*CodeAssemblerGenerator)(CodeStubAssembler*);
+typedef void (*CodeAssemblerGenerator)(compiler::CodeAssemblerState*);
Code* BuildWithMacroAssembler(Isolate* isolate,
MacroAssemblerGenerator generator,
@@ -86,9 +86,10 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
Zone zone(isolate->allocator(), ZONE_NAME);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
- CodeStubAssembler assembler(isolate, &zone, argc_with_recv, flags, name);
- generator(&assembler);
- Handle<Code> code = assembler.GenerateCode();
+ compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv, flags,
+ name);
+ generator(&state);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
@@ -105,9 +106,9 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
- CodeStubAssembler assembler(isolate, &zone, descriptor, flags, name);
- generator(&assembler);
- Handle<Code> code = assembler.GenerateCode();
+ compiler::CodeAssemblerState state(isolate, &zone, descriptor, flags, name);
+ generator(&state);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index a6b126d106..a21b272f20 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -29,9 +29,7 @@ namespace internal {
V(NoAge) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
-#define DECLARE_CODE_AGE_BUILTIN(C, V) \
- V(Make##C##CodeYoungAgainOddMarking) \
- V(Make##C##CodeYoungAgainEvenMarking)
+#define DECLARE_CODE_AGE_BUILTIN(C, V) V(Make##C##CodeYoungAgain)
// CPP: Builtin in C++. Entered via BUILTIN_EXIT frame.
// Args: name
@@ -47,658 +45,734 @@ namespace internal {
// Args: name, code kind, extra IC state
// DBG: Builtin in platform-dependent assembly, used by the debugger.
// Args: name
-#define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG) \
- ASM(Abort) \
- /* Code aging */ \
- CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM) \
- \
- TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion) \
- \
- /* Calls */ \
- ASM(ArgumentsAdaptorTrampoline) \
- /* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
- ASM(CallFunction_ReceiverIsNullOrUndefined) \
- ASM(CallFunction_ReceiverIsNotNullOrUndefined) \
- ASM(CallFunction_ReceiverIsAny) \
- ASM(TailCallFunction_ReceiverIsNullOrUndefined) \
- ASM(TailCallFunction_ReceiverIsNotNullOrUndefined) \
- ASM(TailCallFunction_ReceiverIsAny) \
- /* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
- ASM(CallBoundFunction) \
- ASM(TailCallBoundFunction) \
- /* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
- ASM(Call_ReceiverIsNullOrUndefined) \
- ASM(Call_ReceiverIsNotNullOrUndefined) \
- ASM(Call_ReceiverIsAny) \
- ASM(TailCall_ReceiverIsNullOrUndefined) \
- ASM(TailCall_ReceiverIsNotNullOrUndefined) \
- ASM(TailCall_ReceiverIsAny) \
- \
- /* Construct */ \
- /* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */ \
- ASM(ConstructFunction) \
- /* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */ \
- ASM(ConstructBoundFunction) \
- ASM(ConstructedNonConstructable) \
- /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \
- ASM(ConstructProxy) \
- /* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
- ASM(Construct) \
- ASM(JSConstructStubApi) \
- ASM(JSConstructStubGeneric) \
- ASM(JSBuiltinsConstructStub) \
- ASM(JSBuiltinsConstructStubForDerived) \
- \
- /* Apply and entries */ \
- ASM(Apply) \
- ASM(JSEntryTrampoline) \
- ASM(JSConstructEntryTrampoline) \
- ASM(ResumeGeneratorTrampoline) \
- \
- /* Stack and interrupt check */ \
- ASM(InterruptCheck) \
- ASM(StackCheck) \
- \
- /* String helpers */ \
- TFS(StringEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- \
- /* Interpreter */ \
- ASM(InterpreterEntryTrampoline) \
- ASM(InterpreterPushArgsAndCall) \
- ASM(InterpreterPushArgsAndCallFunction) \
- ASM(InterpreterPushArgsAndTailCall) \
- ASM(InterpreterPushArgsAndTailCallFunction) \
- ASM(InterpreterPushArgsAndConstruct) \
- ASM(InterpreterPushArgsAndConstructFunction) \
- ASM(InterpreterPushArgsAndConstructArray) \
- ASM(InterpreterEnterBytecodeAdvance) \
- ASM(InterpreterEnterBytecodeDispatch) \
- ASM(InterpreterOnStackReplacement) \
- \
- /* Code life-cycle */ \
- ASM(CompileLazy) \
- ASM(CompileBaseline) \
- ASM(CompileOptimized) \
- ASM(CompileOptimizedConcurrent) \
- ASM(InOptimizationQueue) \
- ASM(InstantiateAsmJs) \
- ASM(MarkCodeAsToBeExecutedOnce) \
- ASM(MarkCodeAsExecutedOnce) \
- ASM(MarkCodeAsExecutedTwice) \
- ASM(NotifyDeoptimized) \
- ASM(NotifySoftDeoptimized) \
- ASM(NotifyLazyDeoptimized) \
- ASM(NotifyStubFailure) \
- ASM(NotifyStubFailureSaveDoubles) \
- ASM(OnStackReplacement) \
- \
- /* API callback handling */ \
- API(HandleApiCall) \
- API(HandleApiCallAsFunction) \
- API(HandleApiCallAsConstructor) \
- ASM(HandleFastApiCall) \
- \
- /* Adapters for Turbofan into runtime */ \
- ASM(AllocateInNewSpace) \
- ASM(AllocateInOldSpace) \
- \
- /* TurboFan support builtins */ \
- TFS(CopyFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
- CopyFastSmiOrObjectElements) \
- TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements) \
- TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
- GrowArrayElements) \
- \
- /* Debugger */ \
- DBG(FrameDropper_LiveEdit) \
- DBG(Return_DebugBreak) \
- DBG(Slot_DebugBreak) \
- \
- /* Type conversions */ \
- TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(NonPrimitiveToPrimitive_Default, BUILTIN, kNoExtraICState, \
- TypeConversion) \
- TFS(NonPrimitiveToPrimitive_Number, BUILTIN, kNoExtraICState, \
- TypeConversion) \
- TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState, \
- TypeConversion) \
- TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(Typeof, BUILTIN, kNoExtraICState, Typeof) \
- \
- /* Handlers */ \
- TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState, \
- LoadWithVector) \
- ASM(KeyedLoadIC_Miss) \
- ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC) \
- ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState) \
- ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, \
- StoreICState::kStrictModeState) \
- TFS(KeyedStoreIC_Megamorphic_TF, KEYED_STORE_IC, kNoExtraICState, \
- StoreWithVector) \
- TFS(KeyedStoreIC_Megamorphic_Strict_TF, KEYED_STORE_IC, \
- StoreICState::kStrictModeState, StoreWithVector) \
- ASM(KeyedStoreIC_Miss) \
- ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC) \
- TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
- TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
- ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState) \
- TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
- ASH(LoadIC_Normal, HANDLER, Code::LOAD_IC) \
- TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
- TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
- ASH(StoreIC_Normal, HANDLER, Code::STORE_IC) \
- ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState) \
- TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector) \
- TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector) \
- \
- /* Built-in functions for Javascript */ \
- /* Special internal builtins */ \
- CPP(EmptyFunction) \
- CPP(Illegal) \
- CPP(RestrictedFunctionPropertiesThrower) \
- CPP(RestrictedStrictArgumentsPropertiesThrower) \
- CPP(UnsupportedThrower) \
- \
- /* Array */ \
- ASM(ArrayCode) \
- ASM(InternalArrayCode) \
- CPP(ArrayConcat) \
- /* ES6 section 22.1.2.2 Array.isArray */ \
- TFJ(ArrayIsArray, 1) \
- /* ES7 #sec-array.prototype.includes */ \
- TFJ(ArrayIncludes, 2) \
- TFJ(ArrayIndexOf, 2) \
- CPP(ArrayPop) \
- CPP(ArrayPush) \
- CPP(ArrayShift) \
- CPP(ArraySlice) \
- CPP(ArraySplice) \
- CPP(ArrayUnshift) \
- /* ES6 #sec-array.prototype.entries */ \
- TFJ(ArrayPrototypeEntries, 0) \
- /* ES6 #sec-array.prototype.keys */ \
- TFJ(ArrayPrototypeKeys, 0) \
- /* ES6 #sec-array.prototype.values */ \
- TFJ(ArrayPrototypeValues, 0) \
- /* ES6 #sec-%arrayiteratorprototype%.next */ \
- TFJ(ArrayIteratorPrototypeNext, 0) \
- \
- /* ArrayBuffer */ \
- CPP(ArrayBufferConstructor) \
- CPP(ArrayBufferConstructor_ConstructStub) \
- CPP(ArrayBufferPrototypeGetByteLength) \
- CPP(ArrayBufferIsView) \
- \
- /* Boolean */ \
- CPP(BooleanConstructor) \
- CPP(BooleanConstructor_ConstructStub) \
- /* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */ \
- TFJ(BooleanPrototypeToString, 0) \
- /* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */ \
- TFJ(BooleanPrototypeValueOf, 0) \
- \
- /* CallSite */ \
- CPP(CallSitePrototypeGetColumnNumber) \
- CPP(CallSitePrototypeGetEvalOrigin) \
- CPP(CallSitePrototypeGetFileName) \
- CPP(CallSitePrototypeGetFunction) \
- CPP(CallSitePrototypeGetFunctionName) \
- CPP(CallSitePrototypeGetLineNumber) \
- CPP(CallSitePrototypeGetMethodName) \
- CPP(CallSitePrototypeGetPosition) \
- CPP(CallSitePrototypeGetScriptNameOrSourceURL) \
- CPP(CallSitePrototypeGetThis) \
- CPP(CallSitePrototypeGetTypeName) \
- CPP(CallSitePrototypeIsConstructor) \
- CPP(CallSitePrototypeIsEval) \
- CPP(CallSitePrototypeIsNative) \
- CPP(CallSitePrototypeIsToplevel) \
- CPP(CallSitePrototypeToString) \
- \
- /* DataView */ \
- CPP(DataViewConstructor) \
- CPP(DataViewConstructor_ConstructStub) \
- CPP(DataViewPrototypeGetBuffer) \
- CPP(DataViewPrototypeGetByteLength) \
- CPP(DataViewPrototypeGetByteOffset) \
- CPP(DataViewPrototypeGetInt8) \
- CPP(DataViewPrototypeSetInt8) \
- CPP(DataViewPrototypeGetUint8) \
- CPP(DataViewPrototypeSetUint8) \
- CPP(DataViewPrototypeGetInt16) \
- CPP(DataViewPrototypeSetInt16) \
- CPP(DataViewPrototypeGetUint16) \
- CPP(DataViewPrototypeSetUint16) \
- CPP(DataViewPrototypeGetInt32) \
- CPP(DataViewPrototypeSetInt32) \
- CPP(DataViewPrototypeGetUint32) \
- CPP(DataViewPrototypeSetUint32) \
- CPP(DataViewPrototypeGetFloat32) \
- CPP(DataViewPrototypeSetFloat32) \
- CPP(DataViewPrototypeGetFloat64) \
- CPP(DataViewPrototypeSetFloat64) \
- \
- /* Date */ \
- CPP(DateConstructor) \
- CPP(DateConstructor_ConstructStub) \
- /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */ \
- TFJ(DatePrototypeGetDate, 0) \
- /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */ \
- TFJ(DatePrototypeGetDay, 0) \
- /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */ \
- TFJ(DatePrototypeGetFullYear, 0) \
- /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */ \
- TFJ(DatePrototypeGetHours, 0) \
- /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */ \
- TFJ(DatePrototypeGetMilliseconds, 0) \
- /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */ \
- TFJ(DatePrototypeGetMinutes, 0) \
- /* ES6 section 20.3.4.8 Date.prototype.getMonth */ \
- TFJ(DatePrototypeGetMonth, 0) \
- /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */ \
- TFJ(DatePrototypeGetSeconds, 0) \
- /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */ \
- TFJ(DatePrototypeGetTime, 0) \
- /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */ \
- TFJ(DatePrototypeGetTimezoneOffset, 0) \
- /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */ \
- TFJ(DatePrototypeGetUTCDate, 0) \
- /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */ \
- TFJ(DatePrototypeGetUTCDay, 0) \
- /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */ \
- TFJ(DatePrototypeGetUTCFullYear, 0) \
- /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */ \
- TFJ(DatePrototypeGetUTCHours, 0) \
- /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */ \
- TFJ(DatePrototypeGetUTCMilliseconds, 0) \
- /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */ \
- TFJ(DatePrototypeGetUTCMinutes, 0) \
- /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */ \
- TFJ(DatePrototypeGetUTCMonth, 0) \
- /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */ \
- TFJ(DatePrototypeGetUTCSeconds, 0) \
- CPP(DatePrototypeGetYear) \
- CPP(DatePrototypeSetYear) \
- CPP(DateNow) \
- CPP(DateParse) \
- CPP(DatePrototypeSetDate) \
- CPP(DatePrototypeSetFullYear) \
- CPP(DatePrototypeSetHours) \
- CPP(DatePrototypeSetMilliseconds) \
- CPP(DatePrototypeSetMinutes) \
- CPP(DatePrototypeSetMonth) \
- CPP(DatePrototypeSetSeconds) \
- CPP(DatePrototypeSetTime) \
- CPP(DatePrototypeSetUTCDate) \
- CPP(DatePrototypeSetUTCFullYear) \
- CPP(DatePrototypeSetUTCHours) \
- CPP(DatePrototypeSetUTCMilliseconds) \
- CPP(DatePrototypeSetUTCMinutes) \
- CPP(DatePrototypeSetUTCMonth) \
- CPP(DatePrototypeSetUTCSeconds) \
- CPP(DatePrototypeToDateString) \
- CPP(DatePrototypeToISOString) \
- CPP(DatePrototypeToPrimitive) \
- CPP(DatePrototypeToUTCString) \
- CPP(DatePrototypeToString) \
- CPP(DatePrototypeToTimeString) \
- CPP(DatePrototypeValueOf) \
- CPP(DatePrototypeToJson) \
- CPP(DateUTC) \
- \
- /* Error */ \
- CPP(ErrorConstructor) \
- CPP(ErrorCaptureStackTrace) \
- CPP(ErrorPrototypeToString) \
- CPP(MakeError) \
- CPP(MakeRangeError) \
- CPP(MakeSyntaxError) \
- CPP(MakeTypeError) \
- CPP(MakeURIError) \
- \
- /* Function */ \
- CPP(FunctionConstructor) \
- ASM(FunctionPrototypeApply) \
- CPP(FunctionPrototypeBind) \
- ASM(FunctionPrototypeCall) \
- /* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */ \
- TFJ(FunctionPrototypeHasInstance, 1) \
- CPP(FunctionPrototypeToString) \
- \
- /* Generator and Async */ \
- CPP(GeneratorFunctionConstructor) \
- /* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */ \
- TFJ(GeneratorPrototypeNext, 1) \
- /* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */ \
- TFJ(GeneratorPrototypeReturn, 1) \
- /* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */ \
- TFJ(GeneratorPrototypeThrow, 1) \
- CPP(AsyncFunctionConstructor) \
- \
- /* Global object */ \
- CPP(GlobalDecodeURI) \
- CPP(GlobalDecodeURIComponent) \
- CPP(GlobalEncodeURI) \
- CPP(GlobalEncodeURIComponent) \
- CPP(GlobalEscape) \
- CPP(GlobalUnescape) \
- CPP(GlobalEval) \
- /* ES6 section 18.2.2 isFinite ( number ) */ \
- TFJ(GlobalIsFinite, 1) \
- /* ES6 section 18.2.3 isNaN ( number ) */ \
- TFJ(GlobalIsNaN, 1) \
- \
- /* ES6 #sec-%iteratorprototype%-@@iterator */ \
- TFJ(IteratorPrototypeIterator, 0) \
- \
- /* JSON */ \
- CPP(JsonParse) \
- CPP(JsonStringify) \
- \
- /* Math */ \
- /* ES6 section 20.2.2.1 Math.abs ( x ) */ \
- TFJ(MathAbs, 1) \
- /* ES6 section 20.2.2.2 Math.acos ( x ) */ \
- TFJ(MathAcos, 1) \
- /* ES6 section 20.2.2.3 Math.acosh ( x ) */ \
- TFJ(MathAcosh, 1) \
- /* ES6 section 20.2.2.4 Math.asin ( x ) */ \
- TFJ(MathAsin, 1) \
- /* ES6 section 20.2.2.5 Math.asinh ( x ) */ \
- TFJ(MathAsinh, 1) \
- /* ES6 section 20.2.2.6 Math.atan ( x ) */ \
- TFJ(MathAtan, 1) \
- /* ES6 section 20.2.2.7 Math.atanh ( x ) */ \
- TFJ(MathAtanh, 1) \
- /* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */ \
- TFJ(MathAtan2, 2) \
- /* ES6 section 20.2.2.9 Math.cbrt ( x ) */ \
- TFJ(MathCbrt, 1) \
- /* ES6 section 20.2.2.10 Math.ceil ( x ) */ \
- TFJ(MathCeil, 1) \
- /* ES6 section 20.2.2.11 Math.clz32 ( x ) */ \
- TFJ(MathClz32, 1) \
- /* ES6 section 20.2.2.12 Math.cos ( x ) */ \
- TFJ(MathCos, 1) \
- /* ES6 section 20.2.2.13 Math.cosh ( x ) */ \
- TFJ(MathCosh, 1) \
- /* ES6 section 20.2.2.14 Math.exp ( x ) */ \
- TFJ(MathExp, 1) \
- /* ES6 section 20.2.2.15 Math.expm1 ( x ) */ \
- TFJ(MathExpm1, 1) \
- /* ES6 section 20.2.2.16 Math.floor ( x ) */ \
- TFJ(MathFloor, 1) \
- /* ES6 section 20.2.2.17 Math.fround ( x ) */ \
- TFJ(MathFround, 1) \
- /* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */ \
- CPP(MathHypot) \
- /* ES6 section 20.2.2.19 Math.imul ( x, y ) */ \
- TFJ(MathImul, 2) \
- /* ES6 section 20.2.2.20 Math.log ( x ) */ \
- TFJ(MathLog, 1) \
- /* ES6 section 20.2.2.21 Math.log1p ( x ) */ \
- TFJ(MathLog1p, 1) \
- /* ES6 section 20.2.2.22 Math.log10 ( x ) */ \
- TFJ(MathLog10, 1) \
- /* ES6 section 20.2.2.23 Math.log2 ( x ) */ \
- TFJ(MathLog2, 1) \
- /* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */ \
- ASM(MathMax) \
- /* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */ \
- ASM(MathMin) \
- /* ES6 section 20.2.2.26 Math.pow ( x, y ) */ \
- TFJ(MathPow, 2) \
- /* ES6 section 20.2.2.27 Math.random */ \
- TFJ(MathRandom, 0) \
- /* ES6 section 20.2.2.28 Math.round ( x ) */ \
- TFJ(MathRound, 1) \
- /* ES6 section 20.2.2.29 Math.sign ( x ) */ \
- TFJ(MathSign, 1) \
- /* ES6 section 20.2.2.30 Math.sin ( x ) */ \
- TFJ(MathSin, 1) \
- /* ES6 section 20.2.2.31 Math.sinh ( x ) */ \
- TFJ(MathSinh, 1) \
- /* ES6 section 20.2.2.32 Math.sqrt ( x ) */ \
- TFJ(MathSqrt, 1) \
- /* ES6 section 20.2.2.33 Math.tan ( x ) */ \
- TFJ(MathTan, 1) \
- /* ES6 section 20.2.2.34 Math.tanh ( x ) */ \
- TFJ(MathTanh, 1) \
- /* ES6 section 20.2.2.35 Math.trunc ( x ) */ \
- TFJ(MathTrunc, 1) \
- \
- /* Number */ \
- /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */ \
- ASM(NumberConstructor) \
- /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
- ASM(NumberConstructor_ConstructStub) \
- /* ES6 section 20.1.2.2 Number.isFinite ( number ) */ \
- TFJ(NumberIsFinite, 1) \
- /* ES6 section 20.1.2.3 Number.isInteger ( number ) */ \
- TFJ(NumberIsInteger, 1) \
- /* ES6 section 20.1.2.4 Number.isNaN ( number ) */ \
- TFJ(NumberIsNaN, 1) \
- /* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */ \
- TFJ(NumberIsSafeInteger, 1) \
- /* ES6 section 20.1.2.12 Number.parseFloat ( string ) */ \
- TFJ(NumberParseFloat, 1) \
- /* ES6 section 20.1.2.13 Number.parseInt ( string, radix ) */ \
- TFJ(NumberParseInt, 2) \
- CPP(NumberPrototypeToExponential) \
- CPP(NumberPrototypeToFixed) \
- CPP(NumberPrototypeToLocaleString) \
- CPP(NumberPrototypeToPrecision) \
- CPP(NumberPrototypeToString) \
- /* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */ \
- TFJ(NumberPrototypeValueOf, 0) \
- TFS(Add, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(LessThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(Equal, BUILTIN, kNoExtraICState, Compare) \
- TFS(NotEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare) \
- \
- /* Object */ \
- CPP(ObjectAssign) \
- TFJ(ObjectCreate, 2) \
- CPP(ObjectDefineGetter) \
- CPP(ObjectDefineProperties) \
- CPP(ObjectDefineProperty) \
- CPP(ObjectDefineSetter) \
- CPP(ObjectEntries) \
- CPP(ObjectFreeze) \
- CPP(ObjectGetOwnPropertyDescriptor) \
- CPP(ObjectGetOwnPropertyDescriptors) \
- CPP(ObjectGetOwnPropertyNames) \
- CPP(ObjectGetOwnPropertySymbols) \
- CPP(ObjectGetPrototypeOf) \
- CPP(ObjectSetPrototypeOf) \
- /* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */ \
- TFJ(ObjectHasOwnProperty, 1) \
- CPP(ObjectIs) \
- CPP(ObjectIsExtensible) \
- CPP(ObjectIsFrozen) \
- CPP(ObjectIsSealed) \
- CPP(ObjectKeys) \
- CPP(ObjectLookupGetter) \
- CPP(ObjectLookupSetter) \
- CPP(ObjectPreventExtensions) \
- /* ES6 section 19.1.3.6 Object.prototype.toString () */ \
- TFJ(ObjectProtoToString, 0) \
- CPP(ObjectPrototypePropertyIsEnumerable) \
- CPP(ObjectPrototypeGetProto) \
- CPP(ObjectPrototypeSetProto) \
- CPP(ObjectSeal) \
- CPP(ObjectValues) \
- \
- TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty) \
- TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare) \
- TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare) \
- TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter) \
- \
- /* Promise */ \
- CPP(CreateResolvingFunctions) \
- CPP(PromiseResolveClosure) \
- CPP(PromiseRejectClosure) \
- \
- /* Proxy */ \
- CPP(ProxyConstructor) \
- CPP(ProxyConstructor_ConstructStub) \
- \
- /* Reflect */ \
- ASM(ReflectApply) \
- ASM(ReflectConstruct) \
- CPP(ReflectDefineProperty) \
- CPP(ReflectDeleteProperty) \
- CPP(ReflectGet) \
- CPP(ReflectGetOwnPropertyDescriptor) \
- CPP(ReflectGetPrototypeOf) \
- CPP(ReflectHas) \
- CPP(ReflectIsExtensible) \
- CPP(ReflectOwnKeys) \
- CPP(ReflectPreventExtensions) \
- CPP(ReflectSet) \
- CPP(ReflectSetPrototypeOf) \
- \
- /* RegExp */ \
- CPP(RegExpCapture1Getter) \
- CPP(RegExpCapture2Getter) \
- CPP(RegExpCapture3Getter) \
- CPP(RegExpCapture4Getter) \
- CPP(RegExpCapture5Getter) \
- CPP(RegExpCapture6Getter) \
- CPP(RegExpCapture7Getter) \
- CPP(RegExpCapture8Getter) \
- CPP(RegExpCapture9Getter) \
- CPP(RegExpConstructor) \
- TFJ(RegExpInternalMatch, 2) \
- CPP(RegExpInputGetter) \
- CPP(RegExpInputSetter) \
- CPP(RegExpLastMatchGetter) \
- CPP(RegExpLastParenGetter) \
- CPP(RegExpLeftContextGetter) \
- CPP(RegExpPrototypeCompile) \
- TFJ(RegExpPrototypeExec, 1) \
- TFJ(RegExpPrototypeFlagsGetter, 0) \
- TFJ(RegExpPrototypeGlobalGetter, 0) \
- TFJ(RegExpPrototypeIgnoreCaseGetter, 0) \
- CPP(RegExpPrototypeMatch) \
- TFJ(RegExpPrototypeMultilineGetter, 0) \
- TFJ(RegExpPrototypeReplace, 2) \
- TFJ(RegExpPrototypeSearch, 1) \
- CPP(RegExpPrototypeSourceGetter) \
- CPP(RegExpPrototypeSpeciesGetter) \
- CPP(RegExpPrototypeSplit) \
- TFJ(RegExpPrototypeStickyGetter, 0) \
- TFJ(RegExpPrototypeTest, 1) \
- CPP(RegExpPrototypeToString) \
- TFJ(RegExpPrototypeUnicodeGetter, 0) \
- CPP(RegExpRightContextGetter) \
- \
- /* SharedArrayBuffer */ \
- CPP(SharedArrayBufferPrototypeGetByteLength) \
- TFJ(AtomicsLoad, 2) \
- TFJ(AtomicsStore, 3) \
- \
- /* String */ \
- ASM(StringConstructor) \
- ASM(StringConstructor_ConstructStub) \
- CPP(StringFromCodePoint) \
- /* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */ \
- TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */ \
- TFJ(StringPrototypeCharAt, 1) \
- /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */ \
- TFJ(StringPrototypeCharCodeAt, 1) \
- /* ES6 section 21.1.3.6 */ \
- /* String.prototype.endsWith ( searchString [ , endPosition ] ) */ \
- CPP(StringPrototypeEndsWith) \
- /* ES6 section 21.1.3.7 */ \
- /* String.prototype.includes ( searchString [ , position ] ) */ \
- CPP(StringPrototypeIncludes) \
- /* ES6 section 21.1.3.8 */ \
- /* String.prototype.indexOf ( searchString [ , position ] ) */ \
- CPP(StringPrototypeIndexOf) \
- /* ES6 section 21.1.3.9 */ \
- /* String.prototype.lastIndexOf ( searchString [ , position ] ) */ \
- CPP(StringPrototypeLastIndexOf) \
- /* ES6 section 21.1.3.10 String.prototype.localeCompare ( that ) */ \
- CPP(StringPrototypeLocaleCompare) \
- /* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */ \
- CPP(StringPrototypeNormalize) \
- /* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */ \
- TFJ(StringPrototypeSubstr, 2) \
- /* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */ \
- TFJ(StringPrototypeSubstring, 2) \
- /* ES6 section 21.1.3.20 */ \
- /* String.prototype.startsWith ( searchString [ , position ] ) */ \
- CPP(StringPrototypeStartsWith) \
- /* ES6 section 21.1.3.25 String.prototype.toString () */ \
- TFJ(StringPrototypeToString, 0) \
- CPP(StringPrototypeTrim) \
- CPP(StringPrototypeTrimLeft) \
- CPP(StringPrototypeTrimRight) \
- /* ES6 section 21.1.3.28 String.prototype.valueOf () */ \
- TFJ(StringPrototypeValueOf, 0) \
- /* ES6 #sec-string.prototype-@@iterator */ \
- TFJ(StringPrototypeIterator, 0) \
- \
- /* StringIterator */ \
- TFJ(StringIteratorPrototypeNext, 0) \
- \
- /* Symbol */ \
- CPP(SymbolConstructor) \
- CPP(SymbolConstructor_ConstructStub) \
- /* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */ \
- TFJ(SymbolPrototypeToPrimitive, 1) \
- /* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */ \
- TFJ(SymbolPrototypeToString, 0) \
- /* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */ \
- TFJ(SymbolPrototypeValueOf, 0) \
- \
- /* TypedArray */ \
- CPP(TypedArrayPrototypeBuffer) \
- /* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */ \
- TFJ(TypedArrayPrototypeByteLength, 0) \
- /* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */ \
- TFJ(TypedArrayPrototypeByteOffset, 0) \
- /* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */ \
- TFJ(TypedArrayPrototypeLength, 0) \
- /* ES6 #sec-%typedarray%.prototype.entries */ \
- TFJ(TypedArrayPrototypeEntries, 0) \
- /* ES6 #sec-%typedarray%.prototype.keys */ \
- TFJ(TypedArrayPrototypeKeys, 0) \
- /* ES6 #sec-%typedarray%.prototype.values */ \
- TFJ(TypedArrayPrototypeValues, 0) \
- \
- CPP(ModuleNamespaceIterator) \
- CPP(FixedArrayIteratorNext)
+#define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG) \
+ ASM(Abort) \
+ /* Code aging */ \
+ CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM) \
+ \
+ /* Declared first for dependency reasons */ \
+ ASM(CompileLazy) \
+ TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(FastNewObject, BUILTIN, kNoExtraICState, FastNewObject) \
+ \
+ /* Calls */ \
+ ASM(ArgumentsAdaptorTrampoline) \
+ /* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
+ ASM(CallFunction_ReceiverIsNullOrUndefined) \
+ ASM(CallFunction_ReceiverIsNotNullOrUndefined) \
+ ASM(CallFunction_ReceiverIsAny) \
+ ASM(TailCallFunction_ReceiverIsNullOrUndefined) \
+ ASM(TailCallFunction_ReceiverIsNotNullOrUndefined) \
+ ASM(TailCallFunction_ReceiverIsAny) \
+ /* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
+ ASM(CallBoundFunction) \
+ ASM(TailCallBoundFunction) \
+ /* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
+ ASM(Call_ReceiverIsNullOrUndefined) \
+ ASM(Call_ReceiverIsNotNullOrUndefined) \
+ ASM(Call_ReceiverIsAny) \
+ ASM(TailCall_ReceiverIsNullOrUndefined) \
+ ASM(TailCall_ReceiverIsNotNullOrUndefined) \
+ ASM(TailCall_ReceiverIsAny) \
+ \
+ /* Construct */ \
+ /* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */ \
+ ASM(ConstructFunction) \
+ /* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */ \
+ ASM(ConstructBoundFunction) \
+ ASM(ConstructedNonConstructable) \
+ /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \
+ ASM(ConstructProxy) \
+ /* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
+ ASM(Construct) \
+ ASM(JSConstructStubApi) \
+ ASM(JSConstructStubGeneric) \
+ ASM(JSBuiltinsConstructStub) \
+ ASM(JSBuiltinsConstructStubForDerived) \
+ TFS(FastNewClosure, BUILTIN, kNoExtraICState, FastNewClosure) \
+ TFS(FastNewFunctionContextEval, BUILTIN, kNoExtraICState, \
+ FastNewFunctionContext) \
+ TFS(FastNewFunctionContextFunction, BUILTIN, kNoExtraICState, \
+ FastNewFunctionContext) \
+ TFS(FastCloneRegExp, BUILTIN, kNoExtraICState, FastCloneRegExp) \
+ TFS(FastCloneShallowArrayTrack, BUILTIN, kNoExtraICState, \
+ FastCloneShallowArray) \
+ TFS(FastCloneShallowArrayDontTrack, BUILTIN, kNoExtraICState, \
+ FastCloneShallowArray) \
+ TFS(FastCloneShallowObject0, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject1, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject2, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject3, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject4, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject5, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject6, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ \
+ /* Apply and entries */ \
+ ASM(Apply) \
+ ASM(JSEntryTrampoline) \
+ ASM(JSConstructEntryTrampoline) \
+ ASM(ResumeGeneratorTrampoline) \
+ \
+ /* Stack and interrupt check */ \
+ ASM(InterruptCheck) \
+ ASM(StackCheck) \
+ \
+ /* String helpers */ \
+ TFS(StringEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringCharAt, BUILTIN, kNoExtraICState, StringCharAt) \
+ TFS(StringCharCodeAt, BUILTIN, kNoExtraICState, StringCharCodeAt) \
+ \
+ /* Interpreter */ \
+ ASM(InterpreterEntryTrampoline) \
+ ASM(InterpreterPushArgsAndCall) \
+ ASM(InterpreterPushArgsAndCallFunction) \
+ ASM(InterpreterPushArgsAndTailCall) \
+ ASM(InterpreterPushArgsAndTailCallFunction) \
+ ASM(InterpreterPushArgsAndConstruct) \
+ ASM(InterpreterPushArgsAndConstructFunction) \
+ ASM(InterpreterPushArgsAndConstructArray) \
+ ASM(InterpreterEnterBytecodeAdvance) \
+ ASM(InterpreterEnterBytecodeDispatch) \
+ ASM(InterpreterOnStackReplacement) \
+ \
+ /* Code life-cycle */ \
+ ASM(CompileBaseline) \
+ ASM(CompileOptimized) \
+ ASM(CompileOptimizedConcurrent) \
+ ASM(InOptimizationQueue) \
+ ASM(InstantiateAsmJs) \
+ ASM(MarkCodeAsToBeExecutedOnce) \
+ ASM(MarkCodeAsExecutedOnce) \
+ ASM(MarkCodeAsExecutedTwice) \
+ ASM(NotifyDeoptimized) \
+ ASM(NotifySoftDeoptimized) \
+ ASM(NotifyLazyDeoptimized) \
+ ASM(NotifyStubFailure) \
+ ASM(NotifyStubFailureSaveDoubles) \
+ ASM(OnStackReplacement) \
+ \
+ /* API callback handling */ \
+ API(HandleApiCall) \
+ API(HandleApiCallAsFunction) \
+ API(HandleApiCallAsConstructor) \
+ ASM(HandleFastApiCall) \
+ \
+ /* Adapters for Turbofan into runtime */ \
+ ASM(AllocateInNewSpace) \
+ ASM(AllocateInOldSpace) \
+ \
+ /* TurboFan support builtins */ \
+ TFS(CopyFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
+ CopyFastSmiOrObjectElements) \
+ TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements) \
+ TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
+ GrowArrayElements) \
+ TFS(NewUnmappedArgumentsElements, BUILTIN, kNoExtraICState, \
+ NewArgumentsElements) \
+ TFS(NewRestParameterElements, BUILTIN, kNoExtraICState, \
+ NewArgumentsElements) \
+ \
+ /* Debugger */ \
+ DBG(FrameDropper_LiveEdit) \
+ DBG(Return_DebugBreak) \
+ DBG(Slot_DebugBreak) \
+ \
+ /* Type conversions */ \
+ TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(NonPrimitiveToPrimitive_Default, BUILTIN, kNoExtraICState, \
+ TypeConversion) \
+ TFS(NonPrimitiveToPrimitive_Number, BUILTIN, kNoExtraICState, \
+ TypeConversion) \
+ TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState, \
+ TypeConversion) \
+ TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(Typeof, BUILTIN, kNoExtraICState, Typeof) \
+ TFS(GetSuperConstructor, BUILTIN, kNoExtraICState, TypeConversion) \
+ \
+ /* Handlers */ \
+ TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState, \
+ LoadWithVector) \
+ TFS(KeyedLoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
+ TFS(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC, LoadWithVector) \
+ TFS(KeyedStoreIC_Megamorphic_TF, KEYED_STORE_IC, kNoExtraICState, \
+ StoreWithVector) \
+ TFS(KeyedStoreIC_Megamorphic_Strict_TF, KEYED_STORE_IC, \
+ StoreICState::kStrictModeState, StoreWithVector) \
+ ASM(KeyedStoreIC_Miss) \
+ ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC) \
+ TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
+ TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
+ ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState) \
+ TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
+ TFS(LoadIC_Normal, HANDLER, Code::LOAD_IC, LoadWithVector) \
+ TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
+ TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
+ TFS(StoreIC_Normal, HANDLER, Code::STORE_IC, StoreWithVector) \
+ ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState) \
+ TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector) \
+ TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector) \
+ \
+ /* Built-in functions for Javascript */ \
+ /* Special internal builtins */ \
+ CPP(EmptyFunction) \
+ CPP(Illegal) \
+ CPP(RestrictedFunctionPropertiesThrower) \
+ CPP(RestrictedStrictArgumentsPropertiesThrower) \
+ CPP(UnsupportedThrower) \
+ TFJ(ReturnReceiver, 0) \
+ \
+ /* Array */ \
+ ASM(ArrayCode) \
+ ASM(InternalArrayCode) \
+ CPP(ArrayConcat) \
+ /* ES6 section 22.1.2.2 Array.isArray */ \
+ TFJ(ArrayIsArray, 1) \
+ /* ES7 #sec-array.prototype.includes */ \
+ TFJ(ArrayIncludes, 2) \
+ TFJ(ArrayIndexOf, 2) \
+ CPP(ArrayPop) \
+ CPP(ArrayPush) \
+ TFJ(FastArrayPush, -1) \
+ CPP(ArrayShift) \
+ CPP(ArraySlice) \
+ CPP(ArraySplice) \
+ CPP(ArrayUnshift) \
+ /* ES6 #sec-array.prototype.entries */ \
+ TFJ(ArrayPrototypeEntries, 0) \
+ /* ES6 #sec-array.prototype.keys */ \
+ TFJ(ArrayPrototypeKeys, 0) \
+ /* ES6 #sec-array.prototype.values */ \
+ TFJ(ArrayPrototypeValues, 0) \
+ /* ES6 #sec-%arrayiteratorprototype%.next */ \
+ TFJ(ArrayIteratorPrototypeNext, 0) \
+ \
+ /* ArrayBuffer */ \
+ CPP(ArrayBufferConstructor) \
+ CPP(ArrayBufferConstructor_ConstructStub) \
+ CPP(ArrayBufferPrototypeGetByteLength) \
+ CPP(ArrayBufferIsView) \
+ \
+ /* Boolean */ \
+ CPP(BooleanConstructor) \
+ CPP(BooleanConstructor_ConstructStub) \
+ /* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */ \
+ TFJ(BooleanPrototypeToString, 0) \
+ /* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */ \
+ TFJ(BooleanPrototypeValueOf, 0) \
+ \
+ /* CallSite */ \
+ CPP(CallSitePrototypeGetColumnNumber) \
+ CPP(CallSitePrototypeGetEvalOrigin) \
+ CPP(CallSitePrototypeGetFileName) \
+ CPP(CallSitePrototypeGetFunction) \
+ CPP(CallSitePrototypeGetFunctionName) \
+ CPP(CallSitePrototypeGetLineNumber) \
+ CPP(CallSitePrototypeGetMethodName) \
+ CPP(CallSitePrototypeGetPosition) \
+ CPP(CallSitePrototypeGetScriptNameOrSourceURL) \
+ CPP(CallSitePrototypeGetThis) \
+ CPP(CallSitePrototypeGetTypeName) \
+ CPP(CallSitePrototypeIsConstructor) \
+ CPP(CallSitePrototypeIsEval) \
+ CPP(CallSitePrototypeIsNative) \
+ CPP(CallSitePrototypeIsToplevel) \
+ CPP(CallSitePrototypeToString) \
+ \
+ /* DataView */ \
+ CPP(DataViewConstructor) \
+ CPP(DataViewConstructor_ConstructStub) \
+ CPP(DataViewPrototypeGetBuffer) \
+ CPP(DataViewPrototypeGetByteLength) \
+ CPP(DataViewPrototypeGetByteOffset) \
+ CPP(DataViewPrototypeGetInt8) \
+ CPP(DataViewPrototypeSetInt8) \
+ CPP(DataViewPrototypeGetUint8) \
+ CPP(DataViewPrototypeSetUint8) \
+ CPP(DataViewPrototypeGetInt16) \
+ CPP(DataViewPrototypeSetInt16) \
+ CPP(DataViewPrototypeGetUint16) \
+ CPP(DataViewPrototypeSetUint16) \
+ CPP(DataViewPrototypeGetInt32) \
+ CPP(DataViewPrototypeSetInt32) \
+ CPP(DataViewPrototypeGetUint32) \
+ CPP(DataViewPrototypeSetUint32) \
+ CPP(DataViewPrototypeGetFloat32) \
+ CPP(DataViewPrototypeSetFloat32) \
+ CPP(DataViewPrototypeGetFloat64) \
+ CPP(DataViewPrototypeSetFloat64) \
+ \
+ /* Date */ \
+ CPP(DateConstructor) \
+ CPP(DateConstructor_ConstructStub) \
+ /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */ \
+ TFJ(DatePrototypeGetDate, 0) \
+ /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */ \
+ TFJ(DatePrototypeGetDay, 0) \
+ /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */ \
+ TFJ(DatePrototypeGetFullYear, 0) \
+ /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */ \
+ TFJ(DatePrototypeGetHours, 0) \
+ /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */ \
+ TFJ(DatePrototypeGetMilliseconds, 0) \
+ /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */ \
+ TFJ(DatePrototypeGetMinutes, 0) \
+ /* ES6 section 20.3.4.8 Date.prototype.getMonth */ \
+ TFJ(DatePrototypeGetMonth, 0) \
+ /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */ \
+ TFJ(DatePrototypeGetSeconds, 0) \
+ /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */ \
+ TFJ(DatePrototypeGetTime, 0) \
+ /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */ \
+ TFJ(DatePrototypeGetTimezoneOffset, 0) \
+ /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */ \
+ TFJ(DatePrototypeGetUTCDate, 0) \
+ /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */ \
+ TFJ(DatePrototypeGetUTCDay, 0) \
+ /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */ \
+ TFJ(DatePrototypeGetUTCFullYear, 0) \
+ /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */ \
+ TFJ(DatePrototypeGetUTCHours, 0) \
+ /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */ \
+ TFJ(DatePrototypeGetUTCMilliseconds, 0) \
+ /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */ \
+ TFJ(DatePrototypeGetUTCMinutes, 0) \
+ /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */ \
+ TFJ(DatePrototypeGetUTCMonth, 0) \
+ /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */ \
+ TFJ(DatePrototypeGetUTCSeconds, 0) \
+ /* ES6 section 20.3.4.44 Date.prototype.valueOf ( ) */ \
+ TFJ(DatePrototypeValueOf, 0) \
+ /* ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint ) */ \
+ TFJ(DatePrototypeToPrimitive, 1) \
+ CPP(DatePrototypeGetYear) \
+ CPP(DatePrototypeSetYear) \
+ CPP(DateNow) \
+ CPP(DateParse) \
+ CPP(DatePrototypeSetDate) \
+ CPP(DatePrototypeSetFullYear) \
+ CPP(DatePrototypeSetHours) \
+ CPP(DatePrototypeSetMilliseconds) \
+ CPP(DatePrototypeSetMinutes) \
+ CPP(DatePrototypeSetMonth) \
+ CPP(DatePrototypeSetSeconds) \
+ CPP(DatePrototypeSetTime) \
+ CPP(DatePrototypeSetUTCDate) \
+ CPP(DatePrototypeSetUTCFullYear) \
+ CPP(DatePrototypeSetUTCHours) \
+ CPP(DatePrototypeSetUTCMilliseconds) \
+ CPP(DatePrototypeSetUTCMinutes) \
+ CPP(DatePrototypeSetUTCMonth) \
+ CPP(DatePrototypeSetUTCSeconds) \
+ CPP(DatePrototypeToDateString) \
+ CPP(DatePrototypeToISOString) \
+ CPP(DatePrototypeToUTCString) \
+ CPP(DatePrototypeToString) \
+ CPP(DatePrototypeToTimeString) \
+ CPP(DatePrototypeToJson) \
+ CPP(DateUTC) \
+ \
+ /* Error */ \
+ CPP(ErrorConstructor) \
+ CPP(ErrorCaptureStackTrace) \
+ CPP(ErrorPrototypeToString) \
+ CPP(MakeError) \
+ CPP(MakeRangeError) \
+ CPP(MakeSyntaxError) \
+ CPP(MakeTypeError) \
+ CPP(MakeURIError) \
+ \
+ /* Function */ \
+ CPP(FunctionConstructor) \
+ ASM(FunctionPrototypeApply) \
+ CPP(FunctionPrototypeBind) \
+ TFJ(FastFunctionPrototypeBind, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ ASM(FunctionPrototypeCall) \
+ /* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */ \
+ TFJ(FunctionPrototypeHasInstance, 1) \
+ CPP(FunctionPrototypeToString) \
+ \
+ /* Belongs to Objects but is a dependency of GeneratorPrototypeResume */ \
+ TFS(CreateIterResultObject, BUILTIN, kNoExtraICState, \
+ CreateIterResultObject) \
+ \
+ /* Generator and Async */ \
+ CPP(GeneratorFunctionConstructor) \
+ /* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */ \
+ TFJ(GeneratorPrototypeNext, 1) \
+ /* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */ \
+ TFJ(GeneratorPrototypeReturn, 1) \
+ /* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */ \
+ TFJ(GeneratorPrototypeThrow, 1) \
+ CPP(AsyncFunctionConstructor) \
+ \
+ /* Global object */ \
+ CPP(GlobalDecodeURI) \
+ CPP(GlobalDecodeURIComponent) \
+ CPP(GlobalEncodeURI) \
+ CPP(GlobalEncodeURIComponent) \
+ CPP(GlobalEscape) \
+ CPP(GlobalUnescape) \
+ CPP(GlobalEval) \
+ /* ES6 section 18.2.2 isFinite ( number ) */ \
+ TFJ(GlobalIsFinite, 1) \
+ /* ES6 section 18.2.3 isNaN ( number ) */ \
+ TFJ(GlobalIsNaN, 1) \
+ \
+ /* JSON */ \
+ CPP(JsonParse) \
+ CPP(JsonStringify) \
+ \
+ /* ICs */ \
+ TFS(LoadIC, LOAD_IC, kNoExtraICState, LoadWithVector) \
+ TFS(LoadICTrampoline, LOAD_IC, kNoExtraICState, Load) \
+ TFS(KeyedLoadIC, KEYED_LOAD_IC, kNoExtraICState, LoadWithVector) \
+ TFS(KeyedLoadICTrampoline, KEYED_LOAD_IC, kNoExtraICState, Load) \
+ TFS(StoreIC, STORE_IC, kNoExtraICState, StoreWithVector) \
+ TFS(StoreICTrampoline, STORE_IC, kNoExtraICState, Store) \
+ TFS(StoreICStrict, STORE_IC, StoreICState::kStrictModeState, \
+ StoreWithVector) \
+ TFS(StoreICStrictTrampoline, STORE_IC, StoreICState::kStrictModeState, \
+ Store) \
+ TFS(KeyedStoreIC, KEYED_STORE_IC, kNoExtraICState, StoreWithVector) \
+ TFS(KeyedStoreICTrampoline, KEYED_STORE_IC, kNoExtraICState, Store) \
+ TFS(KeyedStoreICStrict, KEYED_STORE_IC, StoreICState::kStrictModeState, \
+ StoreWithVector) \
+ TFS(KeyedStoreICStrictTrampoline, KEYED_STORE_IC, \
+ StoreICState::kStrictModeState, Store) \
+ TFS(LoadGlobalIC, LOAD_GLOBAL_IC, LoadGlobalICState::kNotInsideTypeOfState, \
+ LoadGlobalWithVector) \
+ TFS(LoadGlobalICInsideTypeof, LOAD_GLOBAL_IC, \
+ LoadGlobalICState::kInsideTypeOfState, LoadGlobalWithVector) \
+ TFS(LoadGlobalICTrampoline, LOAD_GLOBAL_IC, \
+ LoadGlobalICState::kNotInsideTypeOfState, LoadGlobal) \
+ TFS(LoadGlobalICInsideTypeofTrampoline, LOAD_GLOBAL_IC, \
+ LoadGlobalICState::kInsideTypeOfState, LoadGlobal) \
+ \
+ /* Math */ \
+ /* ES6 section 20.2.2.1 Math.abs ( x ) */ \
+ TFJ(MathAbs, 1) \
+ /* ES6 section 20.2.2.2 Math.acos ( x ) */ \
+ TFJ(MathAcos, 1) \
+ /* ES6 section 20.2.2.3 Math.acosh ( x ) */ \
+ TFJ(MathAcosh, 1) \
+ /* ES6 section 20.2.2.4 Math.asin ( x ) */ \
+ TFJ(MathAsin, 1) \
+ /* ES6 section 20.2.2.5 Math.asinh ( x ) */ \
+ TFJ(MathAsinh, 1) \
+ /* ES6 section 20.2.2.6 Math.atan ( x ) */ \
+ TFJ(MathAtan, 1) \
+ /* ES6 section 20.2.2.7 Math.atanh ( x ) */ \
+ TFJ(MathAtanh, 1) \
+ /* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */ \
+ TFJ(MathAtan2, 2) \
+ /* ES6 section 20.2.2.9 Math.cbrt ( x ) */ \
+ TFJ(MathCbrt, 1) \
+ /* ES6 section 20.2.2.10 Math.ceil ( x ) */ \
+ TFJ(MathCeil, 1) \
+ /* ES6 section 20.2.2.11 Math.clz32 ( x ) */ \
+ TFJ(MathClz32, 1) \
+ /* ES6 section 20.2.2.12 Math.cos ( x ) */ \
+ TFJ(MathCos, 1) \
+ /* ES6 section 20.2.2.13 Math.cosh ( x ) */ \
+ TFJ(MathCosh, 1) \
+ /* ES6 section 20.2.2.14 Math.exp ( x ) */ \
+ TFJ(MathExp, 1) \
+ /* ES6 section 20.2.2.15 Math.expm1 ( x ) */ \
+ TFJ(MathExpm1, 1) \
+ /* ES6 section 20.2.2.16 Math.floor ( x ) */ \
+ TFJ(MathFloor, 1) \
+ /* ES6 section 20.2.2.17 Math.fround ( x ) */ \
+ TFJ(MathFround, 1) \
+ /* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */ \
+ CPP(MathHypot) \
+ /* ES6 section 20.2.2.19 Math.imul ( x, y ) */ \
+ TFJ(MathImul, 2) \
+ /* ES6 section 20.2.2.20 Math.log ( x ) */ \
+ TFJ(MathLog, 1) \
+ /* ES6 section 20.2.2.21 Math.log1p ( x ) */ \
+ TFJ(MathLog1p, 1) \
+ /* ES6 section 20.2.2.22 Math.log10 ( x ) */ \
+ TFJ(MathLog10, 1) \
+ /* ES6 section 20.2.2.23 Math.log2 ( x ) */ \
+ TFJ(MathLog2, 1) \
+ /* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */ \
+ ASM(MathMax) \
+ /* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */ \
+ ASM(MathMin) \
+ /* ES6 section 20.2.2.26 Math.pow ( x, y ) */ \
+ TFJ(MathPow, 2) \
+ /* ES6 section 20.2.2.27 Math.random */ \
+ TFJ(MathRandom, 0) \
+ /* ES6 section 20.2.2.28 Math.round ( x ) */ \
+ TFJ(MathRound, 1) \
+ /* ES6 section 20.2.2.29 Math.sign ( x ) */ \
+ TFJ(MathSign, 1) \
+ /* ES6 section 20.2.2.30 Math.sin ( x ) */ \
+ TFJ(MathSin, 1) \
+ /* ES6 section 20.2.2.31 Math.sinh ( x ) */ \
+ TFJ(MathSinh, 1) \
+  /* ES6 section 20.2.2.33 Math.tan ( x ) */ \
+  TFJ(MathTan, 1) \
+  /* ES6 section 20.2.2.34 Math.tanh ( x ) */ \
+  TFJ(MathTanh, 1) \
+  /* ES6 section 20.2.2.32 Math.sqrt ( x ) */ \
+  TFJ(MathSqrt, 1) \
+ /* ES6 section 20.2.2.35 Math.trunc ( x ) */ \
+ TFJ(MathTrunc, 1) \
+ \
+ /* Number */ \
+ /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */ \
+ ASM(NumberConstructor) \
+ /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
+ ASM(NumberConstructor_ConstructStub) \
+ /* ES6 section 20.1.2.2 Number.isFinite ( number ) */ \
+ TFJ(NumberIsFinite, 1) \
+ /* ES6 section 20.1.2.3 Number.isInteger ( number ) */ \
+ TFJ(NumberIsInteger, 1) \
+ /* ES6 section 20.1.2.4 Number.isNaN ( number ) */ \
+ TFJ(NumberIsNaN, 1) \
+ /* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */ \
+ TFJ(NumberIsSafeInteger, 1) \
+ /* ES6 section 20.1.2.12 Number.parseFloat ( string ) */ \
+ TFJ(NumberParseFloat, 1) \
+ /* ES6 section 20.1.2.13 Number.parseInt ( string, radix ) */ \
+ TFJ(NumberParseInt, 2) \
+ CPP(NumberPrototypeToExponential) \
+ CPP(NumberPrototypeToFixed) \
+ CPP(NumberPrototypeToLocaleString) \
+ CPP(NumberPrototypeToPrecision) \
+ CPP(NumberPrototypeToString) \
+ /* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */ \
+ TFJ(NumberPrototypeValueOf, 0) \
+ TFS(Add, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(LessThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(Equal, BUILTIN, kNoExtraICState, Compare) \
+ TFS(NotEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare) \
+ \
+ /* Object */ \
+ CPP(ObjectAssign) \
+ TFJ(ObjectCreate, 2) \
+ CPP(ObjectDefineGetter) \
+ CPP(ObjectDefineProperties) \
+ CPP(ObjectDefineProperty) \
+ CPP(ObjectDefineSetter) \
+ CPP(ObjectEntries) \
+ CPP(ObjectFreeze) \
+ CPP(ObjectGetOwnPropertyDescriptor) \
+ CPP(ObjectGetOwnPropertyDescriptors) \
+ CPP(ObjectGetOwnPropertyNames) \
+ CPP(ObjectGetOwnPropertySymbols) \
+ CPP(ObjectGetPrototypeOf) \
+ CPP(ObjectSetPrototypeOf) \
+ /* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */ \
+ TFJ(ObjectHasOwnProperty, 1) \
+ CPP(ObjectIs) \
+ CPP(ObjectIsExtensible) \
+ CPP(ObjectIsFrozen) \
+ CPP(ObjectIsSealed) \
+ CPP(ObjectKeys) \
+ CPP(ObjectLookupGetter) \
+ CPP(ObjectLookupSetter) \
+ CPP(ObjectPreventExtensions) \
+ /* ES6 section 19.1.3.6 Object.prototype.toString () */ \
+ TFJ(ObjectProtoToString, 0) \
+ CPP(ObjectPrototypePropertyIsEnumerable) \
+ CPP(ObjectPrototypeGetProto) \
+ CPP(ObjectPrototypeSetProto) \
+ CPP(ObjectSeal) \
+ CPP(ObjectValues) \
+ \
+ TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty) \
+ TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare) \
+ TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare) \
+ TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter) \
+ \
+ /* Promise */ \
+ TFJ(PromiseGetCapabilitiesExecutor, 2) \
+ TFJ(NewPromiseCapability, 2) \
+ TFJ(PromiseConstructor, 1) \
+ TFJ(PromiseInternalConstructor, 1) \
+ TFJ(IsPromise, 1) \
+ TFJ(PromiseResolveClosure, 1) \
+ TFJ(PromiseRejectClosure, 1) \
+ TFJ(PromiseThen, 2) \
+ TFJ(PromiseCatch, 1) \
+ TFJ(PerformPromiseThen, 4) \
+ TFJ(ResolvePromise, 2) \
+ TFS(PromiseHandleReject, BUILTIN, kNoExtraICState, PromiseHandleReject) \
+ TFJ(PromiseHandle, 5) \
+ TFJ(PromiseResolve, 1) \
+ TFJ(PromiseReject, 1) \
+ TFJ(InternalPromiseReject, 3) \
+ \
+ /* Proxy */ \
+ CPP(ProxyConstructor) \
+ CPP(ProxyConstructor_ConstructStub) \
+ \
+ /* Reflect */ \
+ ASM(ReflectApply) \
+ ASM(ReflectConstruct) \
+ CPP(ReflectDefineProperty) \
+ CPP(ReflectDeleteProperty) \
+ CPP(ReflectGet) \
+ CPP(ReflectGetOwnPropertyDescriptor) \
+ CPP(ReflectGetPrototypeOf) \
+ CPP(ReflectHas) \
+ CPP(ReflectIsExtensible) \
+ CPP(ReflectOwnKeys) \
+ CPP(ReflectPreventExtensions) \
+ CPP(ReflectSet) \
+ CPP(ReflectSetPrototypeOf) \
+ \
+ /* RegExp */ \
+ CPP(RegExpCapture1Getter) \
+ CPP(RegExpCapture2Getter) \
+ CPP(RegExpCapture3Getter) \
+ CPP(RegExpCapture4Getter) \
+ CPP(RegExpCapture5Getter) \
+ CPP(RegExpCapture6Getter) \
+ CPP(RegExpCapture7Getter) \
+ CPP(RegExpCapture8Getter) \
+ CPP(RegExpCapture9Getter) \
+ TFJ(RegExpConstructor, 2) \
+ TFJ(RegExpInternalMatch, 2) \
+ CPP(RegExpInputGetter) \
+ CPP(RegExpInputSetter) \
+ CPP(RegExpLastMatchGetter) \
+ CPP(RegExpLastParenGetter) \
+ CPP(RegExpLeftContextGetter) \
+ TFJ(RegExpPrototypeCompile, 2) \
+ TFJ(RegExpPrototypeExec, 1) \
+ TFJ(RegExpPrototypeFlagsGetter, 0) \
+ TFJ(RegExpPrototypeGlobalGetter, 0) \
+ TFJ(RegExpPrototypeIgnoreCaseGetter, 0) \
+ TFJ(RegExpPrototypeMatch, 1) \
+ TFJ(RegExpPrototypeMultilineGetter, 0) \
+ TFJ(RegExpPrototypeReplace, 2) \
+ TFJ(RegExpPrototypeSearch, 1) \
+ TFJ(RegExpPrototypeSourceGetter, 0) \
+ TFJ(RegExpPrototypeSplit, 2) \
+ TFJ(RegExpPrototypeStickyGetter, 0) \
+ TFJ(RegExpPrototypeTest, 1) \
+ CPP(RegExpPrototypeToString) \
+ TFJ(RegExpPrototypeUnicodeGetter, 0) \
+ CPP(RegExpRightContextGetter) \
+ \
+ /* SharedArrayBuffer */ \
+ CPP(SharedArrayBufferPrototypeGetByteLength) \
+ TFJ(AtomicsLoad, 2) \
+ TFJ(AtomicsStore, 3) \
+ \
+ /* String */ \
+ ASM(StringConstructor) \
+ ASM(StringConstructor_ConstructStub) \
+ CPP(StringFromCodePoint) \
+ /* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */ \
+ TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */ \
+ TFJ(StringPrototypeCharAt, 1) \
+ /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */ \
+ TFJ(StringPrototypeCharCodeAt, 1) \
+ /* ES6 section 21.1.3.6 */ \
+ /* String.prototype.endsWith ( searchString [ , endPosition ] ) */ \
+ CPP(StringPrototypeEndsWith) \
+ /* ES6 section 21.1.3.7 */ \
+ /* String.prototype.includes ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeIncludes) \
+ /* ES6 section #sec-string.prototype.indexof */ \
+ /* String.prototype.indexOf ( searchString [ , position ] ) */ \
+ TFJ(StringPrototypeIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 section 21.1.3.9 */ \
+ /* String.prototype.lastIndexOf ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeLastIndexOf) \
+ /* ES6 section 21.1.3.10 String.prototype.localeCompare ( that ) */ \
+ CPP(StringPrototypeLocaleCompare) \
+ /* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */ \
+ CPP(StringPrototypeNormalize) \
+ /* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */ \
+ TFJ(StringPrototypeSubstr, 2) \
+ /* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */ \
+ TFJ(StringPrototypeSubstring, 2) \
+ /* ES6 section 21.1.3.20 */ \
+ /* String.prototype.startsWith ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeStartsWith) \
+ /* ES6 section 21.1.3.25 String.prototype.toString () */ \
+ TFJ(StringPrototypeToString, 0) \
+ CPP(StringPrototypeTrim) \
+ CPP(StringPrototypeTrimLeft) \
+ CPP(StringPrototypeTrimRight) \
+ /* ES6 section 21.1.3.28 String.prototype.valueOf () */ \
+ TFJ(StringPrototypeValueOf, 0) \
+ /* ES6 #sec-string.prototype-@@iterator */ \
+ TFJ(StringPrototypeIterator, 0) \
+ \
+ /* StringIterator */ \
+ TFJ(StringIteratorPrototypeNext, 0) \
+ \
+ /* Symbol */ \
+ CPP(SymbolConstructor) \
+ CPP(SymbolConstructor_ConstructStub) \
+ /* ES6 section 19.4.2.1 Symbol.for */ \
+ CPP(SymbolFor) \
+ /* ES6 section 19.4.2.5 Symbol.keyFor */ \
+ CPP(SymbolKeyFor) \
+ /* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */ \
+ TFJ(SymbolPrototypeToPrimitive, 1) \
+ /* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */ \
+ TFJ(SymbolPrototypeToString, 0) \
+ /* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */ \
+ TFJ(SymbolPrototypeValueOf, 0) \
+ \
+ /* TypedArray */ \
+ CPP(TypedArrayPrototypeBuffer) \
+ /* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */ \
+ TFJ(TypedArrayPrototypeByteLength, 0) \
+ /* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */ \
+ TFJ(TypedArrayPrototypeByteOffset, 0) \
+ /* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */ \
+ TFJ(TypedArrayPrototypeLength, 0) \
+ /* ES6 #sec-%typedarray%.prototype.entries */ \
+ TFJ(TypedArrayPrototypeEntries, 0) \
+ /* ES6 #sec-%typedarray%.prototype.keys */ \
+ TFJ(TypedArrayPrototypeKeys, 0) \
+ /* ES6 #sec-%typedarray%.prototype.values */ \
+ TFJ(TypedArrayPrototypeValues, 0)
#define IGNORE_BUILTIN(...)
@@ -717,8 +791,10 @@ namespace internal {
IGNORE_BUILTIN, IGNORE_BUILTIN, V)
// Forward declarations.
-class CodeStubAssembler;
class ObjectVisitor;
+namespace compiler {
+class CodeAssemblerState;
+}
class Builtins {
public:
@@ -761,6 +837,9 @@ class Builtins {
TailCallMode tail_call_mode,
CallableType function_type = CallableType::kAny);
Handle<Code> InterpreterPushArgsAndConstruct(CallableType function_type);
+ Handle<Code> NewFunctionContext(ScopeType scope_type);
+ Handle<Code> NewCloneShallowArray(AllocationSiteMode allocation_mode);
+ Handle<Code> NewCloneShallowObject(int length);
Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
@@ -817,16 +896,13 @@ class Builtins {
static void Generate_InterpreterPushArgsAndConstructImpl(
MacroAssembler* masm, CallableType function_type);
- static void Generate_DatePrototype_GetField(CodeStubAssembler* masm,
- int field_index);
-
enum class MathMaxMinKind { kMax, kMin };
static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
#define DECLARE_ASM(Name, ...) \
static void Generate_##Name(MacroAssembler* masm);
#define DECLARE_TF(Name, ...) \
- static void Generate_##Name(CodeStubAssembler* csasm);
+ static void Generate_##Name(compiler::CodeAssemblerState* state);
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
DECLARE_ASM, DECLARE_ASM, DECLARE_ASM)
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 4287333d3f..2cf1708b12 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -135,8 +135,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(edi);
__ Push(edx);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(ebx, eax);
__ Pop(edx);
__ Pop(edi);
@@ -386,17 +386,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
// Load suspended function and context.
- __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
__ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
- __ j(greater_equal, &prepare_step_in_if_stepping);
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ cmpb(Operand::StaticVariable(debug_hook), Immediate(0));
+ __ j(not_equal, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -437,19 +436,20 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
- __ j(not_equal, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+ __ Assert(equal, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(eax);
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -457,56 +457,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushReturnAddressFrom(eax); // Return address.
- __ Push(ebp); // Caller's frame pointer.
- __ Move(ebp, esp);
- __ Push(esi); // Callee's context.
- __ Push(edi); // Callee's JS Function.
-
- // Restore the operand stack.
- __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
- {
- Label done_loop, loop;
- __ Move(ecx, Smi::kZero);
- __ bind(&loop);
- __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ j(equal, &done_loop, Label::kNear);
- __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ add(ecx, Immediate(Smi::FromInt(1)));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
- Immediate(masm->isolate()->factory()->empty_fixed_array()));
-
- // Resume the generator function at the continuation.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(ecx);
- __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
- __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ mov(eax, ebx); // Continuation expects generator object in eax.
- __ jmp(edx);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(ebx);
__ Push(edx);
__ Push(edi);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(edx);
__ Pop(ebx);
__ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
@@ -605,6 +562,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov_b(FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ Immediate(BytecodeArray::kNoAgeBytecodeAge));
+
// Push bytecode array.
__ push(kInterpreterBytecodeArrayRegister);
// Push Smi tagged initial bytecode array offset.
@@ -1092,12 +1054,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ j(not_equal, &loop_bottom);
- // OSR id set to none?
- __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
- __ j(not_equal, &loop_bottom);
// Literals available?
__ mov(temp, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1165,14 +1121,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ j(not_zero, &gotta_call_runtime_no_stack);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
- __ and_(ebx, Code::KindField::kMask);
- __ shr(ebx, Code::KindField::kShift);
- __ cmp(ebx, Immediate(Code::BUILTIN));
+ __ Move(ebx, masm->CodeObject());
+ __ cmp(entry, ebx);
__ j(equal, &gotta_call_runtime_no_stack);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, ebx);
@@ -1294,14 +1250,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ ret(0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1969,8 +1920,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(esi, edi, ecx);
__ Push(ebx); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ecx);
}
@@ -2132,8 +2083,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
__ Push(eax); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ebx);
__ SmiUntag(ebx);
@@ -2193,7 +2144,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(eax, &create_runtime);
// Load the map of argumentsList into ecx.
@@ -2237,6 +2189,22 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mov(eax, ecx);
__ jmp(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and our prototype is the Array.prototype actually.
+ __ bind(&create_holey_array);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &create_runtime);
+ __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
+ __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+ __ j(not_equal, &create_runtime);
+ __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+ __ jmp(&done_create);
+
// Try to create the list from a JSArray object.
__ bind(&create_array);
__ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
@@ -2244,10 +2212,12 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmp(ecx, Immediate(FAST_ELEMENTS));
- __ j(above, &create_runtime);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
__ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_runtime);
+ __ j(equal, &create_holey_array, Label::kNear);
+ __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ j(equal, &create_holey_array, Label::kNear);
+ __ j(above, &create_runtime);
__ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
__ SmiUntag(ebx);
__ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
@@ -2287,18 +2257,26 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
__ movd(xmm0, edx);
+ __ movd(xmm1, edi);
__ PopReturnAddressTo(edx);
__ Move(ecx, Immediate(0));
- Label done, loop;
+ Label done, push, loop;
__ bind(&loop);
__ cmp(ecx, ebx);
__ j(equal, &done, Label::kNear);
- __ Push(
- FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ // Turn the hole into undefined as we go.
+ __ mov(edi,
+ FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ __ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(edi);
__ inc(ecx);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(edx);
+ __ movd(edi, xmm1);
__ movd(edx, xmm0);
__ Move(eax, ebx);
}
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index b9c4a72dd0..78ca6c5a6f 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -139,7 +139,7 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ LoadRoot(t2, root_index);
__ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
- Label done_loop, loop;
+ Label done_loop, loop, done;
__ mov(a3, a0);
__ bind(&loop);
{
@@ -195,15 +195,25 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// accumulator value on the left hand side (f0) and the next parameter value
// on the right hand side (f2).
// We need to work out which HeapNumber (or smi) the result came from.
- Label compare_nan, set_value;
+ Label compare_nan, set_value, ool_min, ool_max;
__ BranchF(nullptr, &compare_nan, eq, f0, f2);
__ Move(t0, t1, f0);
if (kind == MathMaxMinKind::kMin) {
- __ MinNaNCheck_d(f0, f0, f2);
+ __ Float64Min(f0, f0, f2, &ool_min);
} else {
DCHECK(kind == MathMaxMinKind::kMax);
- __ MaxNaNCheck_d(f0, f0, f2);
+ __ Float64Max(f0, f0, f2, &ool_max);
}
+ __ jmp(&done);
+
+ __ bind(&ool_min);
+ __ Float64MinOutOfLine(f0, f0, f2);
+ __ jmp(&done);
+
+ __ bind(&ool_max);
+ __ Float64MaxOutOfLine(f0, f0, f2);
+
+ __ bind(&done);
__ Move(at, t8, f0);
__ Branch(&set_value, ne, t0, Operand(at));
__ Branch(&set_value, ne, t1, Operand(t8));
@@ -331,11 +341,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
@@ -482,11 +492,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
@@ -575,8 +585,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
if (create_implicit_receiver) {
// Allocate the new receiver object.
__ Push(a1, a3);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(t4, v0);
__ Pop(a1, a3);
@@ -854,18 +864,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
__ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ lw(cp, FieldMemOperand(t0, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ li(t1, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ li(t1, Operand(debug_hook));
__ lb(t1, MemOperand(t1));
- __ Branch(&prepare_step_in_if_stepping, ge, t1, Operand(StepIn));
+ __ Branch(&prepare_step_in_if_stepping, ne, t1, Operand(zero_reg));
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -905,14 +914,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
- __ GetObjectType(a3, a3, a3);
- __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(a3, a3, a3);
+ __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+ }
- // New-style (ignition/turbofan) generator object.
+ // Resume (Ignition/TurboFan) generator object.
{
__ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0,
@@ -927,54 +937,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Jump(a2);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(ra, fp);
- __ Move(fp, sp);
- __ Push(cp, t0);
-
- // Restore the operand stack.
- __ lw(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
- __ lw(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
- __ Addu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Lsa(a3, a0, a3, kPointerSizeLog2 - 1);
- {
- Label done_loop, loop;
- __ bind(&loop);
- __ Branch(&done_loop, eq, a0, Operand(a3));
- __ lw(t1, MemOperand(a0));
- __ Push(t1);
- __ Branch(USE_DELAY_SLOT, &loop);
- __ addiu(a0, a0, kPointerSize); // In delay slot.
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
- __ sw(t1, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-
- // Resume the generator function at the continuation.
- __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
- __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(a2);
- __ Addu(a3, a3, Operand(a2));
- __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ Move(v0, a1); // Continuation expects generator object in v0.
- __ Jump(a3);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a2, t0);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(a1, a2);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
@@ -1071,6 +1038,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
+ // Reset code age.
+ DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
+ __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset));
+
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1407,11 +1379,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kOffsetToPreviousContext));
__ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Branch(&loop_bottom, ne, temp, Operand(native_context));
- // OSR id set to none?
- __ lw(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
// Literals available?
__ lw(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1484,13 +1451,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ And(t1, t1,
Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ Branch(&gotta_call_runtime_no_stack, ne, t1, Operand(zero_reg));
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
- __ And(t1, t1, Operand(Code::KindField::kMask));
- __ srl(t1, t1, Code::KindField::kShift);
- __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
- // Yes, install the full code.
+ __ Move(t1, masm->CodeObject());
+ __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+
+ // Install the SFI's code entry.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, t1);
@@ -1605,14 +1572,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(a0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2173,7 +2135,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(a0, &create_runtime);
// Load the map of argumentsList into a2.
@@ -2189,8 +2152,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Branch(&create_arguments, eq, a2, Operand(at));
// Check if argumentsList is a fast JSArray.
- __ lw(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
// Ask the runtime to create the list (actually a FixedArray).
@@ -2216,15 +2178,32 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mov(a0, t0);
__ Branch(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and our prototype is the Array.prototype actually.
+ __ bind(&create_holey_array);
+ __ lw(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
+ __ lw(at, ContextMemOperand(t0, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ Branch(&create_runtime, ne, a2, Operand(at));
+ __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
+ __ lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
+ __ Branch(&create_runtime, ne, a2,
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
+ __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
+ __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ SmiUntag(a2);
+ __ Branch(&done_create);
+
// Try to create the list from a JSArray object.
__ bind(&create_array);
- __ lw(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(a2);
+ __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(t1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
- __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
+ __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
__ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
__ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
__ SmiUntag(a2);
@@ -2259,11 +2238,15 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
__ mov(t0, zero_reg);
- Label done, loop;
+ Label done, push, loop;
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ bind(&loop);
__ Branch(&done, eq, t0, Operand(a2));
__ Lsa(at, a0, t0, kPointerSizeLog2);
__ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
+ __ Branch(&push, ne, t1, Operand(at));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
__ Push(at);
__ Addu(t0, t0, Operand(1));
__ Branch(&loop);
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index a6abb55c46..9541d8d5d0 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -139,7 +139,7 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ LoadRoot(t1, root_index);
__ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
- Label done_loop, loop;
+ Label done_loop, loop, done;
__ mov(a3, a0);
__ bind(&loop);
{
@@ -195,15 +195,25 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// accumulator value on the left hand side (f0) and the next parameter value
// on the right hand side (f2).
// We need to work out which HeapNumber (or smi) the result came from.
- Label compare_nan;
+ Label compare_nan, ool_min, ool_max;
__ BranchF(nullptr, &compare_nan, eq, f0, f2);
__ Move(a4, f0);
if (kind == MathMaxMinKind::kMin) {
- __ MinNaNCheck_d(f0, f0, f2);
+ __ Float64Min(f0, f0, f2, &ool_min);
} else {
DCHECK(kind == MathMaxMinKind::kMax);
- __ MaxNaNCheck_d(f0, f0, f2);
+ __ Float64Max(f0, f0, f2, &ool_max);
}
+ __ jmp(&done);
+
+ __ bind(&ool_min);
+ __ Float64MinOutOfLine(f0, f0, f2);
+ __ jmp(&done);
+
+ __ bind(&ool_max);
+ __ Float64MaxOutOfLine(f0, f0, f2);
+
+ __ bind(&done);
__ Move(at, f0);
__ Branch(&loop, eq, a4, Operand(at));
__ mov(t1, a2);
@@ -329,11 +339,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0);
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
@@ -481,11 +491,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0);
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
@@ -572,8 +582,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
if (create_implicit_receiver) {
__ Push(a1, a3);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(t0, v0);
__ Pop(a1, a3);
@@ -730,18 +740,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
__ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ li(a5, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ li(a5, Operand(debug_hook));
__ lb(a5, MemOperand(a5));
- __ Branch(&prepare_step_in_if_stepping, ge, a5, Operand(StepIn));
+ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -781,14 +790,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
- __ GetObjectType(a3, a3, a3);
- __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(a3, a3, a3);
+ __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+ }
- // New-style (ignition/turbofan) generator object.
+ // Resume (Ignition/TurboFan) generator object.
{
__ ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0,
@@ -802,55 +812,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Jump(a2);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(ra, fp);
- __ Move(fp, sp);
- __ Push(cp, a4);
-
- // Restore the operand stack.
- __ ld(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
- __ ld(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
- __ SmiUntag(a3);
- __ Daddu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Dlsa(a3, a0, a3, kPointerSizeLog2);
- {
- Label done_loop, loop;
- __ bind(&loop);
- __ Branch(&done_loop, eq, a0, Operand(a3));
- __ ld(a5, MemOperand(a0));
- __ Push(a5);
- __ Branch(USE_DELAY_SLOT, &loop);
- __ daddiu(a0, a0, kPointerSize); // In delay slot.
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a5, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-
- // Resume the generator function at the continuation.
- __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
- __ Daddu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(a2);
- __ Daddu(a3, a3, Operand(a2));
- __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ Move(v0, a1); // Continuation expects generator object in v0.
- __ Jump(a3);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a2, a4);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(a1, a2);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
@@ -1063,6 +1029,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
+ // Reset code age.
+ DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
+ __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset));
+
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1318,9 +1289,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ ld(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ __ lw(
+ kInterpreterBytecodeOffsetRegister,
+ UntagSmiMemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
@@ -1399,11 +1370,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kOffsetToPreviousContext));
__ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Branch(&loop_bottom, ne, temp, Operand(native_context));
- // OSR id set to none?
- __ ld(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
// Literals available?
__ ld(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1476,13 +1442,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ And(a5, a5,
Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ Branch(&gotta_call_runtime_no_stack, ne, a5, Operand(zero_reg));
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ lw(a5, FieldMemOperand(entry, Code::kFlagsOffset));
- __ And(a5, a5, Operand(Code::KindField::kMask));
- __ dsrl(a5, a5, Code::KindField::kShift);
- __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
- // Yes, install the full code.
+ __ Move(t1, masm->CodeObject());
+ __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+
+ // Install the SFI's code entry.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, a5);
@@ -1596,14 +1562,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(a0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1686,8 +1647,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it -> a6.
- __ ld(a6, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(a6);
+ __ lw(a6, UntagSmiMemOperand(sp, 0 * kPointerSize));
// Switch on the state.
Label with_tos_register, unknown_state;
__ Branch(
@@ -1855,10 +1815,10 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
- __ SmiUntag(a1);
+ __ lw(a1,
+ UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1886,52 +1846,56 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : receiver
// -----------------------------------
+ Register argc = a0;
+ Register arg_array = a0;
+ Register receiver = a1;
+ Register this_arg = a2;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
// 1. Load receiver into a1, argArray into a0 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
- Label no_arg;
- Register scratch = a4;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ mov(a3, a2);
- // Dlsa() cannot be used hare as scratch value used later.
- __ dsll(scratch, a0, kPointerSizeLog2);
- __ Daddu(a0, sp, Operand(scratch));
- __ ld(a1, MemOperand(a0)); // receiver
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a2, MemOperand(a0)); // thisArg
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a3, MemOperand(a0)); // argArray
- __ bind(&no_arg);
- __ Daddu(sp, sp, Operand(scratch));
- __ sd(a2, MemOperand(sp));
- __ mov(a0, a3);
+ // Claim (2 - argc) dummy arguments form the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ Dsubu(sp, sp, Operand(2 * kPointerSize));
+ __ Dlsa(sp, sp, argc, kPointerSizeLog2);
+ __ mov(scratch, argc);
+ __ Pop(this_arg, arg_array); // Overwrite argc
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 0
+ __ Movz(this_arg, undefined_value, scratch); // if argc == 0
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 1
+ __ ld(receiver, MemOperand(sp));
+ __ sd(this_arg, MemOperand(sp));
}
// ----------- S t a t e -------------
// -- a0 : argArray
// -- a1 : receiver
+ // -- a3 : undefined root value
// -- sp[0] : thisArg
// -----------------------------------
// 2. Make sure the receiver is actually callable.
Label receiver_not_callable;
- __ JumpIfSmi(a1, &receiver_not_callable);
- __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ JumpIfSmi(receiver, &receiver_not_callable);
+ __ ld(a4, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsCallable));
__ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(arg_array, Heap::kNullValueRootIndex, &no_arguments);
+ __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
// 4a. Apply the receiver to the given argArray (passing undefined for
// new.target).
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ DCHECK(undefined_value.is(a3));
__ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
@@ -1939,13 +1903,14 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arguments);
{
__ mov(a0, zero_reg);
+ DCHECK(receiver.is(a1));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
// 4c. The receiver is not callable, throw an appropriate TypeError.
__ bind(&receiver_not_callable);
{
- __ sd(a1, MemOperand(sp));
+ __ sd(receiver, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
}
@@ -1995,62 +1960,67 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
+ // -- sp[0] : argumentsList (if argc ==3)
+ // -- sp[4] : thisArgument (if argc >=2)
+ // -- sp[8] : target (if argc >=1)
// -- sp[12] : receiver
// -----------------------------------
+ Register argc = a0;
+ Register arguments_list = a0;
+ Register target = a1;
+ Register this_argument = a2;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
// 1. Load target into a1 (if present), argumentsList into a0 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
- Label no_arg;
- Register scratch = a4;
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ mov(a2, a1);
- __ mov(a3, a1);
- __ dsll(scratch, a0, kPointerSizeLog2);
- __ mov(a0, scratch);
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(zero_reg));
- __ Daddu(a0, sp, Operand(a0));
- __ ld(a1, MemOperand(a0)); // target
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a2, MemOperand(a0)); // thisArgument
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a3, MemOperand(a0)); // argumentsList
- __ bind(&no_arg);
- __ Daddu(sp, sp, Operand(scratch));
- __ sd(a2, MemOperand(sp));
- __ mov(a0, a3);
+ // Claim (3 - argc) dummy arguments form the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ Dsubu(sp, sp, Operand(3 * kPointerSize));
+ __ Dlsa(sp, sp, argc, kPointerSizeLog2);
+ __ mov(scratch, argc);
+ __ Pop(target, this_argument, arguments_list);
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 1
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
+
+ __ sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
}
// ----------- S t a t e -------------
// -- a0 : argumentsList
// -- a1 : target
+ // -- a3 : undefined root value
// -- sp[0] : thisArgument
// -----------------------------------
// 2. Make sure the target is actually callable.
Label target_not_callable;
- __ JumpIfSmi(a1, &target_not_callable);
- __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ JumpIfSmi(target, &target_not_callable);
+ __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
__ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsCallable));
__ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
// 3a. Apply the target to the given argumentsList (passing undefined for
// new.target).
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ DCHECK(undefined_value.is(a3));
__ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
// 3b. The target is not callable, throw an appropriate TypeError.
__ bind(&target_not_callable);
{
- __ sd(a1, MemOperand(sp));
+ __ sd(target, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
}
@@ -2058,59 +2028,61 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
+ // -- sp[0] : new.target (optional) (dummy value if argc <= 2)
+ // -- sp[4] : argumentsList (dummy value if argc <= 1)
+ // -- sp[8] : target (dummy value if argc == 0)
// -- sp[12] : receiver
// -----------------------------------
+ Register argc = a0;
+ Register arguments_list = a0;
+ Register target = a1;
+ Register new_target = a3;
+ Register undefined_value = a4;
+ Register scratch = a5;
// 1. Load target into a1 (if present), argumentsList into a0 (if present),
// new.target into a3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
- Label no_arg;
- Register scratch = a4;
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ mov(a2, a1);
- // Dlsa() cannot be used hare as scratch value used later.
- __ dsll(scratch, a0, kPointerSizeLog2);
- __ Daddu(a0, sp, Operand(scratch));
- __ sd(a2, MemOperand(a0)); // receiver
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a1, MemOperand(a0)); // target
- __ mov(a3, a1); // new.target defaults to target
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a2, MemOperand(a0)); // argumentsList
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a3, MemOperand(a0)); // new.target
- __ bind(&no_arg);
- __ Daddu(sp, sp, Operand(scratch));
- __ mov(a0, a2);
+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ Dsubu(sp, sp, Operand(3 * kPointerSize));
+ __ Dlsa(sp, sp, argc, kPointerSizeLog2);
+ __ mov(scratch, argc);
+ __ Pop(target, arguments_list, new_target);
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(new_target, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(new_target, target, scratch); // if argc == 1
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(new_target, target, scratch); // if argc == 2
+
+ __ sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
}
// ----------- S t a t e -------------
// -- a0 : argumentsList
- // -- a3 : new.target
// -- a1 : target
+ // -- a3 : new.target
// -- sp[0] : receiver (undefined)
// -----------------------------------
// 2. Make sure the target is actually a constructor.
Label target_not_constructor;
- __ JumpIfSmi(a1, &target_not_constructor);
- __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ JumpIfSmi(target, &target_not_constructor);
+ __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
__ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsConstructor));
__ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
// 3. Make sure the target is actually a constructor.
Label new_target_not_constructor;
- __ JumpIfSmi(a3, &new_target_not_constructor);
- __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ JumpIfSmi(new_target, &new_target_not_constructor);
+ __ ld(a4, FieldMemOperand(new_target, HeapObject::kMapOffset));
__ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsConstructor));
__ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
@@ -2121,14 +2093,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// 4b. The target is not a constructor, throw an appropriate TypeError.
__ bind(&target_not_constructor);
{
- __ sd(a1, MemOperand(sp));
+ __ sd(target, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
- __ sd(a3, MemOperand(sp));
+ __ sd(new_target, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -2167,63 +2139,90 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// -- sp[0] : thisArgument
// -----------------------------------
+ Register arguments_list = a0;
+ Register target = a1;
+ Register new_target = a3;
+
+ Register args = a0;
+ Register len = a2;
+
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
- __ JumpIfSmi(a0, &create_runtime);
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
+ __ JumpIfSmi(arguments_list, &create_runtime);
// Load the map of argumentsList into a2.
- __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ Register arguments_list_map = a2;
+ __ ld(arguments_list_map,
+ FieldMemOperand(arguments_list, HeapObject::kMapOffset));
// Load native context into a4.
- __ ld(a4, NativeContextMemOperand());
+ Register native_context = a4;
+ __ ld(native_context, NativeContextMemOperand());
// Check if argumentsList is an (unmodified) arguments object.
- __ ld(at, ContextMemOperand(a4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, a2, Operand(at));
- __ ld(at, ContextMemOperand(a4, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, a2, Operand(at));
+ __ ld(at, ContextMemOperand(native_context,
+ Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
+ __ ld(at, ContextMemOperand(native_context,
+ Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
// Check if argumentsList is a fast JSArray.
- __ ld(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
// Ask the runtime to create the list (actually a FixedArray).
__ bind(&create_runtime);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a3, a0);
+ __ Push(target, new_target, arguments_list);
__ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ mov(a0, v0);
- __ Pop(a1, a3);
- __ ld(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ SmiUntag(a2);
+ __ mov(arguments_list, v0);
+ __ Pop(target, new_target);
+ __ lw(len, UntagSmiFieldMemOperand(v0, FixedArray::kLengthOffset));
}
__ Branch(&done_create);
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ ld(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
- __ ld(a4, FieldMemOperand(a0, JSObject::kElementsOffset));
- __ ld(at, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ lw(len, UntagSmiFieldMemOperand(arguments_list,
+ JSArgumentsObject::kLengthOffset));
+ __ ld(a4, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
+ __ lw(at, UntagSmiFieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ Branch(&create_runtime, ne, len, Operand(at));
+ __ mov(args, a4);
+
+ __ Branch(&done_create);
+
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and our prototype is the Array.prototype actually.
+ __ bind(&create_holey_array);
+ __ ld(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
+ __ ld(at, ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ Branch(&create_runtime, ne, a2, Operand(at));
- __ SmiUntag(a2);
- __ mov(a0, a4);
+ __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
+ __ lw(a2, UntagSmiFieldMemOperand(at, PropertyCell::kValueOffset));
+ __ Branch(&create_runtime, ne, a2,
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
+ __ lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
+ __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
__ Branch(&done_create);
// Try to create the list from a JSArray object.
__ bind(&create_array);
- __ ld(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(a2);
+ __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(t1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
- __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ ld(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
- __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
- __ SmiUntag(a2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
+ __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
+ __ lw(a2, UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+ __ ld(a0, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
__ bind(&done_create);
}
@@ -2238,7 +2237,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// here which will cause ip to become negative.
__ Dsubu(a4, sp, a4);
// Check if the arguments will overflow the stack.
- __ dsll(at, a2, kPointerSizeLog2);
+ __ dsll(at, len, kPointerSizeLog2);
__ Branch(&done, gt, a4, Operand(at)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
@@ -2254,19 +2253,38 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ mov(a4, zero_reg);
- Label done, loop;
+ Label done, push, loop;
+ Register src = a4;
+ Register scratch = len;
+
+ __ daddiu(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Branch(&done, eq, len, Operand(zero_reg), i::USE_DELAY_SLOT);
+ __ mov(a0, len); // The 'len' argument for Call() or Construct().
+ __ dsll(scratch, len, kPointerSizeLog2);
+ __ Dsubu(scratch, sp, Operand(scratch));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ bind(&loop);
- __ Branch(&done, eq, a4, Operand(a2));
- __ Dlsa(at, a0, a4, kPointerSizeLog2);
- __ ld(at, FieldMemOperand(at, FixedArray::kHeaderSize));
- __ Push(at);
- __ Daddu(a4, a4, Operand(1));
- __ Branch(&loop);
+ __ ld(a5, MemOperand(src));
+ __ Branch(&push, ne, a5, Operand(t1));
+ __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ daddiu(src, src, kPointerSize);
+ __ Push(a5);
+ __ Branch(&loop, ne, scratch, Operand(sp));
__ bind(&done);
- __ Move(a0, a4);
}
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (len)
+ // -- a1 : target
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : args[len-1]
+ // -- sp[8] : args[len-2]
+ // ... : ...
+ // -- sp[8*(len-2)] : args[1]
+ // -- sp[8*(len-1)] : args[0]
+ // ----------------------------------
+
// Dispatch to Call or Construct depending on whether new.target is undefined.
{
Label construct;
@@ -2344,9 +2362,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ ld(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
+ __ lw(caller_args_count_reg,
+ UntagSmiMemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ Branch(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
@@ -2503,8 +2520,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
// Load [[BoundArguments]] into a2 and length of that into a4.
__ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(a4);
+ __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2551,8 +2567,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
- __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(a4);
+ __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
@@ -2665,8 +2680,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Load [[BoundArguments]] into a2 and length of that into a4.
__ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(a4);
+ __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2714,8 +2728,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
- __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(a4);
+ __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index be1e67cc30..ca6cefcddf 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -338,8 +338,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r9);
__ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r5);
__ LeaveBuiltinFrame(cp, r4, r9);
__ SmiUntag(r9);
@@ -490,8 +490,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r9);
__ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r5);
__ LeaveBuiltinFrame(cp, r4, r9);
__ SmiUntag(r9);
@@ -587,8 +587,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(r4, r6);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mr(r7, r3);
__ Pop(r4, r6);
@@ -749,20 +749,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kResumeModeOffset), r0);
// Load suspended function and context.
- __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+ __ LoadP(cp, FieldMemOperand(r7, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ mov(ip, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ mov(ip, Operand(debug_hook));
__ LoadByte(ip, MemOperand(ip), r0);
__ extsb(ip, ip);
- __ cmpi(ip, Operand(StepIn));
- __ bge(&prepare_step_in_if_stepping);
+ __ CmpSmiLiteral(ip, Smi::kZero, r0);
+ __ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
@@ -812,13 +811,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
- __ bne(&old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
@@ -829,62 +829,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ JumpToJSEntry(ip);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r7);
-
- // Restore the operand stack.
- __ LoadP(r3, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
- __ LoadP(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ addi(r3, r3,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- {
- Label loop, done_loop;
- __ SmiUntag(r6, SetRC);
- __ beq(&done_loop, cr0);
- __ mtctr(r6);
- __ bind(&loop);
- __ LoadPU(ip, MemOperand(r3, kPointerSize));
- __ Push(ip);
- __ bdnz(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(ip, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset),
- r0);
-
- // Resume the generator function at the continuation.
- __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
- __ addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r6);
- }
- __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r5);
- __ add(r6, r6, r5);
- __ LoadSmiLiteral(r5,
- Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
- r0);
- __ mr(r3, r4); // Continuation expects generator object in r3.
- __ Jump(r6);
- }
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4, r5, r7);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r4, r5);
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
}
@@ -1099,12 +1048,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov(r8, Operand(BytecodeArray::kNoAgeBytecodeAge));
+ __ StoreByte(r8, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ r0);
+
// Load initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1347,7 +1302,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
@@ -1434,13 +1389,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ bne(&loop_bottom);
- // OSR id set to none?
- __ LoadP(temp,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
- __ bne(&loop_bottom);
// Literals available?
__ LoadP(temp,
FieldMemOperand(array_pointer,
@@ -1507,13 +1455,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ TestBit(r8, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
__ bne(&gotta_call_runtime, cr0);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ lwz(r8, FieldMemOperand(entry, Code::kFlagsOffset));
- __ DecodeField<Code::KindField>(r8);
- __ cmpi(r8, Operand(Code::BUILTIN));
+ __ mov(r8, Operand(masm->CodeObject()));
+ __ cmp(entry, r8);
__ beq(&gotta_call_runtime);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
__ RecordWriteCodeEntryField(closure, entry, r8);
@@ -1627,14 +1576,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(ip);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2219,7 +2163,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(r3, &create_runtime);
// Load the map of argumentsList into r5.
@@ -2263,17 +2208,37 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mr(r3, r7);
__ b(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and our prototype is the Array.prototype actually.
+ __ bind(&create_holey_array);
+ __ LoadP(r5, FieldMemOperand(r5, Map::kPrototypeOffset));
+ __ LoadP(r7, ContextMemOperand(r7, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ cmp(r5, r7);
+ __ bne(&create_runtime);
+ __ LoadRoot(r7, Heap::kArrayProtectorRootIndex);
+ __ LoadP(r5, FieldMemOperand(r7, PropertyCell::kValueOffset));
+ __ CmpSmiLiteral(r5, Smi::FromInt(Isolate::kProtectorValid), r0);
+ __ bne(&create_runtime);
+ __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ SmiUntag(r5);
+ __ b(&done_create);
+
// Try to create the list from a JSArray object.
+ // -- r5 and r7 must be preserved till bne create_holey_array.
__ bind(&create_array);
- __ lbz(r5, FieldMemOperand(r5, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r5);
+ __ lbz(r8, FieldMemOperand(r5, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r8);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmpi(r5, Operand(FAST_ELEMENTS));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ cmpi(r8, Operand(FAST_HOLEY_ELEMENTS));
__ bgt(&create_runtime);
- __ cmpi(r5, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ beq(&create_runtime);
+ // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
+ __ TestBit(r8, Map::kHasNonInstancePrototype, r0);
+ __ bne(&create_holey_array, cr0);
+ // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
__ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
__ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
__ SmiUntag(r5);
@@ -2308,15 +2273,20 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
- Label loop, no_args;
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ Label loop, no_args, skip;
__ cmpi(r5, Operand::Zero());
__ beq(&no_args);
__ addi(r3, r3,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ mtctr(r5);
__ bind(&loop);
- __ LoadPU(r0, MemOperand(r3, kPointerSize));
- __ push(r0);
+ __ LoadPU(ip, MemOperand(r3, kPointerSize));
+ __ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ bne(&skip);
+ __ mr(ip, r9);
+ __ bind(&skip);
+ __ push(ip);
__ bdnz(&loop);
__ bind(&no_args);
__ mr(r3, r5);
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 8655ab8d79..2b7c4a5b10 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -334,11 +334,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
__ SmiUntag(r8);
@@ -484,11 +484,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
__ SmiUntag(r8);
@@ -584,8 +584,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(r3, r5);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ LoadRR(r6, r2);
__ Pop(r3, r5);
@@ -748,19 +748,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
__ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadP(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ mov(ip, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ mov(ip, Operand(debug_hook));
__ LoadB(ip, MemOperand(ip));
- __ CmpP(ip, Operand(StepIn));
- __ bge(&prepare_step_in_if_stepping);
+ __ CmpSmiLiteral(ip, Smi::kZero, r0);
+ __ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
@@ -811,13 +810,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
- __ bne(&old_generator, Label::kNear);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
@@ -827,61 +827,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
__ JumpToJSEntry(ip);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r6);
-
- // Restore the operand stack.
- __ LoadP(r2, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
- __ LoadP(r5, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ AddP(r2, r2,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- {
- Label loop, done_loop;
- __ SmiUntag(r5);
- __ LoadAndTestP(r5, r5);
- __ beq(&done_loop);
- __ LoadRR(r1, r5);
- __ bind(&loop);
- __ LoadP(ip, MemOperand(r2, kPointerSize));
- __ la(r2, MemOperand(r2, kPointerSize));
- __ Push(ip);
- __ BranchOnCount(r1, &loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(ip, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset),
- r0);
-
- // Resume the generator function at the continuation.
- __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
- __ AddP(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r4);
- __ AddP(r5, r5, r4);
- __ LoadSmiLiteral(r4,
- Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
- r0);
- __ LoadRR(r2, r3); // Continuation expects generator object in r2.
- __ Jump(r5);
- }
- }
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r3, r4, r6);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r3, r4);
__ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
@@ -1106,6 +1057,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov(r1, Operand(BytecodeArray::kNoAgeBytecodeAge));
+ __ StoreByte(r1, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ r0);
+
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1437,13 +1394,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ CmpP(temp, native_context);
__ bne(&loop_bottom, Label::kNear);
- // OSR id set to none?
- __ LoadP(temp,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
- __ bne(&loop_bottom, Label::kNear);
// Literals available?
__ LoadP(temp,
FieldMemOperand(array_pointer,
@@ -1510,13 +1460,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
__ bne(&gotta_call_runtime);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ LoadlW(r7, FieldMemOperand(entry, Code::kFlagsOffset));
- __ DecodeField<Code::KindField>(r7);
- __ CmpP(r7, Operand(Code::BUILTIN));
+ __ mov(r7, Operand(masm->CodeObject()));
+ __ CmpP(entry, r7);
__ beq(&gotta_call_runtime);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
__ RecordWriteCodeEntryField(closure, entry, r7);
@@ -1632,14 +1583,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(ip);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2228,7 +2174,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(r2, &create_runtime);
// Load the map of argumentsList into r4.
@@ -2272,17 +2219,37 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ LoadRR(r2, r6);
__ b(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and our prototype is the Array.prototype actually.
+ __ bind(&create_holey_array);
+ __ LoadP(r4, FieldMemOperand(r4, Map::kPrototypeOffset));
+ __ LoadP(r6, ContextMemOperand(r6, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ CmpP(r4, r6);
+ __ bne(&create_runtime);
+ __ LoadRoot(r6, Heap::kArrayProtectorRootIndex);
+ __ LoadP(r4, FieldMemOperand(r6, PropertyCell::kValueOffset));
+ __ CmpSmiLiteral(r4, Smi::FromInt(Isolate::kProtectorValid), r0);
+ __ bne(&create_runtime);
+ __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
+ __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
+ __ SmiUntag(r4);
+ __ b(&done_create);
+
// Try to create the list from a JSArray object.
+ // -- r4 and r6 must be preserved till bne create_holey_array.
__ bind(&create_array);
- __ LoadlB(r4, FieldMemOperand(r4, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r4);
+ __ LoadlB(r7, FieldMemOperand(r4, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r7);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ CmpP(r4, Operand(FAST_ELEMENTS));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ CmpP(r7, Operand(FAST_HOLEY_ELEMENTS));
__ bgt(&create_runtime);
- __ CmpP(r4, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ beq(&create_runtime);
+ // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
+ __ TestBit(r7, Map::kHasNonInstancePrototype, r0);
+ __ bne(&create_holey_array);
+ // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
__ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
__ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
__ SmiUntag(r4);
@@ -2317,16 +2284,21 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
- Label loop, no_args;
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ Label loop, no_args, skip;
__ CmpP(r4, Operand::Zero());
__ beq(&no_args);
__ AddP(r2, r2,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ LoadRR(r1, r4);
__ bind(&loop);
- __ LoadP(r0, MemOperand(r2, kPointerSize));
+ __ LoadP(ip, MemOperand(r2, kPointerSize));
__ la(r2, MemOperand(r2, kPointerSize));
- __ push(r0);
+ __ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ bne(&skip, Label::kNear);
+ __ LoadRR(ip, r8);
+ __ bind(&skip);
+ __ push(ip);
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
__ LoadRR(r2, r4);
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index cde02647ac..1404a9b4c9 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -137,8 +137,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(rdi);
__ Push(rdx);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ movp(rbx, rax);
__ Pop(rdx);
__ Pop(rdi);
@@ -151,9 +151,7 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Retrieve smi-tagged arguments count from the stack.
__ SmiToInteger32(rax, Operand(rsp, 0 * kPointerSize));
- }
- if (create_implicit_receiver) {
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
@@ -460,18 +458,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
// Load suspended function and context.
- __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
__ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- Operand last_step_action_operand = masm->ExternalOperand(last_step_action);
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ Operand debug_hook_operand = masm->ExternalOperand(debug_hook);
STATIC_ASSERT(StepFrame > StepIn);
- __ cmpb(last_step_action_operand, Immediate(StepIn));
- __ j(greater_equal, &prepare_step_in_if_stepping);
+ __ cmpb(debug_hook_operand, Immediate(0));
+ __ j(not_equal, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -514,14 +512,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
- __ j(not_equal, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
+ __ Assert(equal, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object.
+ // Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(rax);
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -534,60 +533,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
- // Old-style (full-codegen) generator object.
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushReturnAddressFrom(rax); // Return address.
- __ Push(rbp); // Caller's frame pointer.
- __ Move(rbp, rsp);
- __ Push(rsi); // Callee's context.
- __ Push(rdi); // Callee's JS Function.
-
- // Restore the operand stack.
- __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
- __ SmiToInteger32(rax, FieldOperand(rsi, FixedArray::kLengthOffset));
- {
- Label done_loop, loop;
- __ Set(rcx, 0);
- __ bind(&loop);
- __ cmpl(rcx, rax);
- __ j(equal, &done_loop, Label::kNear);
- __ Push(
- FieldOperand(rsi, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ addl(rcx, Immediate(1));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset),
- Heap::kEmptyFixedArrayRootIndex);
-
- // Restore context.
- __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
-
- // Resume the generator function at the continuation.
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ SmiToInteger64(
- rcx, FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
- __ leap(rdx, FieldOperand(rdx, rcx, times_1, Code::kHeaderSize));
- __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
- Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ movp(rax, rbx); // Continuation expects generator object in rax.
- __ jmp(rdx);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rbx);
__ Push(rdx);
__ Push(rdi);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
__ Pop(rbx);
__ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
@@ -689,6 +641,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ movb(FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ Immediate(BytecodeArray::kNoAgeBytecodeAge));
+
// Load initial bytecode offset.
__ movp(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1060,13 +1017,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmpp(temp, native_context);
__ j(not_equal, &loop_bottom);
- // OSR id set to none?
- __ movp(temp, FieldOperand(map, index, times_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- __ SmiToInteger32(temp, temp);
- const int bailout_id = BailoutId::None().ToInt();
- __ cmpl(temp, Immediate(bailout_id));
- __ j(not_equal, &loop_bottom);
// Literals available?
__ movp(temp, FieldOperand(map, index, times_pointer_size,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1126,14 +1076,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ j(not_zero, &gotta_call_runtime);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
- __ andl(rbx, Immediate(Code::KindField::kMask));
- __ shrl(rbx, Immediate(Code::KindField::kShift));
- __ cmpl(rbx, Immediate(Code::BUILTIN));
+ __ Move(rbx, masm->CodeObject());
+ __ cmpp(entry, rbx);
__ j(equal, &gotta_call_runtime);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, r15);
@@ -1166,7 +1116,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve argument count for later compare.
- __ movp(kScratchRegister, rax);
+ __ movp(rcx, rax);
// Push the number of arguments to the callee.
__ Integer32ToSmi(rax, rax);
__ Push(rax);
@@ -1181,7 +1131,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
for (int j = 0; j < 4; ++j) {
Label over;
if (j < 3) {
- __ cmpp(kScratchRegister, Immediate(j));
+ __ cmpp(rcx, Immediate(j));
__ j(not_equal, &over, Label::kNear);
}
for (int i = j - 1; i >= 0; --i) {
@@ -1204,13 +1154,13 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(rax, &failed, Label::kNear);
__ Drop(2);
- __ Pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ __ Pop(rcx);
+ __ SmiToInteger32(rcx, rcx);
scope.GenerateLeaveFrame();
__ PopReturnAddressTo(rbx);
- __ incp(kScratchRegister);
- __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+ __ incp(rcx);
+ __ leap(rsp, Operand(rsp, rcx, times_pointer_size, 0));
__ PushReturnAddressFrom(rbx);
__ ret(0);
@@ -1248,14 +1198,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ ret(0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1931,8 +1876,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(rsi, rdi, r8);
}
@@ -2086,8 +2031,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(rsi, rdi, r8);
}
@@ -2292,7 +2237,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(rax, &create_runtime);
// Load the map of argumentsList into rcx.
@@ -2335,6 +2281,21 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ movp(rax, rcx);
__ jmp(&done_create);
+ __ bind(&create_holey_array);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and our prototype is the Array.prototype actually.
+ __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ cmpp(rcx, ContextOperand(rbx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &create_runtime);
+ __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
+ __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
+ Smi::FromInt(Isolate::kProtectorValid));
+ __ j(not_equal, &create_runtime);
+ __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
+ __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
+ __ jmp(&done_create);
+
// Try to create the list from a JSArray object.
__ bind(&create_array);
__ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
@@ -2342,10 +2303,12 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmpl(rcx, Immediate(FAST_ELEMENTS));
- __ j(above, &create_runtime);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
__ cmpl(rcx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_runtime);
+ __ j(equal, &create_holey_array);
+ __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ j(equal, &create_holey_array);
+ __ j(above, &create_runtime);
__ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
__ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
@@ -2383,12 +2346,18 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
{
__ PopReturnAddressTo(r8);
__ Set(rcx, 0);
- Label done, loop;
+ Label done, push, loop;
__ bind(&loop);
__ cmpl(rcx, rbx);
__ j(equal, &done, Label::kNear);
- __ Push(
- FieldOperand(rax, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ // Turn the hole into undefined as we go.
+ __ movp(r9, FieldOperand(rax, rcx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(r9);
__ incl(rcx);
__ jmp(&loop);
__ bind(&done);
diff --git a/deps/v8/src/builtins/x87/OWNERS b/deps/v8/src/builtins/x87/OWNERS
index dd9998b261..61245ae8e2 100644
--- a/deps/v8/src/builtins/x87/OWNERS
+++ b/deps/v8/src/builtins/x87/OWNERS
@@ -1 +1,2 @@
weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/deps/v8/src/builtins/x87/builtins-x87.cc b/deps/v8/src/builtins/x87/builtins-x87.cc
index 2187f86f61..9071beb59d 100644
--- a/deps/v8/src/builtins/x87/builtins-x87.cc
+++ b/deps/v8/src/builtins/x87/builtins-x87.cc
@@ -135,8 +135,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(edi);
__ Push(edx);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(ebx, eax);
__ Pop(edx);
__ Pop(edi);
@@ -387,17 +387,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
// Load suspended function and context.
- __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
__ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
- __ j(greater_equal, &prepare_step_in_if_stepping);
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ cmpb(Operand::StaticVariable(debug_hook), Immediate(0));
+ __ j(not_equal, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -438,19 +437,20 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
- __ j(not_equal, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+ __ Assert(equal, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(eax);
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -458,56 +458,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushReturnAddressFrom(eax); // Return address.
- __ Push(ebp); // Caller's frame pointer.
- __ Move(ebp, esp);
- __ Push(esi); // Callee's context.
- __ Push(edi); // Callee's JS Function.
-
- // Restore the operand stack.
- __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
- {
- Label done_loop, loop;
- __ Move(ecx, Smi::kZero);
- __ bind(&loop);
- __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ j(equal, &done_loop, Label::kNear);
- __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ add(ecx, Immediate(Smi::FromInt(1)));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
- Immediate(masm->isolate()->factory()->empty_fixed_array()));
-
- // Resume the generator function at the continuation.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(ecx);
- __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
- __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ mov(eax, ebx); // Continuation expects generator object in eax.
- __ jmp(edx);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(ebx);
__ Push(edx);
__ Push(edi);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(edx);
__ Pop(ebx);
__ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
@@ -606,6 +563,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov_b(FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ Immediate(BytecodeArray::kNoAgeBytecodeAge));
+
// Push bytecode array.
__ push(kInterpreterBytecodeArrayRegister);
// Push Smi tagged initial bytecode array offset.
@@ -1093,12 +1055,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ j(not_equal, &loop_bottom);
- // OSR id set to none?
- __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
- __ j(not_equal, &loop_bottom);
// Literals available?
__ mov(temp, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1166,14 +1122,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ j(not_zero, &gotta_call_runtime_no_stack);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
- __ and_(ebx, Code::KindField::kMask);
- __ shr(ebx, Code::KindField::kShift);
- __ cmp(ebx, Immediate(Code::BUILTIN));
+ __ Move(ebx, masm->CodeObject());
+ __ cmp(entry, ebx);
__ j(equal, &gotta_call_runtime_no_stack);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, ebx);
@@ -1295,14 +1251,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ ret(0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1986,8 +1937,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(esi, edi, ecx);
__ Push(ebx); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ecx);
}
@@ -2149,8 +2100,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
__ Push(eax); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ebx);
__ SmiUntag(ebx);
@@ -2210,7 +2161,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(eax, &create_runtime);
// Load the map of argumentsList into ecx.
@@ -2254,6 +2206,22 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mov(eax, ecx);
__ jmp(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and our prototype is the Array.prototype actually.
+ __ bind(&create_holey_array);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &create_runtime);
+ __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
+ __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+ __ j(not_equal, &create_runtime);
+ __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+ __ jmp(&done_create);
+
// Try to create the list from a JSArray object.
__ bind(&create_array);
__ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
@@ -2261,10 +2229,12 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmp(ecx, Immediate(FAST_ELEMENTS));
- __ j(above, &create_runtime);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
__ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_runtime);
+ __ j(equal, &create_holey_array, Label::kNear);
+ __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ j(equal, &create_holey_array, Label::kNear);
+ __ j(above, &create_runtime);
__ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
__ SmiUntag(ebx);
__ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
@@ -2303,26 +2273,38 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
+ // Save edx/edi to stX0/stX1.
__ push(edx);
+ __ push(edi);
__ fld_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 4));
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
__ PopReturnAddressTo(edx);
__ Move(ecx, Immediate(0));
- Label done, loop;
+ Label done, push, loop;
__ bind(&loop);
__ cmp(ecx, ebx);
__ j(equal, &done, Label::kNear);
- __ Push(
- FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ // Turn the hole into undefined as we go.
+ __ mov(edi,
+ FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ __ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(edi);
__ inc(ecx);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(edx);
- __ lea(esp, Operand(esp, -kFloatSize));
+ // Restore edx/edi from stX0/stX1.
+ __ lea(esp, Operand(esp, -2 * kFloatSize));
__ fstp_s(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, 4));
__ pop(edx);
+ __ pop(edi);
__ Move(eax, ebx);
}