author     Michaël Zasso <targos@protonmail.com>      2021-09-17 14:34:02 +0200
committer  Michaël Zasso <targos@protonmail.com>      2021-10-02 08:40:27 +0200
commit     4f722915218862b108c90371503d26a3fe89f4c9 (patch)
tree       cd1577faff9acc20eb8e4a0e2d6ed1856bf308fd /deps/v8/src/builtins
parent     606bb521591a29df5401732bfbd19c1e31239ed9 (diff)
download   node-new-4f722915218862b108c90371503d26a3fe89f4c9.tar.gz
deps: update V8 to 9.4.146.18
PR-URL: https://github.com/nodejs/node/pull/39945
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Michael Dawson <midawson@redhat.com>
Diffstat (limited to 'deps/v8/src/builtins')
-rw-r--r--   deps/v8/src/builtins/arm/builtins-arm.cc                111
-rw-r--r--   deps/v8/src/builtins/arm64/builtins-arm64.cc             90
-rw-r--r--   deps/v8/src/builtins/array-concat.tq                     49
-rw-r--r--   deps/v8/src/builtins/array-findlast.tq                  110
-rw-r--r--   deps/v8/src/builtins/array-findlastindex.tq             111
-rw-r--r--   deps/v8/src/builtins/arraybuffer.tq                     114
-rw-r--r--   deps/v8/src/builtins/base.tq                             19
-rw-r--r--   deps/v8/src/builtins/builtins-arraybuffer.cc            101
-rw-r--r--   deps/v8/src/builtins/builtins-console.cc                 24
-rw-r--r--   deps/v8/src/builtins/builtins-definitions.h              15
-rw-r--r--   deps/v8/src/builtins/builtins-regexp-gen.cc               5
-rw-r--r--   deps/v8/src/builtins/builtins-typed-array-gen.cc         28
-rw-r--r--   deps/v8/src/builtins/builtins-typed-array-gen.h           3
-rw-r--r--   deps/v8/src/builtins/builtins-typed-array.cc             26
-rw-r--r--   deps/v8/src/builtins/builtins-utils.h                    16
-rw-r--r--   deps/v8/src/builtins/cast.tq                              9
-rw-r--r--   deps/v8/src/builtins/conversion.tq                        2
-rw-r--r--   deps/v8/src/builtins/ia32/builtins-ia32.cc              150
-rw-r--r--   deps/v8/src/builtins/iterator.tq                          2
-rw-r--r--   deps/v8/src/builtins/mips/builtins-mips.cc               81
-rw-r--r--   deps/v8/src/builtins/mips64/builtins-mips64.cc           81
-rw-r--r--   deps/v8/src/builtins/ppc/builtins-ppc.cc                 84
-rw-r--r--   deps/v8/src/builtins/riscv64/builtins-riscv64.cc         85
-rw-r--r--   deps/v8/src/builtins/s390/builtins-s390.cc               44
-rw-r--r--   deps/v8/src/builtins/typed-array-createtypedarray.tq      8
-rw-r--r--   deps/v8/src/builtins/typed-array-findlast.tq            112
-rw-r--r--   deps/v8/src/builtins/typed-array-findlastindex.tq       115
-rw-r--r--   deps/v8/src/builtins/typed-array-set.tq                   7
-rw-r--r--   deps/v8/src/builtins/typed-array-slice.tq                 7
-rw-r--r--   deps/v8/src/builtins/typed-array.tq                       2
-rw-r--r--   deps/v8/src/builtins/x64/builtins-x64.cc                168
31 files changed, 1261 insertions, 518 deletions
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 20312d8336..f45c927e67 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -129,9 +129,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(sp, sp, Operand(scratch, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(sp, sp, Operand(kPointerSize));
+ __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&stack_overflow);
@@ -276,9 +275,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(sp, sp, Operand(kPointerSize));
+ __ DropArguments(r1, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&check_receiver);
@@ -828,7 +826,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ add(sp, sp, params_size, LeaveCC);
+ __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1113,14 +1112,15 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
- __ strh(scratch, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ __ strh(scratch,
+ FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
}
__ Push(argc, bytecodeArray);
@@ -1266,11 +1266,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r9, Operand(0));
__ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
@@ -1861,8 +1861,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
__ cmp(r0, Operand(2), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
- __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
- __ str(r5, MemOperand(sp, 0));
+ __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1938,8 +1938,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
__ cmp(r0, Operand(3), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
- __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
- __ str(r5, MemOperand(sp, 0));
+ __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1981,8 +1981,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
__ cmp(r0, Operand(3), ge);
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
- __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
- __ str(r4, MemOperand(sp, 0)); // set undefined to the receiver
+ __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -3479,12 +3479,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the current or next (in execution order) bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ Push(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -3492,6 +3493,38 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = r1;
__ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = r4;
+ __ ldr(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ b(eq, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+ }
+
+ // Load baseline code from baseline data.
+ __ ldr(code_obj,
+ FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Load the feedback vector.
Register feedback_vector = r2;
__ ldr(feedback_vector,
@@ -3513,15 +3546,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = r4;
- __ ldr(code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ ldr(code_obj,
- FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -3554,6 +3578,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = r0;
Register arg_reg_2 = r1;
@@ -3575,8 +3601,9 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
- __ strh(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ __ strh(scratch,
+ FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
@@ -3600,8 +3627,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ b(&start);
@@ -3609,17 +3638,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 06245ea51f..b1f9a63e3c 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -1297,10 +1297,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr, FieldMemOperand(bytecode_array,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecode_array);
@@ -1456,10 +1456,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load the initial bytecode offset.
__ Mov(kInterpreterBytecodeOffsetRegister,
@@ -4005,12 +4005,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the current or next (in execution order) bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ Push(padreg, kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -4018,6 +4019,43 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = x1;
__ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = x22;
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ B(eq, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+ }
+
+ // Load baseline code from baseline data.
+ __ LoadTaggedPointerField(
+ code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+ }
+
// Load the feedback vector.
Register feedback_vector = x2;
__ LoadTaggedPointerField(
@@ -4040,20 +4078,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = x22;
- __ LoadTaggedPointerField(
- code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
- }
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -4086,6 +4110,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(padreg, kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = x0;
Register arg_reg_2 = x1;
@@ -4104,7 +4130,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
} else {
__ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
@@ -4127,8 +4153,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(padreg, kInterpreterAccumulatorRegister);
__ PushArgument(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister, padreg);
}
// Retry from the start after installing baseline code.
__ B(&start);
@@ -4136,17 +4164,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/array-concat.tq b/deps/v8/src/builtins/array-concat.tq
new file mode 100644
index 0000000000..5eb66e6ce8
--- /dev/null
+++ b/deps/v8/src/builtins/array-concat.tq
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+
+extern builtin ArrayConcat(Context, JSFunction, JSAny, int32): JSAny;
+
+transitioning javascript builtin
+ArrayPrototypeConcat(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // Fast path if we invoke as `x.concat()`.
+ if (arguments.length == 0) {
+ typeswitch (receiver) {
+ case (a: FastJSArrayForConcat): {
+ return CloneFastJSArray(context, a);
+ }
+ case (JSAny): {
+ // Fallthrough.
+ }
+ }
+ }
+
+ // Fast path if we invoke as `[].concat(x)`.
+ try {
+ const receiverAsArray: FastJSArrayForConcat =
+ Cast<FastJSArrayForConcat>(receiver)
+ otherwise ReceiverIsNotFastJSArrayForConcat;
+ if (receiverAsArray.IsEmpty() && arguments.length == 1) {
+ typeswitch (arguments[0]) {
+ case (a: FastJSArrayForCopy): {
+ return CloneFastJSArray(context, a);
+ }
+ case (JSAny): {
+ // Fallthrough.
+ }
+ }
+ }
+ } label ReceiverIsNotFastJSArrayForConcat {
+ // Fallthrough.
+ }
+
+ // TODO(victorgomes): Implement slow path ArrayConcat in Torque.
+ tail ArrayConcat(
+ context, LoadTargetFromFrame(), Undefined,
+ Convert<int32>(arguments.length));
+}
+
+} // namespace array
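The new array-concat.tq above adds two fast paths for Array.prototype.concat and tail-calls the pre-existing slow-path ArrayConcat builtin for everything else. A rough sketch of the JavaScript-level cases the fast paths cover (illustrative only, not part of the diff):

  // `x.concat()` with no arguments on a fast array clones the receiver.
  const x: number[] = [1, 2, 3];
  const clone1 = x.concat();            // [1, 2, 3], a fresh array

  // `[].concat(x)` with an empty fast receiver clones the single argument.
  const empty: number[] = [];
  const clone2 = empty.concat(x);       // [1, 2, 3], a fresh array

  // Any other shape of call still goes through the slow-path ArrayConcat builtin.
  console.log(clone1, clone2);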
diff --git a/deps/v8/src/builtins/array-findlast.tq b/deps/v8/src/builtins/array-findlast.tq
new file mode 100644
index 0000000000..a359ec915f
--- /dev/null
+++ b/deps/v8/src/builtins/array-findlast.tq
@@ -0,0 +1,110 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlast
+transitioning builtin ArrayFindLastLoopContinuation(implicit context: Context)(
+ predicate: Callable, thisArg: JSAny, o: JSReceiver,
+ initialK: Number): JSAny {
+ // 5. Repeat, while k >= 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 5a. Let Pk be ! ToString(𝔽(k)).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 5b. Let kValue be ? Get(O, Pk).
+ const value: JSAny = GetProperty(o, k);
+
+ // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ const testResult: JSAny = Call(context, predicate, thisArg, value, k, o);
+
+ // 5d. If testResult is true, return kValue.
+ if (ToBoolean(testResult)) {
+ return value;
+ }
+
+ // 5e. Set k to k - 1. (done by the loop).
+ }
+
+ // 6. Return undefined.
+ return Undefined;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlast
+transitioning macro FastArrayFindLast(implicit context: Context)(
+ o: JSReceiver, len: Number, predicate: Callable, thisArg: JSAny): JSAny
+ labels Bailout(Number) {
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1);
+ // 4. Let k be len - 1.
+ let k: Smi = smiLen - 1;
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // 5. Repeat, while k ≥ 0
+ // Build a fast loop over the smi array.
+ for (; k >= 0; k--) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+
+ // 5a. Let Pk be ! ToString(𝔽(k)).
+ // k is guaranteed to be a positive integer, hence there is no need to
+ // cast ToString for LoadElementOrUndefined.
+
+ // 5b. Let kValue be ? Get(O, Pk).
+ const value: JSAny = fastOW.LoadElementOrUndefined(k);
+ // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ const testResult: JSAny =
+ Call(context, predicate, thisArg, value, k, fastOW.Get());
+ // 5d. If testResult is true, return kValue.
+ if (ToBoolean(testResult)) {
+ return value;
+ }
+
+ // 5e. Set k to k - 1. (done by the loop).
+ }
+
+ // 6. Return undefined.
+ return Undefined;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlast
+transitioning javascript builtin
+ArrayPrototypeFindLast(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.findLast');
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? LengthOfArrayLike(O).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(predicate) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NotCallableError;
+ }
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallableError;
+
+ // If a thisArg parameter is provided, it will be used as the this value for
+ // each invocation of predicate. If it is not provided, undefined is used
+ // instead.
+ const thisArg: JSAny = arguments[1];
+
+ // Special cases.
+ try {
+ return FastArrayFindLast(o, len, predicate, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Number) deferred {
+ return ArrayFindLastLoopContinuation(predicate, thisArg, o, k);
+ }
+ } label NotCallableError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ }
+}
+}
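For reference, the observable semantics this Torque builtin implements (Array.prototype.findLast from the array-find-from-last proposal): scan from the last index downwards and return the first matching value, or undefined. Illustrative sketch, not part of the diff:

  const values = [5, 12, 8, 130, 44];
  console.log(values.findLast((v) => v > 10));    // 44 (last match, not 12)
  console.log(values.findLast((v) => v > 1000));  // undefined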
diff --git a/deps/v8/src/builtins/array-findlastindex.tq b/deps/v8/src/builtins/array-findlastindex.tq
new file mode 100644
index 0000000000..3b5498f961
--- /dev/null
+++ b/deps/v8/src/builtins/array-findlastindex.tq
@@ -0,0 +1,111 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlastindex
+transitioning builtin ArrayFindLastIndexLoopContinuation(
+ implicit context: Context)(
+ predicate: Callable, thisArg: JSAny, o: JSReceiver,
+ initialK: Number): Number {
+ // 5. Repeat, while k >= 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 5a. Let Pk be ! ToString(𝔽(k)).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 5b. Let kValue be ? Get(O, Pk).
+ const value: JSAny = GetProperty(o, k);
+
+ // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ const testResult: JSAny = Call(context, predicate, thisArg, value, k, o);
+
+ // 5d. If testResult is true, return 𝔽(k).
+ if (ToBoolean(testResult)) {
+ return k;
+ }
+
+ // 5e. Set k to k - 1. (done by the loop).
+ }
+
+ // 6. Return -1𝔽.
+ return Convert<Smi>(-1);
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlastindex
+transitioning macro FastArrayFindLastIndex(implicit context: Context)(
+ o: JSReceiver, len: Number, predicate: Callable, thisArg: JSAny): Number
+ labels Bailout(Number) {
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1);
+ // 4. Let k be len - 1.
+ let k: Smi = smiLen - 1;
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // 5. Repeat, while k ≥ 0
+ // Build a fast loop over the smi array.
+ for (; k >= 0; k--) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+
+ // 5a. Let Pk be ! ToString(𝔽(k)).
+ // k is guaranteed to be a positive integer, hence there is no need to
+ // cast ToString for LoadElementOrUndefined.
+
+ // 5b. Let kValue be ? Get(O, Pk).
+ const value: JSAny = fastOW.LoadElementOrUndefined(k);
+ // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ const testResult: JSAny =
+ Call(context, predicate, thisArg, value, k, fastOW.Get());
+ // 5d. If testResult is true, return 𝔽(k).
+ if (ToBoolean(testResult)) {
+ return k;
+ }
+
+ // 5e. Set k to k - 1. (done by the loop).
+ }
+
+ // 6. Return -1𝔽.
+ return -1;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlastindex
+transitioning javascript builtin
+ArrayPrototypeFindLastIndex(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.findLastIndex');
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? LengthOfArrayLike(O).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(predicate) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NotCallableError;
+ }
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallableError;
+
+ // If a thisArg parameter is provided, it will be used as the this value for
+ // each invocation of predicate. If it is not provided, undefined is used
+ // instead.
+ const thisArg: JSAny = arguments[1];
+
+ // Special cases.
+ try {
+ return FastArrayFindLastIndex(o, len, predicate, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Number) deferred {
+ return ArrayFindLastIndexLoopContinuation(predicate, thisArg, o, k);
+ }
+ } label NotCallableError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ }
+}
+}
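Array.prototype.findLastIndex mirrors findLast but returns the index, or -1 when no element matches. Illustrative sketch, not part of the diff:

  const values = [5, 12, 8, 130, 44];
  console.log(values.findLastIndex((v) => v > 10));    // 4
  console.log(values.findLastIndex((v) => v > 1000));  // -1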
diff --git a/deps/v8/src/builtins/arraybuffer.tq b/deps/v8/src/builtins/arraybuffer.tq
index 5794414443..fc0152f51a 100644
--- a/deps/v8/src/builtins/arraybuffer.tq
+++ b/deps/v8/src/builtins/arraybuffer.tq
@@ -18,115 +18,103 @@ transitioning javascript builtin ArrayBufferPrototypeGetByteLength(
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception.
- if (IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- }
- // 5. If IsDetachedBuffer(O) is true, throw a TypeError exception.
- // TODO(v8:4895): We don't actually throw here.
- // 6. Let length be O.[[ArrayBufferByteLength]].
+ // 4. Let length be O.[[ArrayBufferByteLength]].
const length = o.byte_length;
- // 7. Return length.
+ // 5. Return length.
return Convert<Number>(length);
}
-// #sec-get-sharedarraybuffer.prototype.bytelength
-transitioning javascript builtin SharedArrayBufferPrototypeGetByteLength(
+// #sec-get-arraybuffer.prototype.maxbytelength
+transitioning javascript builtin ArrayBufferPrototypeGetMaxByteLength(
js-implicit context: NativeContext, receiver: JSAny)(): Number {
// 1. Let O be the this value.
// 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
- const functionName = 'get SharedArrayBuffer.prototype.byteLength';
+ const functionName = 'get ArrayBuffer.prototype.maxByteLength';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- // 3. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
- if (!IsSharedArrayBuffer(o)) {
+ // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsSharedArrayBuffer(o)) {
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception.
- if (IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ // 4. If IsDetachedBuffer(O) is true, return 0_F.
+ if (IsDetachedBuffer(o)) {
+ return 0;
}
- // 5. Let length be O.[[ArrayBufferByteLength]].
- const length = o.byte_length;
- // 6. Return length.
- return Convert<Number>(length);
+ // 5. If IsResizableArrayBuffer(O) is true, then
+ // a. Let length be O.[[ArrayBufferMaxByteLength]].
+ // 6. Else,
+ // a. Let length be O.[[ArrayBufferByteLength]].
+ // 7. Return F(length);
+ assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
+ return Convert<Number>(o.max_byte_length);
}
-// #sec-get-resizablearraybuffer.prototype.bytelength
-transitioning javascript builtin ResizableArrayBufferPrototypeGetByteLength(
- js-implicit context: NativeContext, receiver: JSAny)(): Number {
+// #sec-get-arraybuffer.prototype.resizable
+transitioning javascript builtin ArrayBufferPrototypeGetResizable(
+ js-implicit context: NativeContext, receiver: JSAny)(): Boolean {
// 1. Let O be the this value.
- // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
- const functionName = 'get ResizableArrayBuffer.prototype.byteLength';
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get ArrayBuffer.prototype.resizable';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- if (!IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- }
// 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
if (IsSharedArrayBuffer(o)) {
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. Let length be O.[[ArrayBufferByteLength]].
- const length = o.byte_length;
- // 5. Return length.
- return Convert<Number>(length);
+ // 4. Return IsResizableArrayBuffer(O).
+ if (IsResizableArrayBuffer(o)) {
+ return True;
+ }
+ return False;
}
-// #sec-get-resizablearraybuffer.prototype.maxbytelength
-transitioning javascript builtin ResizableArrayBufferPrototypeGetMaxByteLength(
+// #sec-get-growablesharedarraybuffer.prototype.maxbytelength
+transitioning javascript builtin
+SharedArrayBufferPrototypeGetMaxByteLength(
js-implicit context: NativeContext, receiver: JSAny)(): Number {
// 1. Let O be the this value.
- // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
- const functionName = 'get ResizableArrayBuffer.prototype.maxByteLength';
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get SharedArrayBuffer.prototype.maxByteLength';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- if (!IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- }
- // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
- if (IsSharedArrayBuffer(o)) {
+ // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
+ if (!IsSharedArrayBuffer(o)) {
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. Let length be O.[[ArrayBufferMaxByteLength]].
- const length = o.max_byte_length;
- // 5. Return length.
- return Convert<Number>(length);
+ // 4. If IsResizableArrayBuffer(O) is true, then
+ // a. Let length be O.[[ArrayBufferMaxByteLength]].
+ // 5. Else,
+ // a. Let length be O.[[ArrayBufferByteLength]].
+ // 6. Return F(length);
+ assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
+ return Convert<Number>(o.max_byte_length);
}
-// #sec-get-growablesharedarraybuffer.prototype.maxbytelength
-transitioning javascript builtin
-GrowableSharedArrayBufferPrototypeGetMaxByteLength(
- js-implicit context: NativeContext, receiver: JSAny)(): Number {
+// #sec-get-sharedarraybuffer.prototype.growable
+transitioning javascript builtin SharedArrayBufferPrototypeGetGrowable(
+ js-implicit context: NativeContext, receiver: JSAny)(): Boolean {
// 1. Let O be the this value.
- // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
- const functionName = 'get GrowableSharedArrayBuffer.prototype.maxByteLength';
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get SharedArrayBuffer.prototype.growable';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- if (!IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- }
// 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
if (!IsSharedArrayBuffer(o)) {
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. Let length be O.[[ArrayBufferMaxByteLength]].
- const length = o.max_byte_length;
- // 5. Return length.
- return Convert<Number>(length);
+ // 4. Return IsResizableArrayBuffer(O).
+ if (IsResizableArrayBuffer(o)) {
+ return True;
+ }
+ return False;
}
// #sec-arraybuffer.isview
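The rewritten getters above move from the earlier ResizableArrayBuffer/GrowableSharedArrayBuffer prototypes to the in-place ArrayBuffer and SharedArrayBuffer API of the resizable-buffers proposal (gated behind --harmony-rab-gsab in this V8 version). A sketch of the behaviour they expose, assuming the flag is enabled:

  const fixed = new ArrayBuffer(8);
  console.log(fixed.resizable, fixed.maxByteLength);          // false, 8

  const resizable = new ArrayBuffer(8, { maxByteLength: 64 });
  console.log(resizable.resizable, resizable.maxByteLength);  // true, 64

  const shared = new SharedArrayBuffer(8, { maxByteLength: 64 });
  console.log(shared.growable, shared.maxByteLength);         // true, 64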
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index c0acc90593..af1813b61d 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -214,7 +214,7 @@ extern class GlobalDictionary extends HashTable;
extern class SimpleNumberDictionary extends HashTable;
extern class EphemeronHashTable extends HashTable;
type ObjectHashTable extends HashTable
- generates 'TNode<ObjectHashTable>';
+ generates 'TNode<ObjectHashTable>' constexpr 'ObjectHashTable';
extern class NumberDictionary extends HashTable;
type RawPtr generates 'TNode<RawPtrT>' constexpr 'Address';
@@ -552,8 +552,20 @@ extern class Filler extends HeapObject generates 'TNode<HeapObject>';
// but not their own class definitions:
// Like JSObject, but created from API function.
-@apiExposedInstanceTypeValue(0x420)
+@apiExposedInstanceTypeValue(0x422)
+@doNotGenerateCast
+@noVerifier
extern class JSApiObject extends JSObject generates 'TNode<JSObject>';
+
+// TODO(gsathya): This only exists to make JSApiObject instance type into a
+// range.
+@apiExposedInstanceTypeValue(0x80A)
+@doNotGenerateCast
+@highestInstanceTypeWithinParentClassRange
+@noVerifier
+extern class JSLastDummyApiObject extends JSApiObject
+ generates 'TNode<JSObject>';
+
// Like JSApiObject, but requires access checks and/or has interceptors.
@apiExposedInstanceTypeValue(0x410)
extern class JSSpecialApiObject extends JSSpecialObject
@@ -669,6 +681,8 @@ extern macro ThrowTypeError(implicit context: Context)(
constexpr MessageTemplate, Object, Object, Object): never;
extern transitioning runtime ThrowTypeErrorIfStrict(implicit context: Context)(
Smi, Object, Object): void;
+extern transitioning runtime ThrowIteratorError(implicit context: Context)(
+ JSAny): never;
extern transitioning runtime ThrowCalledNonCallable(implicit context: Context)(
JSAny): never;
@@ -1198,6 +1212,7 @@ extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map):
extern macro IsNoElementsProtectorCellInvalid(): bool;
extern macro IsArrayIteratorProtectorCellInvalid(): bool;
extern macro IsArraySpeciesProtectorCellInvalid(): bool;
+extern macro IsIsConcatSpreadableProtectorCellInvalid(): bool;
extern macro IsTypedArraySpeciesProtectorCellInvalid(): bool;
extern macro IsPromiseSpeciesProtectorCellInvalid(): bool;
extern macro IsMockArrayBufferAllocatorFlag(): bool;
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index c3a7f1b98c..f995299b7e 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -50,16 +50,11 @@ bool RoundUpToPageSize(size_t byte_length, size_t page_size,
Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
Handle<JSReceiver> new_target, Handle<Object> length,
Handle<Object> max_length, InitializedFlag initialized) {
- SharedFlag shared =
- (*target != target->native_context().array_buffer_fun() &&
- *target != target->native_context().resizable_array_buffer_fun())
- ? SharedFlag::kShared
- : SharedFlag::kNotShared;
- ResizableFlag resizable =
- (*target == target->native_context().resizable_array_buffer_fun() ||
- *target == target->native_context().growable_shared_array_buffer_fun())
- ? ResizableFlag::kResizable
- : ResizableFlag::kNotResizable;
+ SharedFlag shared = *target != target->native_context().array_buffer_fun()
+ ? SharedFlag::kShared
+ : SharedFlag::kNotShared;
+ ResizableFlag resizable = max_length.is_null() ? ResizableFlag::kNotResizable
+ : ResizableFlag::kResizable;
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -83,12 +78,9 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
if (resizable == ResizableFlag::kNotResizable) {
backing_store =
BackingStore::Allocate(isolate, byte_length, shared, initialized);
+ max_byte_length = byte_length;
} else {
- Handle<Object> number_max_length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_max_length,
- Object::ToInteger(isolate, max_length));
-
- if (!TryNumberToSize(*number_max_length, &max_byte_length)) {
+ if (!TryNumberToSize(*max_length, &max_byte_length)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
@@ -116,8 +108,8 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
}
constexpr bool kIsWasmMemory = false;
backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
- isolate, byte_length, page_size, initial_pages, max_pages,
- kIsWasmMemory, shared);
+ isolate, byte_length, max_byte_length, page_size, initial_pages,
+ max_pages, kIsWasmMemory, shared);
}
if (!backing_store) {
// Allocation of backing store failed.
@@ -137,10 +129,7 @@ BUILTIN(ArrayBufferConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
DCHECK(*target == target->native_context().array_buffer_fun() ||
- *target == target->native_context().shared_array_buffer_fun() ||
- *target == target->native_context().resizable_array_buffer_fun() ||
- *target ==
- target->native_context().growable_shared_array_buffer_fun());
+ *target == target->native_context().shared_array_buffer_fun());
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
@@ -158,9 +147,22 @@ BUILTIN(ArrayBufferConstructor) {
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- Handle<Object> max_length = args.atOrUndefined(isolate, 2);
- return ConstructBuffer(isolate, target, new_target, number_length, max_length,
- InitializedFlag::kZeroInitialized);
+ Handle<Object> number_max_length;
+ if (FLAG_harmony_rab_gsab) {
+ Handle<Object> max_length;
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, max_length,
+ JSObject::ReadFromOptionsBag(
+ options, isolate->factory()->max_byte_length_string(), isolate));
+
+ if (!max_length->IsUndefined(isolate)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, number_max_length, Object::ToInteger(isolate, max_length));
+ }
+ }
+ return ConstructBuffer(isolate, target, new_target, number_length,
+ number_max_length, InitializedFlag::kZeroInitialized);
}
// This is a helper to construct an ArrayBuffer with uinitialized memory.
@@ -462,45 +464,48 @@ static Object ResizeHelper(BuiltinArguments args, Isolate* isolate,
return ReadOnlyRoots(isolate).undefined_value();
}
-// ES #sec-get-growablesharedarraybuffer.prototype.bytelength
-// get GrowableSharedArrayBuffer.prototype.byteLength
-BUILTIN(GrowableSharedArrayBufferPrototypeGetByteLength) {
- const char* const kMethodName =
- "get GrowableSharedArrayBuffer.prototype.byteLength";
+// ES #sec-get-sharedarraybuffer.prototype.bytelength
+// get SharedArrayBuffer.prototype.byteLength
+BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
+ const char* const kMethodName = "get SharedArrayBuffer.prototype.byteLength";
HandleScope scope(isolate);
-
// 1. Let O be the this value.
- // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxLength]]).
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName);
- CHECK_RESIZABLE(true, array_buffer, kMethodName);
// 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
CHECK_SHARED(true, array_buffer, kMethodName);
- // 4. Let length be ArrayBufferByteLength(O, SeqCst).
-
- // Invariant: byte_length for GSAB is 0 (it needs to be read from the
- // BackingStore).
- DCHECK_EQ(0, array_buffer->byte_length());
+ DCHECK_EQ(array_buffer->max_byte_length(),
+ array_buffer->GetBackingStore()->max_byte_length());
- size_t byte_length =
- array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst);
+ // 4. Let length be ArrayBufferByteLength(O, SeqCst).
+ size_t byte_length;
+ if (array_buffer->is_resizable()) {
+ // Invariant: byte_length for GSAB is 0 (it needs to be read from the
+ // BackingStore).
+ DCHECK_EQ(0, array_buffer->byte_length());
- // 5. Return length.
+ byte_length =
+ array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst);
+ } else {
+ byte_length = array_buffer->byte_length();
+ }
+ // 5. Return F(length).
return *isolate->factory()->NewNumberFromSize(byte_length);
}
-// ES #sec-resizablearraybuffer.prototype.resize
-// ResizableArrayBuffer.prototype.resize(new_size))
-BUILTIN(ResizableArrayBufferPrototypeResize) {
- const char* const kMethodName = "ResizableArrayBuffer.prototype.resize";
+// ES #sec-arraybuffer.prototype.resize
+// ArrayBuffer.prototype.resize(new_size))
+BUILTIN(ArrayBufferPrototypeResize) {
+ const char* const kMethodName = "ArrayBuffer.prototype.resize";
constexpr bool kIsShared = false;
return ResizeHelper(args, isolate, kMethodName, kIsShared);
}
-// ES #sec-growablesharedarraybuffer.prototype.grow
-// GrowableSharedArrayBuffer.prototype.grow(new_size))
-BUILTIN(GrowableSharedArrayBufferPrototypeGrow) {
- const char* const kMethodName = "GrowableSharedArrayBuffer.prototype.grow";
+// ES #sec-sharedarraybuffer.prototype.grow
+// SharedArrayBuffer.prototype.grow(new_size))
+BUILTIN(SharedArrayBufferPrototypeGrow) {
+ const char* const kMethodName = "SharedArrayBuffer.prototype.grow";
constexpr bool kIsShared = true;
return ResizeHelper(args, isolate, kMethodName, kIsShared);
}
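On the C++ side, the constructor now reads maxByteLength from an options bag and the resize/grow builtins move onto ArrayBuffer.prototype and SharedArrayBuffer.prototype. A sketch of the corresponding JavaScript surface, again assuming --harmony-rab-gsab is enabled:

  const buf = new ArrayBuffer(4, { maxByteLength: 16 });
  buf.resize(8);                       // any size up to maxByteLength
  console.log(buf.byteLength);         // 8

  const sab = new SharedArrayBuffer(4, { maxByteLength: 16 });
  sab.grow(8);                         // SharedArrayBuffers can only grow
  console.log(sab.byteLength);         // 8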
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index 21841e382c..a1359cd422 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -46,22 +46,6 @@ void ConsoleCall(
CHECK(!isolate->has_scheduled_exception());
if (!isolate->console_delegate()) return;
HandleScope scope(isolate);
-
- // Access check. The current context has to match the context of all
- // arguments, otherwise the inspector might leak objects across contexts.
- Handle<Context> context = handle(isolate->context(), isolate);
- for (int i = 0; i < args.length(); ++i) {
- Handle<Object> argument = args.at<Object>(i);
- if (!argument->IsJSObject()) continue;
-
- Handle<JSObject> argument_obj = Handle<JSObject>::cast(argument);
- if (argument->IsAccessCheckNeeded(isolate) &&
- !isolate->MayAccess(context, argument_obj)) {
- isolate->ReportFailedAccessCheck(argument_obj);
- return;
- }
- }
-
debug::ConsoleCallArguments wrapper(args);
Handle<Object> context_id_obj = JSObject::GetDataProperty(
args.target(), isolate->factory()->console_context_id_symbol());
@@ -78,7 +62,7 @@ void ConsoleCall(
}
void LogTimerEvent(Isolate* isolate, BuiltinArguments args,
- Logger::StartEnd se) {
+ v8::LogEventStatus se) {
if (!isolate->logger()->is_logging()) return;
HandleScope scope(isolate);
std::unique_ptr<char[]> name;
@@ -102,21 +86,21 @@ CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
#undef CONSOLE_BUILTIN_IMPLEMENTATION
BUILTIN(ConsoleTime) {
- LogTimerEvent(isolate, args, Logger::START);
+ LogTimerEvent(isolate, args, v8::LogEventStatus::kStart);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::Time);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
BUILTIN(ConsoleTimeEnd) {
- LogTimerEvent(isolate, args, Logger::END);
+ LogTimerEvent(isolate, args, v8::LogEventStatus::kEnd);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeEnd);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
BUILTIN(ConsoleTimeStamp) {
- LogTimerEvent(isolate, args, Logger::STAMP);
+ LogTimerEvent(isolate, args, v8::LogEventStatus::kStamp);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeStamp);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index b5caebd7c4..70eb349dab 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -170,8 +170,8 @@ namespace internal {
ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \
ASM(BaselineOnStackReplacement, Void) \
ASM(BaselineLeaveFrame, BaselineLeaveFrame) \
- ASM(BaselineEnterAtBytecode, Void) \
- ASM(BaselineEnterAtNextBytecode, Void) \
+ ASM(BaselineOrInterpreterEnterAtBytecode, Void) \
+ ASM(BaselineOrInterpreterEnterAtNextBytecode, Void) \
ASM(InterpreterOnStackReplacement_ToBaseline, Void) \
\
/* Code life-cycle */ \
@@ -394,6 +394,8 @@ namespace internal {
CPP(ArrayBufferConstructor) \
CPP(ArrayBufferConstructor_DoNotInitialize) \
CPP(ArrayBufferPrototypeSlice) \
+ /* https://tc39.es/proposal-resizablearraybuffer/ */ \
+ CPP(ArrayBufferPrototypeResize) \
\
/* AsyncFunction */ \
TFS(AsyncFunctionEnter, kClosure, kReceiver) \
@@ -799,11 +801,6 @@ namespace internal {
ASM(RegExpInterpreterTrampoline, CCall) \
ASM(RegExpExperimentalTrampoline, CCall) \
\
- /* ResizableArrayBuffer & GrowableSharedArrayBuffer */ \
- CPP(ResizableArrayBufferPrototypeResize) \
- CPP(GrowableSharedArrayBufferPrototypeGrow) \
- CPP(GrowableSharedArrayBufferPrototypeGetByteLength) \
- \
/* Set */ \
TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \
TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
@@ -823,7 +820,11 @@ namespace internal {
TFS(SetOrSetIteratorToList, kSource) \
\
/* SharedArrayBuffer */ \
+ CPP(SharedArrayBufferPrototypeGetByteLength) \
CPP(SharedArrayBufferPrototypeSlice) \
+ /* https://tc39.es/proposal-resizablearraybuffer/ */ \
+ CPP(SharedArrayBufferPrototypeGrow) \
+ \
TFJ(AtomicsLoad, 2, kReceiver, kArray, kIndex) \
TFJ(AtomicsStore, 3, kReceiver, kArray, kIndex, kValue) \
TFJ(AtomicsExchange, 3, kReceiver, kArray, kIndex, kValue) \
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 5920d9fe7c..535188c567 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -1085,6 +1085,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG("dotAll", JSRegExp::kDotAll);
CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
+ CASE_FOR_FLAG("hasIndices", JSRegExp::kHasIndices);
#undef CASE_FOR_FLAG
#define CASE_FOR_FLAG(NAME, V8_FLAG_EXTERN_REF, FLAG) \
@@ -1107,10 +1108,6 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
} while (false)
CASE_FOR_FLAG(
- "hasIndices",
- ExternalReference::address_of_harmony_regexp_match_indices_flag(),
- JSRegExp::kHasIndices);
- CASE_FOR_FLAG(
"linear",
ExternalReference::address_of_enable_experimental_regexp_engine(),
JSRegExp::kLinear);
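With the hasIndices case moved out of the flag-guarded list, the `d` flag is always reported by the flags getter. Illustrative sketch of the JavaScript behaviour:

  const re = /a(b)/d;
  console.log(re.hasIndices);                    // true
  console.log(re.flags.includes('d'));           // true
  const m = re.exec('ab');
  console.log(m && m.indices && m.indices[1]);   // [1, 2] - bounds of capture group 1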
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 735d8b674f..a76650d052 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -154,13 +154,16 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
- // Default to zero if the {receiver}s buffer was detached.
- TNode<JSArrayBuffer> receiver_buffer =
- LoadJSArrayBufferViewBuffer(CAST(receiver));
- TNode<UintPtrT> byte_offset = Select<UintPtrT>(
- IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
- [=] { return LoadJSArrayBufferViewByteOffset(CAST(receiver)); });
- Return(ChangeUintPtrToTagged(byte_offset));
+ // Default to zero if the {receiver}s buffer was detached / out of bounds.
+ Label detached_or_oob(this), not_detached_or_oob(this);
+ IsTypedArrayDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob,
+ &not_detached_or_oob);
+ BIND(&detached_or_oob);
+ Return(ChangeUintPtrToTagged(UintPtrConstant(0)));
+
+ BIND(&not_detached_or_oob);
+ Return(
+ ChangeUintPtrToTagged(LoadJSArrayBufferViewByteOffset(CAST(receiver))));
}
// ES6 #sec-get-%typedarray%.prototype.length
@@ -267,6 +270,17 @@ void TypedArrayBuiltinsAssembler::CallCMemmove(TNode<RawPtrT> dest_ptr,
std::make_pair(MachineType::UintPtr(), byte_length));
}
+void TypedArrayBuiltinsAssembler::CallCRelaxedMemmove(
+ TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
+ TNode<UintPtrT> byte_length) {
+ TNode<ExternalReference> memmove =
+ ExternalConstant(ExternalReference::relaxed_memmove_function());
+ CallCFunction(memmove, MachineType::AnyTagged(),
+ std::make_pair(MachineType::Pointer(), dest_ptr),
+ std::make_pair(MachineType::Pointer(), src_ptr),
+ std::make_pair(MachineType::UintPtr(), byte_length));
+}
+
void TypedArrayBuiltinsAssembler::CallCMemcpy(TNode<RawPtrT> dest_ptr,
TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length) {
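The byteOffset change above makes the getter return 0 not only for detached buffers but also when the view is out of bounds, which can happen with length-tracking views over resizable buffers. Sketch, assuming --harmony-rab-gsab:

  const rab = new ArrayBuffer(16, { maxByteLength: 16 });
  const view = new Uint8Array(rab, 8);
  console.log(view.byteOffset);   // 8
  rab.resize(4);                  // the view now starts past the end of the buffer
  console.log(view.byteOffset);   // 0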
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 0ec179ac9e..bb8a15ef02 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -52,6 +52,9 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void CallCMemmove(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length);
+ void CallCRelaxedMemmove(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
+ TNode<UintPtrT> byte_length);
+
void CallCMemcpy(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length);
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc
index bb936e6e46..d6be81615d 100644
--- a/deps/v8/src/builtins/builtins-typed-array.cc
+++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -99,7 +99,12 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
count = count * element_size;
uint8_t* data = static_cast<uint8_t*>(array->DataPtr());
- std::memmove(data + to, data + from, count);
+ if (array->buffer().is_shared()) {
+ base::Relaxed_Memmove(reinterpret_cast<base::Atomic8*>(data + to),
+ reinterpret_cast<base::Atomic8*>(data + from), count);
+ } else {
+ std::memmove(data + to, data + from, count);
+ }
return *array;
}
@@ -114,7 +119,7 @@ BUILTIN(TypedArrayPrototypeFill) {
ElementsKind kind = array->GetElementsKind();
Handle<Object> obj_value = args.atOrUndefined(isolate, 1);
- if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ if (IsBigIntTypedArrayElementsKind(kind)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value,
BigInt::FromObject(isolate, obj_value));
} else {
@@ -122,7 +127,7 @@ BUILTIN(TypedArrayPrototypeFill) {
Object::ToNumber(isolate, obj_value));
}
- int64_t len = array->length();
+ int64_t len = array->GetLength();
int64_t start = 0;
int64_t end = len;
@@ -142,11 +147,22 @@ BUILTIN(TypedArrayPrototypeFill) {
}
}
+ if (V8_UNLIKELY(array->IsVariableLength())) {
+ bool out_of_bounds = false;
+ array->GetLengthOrOutOfBounds(out_of_bounds);
+ if (out_of_bounds) {
+ const MessageTemplate message = MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message, operation));
+ }
+ } else if (V8_UNLIKELY(array->WasDetached())) {
+ return *array;
+ }
+
int64_t count = end - start;
if (count <= 0) return *array;
- if (V8_UNLIKELY(array->WasDetached())) return *array;
-
// Ensure processed indexes are within array bounds
DCHECK_GE(start, 0);
DCHECK_LT(start, len);
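The fill/copyWithin changes above switch to the generic BigInt elements-kind predicate, use the variable-length-aware length, and route copies over shared buffers through a relaxed memmove. The observable JavaScript behaviour stays as sketched below:

  // BigInt-backed arrays take BigInt fill values; other kinds take Numbers.
  const i64 = new BigInt64Array(4);
  i64.fill(1n, 1, 3);
  console.log(i64);                    // BigInt64Array [0n, 1n, 1n, 0n]

  // copyWithin on a SharedArrayBuffer-backed view: same result, relaxed copy inside.
  const shared = new Int32Array(new SharedArrayBuffer(16));
  shared.set([1, 2, 3, 4]);
  shared.copyWithin(0, 2);
  console.log(shared);                 // Int32Array [3, 4, 3, 4]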
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 3d813cd598..e219aec65d 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -79,6 +79,7 @@ class BuiltinArguments : public JavaScriptArguments {
// through the BuiltinArguments object args.
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
+#ifdef V8_RUNTIME_CALL_STATS
#define BUILTIN(name) \
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate); \
@@ -105,6 +106,21 @@ class BuiltinArguments : public JavaScriptArguments {
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate)
+#else // V8_RUNTIME_CALL_STATS
+#define BUILTIN(name) \
+ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate); \
+ \
+ V8_WARN_UNUSED_RESULT Address Builtin_##name( \
+ int args_length, Address* args_object, Isolate* isolate) { \
+ DCHECK(isolate->context().is_null() || isolate->context().IsContext()); \
+ BuiltinArguments args(args_length, args_object); \
+ return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \
+ } \
+ \
+ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate)
+#endif // V8_RUNTIME_CALL_STATS
// ----------------------------------------------------------------------------
#define CHECK_RECEIVER(Type, name, method) \
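
The builtins-utils.h split keeps the runtime-call-stats plumbing entirely out of builds compiled without V8_RUNTIME_CALL_STATS. The same define-the-macro-twice pattern in miniature, with made-up names (ENABLE_CALL_STATS, Entry_, Impl_) standing in for the real ones:

#include <cstdio>

// The instrumented wrapper only exists when the feature macro is set, so
// builds without it pay no overhead at the entry point.
#ifdef ENABLE_CALL_STATS
#define DEFINE_ENTRY(name)                                \
  static int Impl_##name(int arg);                        \
  int Entry_##name(int arg) {                             \
    std::printf("enter %s\n", #name); /* stats scope */   \
    int result = Impl_##name(arg);                        \
    std::printf("leave %s\n", #name);                     \
    return result;                                        \
  }                                                       \
  static int Impl_##name(int arg)
#else
#define DEFINE_ENTRY(name)                                \
  static int Impl_##name(int arg);                        \
  int Entry_##name(int arg) { return Impl_##name(arg); }  \
  static int Impl_##name(int arg)
#endif

DEFINE_ENTRY(Square) { return arg * arg; }

int main() { return Entry_Square(4) == 16 ? 0 : 1; }
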
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index a10bc7c946..b12ea5d9fe 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -547,10 +547,19 @@ Cast<FastJSArrayForCopy>(implicit context: Context)(o: HeapObject):
FastJSArrayForCopy
labels CastError {
if (IsArraySpeciesProtectorCellInvalid()) goto CastError;
+ // TODO(victorgomes): Check if we can cast from FastJSArrayForRead instead.
const a = Cast<FastJSArray>(o) otherwise CastError;
return %RawDownCast<FastJSArrayForCopy>(a);
}
+Cast<FastJSArrayForConcat>(implicit context: Context)(o: HeapObject):
+ FastJSArrayForConcat
+ labels CastError {
+ if (IsIsConcatSpreadableProtectorCellInvalid()) goto CastError;
+ const a = Cast<FastJSArrayForCopy>(o) otherwise CastError;
+ return %RawDownCast<FastJSArrayForConcat>(a);
+}
+
Cast<FastJSArrayWithNoCustomIteration>(implicit context: Context)(
o: HeapObject): FastJSArrayWithNoCustomIteration
labels CastError {
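
The new Cast<FastJSArrayForConcat> is only valid while the Symbol.isConcatSpreadable protector is intact, and it deliberately routes through Cast<FastJSArrayForCopy> so the species protector is checked as well. A toy C++ model of such protector-gated refinement, with invented types and flags for illustration:

#include <optional>

struct FastArray {};
struct FastArrayForCopy : FastArray {};
struct FastArrayForConcat : FastArrayForCopy {};

// Global invariants ("protectors") that user JavaScript can invalidate.
bool species_protector_intact = true;
bool is_concat_spreadable_protector_intact = true;

std::optional<FastArrayForCopy> CastForCopy(const FastArray&) {
  if (!species_protector_intact) return std::nullopt;
  return FastArrayForCopy();
}

std::optional<FastArrayForConcat> CastForConcat(const FastArray& a) {
  if (!is_concat_spreadable_protector_intact) return std::nullopt;
  if (!CastForCopy(a)) return std::nullopt;  // layer the weaker check too
  return FastArrayForConcat();
}
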
diff --git a/deps/v8/src/builtins/conversion.tq b/deps/v8/src/builtins/conversion.tq
index 636f49a024..266fcaa552 100644
--- a/deps/v8/src/builtins/conversion.tq
+++ b/deps/v8/src/builtins/conversion.tq
@@ -138,7 +138,7 @@ transitioning builtin ToObject(implicit context: Context)(input: JSAny):
}
case (o: JSAnyNotSmi): {
const index: intptr = Convert<intptr>(
- o.map.in_object_properties_start_or_constructor_function_index);
+ o.map.inobject_properties_start_or_constructor_function_index);
if (index != kNoConstructorFunctionIndex)
goto WrapPrimitive(
%RawDownCast<Slot<NativeContext, JSFunction>>(index));
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 54013e7698..7a8875fee9 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/codegen/register-arch.h"
#if V8_TARGET_ARCH_IA32
#include "src/api/api-arguments.h"
@@ -128,11 +129,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, edx, times_half_system_pointer_size,
- 1 * kSystemPointerSize)); // 1 ~ receiver
- __ PushReturnAddressFrom(ecx);
+ __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
__ bind(&stack_overflow);
@@ -283,11 +281,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, edx, times_half_system_pointer_size,
- 1 * kSystemPointerSize)); // 1 ~ receiver
- __ push(ecx);
+ __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// Otherwise we do a smi check and fall through to check if the return value
@@ -776,10 +771,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ leave();
// Drop receiver + arguments.
- Register return_pc = scratch2;
- __ PopReturnAddressTo(return_pc);
- __ add(esp, params_size);
- __ PushReturnAddressFrom(return_pc);
+ __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
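
DropArguments and DropArgumentsAndPushNewReceiver replace the hand-rolled pop-return-address/adjust-esp/push sequences above; the flags describe how the count is encoded and whether the receiver slot is part of it. A host-side sketch of the byte count they amount to, assuming a 1-bit Smi tag with no extra shift (names mirror the flags, but this is not the TurboAssembler code):

#include <cstddef>
#include <cstdint>

constexpr size_t kSystemPointerSize = sizeof(void*);

enum class CountKind { kIsInteger, kIsSmi, kIsBytes };
enum class ReceiverMode { kCountIncludesReceiver, kCountExcludesReceiver };

// Bytes to add to the stack pointer to drop the arguments (plus the
// receiver slot when it is not already included in the count).
size_t BytesToDrop(uint64_t count, CountKind kind, ReceiverMode mode) {
  size_t bytes = 0;
  switch (kind) {
    case CountKind::kIsInteger:
      bytes = count * kSystemPointerSize;
      break;
    case CountKind::kIsSmi:
      bytes = (count >> 1) * kSystemPointerSize;  // untag: assumes 1-bit Smi tag
      break;
    case CountKind::kIsBytes:
      bytes = count;
      break;
  }
  if (mode == ReceiverMode::kCountExcludesReceiver) bytes += kSystemPointerSize;
  return bytes;
}
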
@@ -1102,10 +1095,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
// Push bytecode array.
@@ -1725,10 +1718,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(
- FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
+ FieldOperand(bytecode_array, BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
__ Push(bytecode_array);
@@ -1915,11 +1908,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ PopReturnAddressTo(ecx);
- __ lea(esp,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
- __ Push(edi);
- __ PushReturnAddressFrom(ecx);
+ __ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
// Restore receiver to edi.
__ movd(edi, xmm0);
@@ -2026,11 +2017,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// Spill argumentsList to use edx as a scratch register.
__ movd(xmm0, edx);
- __ PopReturnAddressTo(edx);
- __ lea(esp,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
- __ Push(ecx);
- __ PushReturnAddressFrom(edx);
+ __ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(edx, xmm0);
@@ -2086,11 +2075,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// Spill argumentsList to use ecx as a scratch register.
__ movd(xmm0, ecx);
- __ PopReturnAddressTo(ecx);
- __ lea(esp,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
- __ PushRoot(RootIndex::kUndefinedValue);
- __ PushReturnAddressFrom(ecx);
+ __ DropArgumentsAndPushNewReceiver(
+ eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(ecx, xmm0);
@@ -3986,16 +3974,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ movsd(Operand(esi, dst_offset), xmm0);
}
- if (FLAG_debug_code) {
- const int kTopMask = 0x3800;
- __ push(eax);
- __ fwait();
- __ fnstsw_ax();
- __ test(eax, Immediate(kTopMask));
- __ Assert(zero, AbortReason::kFpuTopIsNotZeroInDeoptimizer);
- __ pop(eax);
- }
// Clear FPU all exceptions.
+ // TODO(ulan): Find out why the TOP register is not zero here in some cases,
+ // and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
// Mark the stack as not iterable for the CPU profiler which won't be able to
@@ -4115,19 +4096,57 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the current or next (in execution order) bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ push(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
+ // Spill the accumulator register; note that we're not within a frame, so we
+ // have to make sure to pop it before doing any GC-visible calls.
+ __ push(kInterpreterAccumulatorRegister);
+
// Get function from the frame.
Register closure = eax;
__ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = esi;
+ __ mov(code_obj,
+ FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(code_obj,
+ FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
+ kInterpreterBytecodeOffsetRegister);
+ __ j(equal, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ __ pop(kInterpreterAccumulatorRegister);
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
+ kInterpreterBytecodeOffsetRegister);
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+ }
+
+ // Load baseline code from baseline data.
+ __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Load the feedback vector.
Register feedback_vector = ecx;
__ mov(feedback_vector,
@@ -4150,14 +4169,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
feedback_vector);
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = esi;
- __ mov(code_obj,
- FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ mov(code_obj,
- FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -4209,7 +4220,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
Generate_OSREntry(masm, code_obj);
} else {
@@ -4230,10 +4241,23 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
}
__ bind(&install_baseline_code);
+ // Pop/re-push the accumulator so that it's spilled within the below frame
+ // scope, to keep the stack valid. Use ecx for this -- we can't save it in
+ // kInterpreterAccumulatorRegister because that aliases with closure.
+ DCHECK(!AreAliased(ecx, kContextRegister, closure));
+ __ pop(ecx);
+ // Restore the clobbered context register.
+ __ mov(kContextRegister,
+ Operand(ebp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ecx);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ // Now that we're restarting, we don't have to worry about closure and
+ // accumulator aliasing, so pop the spilled accumulator directly back into
+ // the right register.
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ jmp(&start);
@@ -4241,17 +4265,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
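
The renamed Generate_BaselineOrInterpreterEntry no longer assumes baseline code exists: it loads the function data from the SharedFunctionInfo, and only proceeds into baseline code when that data is a BaselineData object, otherwise tail-calling the interpreter entry builtins (for OSR, baseline code is assumed and asserted in debug builds). The decision, reduced to a plain C++ sketch with stand-in types:

// Stand-in types for illustration only; not V8's representation.
enum class Entry {
  kBaseline,
  kInterpreterAtCurrentBytecode,
  kInterpreterAtNextBytecode
};

struct FunctionData {
  bool is_baseline_data;  // models the BASELINE_DATA_TYPE instance-type check
};

Entry ChooseEntry(const FunctionData& data, bool next_bytecode, bool is_osr) {
  // For OSR entry it is safe to assume baseline code exists; otherwise fall
  // back to the interpreter entry builtins when there is none.
  if (is_osr || data.is_baseline_data) return Entry::kBaseline;
  return next_bytecode ? Entry::kInterpreterAtNextBytecode
                       : Entry::kInterpreterAtCurrentBytecode;
}
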
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 150e3d2cb5..c2652e7eb0 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -110,7 +110,7 @@ transitioning builtin CallIteratorWithFeedback(
iteratorMethod, %MakeLazy<JSAny, JSAny>('GetLazyReceiver', receiver),
context, feedback, callSlotUnTagged);
const iteratorCallable: Callable = Cast<Callable>(iteratorMethod)
- otherwise ThrowCalledNonCallable(iteratorMethod);
+ otherwise ThrowIteratorError(receiver);
return Call(context, iteratorCallable, receiver);
}
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 0f19f68c11..8f4bf4d06b 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -1018,7 +1018,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
- temps.Include(kScratchReg.bit() | kScratchReg2.bit());
+ temps.Include(s1.bit() | s2.bit());
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
@@ -1085,10 +1085,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecodeArray);
@@ -1243,10 +1243,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -3938,12 +3938,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ Push(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -3951,6 +3952,38 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = a1;
__ Lw(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = s1;
+ __ Lw(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Lw(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ GetObjectType(code_obj, t6, t6);
+ __ Branch(&start_with_baseline, eq, t6, Operand(BASELINE_DATA_TYPE));
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ GetObjectType(code_obj, t6, t6);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t6,
+ Operand(BASELINE_DATA_TYPE));
+ }
+
+ // Load baseline code from baseline data.
+ __ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ Lw(feedback_vector,
@@ -3972,14 +4005,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = s1;
- __ Lw(code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Lw(code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -4013,6 +4038,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ Lw(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = a0;
Register arg_reg_2 = a1;
@@ -4034,7 +4061,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ Lw(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
@@ -4058,25 +4085,29 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ Branch(&start);
}
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index ce1df3bd6a..45e1c32f82 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -1030,7 +1030,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
- temps.Include(kScratchReg.bit() | kScratchReg2.bit());
+ temps.Include(s1.bit() | s2.bit());
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
@@ -1097,10 +1097,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecodeArray);
@@ -1255,10 +1255,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -3523,12 +3523,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ Push(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -3536,6 +3537,38 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = a1;
__ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = s1;
+ __ Ld(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ GetObjectType(code_obj, t2, t2);
+ __ Branch(&start_with_baseline, eq, t2, Operand(BASELINE_DATA_TYPE));
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ GetObjectType(code_obj, t2, t2);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2,
+ Operand(BASELINE_DATA_TYPE));
+ }
+
+ // Load baseline code from baseline data.
+ __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ Ld(feedback_vector,
@@ -3556,14 +3589,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = s1;
- __ Ld(code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -3597,6 +3622,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = a0;
Register arg_reg_2 = a1;
@@ -3618,7 +3645,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
@@ -3642,8 +3669,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ Branch(&start);
@@ -3651,17 +3680,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index c0b7212aac..02b76175ec 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -125,11 +125,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-
- __ SmiToPtrArrayOffset(scratch, scratch);
- __ add(sp, sp, scratch);
- __ addi(sp, sp, Operand(kSystemPointerSize));
+ __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ blr();
__ bind(&stack_overflow);
@@ -286,11 +283,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-
- __ SmiToPtrArrayOffset(r4, r4);
- __ add(sp, sp, r4);
- __ addi(sp, sp, Operand(kSystemPointerSize));
+ __ DropArguments(r4, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ blr();
__ bind(&check_receiver);
@@ -407,7 +401,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ subi(r6, r6, Operand(1));
__ cmpi(r6, Operand::Zero());
__ blt(&done_loop);
- __ ShiftLeftImm(r10, r6, Operand(kTaggedSizeLog2));
+ __ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
__ add(scratch, r5, r10);
__ LoadAnyTaggedField(
scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
@@ -725,7 +719,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ cmpi(r7, Operand::Zero());
__ beq(&done);
- __ ShiftLeftImm(r9, r7, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r9, r7, Operand(kSystemPointerSizeLog2));
__ add(r8, r8, r9); // point to last arg
__ mtctr(r7);
@@ -821,7 +815,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Compute the size of the actual parameters + receiver (in bytes).
__ LoadU64(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
- __ ShiftLeftImm(actual_params_size, actual_params_size,
+ __ ShiftLeftU64(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
__ addi(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
@@ -835,7 +829,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
- __ add(sp, sp, params_size);
+ __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1129,12 +1124,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ li(r8, Operand(0));
__ StoreU16(r8,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
r0);
// Load initial bytecode offset.
@@ -1162,7 +1157,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ ShiftRightImm(r5, r5, Operand(kSystemPointerSizeLog2), SetRC);
+ __ ShiftRightU64(r5, r5, Operand(kSystemPointerSizeLog2), SetRC);
__ beq(&no_args, cr0);
__ mtctr(r5);
__ bind(&loop);
@@ -1181,7 +1176,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
r0);
__ cmpi(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
- __ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r8, r8, Operand(kSystemPointerSizeLog2));
__ StoreU64(r6, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
@@ -1204,7 +1199,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(r6, r6, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r6, r6, Operand(kSystemPointerSizeLog2));
__ LoadU64(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, r6));
__ Call(kJavaScriptCallCodeStartRegister);
@@ -1277,7 +1272,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
Register scratch) {
ASM_CODE_COMMENT(masm);
__ subi(scratch, num_args, Operand(1));
- __ ShiftLeftImm(scratch, scratch, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ sub(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, scratch, r0,
@@ -1483,7 +1478,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Register scratch = temps.Acquire();
__ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(scratch, scratch, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ LoadU64(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
@@ -1572,7 +1567,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// from LAZY is always the last argument.
__ addi(r3, r3,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
- __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r0, r3, Operand(kSystemPointerSizeLog2));
__ StoreU64(scratch, MemOperand(sp, r0));
// Recover arguments count.
__ subi(r3, r3,
@@ -1698,9 +1693,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, ip);
- __ StoreU64(r8, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1783,9 +1777,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, ip);
- __ StoreU64(r8, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1833,9 +1826,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, r0);
- __ StoreU64(r7, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1902,7 +1894,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label copy;
Register src = r9, dest = r8;
__ addi(src, sp, Operand(-kSystemPointerSize));
- __ ShiftLeftImm(r0, r7, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r0, r7, Operand(kSystemPointerSizeLog2));
__ sub(sp, sp, r0);
// Update stack pointer.
__ addi(dest, sp, Operand(-kSystemPointerSize));
@@ -1997,7 +1989,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ addi(r7, fp,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
- __ ShiftLeftImm(scratch, r5, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, r5, Operand(kSystemPointerSizeLog2));
__ add(r7, r7, scratch);
// Move the arguments already in the stack,
@@ -2007,7 +1999,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Register src = ip, dest = r5; // r7 and r10 are context and root.
__ addi(src, sp, Operand(-kSystemPointerSize));
// Update stack pointer.
- __ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, r8, Operand(kSystemPointerSizeLog2));
__ sub(sp, sp, scratch);
__ addi(dest, sp, Operand(-kSystemPointerSize));
__ addi(r0, r3, Operand(1));
@@ -2028,7 +2020,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&loop);
{
__ subi(r8, r8, Operand(1));
- __ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, r8, Operand(kSystemPointerSizeLog2));
__ LoadU64(r0, MemOperand(r7, scratch));
__ StoreU64(r0, MemOperand(r5, scratch));
__ cmpi(r8, Operand::Zero());
@@ -2176,7 +2168,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ ShiftLeftImm(r10, r7, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r10, r7, Operand(kSystemPointerSizeLog2));
__ sub(r0, sp, r10);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
@@ -2206,7 +2198,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ subi(r7, r7, Operand(1));
- __ ShiftLeftImm(scratch, r7, Operand(kTaggedSizeLog2));
+ __ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
__ add(scratch, scratch, r5);
__ LoadAnyTaggedField(scratch, MemOperand(scratch), r0);
__ Push(scratch);
@@ -2520,7 +2512,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mr(r4, r5);
} else {
// Compute the argv pointer.
- __ ShiftLeftImm(r4, r3, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r4, r3, Operand(kSystemPointerSizeLog2));
__ add(r4, r4, sp);
__ subi(r4, r4, Operand(kSystemPointerSize));
}
@@ -2756,7 +2748,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
__ oris(result_reg, result_reg,
Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
- __ slw(r0, result_reg, scratch);
+ __ ShiftLeftU32(r0, result_reg, scratch);
__ orx(result_reg, scratch_low, r0);
__ b(&negate);
@@ -2768,7 +2760,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// 52 <= exponent <= 83, shift only scratch_low.
// On entry, scratch contains: 52 - exponent.
__ neg(scratch, scratch);
- __ slw(result_reg, scratch_low, scratch);
+ __ ShiftLeftU32(result_reg, scratch_low, scratch);
__ bind(&negate);
// If input was positive, scratch_high ASR 31 equals 0 and
@@ -2831,7 +2823,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ lbz(scratch, MemOperand(scratch, 0));
__ cmpi(scratch, Operand::Zero());
- if (CpuFeatures::IsSupported(ISELECT)) {
+ if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
__ Move(scratch, thunk_ref);
__ isel(eq, scratch, function_address, scratch);
} else {
@@ -3025,7 +3017,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// from the API function here.
__ mov(scratch,
Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
- __ ShiftLeftImm(ip, argc, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2));
__ add(scratch, scratch, ip);
__ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
kSystemPointerSize));
@@ -3327,7 +3319,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
__ LoadU64(r7,
MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
- __ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r4, r4, Operand(kSystemPointerSizeLog2));
__ add(r4, r7, r4);
__ b(&outer_loop_header);
@@ -3420,12 +3412,14 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
__ bkpt(0);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
__ bkpt(0);
}
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index 03f20057e6..f79e392f48 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -1149,10 +1149,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecodeArray);
@@ -1315,10 +1315,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -3633,11 +3633,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
__ Push(zero_reg, kInterpreterAccumulatorRegister);
Label start;
__ bind(&start);
@@ -3646,6 +3648,46 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = a1;
__ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = a4;
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ GetObjectType(code_obj, scratch, scratch);
+ __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));
+
+ // Start with bytecode as there is no baseline code.
+ __ Pop(zero_reg, kInterpreterAccumulatorRegister);
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ GetObjectType(code_obj, scratch, scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(BASELINE_DATA_TYPE));
+ }
+
+ // Load baseline code from baseline data.
+ __ LoadTaggedPointerField(
+ code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ LoadTaggedPointerField(
@@ -3668,17 +3710,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = type;
- __ LoadTaggedPointerField(
- code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
__ Push(zero_reg, kInterpreterAccumulatorRegister);
ExternalReference get_baseline_pc_extref;
@@ -3731,7 +3762,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
@@ -3764,17 +3795,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 2370f5ed57..5129cc6ee3 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -121,11 +121,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-
- __ SmiToPtrArrayOffset(scratch, scratch);
- __ AddS64(sp, sp, scratch);
- __ AddS64(sp, sp, Operand(kSystemPointerSize));
+ __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ bind(&stack_overflow);
@@ -278,11 +275,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-
- __ SmiToPtrArrayOffset(r3, r3);
- __ AddS64(sp, sp, r3);
- __ AddS64(sp, sp, Operand(kSystemPointerSize));
+ __ DropArguments(r3, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ bind(&check_receiver);
@@ -870,7 +864,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LoadU64(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ LoadU32(params_size,
- FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
@@ -892,7 +886,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
- __ AddS64(sp, sp, params_size);
+ __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1174,12 +1169,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r1, Operand(0));
__ StoreU16(r1,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
r0);
// Load the initial bytecode offset.
@@ -1730,9 +1725,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r1));
- __ StoreU64(r7, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1816,9 +1810,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r1));
- __ StoreU64(r7, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1867,9 +1860,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r1));
- __ StoreU64(r6, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -3411,12 +3403,14 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
__ bkpt(0);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
__ bkpt(0);
}
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 6646bbfa80..2f94f6205f 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -161,7 +161,7 @@ transitioning macro ConstructByArrayLike(implicit context: Context)(
ThrowTypeError(MessageTemplate::kDetachedOperation, 'Construct');
} else if (src.elements_kind != elementsInfo.kind) {
- goto IfSlow;
+ goto IfElementsKindMismatch(src.elements_kind);
} else if (length > 0) {
const byteLength = typedArray.byte_length;
@@ -174,6 +174,12 @@ transitioning macro ConstructByArrayLike(implicit context: Context)(
typedArray.data_ptr, src.data_ptr, byteLength);
}
}
+ } label IfElementsKindMismatch(srcKind: ElementsKind) deferred {
+ if (IsBigInt64ElementsKind(srcKind) !=
+ IsBigInt64ElementsKind(elementsInfo.kind)) {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+ }
+ goto IfSlow;
} label IfSlow deferred {
if (length > 0) {
TypedArrayCopyElements(
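
The new IfElementsKindMismatch label makes mixing BigInt and non-BigInt element kinds an eager TypeError, while any other kind mismatch still falls through to the element-wise slow path. The check in isolation, as a hedged C++ sketch (enum values are illustrative):

#include <stdexcept>

enum class ElementsKind { kUint8, kInt32, kFloat64, kBigInt64, kBigUint64 };

bool IsBigIntKind(ElementsKind k) {
  return k == ElementsKind::kBigInt64 || k == ElementsKind::kBigUint64;
}

// Returns true when the fast memcpy path applies, false for the slow path;
// mixing BigInt and non-BigInt kinds is rejected up front.
bool CheckKindsForCopy(ElementsKind dst, ElementsKind src) {
  if (dst == src) return true;
  if (IsBigIntKind(dst) != IsBigIntKind(src))
    throw std::invalid_argument("TypeError: cannot mix BigInt and other types");
  return false;
}
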
diff --git a/deps/v8/src/builtins/typed-array-findlast.tq b/deps/v8/src/builtins/typed-array-findlast.tq
new file mode 100644
index 0000000000..634e17b936
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-findlast.tq
@@ -0,0 +1,112 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array {
+const kBuiltinNameFindLast: constexpr string =
+ '%TypedArray%.prototype.findLast';
+
+// Continuation part of
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
+// when array buffer was detached.
+transitioning builtin FindLastAllElementsDetachedContinuation(
+ implicit context: Context)(
+ array: JSTypedArray, predicate: Callable, thisArg: JSAny,
+ initialK: Number): JSAny {
+ // 6. Repeat, while k ≥ 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // there is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer was detached.
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const result =
+ Call(context, predicate, thisArg, Undefined, Convert<Number>(k), array);
+ // 6d. If testResult is true, return kValue.
+ if (ToBoolean(result)) {
+ return Undefined;
+ }
+
+ // 6e. Set k to k - 1. (done by the loop).
+ }
+
+ // 7. Return undefined.
+ return Undefined;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
+transitioning macro FindLastAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
+ thisArg: JSAny): JSAny labels
+Bailout(Number) {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ // 3. Let len be O.[[ArrayLength]].
+ const length: uintptr = witness.Get().length;
+ // 5. Let k be len - 1.
+ // 6. Repeat, while k ≥ 0
+ for (let k: uintptr = length; k-- > 0;) {
+ witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // there is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ const value: JSAny = witness.Load(k);
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const result = Call(
+ context, predicate, thisArg, value, Convert<Number>(k),
+ witness.GetStable());
+ // 6d. If testResult is true, return kValue.
+ if (ToBoolean(result)) {
+ return value;
+ }
+
+ // 6e. Set k to k - 1. (done by the loop).
+ }
+
+ // 7. Return undefined.
+ return Undefined;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
+transitioning javascript builtin
+TypedArrayPrototypeFindLast(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = thisArg
+ try {
+ // 1. Let O be the this value.
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ // 2. Perform ? ValidateTypedArray(O).
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+
+ // 4. If IsCallable(predicate) is false, throw a TypeError exception.
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+ try {
+ return FindLastAllElements(uarray, predicate, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Number) deferred {
+ return FindLastAllElementsDetachedContinuation(
+ uarray, predicate, thisArg, k);
+ }
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLast);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFindLast);
+ }
+}
+}
diff --git a/deps/v8/src/builtins/typed-array-findlastindex.tq b/deps/v8/src/builtins/typed-array-findlastindex.tq
new file mode 100644
index 0000000000..4b20114c91
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-findlastindex.tq
@@ -0,0 +1,115 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array {
+const kBuiltinNameFindLastIndex: constexpr string =
+ '%TypedArray%.prototype.findLastIndex';
+
+// Continuation part of
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
+// when array buffer was detached.
+transitioning builtin FindLastIndexAllElementsDetachedContinuation(
+ implicit context: Context)(
+ array: JSTypedArray, predicate: Callable, thisArg: JSAny,
+ initialK: Number): Number {
+ // 6. Repeat, while k ≥ 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to call ToString; elements are loaded directly by index.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer was detached.
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const indexNumber: Number = Convert<Number>(k);
+ const result =
+ Call(context, predicate, thisArg, Undefined, indexNumber, array);
+ // 6d. If testResult is true, return 𝔽(k).
+ if (ToBoolean(result)) {
+ return indexNumber;
+ }
+
+ // 6e. Set k to k - 1. (done by the loop).
+ }
+
+ // 7. Return -1𝔽.
+ return -1;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
+transitioning macro FindLastIndexAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
+ thisArg: JSAny): Number labels
+Bailout(Number) {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ // 3. Let len be O.[[ArrayLength]].
+ const length: uintptr = witness.Get().length;
+ // 5. Let k be len - 1.
+ // 6. Repeat, while k ≥ 0
+ for (let k: uintptr = length; k-- > 0;) {
+ witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to call ToString; elements are loaded directly by index.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ const value: JSAny = witness.Load(k);
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const indexNumber: Number = Convert<Number>(k);
+ const result = Call(
+ context, predicate, thisArg, value, indexNumber, witness.GetStable());
+ // 6d. If testResult is true, return 𝔽(k).
+ if (ToBoolean(result)) {
+ return indexNumber;
+ }
+
+ // 6e. Set k to k - 1. (done by the loop).
+ }
+
+ // 7. Return -1𝔽.
+ return -1;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
+transitioning javascript builtin
+TypedArrayPrototypeFindLastIndex(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = thisArg.
+ try {
+ // 1. Let O be the this value.
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ // 2. Perform ? ValidateTypedArray(O).
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+
+ // 4. If IsCallable(predicate) is false, throw a TypeError exception.
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+
+ try {
+ return FindLastIndexAllElements(uarray, predicate, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Number) deferred {
+ return FindLastIndexAllElementsDetachedContinuation(
+ uarray, predicate, thisArg, k);
+ }
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLastIndex);
+ } label IsDetached deferred {
+ ThrowTypeError(
+ MessageTemplate::kDetachedOperation, kBuiltinNameFindLastIndex);
+ }
+}
+}
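
The same shape as findLast, but returning the index; a minimal C++ sketch with illustrative names (FindLastIndex, elements, pred). The detached-buffer continuation above follows the same loop, except the predicate keeps receiving undefined as kValue while still getting the real index:

#include <cstddef>
#include <functional>
#include <vector>

// Sketch only: steps 5-7 of %TypedArray%.prototype.findLastIndex as quoted in
// the comments above. Returns F(k) for the last index whose element satisfies
// the predicate, or -1 when none does.
template <typename T>
double FindLastIndex(
    const std::vector<T>& elements,
    const std::function<bool(const T&, std::size_t)>& pred) {
  for (std::size_t k = elements.size(); k-- > 0;) {
    if (pred(elements[k], k)) return static_cast<double>(k);
  }
  return -1;
}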
diff --git a/deps/v8/src/builtins/typed-array-set.tq b/deps/v8/src/builtins/typed-array-set.tq
index b5c9dcb261..f4d2a40f41 100644
--- a/deps/v8/src/builtins/typed-array-set.tq
+++ b/deps/v8/src/builtins/typed-array-set.tq
@@ -281,7 +281,12 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
// value, true, Unordered).
// iii. Set srcByteIndex to srcByteIndex + 1.
// iv. Set targetByteIndex to targetByteIndex + 1.
- CallCMemmove(dstPtr, typedArray.data_ptr, countBytes);
+ if (IsSharedArrayBuffer(target.buffer)) {
+ // SABs need a relaxed memmove to preserve atomicity.
+ CallCRelaxedMemmove(dstPtr, typedArray.data_ptr, countBytes);
+ } else {
+ CallCMemmove(dstPtr, typedArray.data_ptr, countBytes);
+ }
} label IfSlow deferred {
// 22. If target.[[ContentType]] is not equal to
// typedArray.[[ContentType]], throw a TypeError exception.
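
The rationale for the branch: an ordinary memmove makes no guarantees about the width or atomicity of its individual loads and stores, so a concurrent agent reading the SharedArrayBuffer could observe torn values, and the copy would race with plain non-atomic accesses. A minimal C++ sketch of the idea behind a relaxed memmove, not V8's CallCRelaxedMemmove itself, with the buffers modelled as arrays of std::atomic<std::uint8_t>:

#include <atomic>
#include <cstddef>
#include <cstdint>

// Sketch only: copy byte by byte with relaxed atomic loads and stores so the
// copy never performs a non-atomic access that could race with readers in
// another thread. Overlap is handled memmove-style by picking a direction.
void RelaxedMemmoveSketch(std::atomic<std::uint8_t>* dst,
                          const std::atomic<std::uint8_t>* src,
                          std::size_t count) {
  if (count == 0 || dst == src) return;
  if (dst < src) {
    for (std::size_t i = 0; i < count; ++i) {
      dst[i].store(src[i].load(std::memory_order_relaxed),
                   std::memory_order_relaxed);
    }
  } else {
    for (std::size_t i = count; i-- > 0;) {
      dst[i].store(src[i].load(std::memory_order_relaxed),
                   std::memory_order_relaxed);
    }
  }
}

The same reasoning applies to the FastCopy change in typed-array-slice.tq below.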
diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq
index 60604c548f..2a18433f93 100644
--- a/deps/v8/src/builtins/typed-array-slice.tq
+++ b/deps/v8/src/builtins/typed-array-slice.tq
@@ -36,7 +36,12 @@ macro FastCopy(
assert(countBytes <= dest.byte_length);
assert(countBytes <= src.byte_length - startOffset);
- typed_array::CallCMemmove(dest.data_ptr, srcPtr, countBytes);
+ if (IsSharedArrayBuffer(src.buffer)) {
+ // SABs need a relaxed memmove to preserve atomicity.
+ typed_array::CallCRelaxedMemmove(dest.data_ptr, srcPtr, countBytes);
+ } else {
+ typed_array::CallCMemmove(dest.data_ptr, srcPtr, countBytes);
+ }
}
macro SlowCopy(implicit context: Context)(
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 2686005ba5..87bcb2fb59 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -65,6 +65,8 @@ extern macro TypedArrayBuiltinsAssembler::CallCMemset(
RawPtr, intptr, uintptr): void;
extern macro TypedArrayBuiltinsAssembler::CallCRelaxedMemcpy(
RawPtr, RawPtr, uintptr): void;
+extern macro TypedArrayBuiltinsAssembler::CallCRelaxedMemmove(
+ RawPtr, RawPtr, uintptr): void;
extern macro GetTypedArrayBuffer(implicit context: Context)(JSTypedArray):
JSArrayBuffer;
extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 993f8234af..14186e3be6 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -92,7 +92,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
Label stack_overflow;
- __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kFar);
+ __ StackOverflowCheck(rax, &stack_overflow, Label::kFar);
// Enter a construct frame.
{
@@ -129,10 +129,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
- __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
- __ PushReturnAddressFrom(rcx);
+ __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
@@ -228,9 +226,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
// Check if we have enough stack space to push all arguments.
- // Argument count in rax. Clobbers rcx.
+ // Argument count in rax.
Label stack_overflow;
- __ StackOverflowCheck(rax, rcx, &stack_overflow);
+ __ StackOverflowCheck(rax, &stack_overflow);
// TODO(victorgomes): When the arguments adaptor is completely removed, we
// should get the formal parameter count and copy the arguments in its
@@ -281,10 +279,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
- __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
- __ PushReturnAddressFrom(rcx);
+ __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// If the result is a smi, it is *not* an object in the ECMA sense.
@@ -599,9 +595,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r9 : receiver
// Check if we have enough stack space to push all arguments.
- // Argument count in rax. Clobbers rcx.
+ // Argument count in rax.
Label enough_stack_space, stack_overflow;
- __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(rax, &stack_overflow, Label::kNear);
__ jmp(&enough_stack_space, Label::kNear);
__ bind(&stack_overflow);
@@ -880,10 +876,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ leave();
// Drop receiver + arguments.
- Register return_pc = scratch2;
- __ PopReturnAddressTo(return_pc);
- __ addq(rsp, params_size);
- __ PushReturnAddressFrom(return_pc);
+ __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1187,10 +1181,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
// Load initial bytecode offset.
@@ -1396,7 +1390,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ leal(rcx, Operand(rax, 1)); // Add one for receiver.
// Add a stack check before pushing arguments.
- __ StackOverflowCheck(rcx, rdx, &stack_overflow);
+ __ StackOverflowCheck(rcx, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
@@ -1457,7 +1451,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Label stack_overflow;
// Add a stack check before pushing arguments.
- __ StackOverflowCheck(rax, r8, &stack_overflow);
+ __ StackOverflowCheck(rax, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
@@ -1704,11 +1698,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by
// writing a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ movw(
- FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
- Immediate(0));
+ __ movw(FieldOperand(bytecode_array,
+ BytecodeArray::kOsrLoopNestingLevelOffset),
+ Immediate(0));
__ Push(bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
@@ -1899,11 +1893,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ PopReturnAddressTo(rcx);
- __ leaq(rsp,
- Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize));
- __ Push(rdx);
- __ PushReturnAddressFrom(rcx);
+ __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2006,11 +1998,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ j(below, &done, Label::kNear);
__ movq(rbx, args[3]); // argumentsList
__ bind(&done);
- __ PopReturnAddressTo(rcx);
- __ leaq(rsp,
- Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize));
- __ Push(rdx);
- __ PushReturnAddressFrom(rcx);
+ __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2059,11 +2049,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ j(below, &done, Label::kNear);
__ movq(rdx, args[3]); // new.target
__ bind(&done);
- __ PopReturnAddressTo(rcx);
- __ leaq(rsp,
- Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize));
- __ PushRoot(RootIndex::kUndefinedValue);
- __ PushReturnAddressFrom(rcx);
+ __ DropArgumentsAndPushNewReceiver(
+ rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2120,7 +2109,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
Label stack_overflow;
- __ StackOverflowCheck(rcx, r8, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(rcx, &stack_overflow, Label::kNear);
// Push additional arguments onto the stack.
// Move the arguments already in the stack,
@@ -2222,7 +2211,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -----------------------------------
// Check for stack overflow.
- __ StackOverflowCheck(r8, r12, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(r8, &stack_overflow, Label::kNear);
// Forward the arguments from the caller frame.
// Move the arguments already in the stack,
@@ -3345,16 +3334,8 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// expected to be on the top of the stack).
// We cannot use just the ret instruction for this, because we cannot pass the
// number of slots to remove in a Register as an argument.
- Register return_addr = rbx;
- __ popq(return_addr);
- Register caller_frame_slots_count = param_count;
- // Add one to also pop the receiver. The receiver is passed to a JSFunction
- // over the stack but is neither included in the number of parameters passed
- // to this function nor in the number of parameters expected in this function.
- __ addq(caller_frame_slots_count, Immediate(1));
- __ shlq(caller_frame_slots_count, Immediate(kSystemPointerSizeLog2));
- __ addq(rsp, caller_frame_slots_count);
- __ pushq(return_addr);
+ __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// --------------------------------------------------------------------------
@@ -4377,12 +4358,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the current or next (in execution order) bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ pushq(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -4390,8 +4372,44 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = rdi;
__ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = rbx;
+ __ LoadTaggedPointerField(
+ code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ j(equal, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+ }
+
+ // Load baseline code from baseline data.
+ __ LoadTaggedPointerField(
+ code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+ }
+
// Load the feedback vector.
- Register feedback_vector = rbx;
+ Register feedback_vector = r11;
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
@@ -4412,19 +4430,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
feedback_vector);
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = rbx;
- __ LoadTaggedPointerField(
- code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- code_obj,
- FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadTaggedPointerField(
- code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
- }
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -4434,7 +4439,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
get_baseline_pc_extref =
ExternalReference::baseline_pc_for_bytecode_offset();
}
- Register get_baseline_pc = rax;
+ Register get_baseline_pc = r11;
__ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
// If the code deoptimizes during the implicit function entry stack interrupt
@@ -4457,6 +4462,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ movq(kInterpreterBytecodeArrayRegister,
MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ pushq(kInterpreterAccumulatorRegister);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ PrepareCallCFunction(3);
@@ -4474,7 +4480,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
Generate_OSREntry(masm, code_obj);
} else {
@@ -4497,8 +4503,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ pushq(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ popq(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ jmp(&start);
@@ -4506,17 +4514,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {