path: root/deps/v8/src/builtins
Diffstat (limited to 'deps/v8/src/builtins')
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc | 150
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 156
-rw-r--r--  deps/v8/src/builtins/array-lastindexof.tq | 5
-rw-r--r--  deps/v8/src/builtins/base.tq | 117
-rw-r--r--  deps/v8/src/builtins/builtins-api.cc | 25
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc | 33
-rw-r--r--  deps/v8/src/builtins/builtins-async-generator-gen.cc | 12
-rw-r--r--  deps/v8/src/builtins/builtins-async-iterator-gen.cc | 23
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc | 42
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.h | 7
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc | 37
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc | 52
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.h | 20
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc | 23
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h | 108
-rw-r--r--  deps/v8/src/builtins/builtins-descriptors.h | 28
-rw-r--r--  deps/v8/src/builtins/builtins-generator-gen.cc | 12
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc | 10
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc | 108
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.h | 16
-rw-r--r--  deps/v8/src/builtins/builtins-microtask-queue-gen.cc | 94
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc | 26
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc | 5
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h | 2
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc | 7
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc | 10
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.h | 11
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer.cc | 24
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc | 51
-rw-r--r--  deps/v8/src/builtins/builtins-string.tq | 2
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.cc | 11
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.h | 2
-rw-r--r--  deps/v8/src/builtins/builtins-utils-inl.h | 14
-rw-r--r--  deps/v8/src/builtins/builtins-utils.h | 23
-rw-r--r--  deps/v8/src/builtins/builtins-wasm-gen.cc | 186
-rw-r--r--  deps/v8/src/builtins/builtins-weak-refs.cc | 79
-rw-r--r--  deps/v8/src/builtins/builtins.cc | 2
-rw-r--r--  deps/v8/src/builtins/cast.tq | 4
-rw-r--r--  deps/v8/src/builtins/convert.tq | 6
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 265
-rw-r--r--  deps/v8/src/builtins/internal-coverage.tq | 32
-rw-r--r--  deps/v8/src/builtins/iterator.tq | 8
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc | 104
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 104
-rw-r--r--  deps/v8/src/builtins/number.tq | 2
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc | 159
-rw-r--r--  deps/v8/src/builtins/promise-abstract-operations.tq | 52
-rw-r--r--  deps/v8/src/builtins/promise-misc.tq | 59
-rw-r--r--  deps/v8/src/builtins/promise-reaction-job.tq | 50
-rw-r--r--  deps/v8/src/builtins/promise-resolve.tq | 9
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc | 363
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc | 7
-rw-r--r--  deps/v8/src/builtins/string-repeat.tq | 2
-rw-r--r--  deps/v8/src/builtins/torque-internal.tq | 11
-rw-r--r--  deps/v8/src/builtins/typed-array-createtypedarray.tq | 2
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc | 619
56 files changed, 2412 insertions(+), 979 deletions(-)
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 93caa8847e..49f578d1fd 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -6,15 +6,15 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
@@ -22,6 +22,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -65,10 +66,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -82,7 +89,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
@@ -413,7 +420,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
__ cmp(sp, scratch);
__ b(lo, &stack_overflow);
@@ -926,18 +933,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+ // will restore the original bytecode. In order to simplify the code, we have
+ // a backup of it.
+ Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
+ __ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode;
@@ -964,7 +980,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&process_bytecode);
-// Bailout to the return label if this is a return bytecode.
+ // Bailout to the return label if this is a return bytecode.
// Create cmp, cmpne, ..., cmpne to check for a return bytecode.
Condition flag = al;
@@ -977,9 +993,22 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ b(if_return, eq);
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ b(ne, &not_jump_loop);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ b(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ add(bytecode_offset, bytecode_offset, scratch1);
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
@@ -1085,7 +1114,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ sub(r9, sp, Operand(r4));
- LoadRealStackLimit(masm, r2);
+ LoadStackLimit(masm, r2, StackLimitKind::kRealStackLimit);
__ cmp(r9, Operand(r2));
__ b(lo, &stack_overflow);
@@ -1111,6 +1140,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r9, Operand::Zero());
__ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, r4, StackLimitKind::kInterruptStackLimit);
+ __ cmp(sp, r4);
+ __ b(lo, &stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1143,7 +1180,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2,
+ kInterpreterBytecodeOffsetRegister, r1, r2, r3,
&do_return);
__ jmp(&do_dispatch);
@@ -1152,6 +1189,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ str(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
+ __ str(r4, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1354,6 +1415,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ b(ge, &okay);
+ __ bkpt(0);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
@@ -1373,6 +1443,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ b(eq, &function_entry_bytecode);
+
// Load the current bytecode.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1380,15 +1456,25 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2,
+ kInterpreterBytecodeOffsetRegister, r1, r2, r3,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
__ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ b(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -1994,7 +2080,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Compute the space we have left. The stack might already be overflowed
// here which will cause remaining_stack_size to become negative.
- LoadRealStackLimit(masm, remaining_stack_size);
+ LoadStackLimit(masm, remaining_stack_size,
+ StackLimitKind::kRealStackLimit);
__ sub(remaining_stack_size, sp, remaining_stack_size);
// Check if the arguments will overflow the stack.
@@ -2228,7 +2315,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -----------------------------------
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
- __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmp(r2, Operand(kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
@@ -2433,6 +2520,35 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(r8);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values, we restore
+ // them after the runtime call.
+ constexpr DwVfpRegister lowest_fp_reg = DwVfpRegister::from_code(
+ WasmDebugBreakFrameConstants::kFirstPushedFpReg);
+ constexpr DwVfpRegister highest_fp_reg = DwVfpRegister::from_code(
+ WasmDebugBreakFrameConstants::kLastPushedFpReg);
+
+ // Store gp parameter registers.
+ __ stm(db_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
+ // Store fp parameter registers.
+ __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
+ __ ldm(ia_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
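
The arm changes above thread a second scratch register through AdvanceBytecodeOffsetOrReturn so it can keep a backup of the bytecode offset and, when the current bytecode is JumpLoop, re-execute that JumpLoop instead of advancing past it. The following is a minimal standalone C++ sketch of that decision logic, not V8 code; the bytecode names and sizes are illustrative stand-ins for the real interpreter tables.

#include <cstdio>
#include <optional>

enum class Bytecode { kWide, kExtraWide, kJumpLoop, kReturn, kLdaZero };

// Illustrative sizes; the real code reads them from bytecode_size_table.
int BytecodeSize(Bytecode b) { return b == Bytecode::kJumpLoop ? 3 : 1; }

// Returns the next offset, or std::nullopt when the (unprefixed) bytecode is
// a return, mirroring the bail-out to the if_return label.
std::optional<int> AdvanceOffsetOrReturn(const Bytecode* stream, int offset) {
  const int original_offset = offset;  // backup kept in the new scratch2
  Bytecode bytecode = stream[offset];
  if (bytecode == Bytecode::kWide || bytecode == Bytecode::kExtraWide) {
    ++offset;  // step over the prefix and reload the real bytecode
    bytecode = stream[offset];
  }
  if (bytecode == Bytecode::kReturn) return std::nullopt;
  if (bytecode == Bytecode::kJumpLoop) {
    // Do not advance: restore the original offset (undoing any prefix skip)
    // so the JumpLoop is re-executed and performs the back jump itself.
    return original_offset;
  }
  return offset + BytecodeSize(bytecode);  // normal case: skip this bytecode
}

int main() {
  const Bytecode stream[] = {Bytecode::kLdaZero, Bytecode::kWide,
                             Bytecode::kJumpLoop, Bytecode::kReturn};
  std::printf("%d\n", *AdvanceOffsetOrReturn(stream, 0));             // 1
  std::printf("%d\n", *AdvanceOffsetOrReturn(stream, 1));             // 1
  std::printf("%d\n", AdvanceOffsetOrReturn(stream, 3).has_value());  // 0
}
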
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index d4a4cbe0eb..9c38ae085e 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -6,15 +6,15 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
@@ -22,6 +22,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#if defined(V8_OS_WIN)
@@ -34,6 +35,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
+ __ CodeEntry();
+
__ Mov(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -65,10 +68,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -85,7 +94,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ Sub(scratch, sp, scratch);
@@ -465,7 +474,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, x10);
+ LoadStackLimit(masm, x10, StackLimitKind::kRealStackLimit);
__ Cmp(sp, x10);
__ B(lo, &stack_overflow);
@@ -689,10 +698,10 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// that.
{
Assembler::BlockPoolsScope block_pools(masm);
- __ bind(&handler_entry);
// Store the current pc as the handler offset. It's used later to create the
// handler table.
+ __ BindExceptionHandler(&handler_entry);
masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
// Caught exception: Store result (exception) in the pending exception
@@ -1049,17 +1058,26 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+ // will restore the original bytecode. In order to simplify the code, we have
+ // a backup of it.
+ Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Mov(bytecode_size_table, ExternalReference::bytecode_size_table_address());
+ __ Mov(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -1096,9 +1114,22 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ Cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ B(ne, &not_jump_loop);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Mov(bytecode_offset, original_bytecode_offset);
+ __ B(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ Add(bytecode_offset, bytecode_offset, scratch1);
+
+ __ Bind(&end);
}
// Generate code for entering a JS function with the interpreter.
@@ -1180,7 +1211,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
__ Bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ Push(lr, fp, cp, closure);
+ __ Push<TurboAssembler::kSignLR>(lr, fp, cp, closure);
__ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Reset code age.
@@ -1213,7 +1244,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
__ Cmp(x10, scratch);
}
__ B(lo, &stack_overflow);
@@ -1243,6 +1274,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Str(x3, MemOperand(fp, x10, LSL, kSystemPointerSizeLog2));
__ Bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, x10, StackLimitKind::kInterruptStackLimit);
+ __ Cmp(sp, x10);
+ __ B(lo, &stack_check_interrupt);
+ __ Bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1274,7 +1313,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2,
+ kInterpreterBytecodeOffsetRegister, x1, x2, x3,
&do_return);
__ B(&do_dispatch);
@@ -1283,6 +1322,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, x2);
__ Ret();
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ Str(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(x10, kInterpreterBytecodeOffsetRegister);
+ __ Str(x10, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1515,6 +1578,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ B(ge, &okay);
+ __ Unreachable();
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1531,6 +1603,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ B(eq, &function_entry_bytecode);
+
// Load the current bytecode.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1538,15 +1616,25 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2,
+ kInterpreterBytecodeOffsetRegister, x1, x2, x3,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
__ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ B(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -1608,7 +1696,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Restore fp, lr.
__ Mov(sp, fp);
- __ Pop(fp, lr);
+ __ Pop<TurboAssembler::kAuthLR>(fp, lr);
__ LoadEntryFromBuiltinIndex(builtin);
__ Jump(builtin);
@@ -1989,7 +2077,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
namespace {
void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ Push(lr, fp);
+ __ Push<TurboAssembler::kSignLR>(lr, fp);
__ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Push(x11, x1); // x1: function
__ SmiTag(x11, x0); // x0: number of arguments.
@@ -2005,7 +2093,7 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// then drop the parameters and the receiver.
__ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ Mov(sp, fp);
- __ Pop(fp, lr);
+ __ Pop<TurboAssembler::kAuthLR>(fp, lr);
// Drop actual parameters and receiver.
__ SmiUntag(x10);
@@ -2375,7 +2463,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
Label done;
- LoadRealStackLimit(masm, x10);
+ LoadStackLimit(masm, x10, StackLimitKind::kRealStackLimit);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
__ Sub(x10, sp, x10);
@@ -2714,7 +2802,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label create_adaptor_frame, dont_adapt_arguments, stack_overflow,
adapt_arguments_in_place;
- __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ Cmp(argc_expected, kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
// When the difference between argc_actual and argc_expected is odd, we
@@ -2950,6 +3038,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(x8);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values, we restore
+ // them after the runtime call.
+ __ PushXRegList(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ PushDRegList(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ PopDRegList(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ PopXRegList(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -3577,9 +3687,9 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
- __ Poke(lr, 0); // Store the return address.
+ __ Poke<TurboAssembler::kSignLR>(lr, 0); // Store the return address.
__ Blr(x10); // Call the C++ function.
- __ Peek(lr, 0); // Return to calling code.
+ __ Peek<TurboAssembler::kAuthLR>(lr, 0); // Return to calling code.
__ AssertFPCRState();
__ Ret();
}
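
Both the arm and arm64 trampolines now pick between two limits through the new StackLimitKind: the "real" limit, used only to detect genuine stack overflow, and the interrupt limit (address_of_jslimit), which the isolate can adjust to force a trip into Runtime::kStackGuard at function entry. A plain C++ sketch of that selection follows; the field names are illustrative, not the real Isolate layout.

#include <cstdint>
#include <cstdio>

enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };

struct IsolateLimits {
  uintptr_t real_jslimit;  // hard limit: crossing it means stack overflow
  uintptr_t jslimit;       // soft limit: the isolate can move it to request
                           // a StackGuard interrupt
};

uintptr_t LoadStackLimit(const IsolateLimits& limits, StackLimitKind kind) {
  return kind == StackLimitKind::kRealStackLimit ? limits.real_jslimit
                                                 : limits.jslimit;
}

// Mirrors the interrupt check added to InterpreterEntryTrampoline: if sp is
// below the soft limit, the trampoline calls Runtime::kStackGuard.
bool NeedsStackGuard(uintptr_t sp, const IsolateLimits& limits) {
  return sp < LoadStackLimit(limits, StackLimitKind::kInterruptStackLimit);
}

int main() {
  const IsolateLimits limits{0x1000, 0x2000};
  std::printf("%d\n", NeedsStackGuard(0x3000, limits));  // 0: enough stack
  std::printf("%d\n", NeedsStackGuard(0x1800, limits));  // 1: interrupt check
}
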
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
index 0ba3ea4030..52bcc75d19 100644
--- a/deps/v8/src/builtins/array-lastindexof.tq
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -61,9 +61,8 @@ namespace array {
GetFromIndex(context: Context, length: Number, arguments: Arguments): Number {
// 4. If fromIndex is present, let n be ? ToInteger(fromIndex);
// else let n be len - 1.
- const n: Number = arguments.length < 2 ?
- length - 1 :
- ToInteger_Inline(arguments[1], kTruncateMinusZero);
+ const n: Number =
+ arguments.length < 2 ? length - 1 : ToInteger_Inline(arguments[1]);
// 5. If n >= 0, then.
let k: Number = SmiConstant(0);
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 3aa75c7413..7d87a55e88 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -29,6 +29,8 @@ type Tagged generates 'TNode<MaybeObject>' constexpr 'MaybeObject';
type StrongTagged extends Tagged
generates 'TNode<Object>' constexpr 'ObjectPtr';
type Smi extends StrongTagged generates 'TNode<Smi>' constexpr 'Smi';
+type TaggedIndex extends StrongTagged
+ generates 'TNode<TaggedIndex>' constexpr 'TaggedIndex';
// A possibly cleared weak pointer with a bit pattern that distinguishes it from
// strong HeapObject pointers and Smi values.
type WeakHeapObject extends Tagged;
@@ -85,7 +87,7 @@ type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int31 extends int32
generates 'TNode<Int32T>' constexpr 'int31_t';
type uint31 extends uint32
- generates 'TNode<Uint32T>' constexpr 'uint31_t';
+ generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int16 extends int31
generates 'TNode<Int16T>' constexpr 'int16_t';
type uint16 extends uint31
@@ -104,6 +106,9 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
+// A Smi value containing a bitfield struct as its integer data.
+type SmiTagged<T : type extends uint31> extends Smi;
+
// WARNING: The memory representation (i.e., in class fields and arrays) of
// float64_or_hole is just a float64 that may be the hole-representing
// signalling NaN bit-pattern. So it's memory size is that of float64 and
@@ -156,7 +161,7 @@ type LayoutDescriptor extends ByteArray
generates 'TNode<LayoutDescriptor>';
extern class TransitionArray extends WeakFixedArray;
-type InstanceType extends uint16 constexpr 'v8::internal::InstanceType';
+type InstanceType extends uint16 constexpr 'InstanceType';
type NoSharedNameSentinel extends Smi;
@@ -177,9 +182,6 @@ type Callable = JSFunction|JSBoundFunction|CallableJSProxy|CallableApiObject;
type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
-type ToIntegerTruncationMode
-constexpr 'CodeStubAssembler::ToIntegerTruncationMode';
-
extern enum UnicodeEncoding { UTF16, UTF32 }
// Promise constants
@@ -345,10 +347,6 @@ const kSloppyArgumentsContextIndex: constexpr int31
const kSloppyArgumentsParameterMapStart: constexpr int31
generates 'SloppyArgumentsElements::kParameterMapStart';
-const kTruncateMinusZero: constexpr ToIntegerTruncationMode
- generates 'CodeStubAssembler::ToIntegerTruncationMode::kTruncateMinusZero'
- ;
-
extern enum PrimitiveType { kString, kBoolean, kSymbol, kNumber }
const kNameDictionaryInitialCapacity:
@@ -433,10 +431,53 @@ extern macro Comment(constexpr string);
extern macro StaticAssert(bool);
extern macro Print(Object);
extern macro DebugBreak();
-extern transitioning macro ToInteger_Inline(implicit context: Context)(JSAny):
- Number;
-extern transitioning macro ToInteger_Inline(implicit context: Context)(
- JSAny, constexpr ToIntegerTruncationMode): Number;
+
+// ES6 7.1.4 ToInteger ( argument )
+transitioning macro ToIntegerImpl(implicit context: Context)(input: Object):
+ Number {
+ let input = input;
+
+ while (true) {
+ typeswitch (input) {
+ case (s: Smi): {
+ return s;
+ }
+ case (hn: HeapNumber): {
+ let value = Convert<float64>(hn);
+ if (Float64IsNaN(value)) return SmiConstant(0);
+ value = math::Float64Trunc(value);
+ // ToInteger normalizes -0 to +0.
+ if (value == 0.0) return SmiConstant(0);
+ const result = ChangeFloat64ToTagged(value);
+ assert(IsNumberNormalized(result));
+ return result;
+ }
+ case (ho: HeapObject): {
+ input = math::NonNumberToNumber(ho);
+ }
+ }
+ }
+ unreachable;
+}
+
+transitioning builtin ToInteger(implicit context: Context)(input: Object):
+ Number {
+ return ToIntegerImpl(input);
+}
+
+@export
+transitioning macro ToInteger_Inline(implicit context: Context)(input: Object):
+ Number {
+ typeswitch (input) {
+ case (s: Smi): {
+ return s;
+ }
+ case (ho: HeapObject): {
+ return ToInteger(ho);
+ }
+ }
+}
+
extern transitioning macro ToLength_Inline(implicit context: Context)(JSAny):
Number;
extern transitioning macro ToNumber_Inline(implicit context: Context)(JSAny):
@@ -459,7 +500,8 @@ extern transitioning builtin HasProperty(implicit context: Context)(
JSAny, JSAny): Boolean;
extern transitioning macro HasProperty_Inline(implicit context: Context)(
JSReceiver, JSAny): Boolean;
-extern builtin LoadIC(Context, JSAny, JSAny, Smi, FeedbackVector): JSAny;
+extern builtin LoadIC(
+ Context, JSAny, JSAny, TaggedIndex, FeedbackVector): JSAny;
extern macro CollectCallFeedback(
JSAny, Context, Undefined | FeedbackVector, uintptr);
@@ -514,7 +556,6 @@ extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32):
extern builtin ToObject(Context, JSAny): JSReceiver;
extern macro ToObject_Inline(Context, JSAny): JSReceiver;
extern macro IsNullOrUndefined(Object): bool;
-extern macro IsTheHole(Object): bool;
extern macro IsString(HeapObject): bool;
transitioning builtin ToString(context: Context, o: JSAny): String {
return ToStringImpl(context, o);
@@ -724,6 +765,10 @@ ConstexprInt31NotEqual(constexpr int31, constexpr int31): constexpr bool;
extern operator '>=' macro
ConstexprInt31GreaterThanEqual(
constexpr int31, constexpr int31): constexpr bool;
+extern operator '==' macro ConstexprInt32Equal(
+ constexpr int32, constexpr int32): constexpr bool;
+extern operator '!=' macro ConstexprInt32NotEqual(
+ constexpr int32, constexpr int32): constexpr bool;
extern operator '==' macro Word32Equal(int32, int32): bool;
extern operator '==' macro Word32Equal(uint32, uint32): bool;
@@ -833,7 +878,14 @@ extern macro SmiTag(intptr): Smi;
extern macro SmiFromInt32(int32): Smi;
extern macro SmiFromUint32(uint32): Smi;
extern macro SmiUntag(Smi): intptr;
+macro SmiUntag<T: type>(value: SmiTagged<T>): T {
+ return %RawDownCast<T>(Unsigned(SmiToInt32(Convert<Smi>(value))));
+}
extern macro SmiToInt32(Smi): int32;
+extern macro TaggedIndexToIntPtr(TaggedIndex): intptr;
+extern macro IntPtrToTaggedIndex(intptr): TaggedIndex;
+extern macro TaggedIndexToSmi(TaggedIndex): Smi;
+extern macro SmiToTaggedIndex(Smi): TaggedIndex;
extern macro RoundIntPtrToFloat64(intptr): float64;
extern macro ChangeFloat32ToFloat64(float32): float64;
extern macro ChangeNumberToFloat64(Number): float64;
@@ -888,6 +940,10 @@ macro UnsafeCast<A : type extends Object>(implicit context: Context)(o: Object):
return %RawDownCast<A>(o);
}
+macro UnsafeConstCast<T: type>(r: const &T):&T {
+ return %RawDownCast<&T>(r);
+}
+
extern macro FixedArrayMapConstant(): Map;
extern macro FixedDoubleArrayMapConstant(): Map;
extern macro FixedCOWArrayMapConstant(): Map;
@@ -1206,7 +1262,7 @@ macro ChangeSafeIntegerNumberToUintPtr(value: Number):
transitioning macro ToUintPtr(implicit context: Context)(value: JSAny):
uintptr labels IfLessThanZero, IfUIntPtrOverflow, IfSafeIntegerOverflow {
if (value == Undefined) return 0;
- const indexNumber = ToInteger_Inline(value, kTruncateMinusZero);
+ const indexNumber = ToInteger_Inline(value);
return TryNumberToUintPtr(indexNumber, kModeValueIsAnyNumber)
otherwise IfLessThanZero, IfUIntPtrOverflow, IfSafeIntegerOverflow;
}
@@ -1220,7 +1276,7 @@ transitioning macro ToUintPtr(implicit context: Context)(value: JSAny):
transitioning macro ToIndex(implicit context: Context)(value: JSAny):
uintptr labels IfRangeError {
if (value == Undefined) return 0;
- const indexNumber = ToInteger_Inline(value, kTruncateMinusZero);
+ const indexNumber = ToInteger_Inline(value);
// Less than 0 case, uintptr range overflow and safe integer range overflow
// imply IfRangeError.
return TryNumberToUintPtr(indexNumber, kModeValueIsAnyNumber)
@@ -1293,7 +1349,7 @@ extern macro IsOneByteStringInstanceType(InstanceType): bool;
@export
transitioning macro ConvertToRelativeIndex(implicit context: Context)(
index: JSAny, length: uintptr): uintptr {
- const indexNumber: Number = ToInteger_Inline(index, kTruncateMinusZero);
+ const indexNumber: Number = ToInteger_Inline(index);
return ConvertToRelativeIndex(indexNumber, length);
}
@@ -1340,7 +1396,7 @@ macro ConvertToRelativeIndex(indexNumber: Number, length: uintptr): uintptr {
@export
transitioning macro ClampToIndexRange(implicit context: Context)(
index: JSAny, limit: uintptr): uintptr {
- const indexNumber: Number = ToInteger_Inline(index, kTruncateMinusZero);
+ const indexNumber: Number = ToInteger_Inline(index);
return ClampToIndexRange(indexNumber, limit);
}
@@ -1560,6 +1616,29 @@ builtin CheckNumberInRange(implicit context: Context)(
}
}
+// Assert that the objects satisfy SameValue or are both the hole.
+builtin CheckSameObject(implicit context: Context)(
+ lhs: Object, rhs: Object): Undefined {
+ typeswitch (lhs) {
+ case (TheHole): {
+ if (rhs == TheHole) return Undefined;
+ }
+ case (a: JSAny): {
+ typeswitch (rhs) {
+ case (b: JSAny): {
+ if (SameValue(a, b)) return Undefined;
+ }
+ case (Object): {
+ }
+ }
+ }
+ case (Object): {
+ }
+ }
+ Print('Distinct or unexpected values in CheckSameObject');
+ unreachable;
+}
+
macro ReplaceTheHoleWithUndefined(o: JSAny|TheHole): JSAny {
typeswitch (o) {
case (TheHole): {
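
The base.tq changes fold the old kTruncateMinusZero mode into a single ToInteger/ToInteger_Inline, so callers such as ConvertToRelativeIndex no longer pass a truncation mode. For the numeric path, the semantics that the new ToIntegerImpl encodes can be summarized in standalone C++ as below; the HeapObject case, which first coerces to a Number, is omitted, and this is a sketch of the spec behaviour rather than the Torque code itself.

#include <cmath>
#include <cstdio>

// ES ToInteger on an already-numeric value: NaN maps to +0, the value is
// truncated toward zero, and -0 is normalized to +0.
double ToIntegerNumeric(double value) {
  if (std::isnan(value)) return 0.0;  // NaN -> +0
  value = std::trunc(value);          // drop the fractional part
  if (value == 0.0) return 0.0;       // normalizes -0 to +0
  return value;
}

int main() {
  std::printf("%g\n", ToIntegerNumeric(4.7));   // 4
  std::printf("%g\n", ToIntegerNumeric(-0.0));  // 0
  std::printf("%g\n", ToIntegerNumeric(NAN));   // 0
}
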
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 0c30e52154..6eb6f87c74 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -105,9 +105,9 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
Object data_obj = call_data.data();
- FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
- *new_target, args.address_of_arg_at(1),
- args.length() - 1);
+ FunctionCallbackArguments custom(
+ isolate, data_obj, *function, raw_holder, *new_target,
+ args.address_of_first_argument(), args.length() - 1);
Handle<Object> result = custom.Call(call_data);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
@@ -206,6 +206,18 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
} else {
argv = new Address[frame_argc];
}
+#ifdef V8_REVERSE_JSARGS
+ argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
+ argv[BuiltinArguments::kTargetOffset] = function->ptr();
+ argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc).ptr();
+ argv[BuiltinArguments::kPaddingOffset] =
+ ReadOnlyRoots(isolate).the_hole_value().ptr();
+ int cursor = BuiltinArguments::kNumExtraArgs;
+ argv[cursor++] = receiver->ptr();
+ for (int i = 0; i < argc; ++i) {
+ argv[cursor++] = args[i]->ptr();
+ }
+#else
int cursor = frame_argc - 1;
argv[cursor--] = receiver->ptr();
for (int i = 0; i < argc; ++i) {
@@ -217,6 +229,7 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc).ptr();
argv[BuiltinArguments::kTargetOffset] = function->ptr();
argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
+#endif
MaybeHandle<Object> result;
{
RelocatableArguments arguments(isolate, frame_argc, &argv[frame_argc - 1]);
@@ -269,9 +282,9 @@ V8_WARN_UNUSED_RESULT static Object HandleApiCallAsFunctionOrConstructor(
{
HandleScope scope(isolate);
LOG(isolate, ApiObjectAccess("call non-function", obj));
- FunctionCallbackArguments custom(isolate, call_data.data(), constructor,
- obj, new_target, args.address_of_arg_at(1),
- args.length() - 1);
+ FunctionCallbackArguments custom(
+ isolate, call_data.data(), constructor, obj, new_target,
+ args.address_of_first_argument(), args.length() - 1);
Handle<Object> result_handle = custom.Call(call_data);
if (result_handle.is_null()) {
result = ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 1892406305..734b9b634a 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -50,8 +50,7 @@ TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor(
// 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
TNode<Number> k_number = ChangeUintPtrToTagged(k);
TNode<Object> mapped_value =
- CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
- k_value, k_number, o());
+ Call(context(), callbackfn(), this_arg(), k_value, k_number, o());
Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
// 8. d. Perform ? Set(A, Pk, mapped_value, true).
@@ -544,8 +543,8 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
};
TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
@@ -1071,8 +1070,8 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(
}
TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIncludes, argc, context);
@@ -1112,8 +1111,8 @@ TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
}
TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIndexOf, argc, context);
@@ -1420,9 +1419,9 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// 1. Set element to ? Call(mapperFunction, thisArg , « element,
// sourceIndex, source »).
- element_maybe_smi = CallJS(CodeFactory::Call(isolate()), context,
- mapper_function.value(), this_arg.value(),
- element_maybe_smi, source_index, source);
+ element_maybe_smi =
+ Call(context, mapper_function.value(), this_arg.value(),
+ element_maybe_smi, source_index, source);
}
// iii. Let shouldFlatten be false.
@@ -1541,8 +1540,8 @@ TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat
TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
- const TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
const TNode<Object> receiver = args.GetReceiver();
@@ -1583,8 +1582,8 @@ TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
- const TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
const TNode<Object> receiver = args.GetReceiver();
@@ -1871,8 +1870,8 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
BIND(&call_runtime);
{
- TailCallRuntime(Runtime::kNewArray, context, array_function, array_size,
- array_function, allocation_site);
+ TailCallRuntimeNewArray(context, array_function, array_size, array_function,
+ allocation_site);
}
}
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index acc7721465..592400415b 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -311,8 +311,8 @@ AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
@@ -329,8 +329,8 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
@@ -347,8 +347,8 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 1515605649..b138515af6 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -13,8 +13,6 @@
namespace v8 {
namespace internal {
-using compiler::Node;
-
namespace {
class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
public:
@@ -130,9 +128,11 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
BIND(&if_isnotundefined);
}
- const TNode<Object> iter_result = CallJS(
- CodeFactory::Call(isolate()), context, method, sync_iterator, sent_value);
- GotoIfException(iter_result, &reject_promise, &var_exception);
+ TNode<Object> iter_result;
+ {
+ ScopedExceptionHandler handler(this, &reject_promise, &var_exception);
+ iter_result = Call(context, method, sync_iterator, sent_value);
+ }
TNode<Object> value;
TNode<Oddball> done;
@@ -144,10 +144,13 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
CSA_ASSERT(this, IsConstructor(promise_fun));
// Let valueWrapper be PromiseResolve(%Promise%, « value »).
- const TNode<Object> value_wrapper = CallBuiltin(
- Builtins::kPromiseResolve, native_context, promise_fun, value);
// IfAbruptRejectPromise(valueWrapper, promiseCapability).
- GotoIfException(value_wrapper, &reject_promise, &var_exception);
+ TNode<Object> value_wrapper;
+ {
+ ScopedExceptionHandler handler(this, &reject_promise, &var_exception);
+ value_wrapper = CallBuiltin(Builtins::kPromiseResolve, native_context,
+ promise_fun, value);
+ }
// Let onFulfilled be a new built-in function object as defined in
// Async Iterator Value Unwrap Functions.
@@ -200,17 +203,17 @@ AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
BIND(&if_slowpath);
{
+ ScopedExceptionHandler handler(this, if_exception, var_exception);
+
// Let nextDone be IteratorComplete(nextResult).
// IfAbruptRejectPromise(nextDone, promiseCapability).
const TNode<Object> done =
GetProperty(context, iter_result, factory()->done_string());
- GotoIfException(done, if_exception, var_exception);
// Let nextValue be IteratorValue(nextResult).
// IfAbruptRejectPromise(nextValue, promiseCapability).
const TNode<Object> value =
GetProperty(context, iter_result, factory()->value_string());
- GotoIfException(value, if_exception, var_exception);
var_value = value;
var_done = done;
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index cfe82594df..1b53e9ca8e 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -65,14 +65,14 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
}
void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
- TNode<Object> target, SloppyTNode<Object> new_target,
+ TNode<Object> target, base::Optional<TNode<Object>> new_target,
TNode<Object> arguments_list, TNode<Context> context) {
Label if_done(this), if_arguments(this), if_array(this),
if_holey_array(this, Label::kDeferred),
if_runtime(this, Label::kDeferred);
// Perform appropriate checks on {target} (and {new_target} first).
- if (new_target == nullptr) {
+ if (!new_target) {
// Check that {target} is Callable.
Label if_target_callable(this),
if_target_not_callable(this, Label::kDeferred);
@@ -102,12 +102,12 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
// Check that {new_target} is a Constructor.
Label if_new_target_constructor(this),
if_new_target_not_constructor(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(new_target), &if_new_target_not_constructor);
- Branch(IsConstructor(CAST(new_target)), &if_new_target_constructor,
+ GotoIf(TaggedIsSmi(*new_target), &if_new_target_not_constructor);
+ Branch(IsConstructor(CAST(*new_target)), &if_new_target_constructor,
&if_new_target_not_constructor);
BIND(&if_new_target_not_constructor);
{
- CallRuntime(Runtime::kThrowNotConstructor, context, new_target);
+ CallRuntime(Runtime::kThrowNotConstructor, context, *new_target);
Unreachable();
}
BIND(&if_new_target_constructor);
@@ -215,12 +215,12 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
BIND(&if_not_double);
{
- if (new_target == nullptr) {
+ if (!new_target) {
Callable callable = CodeFactory::CallVarargs(isolate());
TailCallStub(callable, context, target, args_count, length, elements);
} else {
Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count, length,
+ TailCallStub(callable, context, target, *new_target, args_count, length,
elements);
}
}
@@ -240,7 +240,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
// boxed as HeapNumbers, then tail calls CallVarargs/ConstructVarargs depending
// on whether {new_target} was passed.
void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
- TNode<Object> target, SloppyTNode<Object> new_target,
+ TNode<Object> target, base::Optional<TNode<Object>> new_target,
TNode<FixedDoubleArray> elements, TNode<Int32T> length,
TNode<Int32T> args_count, TNode<Context> context, TNode<Int32T> kind) {
const ElementsKind new_kind = PACKED_ELEMENTS;
@@ -258,19 +258,19 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
new_elements, intptr_length, intptr_length,
barrier_mode);
- if (new_target == nullptr) {
+ if (!new_target) {
Callable callable = CodeFactory::CallVarargs(isolate());
TailCallStub(callable, context, target, args_count, length, new_elements);
} else {
Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count, length,
+ TailCallStub(callable, context, target, *new_target, args_count, length,
new_elements);
}
}
void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
- TNode<Object> target, TNode<Object> new_target, TNode<Object> spread,
- TNode<Int32T> args_count, TNode<Context> context) {
+ TNode<Object> target, base::Optional<TNode<Object>> new_target,
+ TNode<Object> spread, TNode<Int32T> args_count, TNode<Context> context) {
Label if_smiorobject(this), if_double(this),
if_generic(this, Label::kDeferred);
@@ -316,7 +316,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
BIND(&if_generic);
{
- Label if_iterator_fn_not_callable(this, Label::kDeferred);
+ Label if_iterator_fn_not_callable(this, Label::kDeferred),
+ if_iterator_is_null_or_undefined(this, Label::kDeferred);
+
+ GotoIf(IsNullOrUndefined(spread), &if_iterator_is_null_or_undefined);
+
TNode<Object> iterator_fn =
GetProperty(context, spread, IteratorSymbolConstant());
GotoIfNot(TaggedIsCallable(iterator_fn), &if_iterator_fn_not_callable);
@@ -333,6 +337,10 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
BIND(&if_iterator_fn_not_callable);
ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable);
+
+ BIND(&if_iterator_is_null_or_undefined);
+ CallRuntime(Runtime::kThrowSpreadArgIsNullOrUndefined, context, spread);
+ Unreachable();
}
BIND(&if_smiorobject);
@@ -342,12 +350,12 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
CSA_ASSERT(this, Int32LessThanOrEqual(
length, Int32Constant(FixedArray::kMaxLength)));
- if (new_target == nullptr) {
+ if (!new_target) {
Callable callable = CodeFactory::CallVarargs(isolate());
TailCallStub(callable, context, target, args_count, length, elements);
} else {
Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count, length,
+ TailCallStub(callable, context, target, *new_target, args_count, length,
elements);
}
}
@@ -363,7 +371,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- SloppyTNode<Object> new_target = nullptr;
+ base::Optional<TNode<Object>> new_target = base::nullopt;
TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
@@ -371,7 +379,7 @@ TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- SloppyTNode<Object> new_target = nullptr;
+ base::Optional<TNode<Object>> new_target = base::nullopt;
TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
TNode<Int32T> args_count =
UncheckedCast<Int32T>(Parameter(Descriptor::kArgumentsCount));
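The hunks above replace a nullable SloppyTNode<Object> new_target with base::Optional<TNode<Object>>, so the call-versus-construct decision becomes a presence check instead of a comparison against nullptr, and the node is only dereferenced on the construct path. A minimal standalone sketch of the same pattern, using std::optional and a string stand-in rather than V8 node types (hypothetical names, not V8 code):

#include <iostream>
#include <optional>
#include <string>

using Object = std::string;  // stand-in for TNode<Object>

// Mirrors the shape of CallOrConstructWithSpread above: an absent new_target
// selects the Call path, an engaged one is dereferenced on the Construct path.
void Dispatch(const Object& target, const std::optional<Object>& new_target) {
  if (!new_target) {
    std::cout << "CallVarargs(" << target << ")\n";
  } else {
    std::cout << "ConstructVarargs(" << target << ", " << *new_target << ")\n";
  }
}

int main() {
  Dispatch("f", std::nullopt);          // e.g. the CallWithSpread entry point
  Dispatch("C", Object("new.target"));  // e.g. the ConstructWithSpread entry point
  return 0;
}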
diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h
index a15f31dd09..d54e4405e0 100644
--- a/deps/v8/src/builtins/builtins-call-gen.h
+++ b/deps/v8/src/builtins/builtins-call-gen.h
@@ -16,16 +16,17 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
void CallOrConstructWithArrayLike(TNode<Object> target,
- SloppyTNode<Object> new_target,
+ base::Optional<TNode<Object>> new_target,
TNode<Object> arguments_list,
TNode<Context> context);
void CallOrConstructDoubleVarargs(TNode<Object> target,
- SloppyTNode<Object> new_target,
+ base::Optional<TNode<Object>> new_target,
TNode<FixedDoubleArray> elements,
TNode<Int32T> length,
TNode<Int32T> args_count,
TNode<Context> context, TNode<Int32T> kind);
- void CallOrConstructWithSpread(TNode<Object> target, TNode<Object> new_target,
+ void CallOrConstructWithSpread(TNode<Object> target,
+ base::Optional<TNode<Object>> new_target,
TNode<Object> spread, TNode<Int32T> args_count,
TNode<Context> context);
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index df758e0e9f..df0ebce993 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -14,6 +14,7 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
@@ -150,8 +151,7 @@ void BaseCollectionsAssembler::AddConstructorEntry(
TNode<Object> add_function, TNode<Object> key_value,
Label* if_may_have_side_effects, Label* if_exception,
TVariable<Object>* var_exception) {
- compiler::CodeAssemblerScopedExceptionHandler handler(this, if_exception,
- var_exception);
+ compiler::ScopedExceptionHandler handler(this, if_exception, var_exception);
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
if (variant == kMap || variant == kWeakMap) {
TorqueStructKeyValuePair pair =
@@ -161,12 +161,10 @@ void BaseCollectionsAssembler::AddConstructorEntry(
: LoadKeyValuePair(context, key_value);
TNode<Object> key_n = pair.key;
TNode<Object> value_n = pair.value;
- CallJS(CodeFactory::Call(isolate()), context, add_function, collection,
- key_n, value_n);
+ Call(context, add_function, collection, key_n, value_n);
} else {
DCHECK(variant == kSet || variant == kWeakSet);
- CallJS(CodeFactory::Call(isolate()), context, add_function, collection,
- key_value);
+ Call(context, add_function, collection, key_value);
}
}
@@ -854,8 +852,8 @@ TNode<HeapObject> CollectionsBuiltinsAssembler::AllocateTable(
TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GenerateConstructor(kMap, isolate()->factory()->Map_string(), new_target,
@@ -864,8 +862,8 @@ TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GenerateConstructor(kSet, isolate()->factory()->Set_string(), new_target,
@@ -2035,8 +2033,7 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
// Invoke the {callback} passing the {entry_key}, {entry_value} and the
// {receiver}.
- CallJS(CodeFactory::Call(isolate()), context, callback, this_arg,
- entry_value, entry_key, receiver);
+ Call(context, callback, this_arg, entry_value, entry_key, receiver);
// Continue with the next entry.
var_index = index;
@@ -2266,8 +2263,7 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
NextSkipHoles<OrderedHashSet>(table, index, &done_loop);
// Invoke the {callback} passing the {entry_key} (twice) and the {receiver}.
- CallJS(CodeFactory::Call(isolate()), context, callback, this_arg, entry_key,
- entry_key, receiver);
+ Call(context, callback, this_arg, entry_key, entry_key, receiver);
// Continue with the next entry.
var_index = index;
@@ -2513,8 +2509,9 @@ TNode<HeapObject> WeakCollectionsBuiltinsAssembler::AllocateTable(
TNode<FixedArray> table = CAST(
AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation));
- RootIndex map_root_index = EphemeronHashTableShape::GetMapRootIndex();
- StoreMapNoWriteBarrier(table, map_root_index);
+ TNode<Map> map =
+ HeapConstant(EphemeronHashTableShape::GetMap(ReadOnlyRoots(isolate())));
+ StoreMapNoWriteBarrier(table, map);
StoreFixedArrayElement(table, EphemeronHashTable::kNumberOfElementsIndex,
SmiConstant(0), SKIP_WRITE_BARRIER);
StoreFixedArrayElement(table,
@@ -2703,8 +2700,8 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GenerateConstructor(kWeakMap, isolate()->factory()->WeakMap_string(),
@@ -2713,8 +2710,8 @@ TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GenerateConstructor(kWeakSet, isolate()->factory()->WeakSet_string(),
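One pattern repeats throughout this file and in the constructor and object builtins below: the JS actual-argument-count parameter is now explicitly typed as Int32T before being widened, presumably because the untyped Parameter() node no longer converts implicitly to the typed ChangeInt32ToIntPtr overload. Condensed from the hunks above, not a complete builtin:

  TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
      UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
  CodeStubArguments args(this, argc);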
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 01f3db63f3..c706ce9306 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -38,7 +38,7 @@ void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- SloppyTNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
@@ -46,7 +46,7 @@ TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- SloppyTNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
TNode<Int32T> args_count =
UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
@@ -160,8 +160,8 @@ TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
}
TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
- SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
- SloppyTNode<JSReceiver> new_target) {
+ TNode<Context> context, TNode<JSFunction> target,
+ TNode<JSReceiver> new_target) {
TVARIABLE(JSObject, var_obj);
Label call_runtime(this), end(this);
@@ -177,8 +177,8 @@ TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
}
TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
- SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
- SloppyTNode<JSReceiver> new_target, Label* call_runtime) {
+ TNode<Context> context, TNode<JSFunction> target,
+ TNode<JSReceiver> new_target, Label* call_runtime) {
// Verify that the new target is a JSFunction.
Label end(this);
TNode<JSFunction> new_target_func =
@@ -284,7 +284,7 @@ TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
}
TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
- TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector, TNode<TaggedIndex> slot,
TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context) {
Label call_runtime(this, Label::kDeferred), end(this);
@@ -311,8 +311,7 @@ TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
BIND(&call_runtime);
{
result = CAST(CallRuntime(Runtime::kCreateRegExpLiteral, context,
- maybe_feedback_vector, SmiTag(Signed(slot)),
- pattern, flags));
+ maybe_feedback_vector, slot, pattern, flags));
Goto(&end);
}
@@ -323,7 +322,7 @@ TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
TNode<HeapObject> maybe_feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
- TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Object> pattern = CAST(Parameter(Descriptor::kPattern));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -333,7 +332,7 @@ TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
}
TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context, Label* call_runtime,
AllocationSiteMode allocation_site_mode) {
Label zero_capacity(this), cow_elements(this), fast_elements(this),
@@ -356,7 +355,7 @@ TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
TNode<FeedbackVector> feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
- TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<ArrayBoilerplateDescription> constant_elements =
CAST(Parameter(Descriptor::kConstantElements));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -371,13 +370,12 @@ TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
int const flags =
AggregateLiteral::kDisableMementos | AggregateLiteral::kIsShallow;
Return(CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
- SmiTag(Signed(slot)), constant_elements,
- SmiConstant(flags)));
+ slot, constant_elements, SmiConstant(flags)));
}
}
TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context) {
// Array literals always have a valid AllocationSite to properly track
// elements transitions.
@@ -395,8 +393,10 @@ TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
// TODO(cbruni): create the AllocationSite in CSA.
BIND(&initialize_allocation_site);
{
- allocation_site =
- CreateAllocationSiteInFeedbackVector(feedback_vector, slot);
+ allocation_site = CreateAllocationSiteInFeedbackVector(
+ feedback_vector,
+ // TODO(v8:10047): pass slot as TaggedIndex here
+ Unsigned(TaggedIndexToIntPtr(slot)));
Goto(&create_empty_array);
}
@@ -421,7 +421,7 @@ TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
TNode<FeedbackVector> feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
- TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSArray> result =
EmitCreateEmptyArrayLiteral(feedback_vector, slot, context);
@@ -429,7 +429,7 @@ TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
}
TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
Label* call_runtime) {
TNode<Object> maybe_allocation_site =
CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
@@ -609,7 +609,7 @@ TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
Label call_runtime(this);
TNode<FeedbackVector> feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
- TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> copy =
EmitCreateShallowObjectLiteral(feedback_vector, slot, &call_runtime);
Return(copy);
@@ -619,8 +619,8 @@ TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
CAST(Parameter(Descriptor::kObjectBoilerplateDescription));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
- SmiTag(Signed(slot)), object_boilerplate_description, flags);
+ TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector, slot,
+ object_boilerplate_description, flags);
}
// Used by the CreateEmptyObjectLiteral bytecode and the Object constructor.
@@ -644,8 +644,8 @@ TNode<JSObject> ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
// ES #sec-object-constructor
TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
int const kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
@@ -694,8 +694,8 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
// ES #sec-number-constructor
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
// 1. If no arguments were passed to this function invocation, let n be +0.
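Feedback-vector slots for literals now arrive as TaggedIndex instead of an untagged UintPtrT, so the SmiTag(Signed(slot)) conversions before the runtime calls disappear; only CreateAllocationSiteInFeedbackVector still needs an untagged value (see the TODO above). A rough standalone sketch of why a shared tagged encoding removes the tag/untag round trip; TaggedIndexLike and its shift-by-one encoding are assumptions for illustration, not V8's actual class:

#include <cassert>
#include <cstdint>

class TaggedIndexLike {
 public:
  static TaggedIndexLike FromInt(int32_t i) {
    // Assumed smi-style encoding: payload shifted left by one, tag bit zero.
    return TaggedIndexLike(static_cast<intptr_t>(i) << 1);
  }
  int32_t ToInt() const { return static_cast<int32_t>(bits_ >> 1); }
  intptr_t bits() const { return bits_; }  // forwarded unchanged on the fast path

 private:
  explicit TaggedIndexLike(intptr_t bits) : bits_(bits) {}
  intptr_t bits_;
};

int main() {
  TaggedIndexLike slot = TaggedIndexLike::FromInt(7);
  // Decode only where an untagged index is actually required.
  assert(slot.ToInt() == 7);
  return 0;
}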
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 761a6c7adb..d6a698ddf6 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -21,30 +21,30 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
ScopeType scope_type);
TNode<JSRegExp> EmitCreateRegExpLiteral(
- TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector, TNode<TaggedIndex> slot,
TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context);
TNode<JSArray> EmitCreateShallowArrayLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context, Label* call_runtime,
AllocationSiteMode allocation_site_mode);
TNode<JSArray> EmitCreateEmptyArrayLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context);
TNode<HeapObject> EmitCreateShallowObjectLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
Label* call_runtime);
TNode<JSObject> EmitCreateEmptyObjectLiteral(TNode<Context> context);
- TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context,
- SloppyTNode<JSFunction> target,
- SloppyTNode<JSReceiver> new_target);
+ TNode<JSObject> EmitFastNewObject(TNode<Context> context,
+ TNode<JSFunction> target,
+ TNode<JSReceiver> new_target);
- TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context,
- SloppyTNode<JSFunction> target,
- SloppyTNode<JSReceiver> new_target,
+ TNode<JSObject> EmitFastNewObject(TNode<Context> context,
+ TNode<JSFunction> target,
+ TNode<JSReceiver> new_target,
Label* call_runtime);
};
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 4d0fead861..e524f39b5f 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -39,12 +39,9 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
{
// Invoke the {exotic_to_prim} method on the {input} with a string
// representation of the {hint}.
- Callable callable =
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined);
TNode<String> hint_string =
HeapConstant(factory()->ToPrimitiveHintString(hint));
- TNode<Object> result =
- CallJS(callable, context, exotic_to_prim, input, hint_string);
+ TNode<Object> result = Call(context, exotic_to_prim, input, hint_string);
// Verify that the {result} is actually a primitive.
Label if_resultisprimitive(this),
@@ -248,9 +245,7 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
BIND(&if_methodiscallable);
{
// Call the {method} on the {input}.
- Callable callable = CodeFactory::Call(
- isolate(), ConvertReceiverMode::kNotNullOrUndefined);
- TNode<Object> result = CallJS(callable, context, method, input);
+ TNode<Object> result = Call(context, method, input);
var_result = result;
// Return the {result} if it is a primitive.
@@ -381,20 +376,6 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
}
}
-TF_BUILTIN(ToInteger, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
-
- Return(ToInteger(context, input, kNoTruncation));
-}
-
-TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
-
- Return(ToInteger(context, input, kTruncateMinusZero));
-}
-
// ES6 section 7.1.13 ToObject (argument)
TF_BUILTIN(ToObject, CodeStubAssembler) {
Label if_smi(this, Label::kDeferred), if_jsreceiver(this),
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 1e2cfb9a31..7ed38062c8 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -180,7 +180,7 @@ namespace internal {
TFC(NewArgumentsElements, NewArgumentsElements) \
\
/* Debugger */ \
- TFJ(DebugBreakTrampoline, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(DebugBreakTrampoline, kDontAdaptArgumentsSentinel) \
ASM(FrameDropperTrampoline, FrameDropperTrampoline) \
ASM(HandleDebuggerStatement, ContextOnly) \
\
@@ -200,8 +200,6 @@ namespace internal {
TFC(ToNumberConvertBigInt, TypeConversion) \
TFC(ToNumeric, TypeConversion) \
TFC(NumberToString, TypeConversion) \
- TFC(ToInteger, TypeConversion) \
- TFC(ToInteger_TruncateMinusZero, TypeConversion) \
TFC(ToLength, TypeConversion) \
TFC(Typeof, Typeof) \
TFC(GetSuperConstructor, Typeof) \
@@ -311,20 +309,20 @@ namespace internal {
kFromIndex) \
TFS(ArrayIncludesHoleyDoubles, kElements, kSearchElement, kLength, \
kFromIndex) \
- TFJ(ArrayIncludes, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayIncludes, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.indexof */ \
TFS(ArrayIndexOfSmiOrObject, kElements, kSearchElement, kLength, kFromIndex) \
TFS(ArrayIndexOfPackedDoubles, kElements, kSearchElement, kLength, \
kFromIndex) \
TFS(ArrayIndexOfHoleyDoubles, kElements, kSearchElement, kLength, \
kFromIndex) \
- TFJ(ArrayIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayIndexOf, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.pop */ \
CPP(ArrayPop) \
- TFJ(ArrayPrototypePop, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypePop, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.push */ \
CPP(ArrayPush) \
- TFJ(ArrayPrototypePush, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypePush, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.shift */ \
CPP(ArrayShift) \
/* ES6 #sec-array.prototype.unshift */ \
@@ -346,9 +344,9 @@ namespace internal {
TFS(FlatMapIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth, \
kMapperFunction, kThisArg) \
/* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat */ \
- TFJ(ArrayPrototypeFlat, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypeFlat, kDontAdaptArgumentsSentinel) \
/* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap */ \
- TFJ(ArrayPrototypeFlatMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypeFlatMap, kDontAdaptArgumentsSentinel) \
\
/* ArrayBuffer */ \
/* ES #sec-arraybuffer-constructor */ \
@@ -511,8 +509,7 @@ namespace internal {
ASM(FunctionPrototypeApply, JSTrampoline) \
CPP(FunctionPrototypeBind) \
/* ES6 #sec-function.prototype.bind */ \
- TFJ(FastFunctionPrototypeBind, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(FastFunctionPrototypeBind, kDontAdaptArgumentsSentinel) \
ASM(FunctionPrototypeCall, JSTrampoline) \
/* ES6 #sec-function.prototype-@@hasinstance */ \
TFJ(FunctionPrototypeHasInstance, 1, kReceiver, kV) \
@@ -526,13 +523,11 @@ namespace internal {
TFS(CreateGeneratorObject, kClosure, kReceiver) \
CPP(GeneratorFunctionConstructor) \
/* ES6 #sec-generator.prototype.next */ \
- TFJ(GeneratorPrototypeNext, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(GeneratorPrototypeNext, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-generator.prototype.return */ \
- TFJ(GeneratorPrototypeReturn, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(GeneratorPrototypeReturn, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-generator.prototype.throw */ \
- TFJ(GeneratorPrototypeThrow, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(GeneratorPrototypeThrow, kDontAdaptArgumentsSentinel) \
CPP(AsyncFunctionConstructor) \
\
/* Iterator Protocol */ \
@@ -593,7 +588,7 @@ namespace internal {
\
/* Map */ \
TFS(FindOrderedHashMapEntry, kTable, kKey) \
- TFJ(MapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(MapConstructor, kDontAdaptArgumentsSentinel) \
TFJ(MapPrototypeSet, 2, kReceiver, kKey, kValue) \
TFJ(MapPrototypeDelete, 1, kReceiver, kKey) \
TFJ(MapPrototypeGet, 1, kReceiver, kKey) \
@@ -604,7 +599,7 @@ namespace internal {
/* ES #sec-get-map.prototype.size */ \
TFJ(MapPrototypeGetSize, 0, kReceiver) \
/* ES #sec-map.prototype.forEach */ \
- TFJ(MapPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(MapPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-map.prototype.keys */ \
TFJ(MapPrototypeKeys, 0, kReceiver) \
/* ES #sec-map.prototype.values */ \
@@ -616,7 +611,7 @@ namespace internal {
/* Number */ \
TFC(AllocateHeapNumber, AllocateHeapNumber) \
/* ES #sec-number-constructor */ \
- TFJ(NumberConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(NumberConstructor, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-number.isfinite */ \
TFJ(NumberIsFinite, 1, kReceiver, kNumber) \
/* ES6 #sec-number.isinteger */ \
@@ -663,18 +658,17 @@ namespace internal {
\
/* Object */ \
/* ES #sec-object-constructor */ \
- TFJ(ObjectConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(ObjectAssign, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ObjectConstructor, kDontAdaptArgumentsSentinel) \
+ TFJ(ObjectAssign, kDontAdaptArgumentsSentinel) \
/* ES #sec-object.create */ \
- TFJ(ObjectCreate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ObjectCreate, kDontAdaptArgumentsSentinel) \
CPP(ObjectDefineGetter) \
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
TFJ(ObjectEntries, 1, kReceiver, kObject) \
CPP(ObjectFreeze) \
- TFJ(ObjectGetOwnPropertyDescriptor, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ObjectGetOwnPropertyDescriptor, kDontAdaptArgumentsSentinel) \
CPP(ObjectGetOwnPropertyDescriptors) \
TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \
CPP(ObjectGetOwnPropertySymbols) \
@@ -747,7 +741,7 @@ namespace internal {
ASM(RegExpInterpreterTrampoline, CCall) \
\
/* Set */ \
- TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \
TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
TFJ(SetPrototypeAdd, 1, kReceiver, kKey) \
TFJ(SetPrototypeDelete, 1, kReceiver, kKey) \
@@ -757,7 +751,7 @@ namespace internal {
/* ES #sec-get-set.prototype.size */ \
TFJ(SetPrototypeGetSize, 0, kReceiver) \
/* ES #sec-set.prototype.foreach */ \
- TFJ(SetPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(SetPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-set.prototype.values */ \
TFJ(SetPrototypeValues, 0, kReceiver) \
/* ES #sec-%setiteratorprototype%.next */ \
@@ -786,12 +780,11 @@ namespace internal {
/* ES #sec-string.fromcodepoint */ \
CPP(StringFromCodePoint) \
/* ES6 #sec-string.fromcharcode */ \
- TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringFromCharCode, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.includes */ \
- TFJ(StringPrototypeIncludes, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeIncludes, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.indexof */ \
- TFJ(StringPrototypeIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeIndexOf, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.lastindexof */ \
CPP(StringPrototypeLastIndexOf) \
/* ES6 #sec-string.prototype.match */ \
@@ -805,11 +798,10 @@ namespace internal {
/* ES6 #sec-string.prototype.search */ \
TFJ(StringPrototypeSearch, 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.split */ \
- TFJ(StringPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrim, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimEnd, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimStart, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeSplit, kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrim, kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrimEnd, kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrimStart, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.raw */ \
CPP(StringRaw) \
\
@@ -825,7 +817,7 @@ namespace internal {
/* ES #sec-typedarray-constructors */ \
TFJ(TypedArrayBaseConstructor, 0, kReceiver) \
TFJ(GenericLazyDeoptContinuation, 1, kReceiver, kResult) \
- TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(TypedArrayConstructor, kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
TFJ(TypedArrayPrototypeByteLength, 0, kReceiver) \
@@ -854,14 +846,19 @@ namespace internal {
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
TFJ(TypedArrayPrototypeToStringTag, 0, kReceiver) \
/* ES6 %TypedArray%.prototype.map */ \
- TFJ(TypedArrayPrototypeMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(TypedArrayPrototypeMap, kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
ASM(WasmCompileLazy, Dummy) \
+ ASM(WasmDebugBreak, Dummy) \
TFC(WasmAtomicNotify, WasmAtomicNotify) \
- TFC(WasmI32AtomicWait, WasmI32AtomicWait) \
- TFC(WasmI64AtomicWait, WasmI64AtomicWait) \
+ TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
+ TFC(WasmI32AtomicWait64, WasmI32AtomicWait64) \
+ TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
+ TFC(WasmI64AtomicWait64, WasmI64AtomicWait64) \
TFC(WasmMemoryGrow, WasmMemoryGrow) \
+ TFC(WasmTableInit, WasmTableInit) \
+ TFC(WasmTableCopy, WasmTableCopy) \
TFC(WasmTableGet, WasmTableGet) \
TFC(WasmTableSet, WasmTableSet) \
TFC(WasmStackGuard, NoContext) \
@@ -881,9 +878,11 @@ namespace internal {
TFS(ThrowWasmTrapDataSegmentDropped) \
TFS(ThrowWasmTrapElemSegmentDropped) \
TFS(ThrowWasmTrapTableOutOfBounds) \
+ TFS(ThrowWasmTrapBrOnExnNullRef) \
+ TFS(ThrowWasmTrapRethrowNullRef) \
\
/* WeakMap */ \
- TFJ(WeakMapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
TFS(WeakMapLookupHashIndex, kTable, kKey) \
TFJ(WeakMapGet, 1, kReceiver, kKey) \
TFJ(WeakMapPrototypeHas, 1, kReceiver, kKey) \
@@ -891,7 +890,7 @@ namespace internal {
TFJ(WeakMapPrototypeDelete, 1, kReceiver, kKey) \
\
/* WeakSet */ \
- TFJ(WeakSetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(WeakSetConstructor, kDontAdaptArgumentsSentinel) \
TFJ(WeakSetPrototypeHas, 1, kReceiver, kKey) \
TFJ(WeakSetPrototypeAdd, 1, kReceiver, kValue) \
TFJ(WeakSetPrototypeDelete, 1, kReceiver, kValue) \
@@ -913,16 +912,13 @@ namespace internal {
CPP(AsyncGeneratorFunctionConstructor) \
/* AsyncGenerator.prototype.next ( value ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-next */ \
- TFJ(AsyncGeneratorPrototypeNext, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(AsyncGeneratorPrototypeNext, kDontAdaptArgumentsSentinel) \
/* AsyncGenerator.prototype.return ( value ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-return */ \
- TFJ(AsyncGeneratorPrototypeReturn, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(AsyncGeneratorPrototypeReturn, kDontAdaptArgumentsSentinel) \
/* AsyncGenerator.prototype.throw ( exception ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-throw */ \
- TFJ(AsyncGeneratorPrototypeThrow, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(AsyncGeneratorPrototypeThrow, kDontAdaptArgumentsSentinel) \
\
/* Await (proposal-async-iteration/#await), with resume behaviour */ \
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
@@ -980,16 +976,16 @@ namespace internal {
CPP(Trace) \
\
/* Weak refs */ \
- CPP(FinalizationGroupCleanupIteratorNext) \
- CPP(FinalizationGroupCleanupSome) \
- CPP(FinalizationGroupConstructor) \
- CPP(FinalizationGroupRegister) \
- CPP(FinalizationGroupUnregister) \
+ CPP(FinalizationRegistryCleanupIteratorNext) \
+ CPP(FinalizationRegistryCleanupSome) \
+ CPP(FinalizationRegistryConstructor) \
+ CPP(FinalizationRegistryRegister) \
+ CPP(FinalizationRegistryUnregister) \
CPP(WeakRefConstructor) \
CPP(WeakRefDeref) \
\
/* Async modules */ \
- TFJ(AsyncModuleEvaluate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(AsyncModuleEvaluate, kDontAdaptArgumentsSentinel) \
\
  /* CallAsyncModule* are spec anonymous functions */ \
CPP(CallAsyncModuleFulfilled) \
@@ -1041,11 +1037,9 @@ namespace internal {
/* ecma402 #sec-intl-listformat-constructor */ \
CPP(ListFormatConstructor) \
/* ecma402 #sec-intl-list-format.prototype.format */ \
- TFJ(ListFormatPrototypeFormat, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ListFormatPrototypeFormat, kDontAdaptArgumentsSentinel) \
/* ecma402 #sec-intl-list-format.prototype.formattoparts */ \
- TFJ(ListFormatPrototypeFormatToParts, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ListFormatPrototypeFormatToParts, kDontAdaptArgumentsSentinel) \
/* ecma402 #sec-intl.listformat.prototype.resolvedoptions */ \
CPP(ListFormatPrototypeResolvedOptions) \
/* ecma402 #sec-intl.ListFormat.supportedlocalesof */ \
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index c2eb44debe..174b89795f 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -13,7 +13,34 @@
namespace v8 {
namespace internal {
+#define REVERSE_0(a) a,
+#define REVERSE_1(a, b) b, a,
+#define REVERSE_2(a, b, c) c, b, a,
+#define REVERSE_3(a, b, c, d) d, c, b, a,
+#define REVERSE_4(a, b, c, d, e) e, d, c, b, a,
+#define REVERSE_5(a, b, c, d, e, f) f, e, d, c, b, a,
+#define REVERSE_6(a, b, c, d, e, f, g) g, f, e, d, c, b, a,
+#define REVERSE_7(a, b, c, d, e, f, g, h) h, g, f, e, d, c, b, a,
+#define REVERSE_8(a, b, c, d, e, f, g, h, i) i, h, g, f, e, d, c, b, a,
+#define REVERSE_kDontAdaptArgumentsSentinel(...)
+#define REVERSE(N, ...) REVERSE_##N(__VA_ARGS__)
+
// Define interface descriptors for builtins with JS linkage.
+#ifdef V8_REVERSE_JSARGS
+#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
+ struct Builtin_##Name##_InterfaceDescriptor { \
+ enum ParameterIndices { \
+ kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
+ REVERSE_##Argc(__VA_ARGS__) kJSNewTarget, \
+ kJSActualArgumentsCount, \
+ kContext, \
+ kParameterCount, \
+ }; \
+ static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4), \
+ "Inconsistent set of arguments"); \
+ static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
+ };
+#else
#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
struct Builtin_##Name##_InterfaceDescriptor { \
enum ParameterIndices { \
@@ -28,6 +55,7 @@ namespace internal {
"Inconsistent set of arguments"); \
static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
};
+#endif
// Define interface descriptors for builtins with StubCall linkage.
#define DEFINE_TFC_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
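The new REVERSE_N helpers exist so that, under V8_REVERSE_JSARGS, the named JS parameters are emitted in reverse order inside the generated descriptor enum while the trailing kJSNewTarget, kJSActualArgumentsCount and kContext entries stay where they were. A standalone sketch of the expansion for a hypothetical two-argument builtin (REVERSE_2 is copied from the hunk above; Builtin_Example and the parameter names are made up):

#define REVERSE_2(a, b, c) c, b, a,

struct Builtin_Example_InterfaceDescriptor {
  enum ParameterIndices {
    kJSTarget = -1,
    REVERSE_2(kReceiver, kLeft, kRight)  // expands to: kRight, kLeft, kReceiver,
    kJSNewTarget,
    kJSActualArgumentsCount,
    kContext,
    kParameterCount,
  };
};

static_assert(Builtin_Example_InterfaceDescriptor::kRight == 0,
              "last named JS argument gets the lowest index");
static_assert(Builtin_Example_InterfaceDescriptor::kReceiver == 2,
              "receiver gets the highest of the reversed indices");
static_assert(Builtin_Example_InterfaceDescriptor::kParameterCount - 4 == 2,
              "same arity consistency check as the real macro");

int main() { return 0; }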
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index 689c7f1342..8693cd61f4 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -56,12 +56,16 @@ void GeneratorBuiltinsAssembler::InnerResume(
SmiConstant(resume_mode));
// Resume the {receiver} using our trampoline.
+ // Close the generator if there was an exception.
TVARIABLE(Object, var_exception);
Label if_exception(this, Label::kDeferred), if_final_return(this);
- TNode<Object> result = CallStub(CodeFactory::ResumeGenerator(isolate()),
- context, value, receiver);
- // Make sure we close the generator if there was an exception.
- GotoIfException(result, &if_exception, &var_exception);
+ TNode<Object> result;
+ {
+ compiler::ScopedExceptionHandler handler(this, &if_exception,
+ &var_exception);
+ result = CallStub(CodeFactory::ResumeGenerator(isolate()), context, value,
+ receiver);
+ }
// If the generator is not suspended (i.e., its state is 'executing'),
// close it and wrap the return value in IteratorResult.
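Here and in the following files, per-call GotoIfException checks are replaced by a compiler::ScopedExceptionHandler that is installed for a whole block. A standalone RAII analogy of that idea in plain C++ (hypothetical names, not the real class):

#include <cassert>
#include <functional>
#include <string>

// While an ExceptionScope is alive, anything "thrown" through Raise() is
// delivered to that scope's handler; when the scope ends, the previous
// handler is restored. Each block states once where its exceptions go.
class ExceptionScope {
 public:
  using Handler = std::function<void(const std::string&)>;
  explicit ExceptionScope(Handler h) : previous_(current_) {
    current_ = std::move(h);
  }
  ~ExceptionScope() { current_ = previous_; }
  static void Raise(const std::string& what) {
    if (current_) current_(what);
  }

 private:
  static Handler current_;
  Handler previous_;
};
ExceptionScope::Handler ExceptionScope::current_;

int main() {
  std::string seen;
  {
    ExceptionScope scope([&](const std::string& w) { seen = w; });
    ExceptionScope::Raise("ResumeGenerator threw");  // routed like &if_exception
  }
  assert(seen == "ResumeGenerator threw");
  return 0;
}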
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 880f665c02..6f4f54656d 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -42,7 +42,7 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
TNode<JSObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Number> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Smi> key = CAST(Parameter(Descriptor::kKey));
Label runtime(this, Label::kDeferred);
TNode<FixedArrayBase> elements = LoadElements(object);
@@ -57,7 +57,7 @@ TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
TNode<JSObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Number> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Smi> key = CAST(Parameter(Descriptor::kKey));
Label runtime(this, Label::kDeferred);
TNode<FixedArrayBase> elements = LoadElements(object);
@@ -266,11 +266,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
}
- TNode<BoolT> ShouldSkipFPRegs(SloppyTNode<Smi> mode) {
+ TNode<BoolT> ShouldSkipFPRegs(TNode<Smi> mode) {
return TaggedEqual(mode, SmiConstant(kDontSaveFPRegs));
}
- TNode<BoolT> ShouldEmitRememberSet(SloppyTNode<Smi> remembered_set) {
+ TNode<BoolT> ShouldEmitRememberSet(TNode<Smi> remembered_set) {
return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
}
@@ -766,7 +766,7 @@ TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
}
TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
- TNode<HeapObject> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<JSReceiver> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label if_empty(this), if_runtime(this, Label::kDeferred);
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index e9dca2dbc3..94a79d2a32 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -23,53 +23,38 @@ TNode<Object> IteratorBuiltinsAssembler::GetIteratorMethod(
return GetProperty(context, object, factory()->iterator_symbol());
}
-IteratorRecord IteratorBuiltinsAssembler::GetIterator(
- SloppyTNode<Context> context, SloppyTNode<Object> object,
- Label* if_exception, TVariable<Object>* exception) {
+IteratorRecord IteratorBuiltinsAssembler::GetIterator(TNode<Context> context,
+ TNode<Object> object) {
TNode<Object> method = GetIteratorMethod(context, object);
- return GetIterator(context, object, method, if_exception, exception);
+ return GetIterator(context, object, method);
}
-IteratorRecord IteratorBuiltinsAssembler::GetIterator(
- TNode<Context> context, TNode<Object> object, TNode<Object> method,
- Label* if_exception, TVariable<Object>* exception) {
- GotoIfException(method, if_exception, exception);
-
+IteratorRecord IteratorBuiltinsAssembler::GetIterator(TNode<Context> context,
+ TNode<Object> object,
+ TNode<Object> method) {
Label if_not_callable(this, Label::kDeferred), if_callable(this);
GotoIf(TaggedIsSmi(method), &if_not_callable);
Branch(IsCallable(CAST(method)), &if_callable, &if_not_callable);
BIND(&if_not_callable);
- {
- TNode<Object> ret =
- CallRuntime(Runtime::kThrowIteratorError, context, object);
- GotoIfException(ret, if_exception, exception);
- Unreachable();
- }
+ CallRuntime(Runtime::kThrowIteratorError, context, object);
+ Unreachable();
BIND(&if_callable);
{
- Callable callable = CodeFactory::Call(isolate());
- TNode<Object> iterator = CallJS(callable, context, method, object);
- GotoIfException(iterator, if_exception, exception);
+ TNode<Object> iterator = Call(context, method, object);
Label get_next(this), if_notobject(this, Label::kDeferred);
GotoIf(TaggedIsSmi(iterator), &if_notobject);
Branch(IsJSReceiver(CAST(iterator)), &get_next, &if_notobject);
BIND(&if_notobject);
- {
- TNode<Object> ret =
- CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
- GotoIfException(ret, if_exception, exception);
- Unreachable();
- }
+ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
+ Unreachable();
BIND(&get_next);
- const TNode<Object> next =
+ TNode<Object> next =
GetProperty(context, iterator, factory()->next_string());
- GotoIfException(next, if_exception, exception);
-
return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
TNode<Object>::UncheckedCast(next)};
}
@@ -77,14 +62,10 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(
TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
TNode<Context> context, const IteratorRecord& iterator, Label* if_done,
- base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_exception,
- TVariable<Object>* exception) {
+ base::Optional<TNode<Map>> fast_iterator_result_map) {
DCHECK_NOT_NULL(if_done);
// 1. a. Let result be ? Invoke(iterator, "next", « »).
- Callable callable = CodeFactory::Call(isolate());
- TNode<Object> result =
- CallJS(callable, context, iterator.next, iterator.object);
- GotoIfException(result, if_exception, exception);
+ TNode<Object> result = Call(context, iterator.next, iterator.object);
// 3. If Type(result) is not Object, throw a TypeError exception.
Label if_notobject(this, Label::kDeferred), return_result(this);
@@ -117,17 +98,12 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
// 2. Return ToBoolean(? Get(iterResult, "done")).
TNode<Object> done =
GetProperty(context, heap_object_result, factory()->done_string());
- GotoIfException(done, if_exception, exception);
BranchIfToBooleanIsTrue(done, if_done, &return_result);
}
BIND(&if_notobject);
- {
- TNode<Object> ret =
- CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
- GotoIfException(ret, if_exception, exception);
- Unreachable();
- }
+ CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
+ Unreachable();
BIND(&return_result);
return CAST(heap_object_result);
@@ -135,8 +111,7 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
TNode<Object> IteratorBuiltinsAssembler::IteratorValue(
TNode<Context> context, TNode<JSReceiver> result,
- base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_exception,
- TVariable<Object>* exception) {
+ base::Optional<TNode<Map>> fast_iterator_result_map) {
Label exit(this);
TVARIABLE(Object, var_value);
if (fast_iterator_result_map) {
@@ -151,13 +126,8 @@ TNode<Object> IteratorBuiltinsAssembler::IteratorValue(
}
// Generic iterator result case:
- {
- TNode<Object> value =
- GetProperty(context, result, factory()->value_string());
- GotoIfException(value, if_exception, exception);
- var_value = value;
- Goto(&exit);
- }
+ var_value = GetProperty(context, result, factory()->value_string());
+ Goto(&exit);
BIND(&exit);
return var_value.value();
@@ -174,23 +144,24 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(
CSA_ASSERT(this, IsJSReceiver(iterator.object));
// Let return be ? GetMethod(iterator, "return").
- TNode<Object> method =
- GetProperty(context, iterator.object, factory()->return_string());
- GotoIfException(method, if_exception, exception);
+ TNode<Object> method;
+ {
+ compiler::ScopedExceptionHandler handler(this, if_exception, exception);
+ method = GetProperty(context, iterator.object, factory()->return_string());
+ }
// If return is undefined, return Completion(completion).
GotoIf(Word32Or(IsUndefined(method), IsNull(method)), if_exception);
{
// Let innerResult be Call(return, iterator, « »).
- // If an exception occurs, the original exception remains bound
- TNode<Object> inner_result =
- CallJS(CodeFactory::Call(isolate()), context, method, iterator.object);
- GotoIfException(inner_result, if_exception, nullptr);
-
- // (If completion.[[Type]] is throw) return Completion(completion).
- Goto(if_exception);
+ // If an exception occurs, the original exception remains bound.
+ compiler::ScopedExceptionHandler handler(this, if_exception, nullptr);
+ Call(context, method, iterator.object);
}
+
+ // (If completion.[[Type]] is throw) return Completion(completion).
+ Goto(if_exception);
}
void IteratorBuiltinsAssembler::IteratorCloseOnException(
@@ -317,10 +288,13 @@ TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
{
// 1. Let error be ThrowCompletion(a newly created TypeError object).
TVARIABLE(Object, var_exception);
- TNode<Object> ret = CallRuntime(
- Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kIterableYieldedNonString), next_value);
- GotoIfException(ret, &if_exception, &var_exception);
+ {
+ compiler::ScopedExceptionHandler handler(this, &if_exception,
+ &var_exception);
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kIterableYieldedNonString),
+ next_value);
+ }
Unreachable();
// 2. Return ? IteratorClose(iteratorRecord, error).
@@ -466,13 +440,15 @@ TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
IteratorBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Smi> callSlot = CAST(Parameter(Descriptor::kCallSlot));
+ // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
+ TNode<Smi> call_slot_smi = CAST(Parameter(Descriptor::kCallSlot));
+ TNode<TaggedIndex> call_slot = SmiToTaggedIndex(call_slot_smi);
TNode<FeedbackVector> feedback = CAST(Parameter(Descriptor::kFeedback));
- TNode<Object> iteratorMethod = CAST(Parameter(Descriptor::kResult));
+ TNode<Object> iterator_method = CAST(Parameter(Descriptor::kResult));
TNode<Object> result =
CallBuiltin(Builtins::kCallIteratorWithFeedback, context, receiver,
- iteratorMethod, callSlot, feedback);
+ iterator_method, call_slot, feedback);
Return(result);
}
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 61665d2825..4d496fa384 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -24,14 +24,9 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// https://tc39.github.io/ecma262/#sec-getiterator --- never used for
// @@asyncIterator.
- IteratorRecord GetIterator(SloppyTNode<Context> context,
- SloppyTNode<Object> object,
- Label* if_exception = nullptr,
- TVariable<Object>* exception = nullptr);
+ IteratorRecord GetIterator(TNode<Context> context, TNode<Object> object);
IteratorRecord GetIterator(TNode<Context> context, TNode<Object> object,
- TNode<Object> method,
- Label* if_exception = nullptr,
- TVariable<Object>* exception = nullptr);
+ TNode<Object> method);
// https://tc39.github.io/ecma262/#sec-iteratorstep
// If the iterator is done, goto {if_done}, otherwise returns an iterator
@@ -40,9 +35,7 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// object, loaded from the native context.
TNode<JSReceiver> IteratorStep(
TNode<Context> context, const IteratorRecord& iterator, Label* if_done,
- base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt,
- Label* if_exception = nullptr, TVariable<Object>* exception = nullptr);
-
+ base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt);
TNode<JSReceiver> IteratorStep(
TNode<Context> context, const IteratorRecord& iterator,
base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_done) {
@@ -55,8 +48,7 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// object, loaded from the native context.
TNode<Object> IteratorValue(
TNode<Context> context, TNode<JSReceiver> result,
- base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt,
- Label* if_exception = nullptr, TVariable<Object>* exception = nullptr);
+ base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt);
// https://tc39.github.io/ecma262/#sec-iteratorclose
void IteratorCloseOnException(TNode<Context> context,
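With the exception plumbing removed from these signatures, a caller that still wants close-on-exception behaviour wraps the iteration itself, in the style the .cc hunks above already use. A condensed sketch of the intended call shape (not a complete builtin and not compilable on its own):

  IteratorRecord record = GetIterator(context, iterable);
  Label done(this), if_exception(this, Label::kDeferred);
  TVARIABLE(Object, var_exception);
  {
    compiler::ScopedExceptionHandler handler(this, &if_exception,
                                             &var_exception);
    TNode<JSReceiver> step = IteratorStep(context, record, &done);
    TNode<Object> value = IteratorValue(context, step);
    // ... consume {value} ...
  }
  BIND(&done);
  // ... normal completion ...
  BIND(&if_exception);
  // ... close the iterator and rethrow var_exception.value() ...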
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 630d65e72c..917255f9bb 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+using compiler::ScopedExceptionHandler;
+
class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
public:
explicit MicrotaskQueueBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -45,7 +47,7 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
void RewindEnteredContext(TNode<IntPtrT> saved_entered_context_count);
void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
- SloppyTNode<HeapObject> promise_or_capability);
+ TNode<HeapObject> promise_or_capability);
};
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
@@ -118,7 +120,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Map> microtask_map = LoadMap(microtask);
TNode<Uint16T> microtask_type = LoadMapInstanceType(microtask_map);
- TVARIABLE(HeapObject, var_exception, TheHoleConstant());
+ TVARIABLE(Object, var_exception);
Label if_exception(this, Label::kDeferred);
Label is_callable(this), is_callback(this),
is_promise_fulfill_reaction_job(this),
@@ -147,10 +149,10 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<JSReceiver> callable =
LoadObjectField<JSReceiver>(microtask, CallableTask::kCallableOffset);
- const TNode<Object> result = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
- microtask_context, callable, UndefinedConstant());
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ Call(microtask_context, callable, UndefinedConstant());
+ }
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
@@ -173,10 +175,11 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
// But from our current measurements it doesn't seem to be a
// serious performance problem, even if the microtask is full
// of CallHandlerTasks (which is not a realistic use case anyways).
- const TNode<Object> result =
- CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
- microtask_callback, microtask_data);
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
+ microtask_callback, microtask_data);
+ }
Goto(&done);
}
@@ -195,10 +198,11 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> thenable = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kThenableOffset);
- const TNode<Object> result =
- CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
- promise_to_resolve, thenable, then);
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
+ promise_to_resolve, thenable, then);
+ }
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
@@ -214,24 +218,44 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> argument =
LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
- const TNode<Object> handler =
+ const TNode<Object> job_handler =
LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
const TNode<HeapObject> promise_or_capability = CAST(LoadObjectField(
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset));
+ TNode<Object> preserved_embedder_data = LoadObjectField(
+ microtask,
+ PromiseReactionJobTask::kContinuationPreservedEmbedderDataOffset);
+ Label preserved_data_done(this);
+ GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_done);
+ StoreContextElement(native_context,
+ Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
+ preserved_embedder_data);
+ Goto(&preserved_data_done);
+ BIND(&preserved_data_done);
+
// Run the promise before/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
promise_or_capability);
- const TNode<Object> result =
- CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
- argument, handler, promise_or_capability);
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
+ argument, job_handler, promise_or_capability);
+ }
// Run the promise after/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
promise_or_capability);
+ Label preserved_data_reset_done(this);
+ GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
+ StoreContextElement(native_context,
+ Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
+ UndefinedConstant());
+ Goto(&preserved_data_reset_done);
+ BIND(&preserved_data_reset_done);
+
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
@@ -247,24 +271,44 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> argument =
LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
- const TNode<Object> handler =
+ const TNode<Object> job_handler =
LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
const TNode<HeapObject> promise_or_capability = CAST(LoadObjectField(
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset));
+ TNode<Object> preserved_embedder_data = LoadObjectField(
+ microtask,
+ PromiseReactionJobTask::kContinuationPreservedEmbedderDataOffset);
+ Label preserved_data_done(this);
+ GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_done);
+ StoreContextElement(native_context,
+ Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
+ preserved_embedder_data);
+ Goto(&preserved_data_done);
+ BIND(&preserved_data_done);
+
// Run the promise before/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
promise_or_capability);
- const TNode<Object> result =
- CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
- argument, handler, promise_or_capability);
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
+ argument, job_handler, promise_or_capability);
+ }
// Run the promise after/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
promise_or_capability);
+ Label preserved_data_reset_done(this);
+ GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
+ StoreContextElement(native_context,
+ Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
+ UndefinedConstant());
+ Goto(&preserved_data_reset_done);
+ BIND(&preserved_data_reset_done);
+
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
@@ -415,7 +459,7 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
void MicrotaskQueueBuiltinsAssembler::RunPromiseHook(
Runtime::FunctionId id, TNode<Context> context,
- SloppyTNode<HeapObject> promise_or_capability) {
+ TNode<HeapObject> promise_or_capability) {
Label hook(this, Label::kDeferred), done_hook(this);
Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &hook,
&done_hook);
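Both promise-reaction cases now bracket the job with the task's continuation-preserved embedder data: install it into the native context before the job runs, reset the slot to undefined afterwards, and skip both steps when the task carries none. A minimal standalone sketch of that save/run/reset shape (hypothetical types; the real slot is Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX):

#include <functional>
#include <optional>

struct NativeContextLike {
  std::optional<int> embedder_data;  // empty stands for undefined
};

void RunReactionJob(NativeContextLike& context, std::optional<int> preserved,
                    const std::function<void()>& job) {
  if (preserved.has_value()) context.embedder_data = preserved;  // install before the job
  job();  // PromiseFulfillReactionJob / PromiseRejectReactionJob, hooks elided
  if (preserved.has_value()) context.embedder_data.reset();  // reset to undefined after
}

int main() {
  NativeContextLike context;
  RunReactionJob(context, 42, [] { /* promise handler runs here */ });
  return 0;
}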
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 9c5cd54613..9af4affa68 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -359,7 +359,7 @@ TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
TNode<Object> method =
GetProperty(context, receiver, factory()->toString_string());
- Return(CallJS(CodeFactory::Call(isolate()), context, method, receiver));
+ Return(Call(context, method, receiver));
BIND(&if_null_or_undefined);
ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
@@ -380,7 +380,9 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
Branch(TaggedIsSmi(object), &to_primitive, &if_objectisnotsmi);
BIND(&if_objectisnotsmi);
- TNode<Map> map = LoadMap(CAST(object));
+ TNode<HeapObject> heap_object = CAST(object);
+
+ TNode<Map> map = LoadMap(heap_object);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
{
@@ -393,12 +395,12 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
&call_runtime, &if_notunique_name);
BIND(&if_unique_name);
- TryHasOwnProperty(object, map, instance_type, var_unique.value(),
+ TryHasOwnProperty(heap_object, map, instance_type, var_unique.value(),
&return_true, &return_false, &call_runtime);
BIND(&if_index);
{
- TryLookupElement(CAST(object), map, instance_type, var_index.value(),
+ TryLookupElement(heap_object, map, instance_type, var_index.value(),
&return_true, &return_false, &return_false,
&call_runtime);
}
@@ -435,8 +437,8 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
// ES #sec-object.assign
TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -502,8 +504,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
TNode<DescriptorArray> object_descriptors = LoadMapDescriptors(object_map);
TNode<EnumCache> object_enum_cache = LoadObjectField<EnumCache>(
object_descriptors, DescriptorArray::kEnumCacheOffset);
- TNode<Object> object_enum_keys =
- LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
+ auto object_enum_keys = LoadObjectField<FixedArrayBase>(
+ object_enum_cache, EnumCache::kKeysOffset);
// Allocate a JSArray and copy the elements from the {object_enum_keys}.
TNode<JSArray> array;
@@ -598,8 +600,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
TNode<DescriptorArray> object_descriptors = LoadMapDescriptors(object_map);
TNode<EnumCache> object_enum_cache = CAST(
LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset));
- TNode<Object> object_enum_keys =
- LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
+ auto object_enum_keys = LoadObjectField<FixedArrayBase>(
+ object_enum_cache, EnumCache::kKeysOffset);
// Allocate a JSArray and copy the elements from the {object_enum_keys}.
TNode<NativeContext> native_context = LoadNativeContext(context);
@@ -1064,8 +1066,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
int const kPrototypeArg = 0;
int const kPropertiesArg = 1;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> prototype = args.GetOptionalArgumentValue(kPrototypeArg);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 6b1f43d8ea..c48fc0f78f 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -28,11 +28,6 @@ void PromiseBuiltinsAssembler::ZeroOutEmbedderOffsets(
}
}
-TNode<HeapObject> PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
- TNode<Context> context) {
- return Allocate(PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks);
-}
-
TNode<HeapObject> PromiseBuiltinsAssembler::AllocateJSPromise(
TNode<Context> context) {
return Allocate(JSPromise::kSizeWithEmbedderFields);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 66044b51af..377db4d9e3 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -20,8 +20,6 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
void ZeroOutEmbedderOffsets(TNode<JSPromise> promise);
TNode<HeapObject> AllocateJSPromise(TNode<Context> context);
-
- TNode<HeapObject> AllocatePromiseReactionJobTask(TNode<Context> context);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index c0901953d1..caafcf6506 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -126,8 +126,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
UncheckedCast<IntPtrT>(argc_ptr));
// 8. Return Call(trap, handler, «target, thisArgument, argArray»).
- TNode<Object> result = CallJS(CodeFactory::Call(isolate()), context, trap,
- handler, target, receiver, array);
+ TNode<Object> result = Call(context, trap, handler, target, receiver, array);
args.PopAndReturn(result);
BIND(&trap_undefined);
@@ -181,8 +180,8 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
UncheckedCast<IntPtrT>(argc_ptr));
// 8. Let newObj be ? Call(trap, handler, « target, argArray, newTarget »).
- TNode<Object> new_obj = CallJS(CodeFactory::Call(isolate()), context, trap,
- handler, target, array, new_target);
+ TNode<Object> new_obj =
+ Call(context, trap, handler, target, array, new_target);
// 9. If Type(newObj) is not Object, throw a TypeError exception.
GotoIf(TaggedIsSmi(new_obj), &not_an_object);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 792f0e44a6..d06ced76d2 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -164,9 +164,9 @@ void RegExpBuiltinsAssembler::FastStoreLastIndex(TNode<JSRegExp> regexp,
StoreObjectField(regexp, field_offset, value);
}
-void RegExpBuiltinsAssembler::SlowStoreLastIndex(SloppyTNode<Context> context,
- SloppyTNode<Object> regexp,
- SloppyTNode<Object> value) {
+void RegExpBuiltinsAssembler::SlowStoreLastIndex(TNode<Context> context,
+ TNode<Object> regexp,
+ TNode<Object> value) {
TNode<String> name = HeapConstant(isolate()->factory()->lastIndex_string());
SetPropertyStrict(context, regexp, name, value);
}
@@ -1237,8 +1237,8 @@ TNode<BoolT> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
}
TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
- SloppyTNode<String> string, SloppyTNode<Number> index,
- SloppyTNode<BoolT> is_unicode, bool is_fastpath) {
+ TNode<String> string, TNode<Number> index, TNode<BoolT> is_unicode,
+ bool is_fastpath) {
CSA_ASSERT(this, IsString(string));
CSA_ASSERT(this, IsNumberNormalized(index));
if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index c12ed63722..273e315599 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -37,9 +37,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> SlowLoadLastIndex(TNode<Context> context, TNode<Object> regexp);
void FastStoreLastIndex(TNode<JSRegExp> regexp, TNode<Smi> value);
- void SlowStoreLastIndex(SloppyTNode<Context> context,
- SloppyTNode<Object> regexp,
- SloppyTNode<Object> value);
+ void SlowStoreLastIndex(TNode<Context> context, TNode<Object> regexp,
+ TNode<Object> value);
// Loads {var_string_start} and {var_string_end} with the corresponding
// offsets into the given {string_data}.
@@ -140,10 +139,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
const TNode<Object> maybe_pattern,
const TNode<Object> maybe_flags);
- TNode<Number> AdvanceStringIndex(SloppyTNode<String> string,
- SloppyTNode<Number> index,
- SloppyTNode<BoolT> is_unicode,
- bool is_fastpath);
+ TNode<Number> AdvanceStringIndex(TNode<String> string, TNode<Number> index,
+ TNode<BoolT> is_unicode, bool is_fastpath);
TNode<Smi> AdvanceStringIndexFast(TNode<String> string, TNode<Smi> index,
TNode<BoolT> is_unicode) {
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index e6251c9480..f89bc25bdb 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -25,20 +25,18 @@ namespace internal {
// #sec-atomics.islockfree
inline bool AtomicIsLockFree(double size) {
// According to the standard, 1, 2, and 4 byte atomics are supposed to be
- // 'lock free' on every platform. But what exactly does 'lock free' mean?
- // For example, on x64 V8 uses a lock prefix to implement the semantics of
- // many atomic operations. Is that considered a lock? Probably not.
+ // 'lock free' on every platform. 'Lock free' means that all possible uses of
+ // those atomics guarantee forward progress for the agent cluster (i.e. for
+ // all threads, not just a single thread).
//
- // On the other hand, V8 emits a few instructions for some arm atomics which
- // do appear to be a low level form of a spin lock. With an abundance of
- // caution, we only claim to have 'true lock free' support for 8 byte sizes
- // on x64 platforms. If people care about this function returning true, then
- // we need to clarify exactly what 'lock free' means at the standard level.
- bool is_lock_free = size == 1 || size == 2 || size == 4;
-#if V8_TARGET_ARCH_x64
- is_lock_free |= size == 8;
-#endif
- return is_lock_free;
+ // This property is often, but not always, aligned with whether atomic
+ // accesses are implemented with software locks such as mutexes.
+ //
+ // V8 has lock free atomics for all sizes on all supported first-class
+ // architectures: ia32, x64, ARM32 variants, and ARM64. Further, this property
+ // is depended upon by WebAssembly, which prescribes that all atomic accesses
+ // are always lock free.
+ return size == 1 || size == 2 || size == 4 || size == 8;
}
// ES #sec-atomics.islockfree
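
Since the rewritten predicate now reduces to a plain size check, its observable behaviour fits in a few lines; a standalone mirror of the new logic (an illustration, not the V8 function itself):

#include <cassert>

// Mirrors the post-change predicate: 1, 2, 4 and 8 byte accesses report true
// on every first-class architecture; everything else reports false.
bool IsLockFreeSize(double size) {
  return size == 1 || size == 2 || size == 4 || size == 8;
}

int main() {
  assert(IsLockFreeSize(4));
  assert(IsLockFreeSize(8));   // previously claimed only on x64
  assert(!IsLockFreeSize(3));  // non-power-of-two sizes never qualify
}
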
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 5fe534879a..e2d1635274 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -973,7 +973,7 @@ void StringBuiltinsAssembler::StringIndexOf(
const TNode<IntPtrT> search_length =
IntPtrSub(subject_length, start_position);
const TNode<IntPtrT> search_byte =
- ChangeInt32ToIntPtr(Load(MachineType::Uint8(), adjusted_search_ptr));
+ ChangeInt32ToIntPtr(Load<Uint8T>(adjusted_search_ptr));
const TNode<ExternalReference> memchr =
ExternalConstant(ExternalReference::libc_memchr_function());
@@ -1074,8 +1074,8 @@ TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
// ES6 String.prototype.includes(searchString [, position])
// #sec-string.prototype.includes
TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIncludes, argc, context);
}
@@ -1083,8 +1083,8 @@ TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
// ES6 String.prototype.indexOf(searchString [, position])
// #sec-string.prototype.indexof
TF_BUILTIN(StringPrototypeIndexOf, StringIncludesIndexOfAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIndexOf, argc, context);
}
@@ -1293,8 +1293,7 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
replace));
},
[=](TNode<Object> fn) {
- Callable call_callable = CodeFactory::Call(isolate());
- Return(CallJS(call_callable, context, fn, search, receiver, replace));
+ Return(Call(context, fn, search, receiver, replace));
});
// Convert {receiver} and {search} to strings.
@@ -1394,10 +1393,9 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
BIND(&if_iscallablereplace);
{
- Callable call_callable = CodeFactory::Call(isolate());
const TNode<Object> replacement =
- CallJS(call_callable, context, replace, UndefinedConstant(),
- search_string, match_start_index, subject_string);
+ Call(context, replace, UndefinedConstant(), search_string,
+ match_start_index, subject_string);
const TNode<String> replacement_string =
ToString_Inline(context, replacement);
var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
@@ -1463,8 +1461,7 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
context, maybe_regexp, receiver, symbol, property_to_check,
[=] { Return(CallBuiltin(builtin, context, maybe_regexp, receiver)); },
[=](TNode<Object> fn) {
- Callable call_callable = CodeFactory::Call(isolate());
- Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
+ Return(Call(context, fn, maybe_regexp, receiver));
});
// maybe_regexp is not a RegExp nor has [@@match / @@search] property.
@@ -1494,9 +1491,7 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
BIND(&slow_path);
{
TNode<Object> maybe_func = GetProperty(context, regexp, symbol);
- Callable call_callable = CodeFactory::Call(isolate());
- Return(CallJS(call_callable, context, maybe_func, regexp,
- receiver_string));
+ Return(Call(context, maybe_func, regexp, receiver_string));
}
}
}
@@ -1588,8 +1583,7 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
RegExpPrototypeMatchAllImpl(context, native_context, maybe_regexp, s));
};
auto if_generic_call = [=](TNode<Object> fn) {
- Callable call_callable = CodeFactory::Call(isolate());
- Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
+ Return(Call(context, fn, maybe_regexp, receiver));
};
MaybeCallFunctionAtSymbol(
context, maybe_regexp, receiver, isolate()->factory()->match_all_symbol(),
@@ -1606,10 +1600,9 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
maybe_regexp, StringConstant("g"));
// 5. Return ? Invoke(rx, @@matchAll, « S »).
- Callable callable = CodeFactory::Call(isolate());
TNode<Object> match_all_func =
GetProperty(context, rx, isolate()->factory()->match_all_symbol());
- Return(CallJS(callable, context, match_all_func, rx, s));
+ Return(Call(context, match_all_func, rx, s));
}
// ES6 #sec-string.prototype.search
@@ -1699,8 +1692,8 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
const int kSeparatorArg = 0;
const int kLimitArg = 1;
- const TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -1724,9 +1717,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
separator, receiver, limit));
},
[&](TNode<Object> fn) {
- Callable call_callable = CodeFactory::Call(isolate());
- args.PopAndReturn(
- CallJS(call_callable, context, fn, separator, receiver, limit));
+ args.PopAndReturn(Call(context, fn, separator, receiver, limit));
});
// String and integer conversions.
@@ -1808,8 +1799,8 @@ TF_BUILTIN(StringSubstring, StringBuiltinsAssembler) {
// ES6 #sec-string.prototype.trim
TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(String::kTrim, "String.prototype.trim", argc, context);
@@ -1817,8 +1808,8 @@ TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
// https://github.com/tc39/proposal-string-left-right-trim
TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(String::kTrimStart, "String.prototype.trimLeft", argc, context);
@@ -1826,8 +1817,8 @@ TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
// https://github.com/tc39/proposal-string-left-right-trim
TF_BUILTIN(StringPrototypeTrimEnd, StringTrimAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(String::kTrimEnd, "String.prototype.trimRight", argc, context);
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index 3ef8fc2a9b..61cd984e7f 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -68,7 +68,7 @@ namespace string {
const string: String = ToThisString(receiver, methodName);
// 3. Let position be ? ToInteger(pos).
- const indexNumber: Number = ToInteger_Inline(position, kTruncateMinusZero);
+ const indexNumber: Number = ToInteger_Inline(position);
// Convert the {position} to a uintptr and check that it's in bounds of
// the {string}.
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 23339fa1c8..021a0e9240 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -89,8 +89,8 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
@@ -378,13 +378,6 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
BIND(&next);
}
-TNode<BoolT> TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(
- TNode<JSArrayBuffer> buffer) {
- TNode<Uint32T> bitfield =
- LoadObjectField<Uint32T>(buffer, JSArrayBuffer::kBitFieldOffset);
- return IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield);
-}
-
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) {
offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag),
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 7424020596..1008b6bdd7 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -85,8 +85,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
- TNode<BoolT> IsSharedArrayBuffer(TNode<JSArrayBuffer> buffer);
-
void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder,
TNode<ByteArray> base,
TNode<UintPtrT> offset);
diff --git a/deps/v8/src/builtins/builtins-utils-inl.h b/deps/v8/src/builtins/builtins-utils-inl.h
index c8c9a2522c..82d5fe2873 100644
--- a/deps/v8/src/builtins/builtins-utils-inl.h
+++ b/deps/v8/src/builtins/builtins-utils-inl.h
@@ -23,11 +23,21 @@ Handle<Object> BuiltinArguments::atOrUndefined(Isolate* isolate,
Handle<Object> BuiltinArguments::receiver() const { return at<Object>(0); }
Handle<JSFunction> BuiltinArguments::target() const {
- return Arguments::at<JSFunction>(Arguments::length() - 1 - kTargetOffset);
+#ifdef V8_REVERSE_JSARGS
+ int index = kTargetOffset;
+#else
+ int index = Arguments::length() - 1 - kTargetOffset;
+#endif
+ return Handle<JSFunction>(address_of_arg_at(index));
}
Handle<HeapObject> BuiltinArguments::new_target() const {
- return Arguments::at<HeapObject>(Arguments::length() - 1 - kNewTargetOffset);
+#ifdef V8_REVERSE_JSARGS
+ int index = kNewTargetOffset;
+#else
+ int index = Arguments::length() - 1 - kNewTargetOffset;
+#endif
+ return Handle<JSFunction>(address_of_arg_at(index));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 601dfd5813..3bed3bc651 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
// Arguments object passed to C++ builtins.
-class BuiltinArguments : public Arguments {
+class BuiltinArguments : public JavaScriptArguments {
public:
BuiltinArguments(int length, Address* arguments)
: Arguments(length, arguments) {
@@ -25,13 +25,24 @@ class BuiltinArguments : public Arguments {
Object operator[](int index) const {
DCHECK_LT(index, length());
- return Arguments::operator[](index);
+ return Object(*address_of_arg_at(index + kArgsOffset));
}
template <class S = Object>
Handle<S> at(int index) const {
DCHECK_LT(index, length());
- return Arguments::at<S>(index);
+ return Handle<S>(address_of_arg_at(index + kArgsOffset));
+ }
+
+ inline void set_at(int index, Object value) {
+ DCHECK_LT(index, length());
+ *address_of_arg_at(index + kArgsOffset) = value.ptr();
+ }
+
+ // Note: this should return the address after the receiver,
+ // even when length() == 1.
+ inline Address* address_of_first_argument() const {
+ return address_of_arg_at(kArgsOffset + 1); // Skips receiver.
}
static constexpr int kNewTargetOffset = 0;
@@ -42,6 +53,12 @@ class BuiltinArguments : public Arguments {
static constexpr int kNumExtraArgs = 4;
static constexpr int kNumExtraArgsWithReceiver = 5;
+#ifdef V8_REVERSE_JSARGS
+ static constexpr int kArgsOffset = 4;
+#else
+ static constexpr int kArgsOffset = 0;
+#endif
+
inline Handle<Object> atOrUndefined(Isolate* isolate, int index) const;
inline Handle<Object> receiver() const;
inline Handle<JSFunction> target() const;
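
A compact reading of the two argument-layout changes above: with V8_REVERSE_JSARGS the extra builtin frame slots precede the JS arguments, so every user-visible index is shifted by kArgsOffset, and target/new_target move from the end of the frame to its start. A standalone sketch of the index arithmetic; only the offsets 4 and 0 come from the diff, the rest is illustrative:

#include <cassert>

// Stand-ins for the constants in builtins-utils.h.
constexpr int kArgsOffsetReversed = 4;  // V8_REVERSE_JSARGS
constexpr int kArgsOffsetClassic = 0;

// Raw frame slot that BuiltinArguments::at(index) reads from.
int RawSlot(int user_index, bool reverse_jsargs) {
  return user_index +
         (reverse_jsargs ? kArgsOffsetReversed : kArgsOffsetClassic);
}

int main() {
  assert(RawSlot(0, /*reverse_jsargs=*/true) == 4);   // receiver
  assert(RawSlot(2, /*reverse_jsargs=*/true) == 6);   // second JS argument
  assert(RawSlot(2, /*reverse_jsargs=*/false) == 2);  // unchanged layout
}
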
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 0f5d86e646..770f5da97b 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -28,6 +28,13 @@ class WasmBuiltinsAssembler : public CodeStubAssembler {
IntPtrConstant(WasmInstanceObject::kNativeContextOffset -
kHeapObjectTag)));
}
+
+ TNode<Smi> SmiFromUint32WithSaturation(TNode<Uint32T> value, uint32_t max) {
+ DCHECK_LE(max, static_cast<uint32_t>(Smi::kMaxValue));
+ TNode<Uint32T> capped_value = SelectConstant(
+ Uint32LessThan(value, Uint32Constant(max)), value, Uint32Constant(max));
+ return SmiFromUint32(capped_value);
+ }
};
TF_BUILTIN(WasmStackGuard, WasmBuiltinsAssembler) {
@@ -53,7 +60,16 @@ TF_BUILTIN(WasmRethrow, WasmBuiltinsAssembler) {
TNode<Object> exception = CAST(Parameter(Descriptor::kException));
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
+
+ Label nullref(this, Label::kDeferred);
+ GotoIf(TaggedEqual(NullConstant(), exception), &nullref);
+
TailCallRuntime(Runtime::kReThrow, context, exception);
+
+ BIND(&nullref);
+ MessageTemplate message_id = MessageTemplate::kWasmTrapRethrowNullRef;
+ TailCallRuntime(Runtime::kThrowWasmError, context,
+ SmiConstant(static_cast<int>(message_id)));
}
TF_BUILTIN(WasmTraceMemory, WasmBuiltinsAssembler) {
@@ -79,48 +95,118 @@ TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) {
Return(Unsigned(SmiToInt32(result_smi)));
}
-TF_BUILTIN(WasmI32AtomicWait, WasmBuiltinsAssembler) {
+TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
+ if (!Is32()) {
+ Unreachable();
+ return;
+ }
+
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
+ TNode<Number> address_number = ChangeUint32ToTagged(address);
+
TNode<Int32T> expected_value =
UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
- TNode<Float64T> timeout =
- UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
+ TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
+
+ TNode<IntPtrT> timeout_low =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
+ TNode<IntPtrT> timeout_high =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
+ TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<Context> context = LoadContextFromInstance(instance);
+
+ TNode<Smi> result_smi =
+ CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
+ address_number, expected_value_number, timeout));
+ Return(Unsigned(SmiToInt32(result_smi)));
+}
+
+TF_BUILTIN(WasmI32AtomicWait64, WasmBuiltinsAssembler) {
+ if (!Is64()) {
+ Unreachable();
+ return;
+ }
+
+ TNode<Uint32T> address =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
+
+ TNode<Int32T> expected_value =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
- TNode<Number> timeout_number = ChangeFloat64ToTagged(timeout);
+
+ TNode<IntPtrT> timeout_raw =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
+ TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
+
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
- address_number, expected_value_number, timeout_number));
+ address_number, expected_value_number, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
-TF_BUILTIN(WasmI64AtomicWait, WasmBuiltinsAssembler) {
+TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
+ if (!Is32()) {
+ Unreachable();
+ return;
+ }
+
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
- TNode<Uint32T> expected_value_high =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueHigh));
- TNode<Uint32T> expected_value_low =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueLow));
- TNode<Float64T> timeout =
- UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
+ TNode<Number> address_number = ChangeUint32ToTagged(address);
+
+ TNode<IntPtrT> expected_value_low =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueLow));
+ TNode<IntPtrT> expected_value_high =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueHigh));
+ TNode<BigInt> expected_value =
+ BigIntFromInt32Pair(expected_value_low, expected_value_high);
+
+ TNode<IntPtrT> timeout_low =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
+ TNode<IntPtrT> timeout_high =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
+ TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<Context> context = LoadContextFromInstance(instance);
+
+ TNode<Smi> result_smi =
+ CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
+ address_number, expected_value, timeout));
+ Return(Unsigned(SmiToInt32(result_smi)));
+}
+
+TF_BUILTIN(WasmI64AtomicWait64, WasmBuiltinsAssembler) {
+ if (!Is64()) {
+ Unreachable();
+ return;
+ }
+
+ TNode<Uint32T> address =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
- TNode<Number> expected_value_high_number =
- ChangeUint32ToTagged(expected_value_high);
- TNode<Number> expected_value_low_number =
- ChangeUint32ToTagged(expected_value_low);
- TNode<Number> timeout_number = ChangeFloat64ToTagged(timeout);
+
+ TNode<IntPtrT> expected_value_raw =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValue));
+ TNode<BigInt> expected_value = BigIntFromInt64(expected_value_raw);
+
+ TNode<IntPtrT> timeout_raw =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
+ TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
+
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
- TNode<Smi> result_smi = CAST(CallRuntime(
- Runtime::kWasmI64AtomicWait, context, instance, address_number,
- expected_value_high_number, expected_value_low_number, timeout_number));
+ TNode<Smi> result_smi =
+ CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
+ address_number, expected_value, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
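
The 32-bit/64-bit split above exists because a 64-bit expected value or timeout cannot travel in a single register on 32-bit targets; it arrives as a low/high pair and is reassembled via BigIntFromInt32Pair before the runtime call. A standalone sketch of that reassembly (hypothetical helper, not the CSA macro):

#include <cassert>
#include <cstdint>

// Rebuild a 64-bit value from its 32-bit halves, as the *Wait32 builtins do
// conceptually before handing the timeout to the runtime.
int64_t FromInt32Pair(uint32_t low, uint32_t high) {
  return static_cast<int64_t>((static_cast<uint64_t>(high) << 32) |
                              static_cast<uint64_t>(low));
}

int main() {
  assert(FromInt32Pair(0xFFFFFFFFu, 0) == 0xFFFFFFFFLL);
  assert(FromInt32Pair(0, 1) == (int64_t{1} << 32));
  assert(FromInt32Pair(0xFFFFFFFFu, 0xFFFFFFFFu) == -1);
}
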
@@ -144,6 +230,66 @@ TF_BUILTIN(WasmMemoryGrow, WasmBuiltinsAssembler) {
Return(Int32Constant(-1));
}
+TF_BUILTIN(WasmTableInit, WasmBuiltinsAssembler) {
+ TNode<Uint32T> dst_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kDestination));
+ // We cap {dst}, {src}, and {size} by {wasm::kV8MaxWasmTableSize + 1} to make
+ // sure that the values fit into a Smi.
+ STATIC_ASSERT(static_cast<size_t>(Smi::kMaxValue) >=
+ wasm::kV8MaxWasmTableSize + 1);
+ constexpr uint32_t kCap =
+ static_cast<uint32_t>(wasm::kV8MaxWasmTableSize + 1);
+ TNode<Smi> dst = SmiFromUint32WithSaturation(dst_raw, kCap);
+ TNode<Uint32T> src_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kSource));
+ TNode<Smi> src = SmiFromUint32WithSaturation(src_raw, kCap);
+ TNode<Uint32T> size_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kSize));
+ TNode<Smi> size = SmiFromUint32WithSaturation(size_raw, kCap);
+ TNode<Smi> table_index =
+ UncheckedCast<Smi>(Parameter(Descriptor::kTableIndex));
+ TNode<Smi> segment_index =
+ UncheckedCast<Smi>(Parameter(Descriptor::kSegmentIndex));
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<Context> context = LoadContextFromInstance(instance);
+
+ TailCallRuntime(Runtime::kWasmTableInit, context, instance, table_index,
+ segment_index, dst, src, size);
+}
+
+TF_BUILTIN(WasmTableCopy, WasmBuiltinsAssembler) {
+ // We cap {dst}, {src}, and {size} by {wasm::kV8MaxWasmTableSize + 1} to make
+ // sure that the values fit into a Smi.
+ STATIC_ASSERT(static_cast<size_t>(Smi::kMaxValue) >=
+ wasm::kV8MaxWasmTableSize + 1);
+ constexpr uint32_t kCap =
+ static_cast<uint32_t>(wasm::kV8MaxWasmTableSize + 1);
+
+ TNode<Uint32T> dst_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kDestination));
+ TNode<Smi> dst = SmiFromUint32WithSaturation(dst_raw, kCap);
+
+ TNode<Uint32T> src_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kSource));
+ TNode<Smi> src = SmiFromUint32WithSaturation(src_raw, kCap);
+
+ TNode<Uint32T> size_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kSize));
+ TNode<Smi> size = SmiFromUint32WithSaturation(size_raw, kCap);
+
+ TNode<Smi> dst_table =
+ UncheckedCast<Smi>(Parameter(Descriptor::kDestinationTable));
+
+ TNode<Smi> src_table =
+ UncheckedCast<Smi>(Parameter(Descriptor::kSourceTable));
+
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<Context> context = LoadContextFromInstance(instance);
+
+ TailCallRuntime(Runtime::kWasmTableCopy, context, instance, dst_table,
+ src_table, dst, src, size);
+}
+
TF_BUILTIN(WasmTableGet, WasmBuiltinsAssembler) {
TNode<Int32T> entry_index =
UncheckedCast<Int32T>(Parameter(Descriptor::kEntryIndex));
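
SmiFromUint32WithSaturation and the capping comments in the table builtins above reduce to clamping untrusted 32-bit values at a bound known to fit in a Smi. A standalone sketch, where kTableLimit is only a placeholder for wasm::kV8MaxWasmTableSize + 1:

#include <cassert>
#include <cstdint>

constexpr uint32_t kTableLimit = 10'000'001;  // placeholder for the real bound

// Clamp so the value is guaranteed to fit into a Smi before tagging it.
uint32_t SaturateToLimit(uint32_t value) {
  return value < kTableLimit ? value : kTableLimit;
}

int main() {
  assert(SaturateToLimit(42) == 42);                    // in range: unchanged
  assert(SaturateToLimit(0xFFFFFFFFu) == kTableLimit);  // out of range: clamped
}
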
diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc
index 28fb9c9cbd..e75c7fae9d 100644
--- a/deps/v8/src/builtins/builtins-weak-refs.cc
+++ b/deps/v8/src/builtins/builtins-weak-refs.cc
@@ -9,7 +9,7 @@
namespace v8 {
namespace internal {
-BUILTIN(FinalizationGroupConstructor) {
+BUILTIN(FinalizationRegistryConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
@@ -31,22 +31,22 @@ BUILTIN(FinalizationGroupConstructor) {
isolate, result,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSFinalizationGroup> finalization_group =
- Handle<JSFinalizationGroup>::cast(result);
- finalization_group->set_native_context(*isolate->native_context());
- finalization_group->set_cleanup(*cleanup);
- finalization_group->set_flags(
- JSFinalizationGroup::ScheduledForCleanupField::encode(false));
-
- DCHECK(finalization_group->active_cells().IsUndefined(isolate));
- DCHECK(finalization_group->cleared_cells().IsUndefined(isolate));
- DCHECK(finalization_group->key_map().IsUndefined(isolate));
- return *finalization_group;
+ Handle<JSFinalizationRegistry> finalization_registry =
+ Handle<JSFinalizationRegistry>::cast(result);
+ finalization_registry->set_native_context(*isolate->native_context());
+ finalization_registry->set_cleanup(*cleanup);
+ finalization_registry->set_flags(
+ JSFinalizationRegistry::ScheduledForCleanupField::encode(false));
+
+ DCHECK(finalization_registry->active_cells().IsUndefined(isolate));
+ DCHECK(finalization_registry->cleared_cells().IsUndefined(isolate));
+ DCHECK(finalization_registry->key_map().IsUndefined(isolate));
+ return *finalization_registry;
}
-BUILTIN(FinalizationGroupRegister) {
+BUILTIN(FinalizationRegistryRegister) {
HandleScope scope(isolate);
- const char* method_name = "FinalizationGroup.prototype.register";
+ const char* method_name = "FinalizationRegistry.prototype.register";
// 1. Let finalizationGroup be the this value.
//
@@ -55,7 +55,7 @@ BUILTIN(FinalizationGroupRegister) {
//
// 4. If finalizationGroup does not have a [[Cells]] internal slot,
// throw a TypeError exception.
- CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
+ CHECK_RECEIVER(JSFinalizationRegistry, finalization_registry, method_name);
Handle<Object> target = args.atOrUndefined(isolate, 1);
@@ -86,15 +86,15 @@ BUILTIN(FinalizationGroupRegister) {
}
// TODO(marja): Realms.
- JSFinalizationGroup::Register(finalization_group,
- Handle<JSReceiver>::cast(target), holdings,
- unregister_token, isolate);
+ JSFinalizationRegistry::Register(finalization_registry,
+ Handle<JSReceiver>::cast(target), holdings,
+ unregister_token, isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
-BUILTIN(FinalizationGroupUnregister) {
+BUILTIN(FinalizationRegistryUnregister) {
HandleScope scope(isolate);
- const char* method_name = "FinalizationGroup.prototype.unregister";
+ const char* method_name = "FinalizationRegistry.prototype.unregister";
// 1. Let finalizationGroup be the this value.
//
@@ -103,7 +103,7 @@ BUILTIN(FinalizationGroupUnregister) {
//
// 3. If finalizationGroup does not have a [[Cells]] internal slot,
// throw a TypeError exception.
- CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
+ CHECK_RECEIVER(JSFinalizationRegistry, finalization_registry, method_name);
Handle<Object> unregister_token = args.atOrUndefined(isolate, 1);
@@ -115,15 +115,16 @@ BUILTIN(FinalizationGroupUnregister) {
unregister_token));
}
- bool success = JSFinalizationGroup::Unregister(
- finalization_group, Handle<JSReceiver>::cast(unregister_token), isolate);
+ bool success = JSFinalizationRegistry::Unregister(
+ finalization_registry, Handle<JSReceiver>::cast(unregister_token),
+ isolate);
return *isolate->factory()->ToBoolean(success);
}
-BUILTIN(FinalizationGroupCleanupSome) {
+BUILTIN(FinalizationRegistryCleanupSome) {
HandleScope scope(isolate);
- const char* method_name = "FinalizationGroup.prototype.cleanupSome";
+ const char* method_name = "FinalizationRegistry.prototype.cleanupSome";
// 1. Let finalizationGroup be the this value.
//
@@ -132,9 +133,9 @@ BUILTIN(FinalizationGroupCleanupSome) {
//
// 3. If finalizationGroup does not have a [[Cells]] internal slot,
// throw a TypeError exception.
- CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
+ CHECK_RECEIVER(JSFinalizationRegistry, finalization_registry, method_name);
- Handle<Object> callback(finalization_group->cleanup(), isolate);
+ Handle<Object> callback(finalization_registry->cleanup(), isolate);
Handle<Object> callback_obj = args.atOrUndefined(isolate, 1);
// 4. If callback is not undefined and IsCallable(callback) is
@@ -148,10 +149,9 @@ BUILTIN(FinalizationGroupCleanupSome) {
callback = callback_obj;
}
- // Don't do set_scheduled_for_cleanup(false); we still have the microtask
- // scheduled and don't want to schedule another one in case the user never
- // executes microtasks.
- if (JSFinalizationGroup::Cleanup(isolate, finalization_group, callback)
+ // Don't do set_scheduled_for_cleanup(false); we still have the task
+ // scheduled.
+ if (JSFinalizationRegistry::Cleanup(isolate, finalization_registry, callback)
.IsNothing()) {
DCHECK(isolate->has_pending_exception());
return ReadOnlyRoots(isolate).exception();
@@ -159,19 +159,20 @@ BUILTIN(FinalizationGroupCleanupSome) {
return ReadOnlyRoots(isolate).undefined_value();
}
-BUILTIN(FinalizationGroupCleanupIteratorNext) {
+BUILTIN(FinalizationRegistryCleanupIteratorNext) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSFinalizationGroupCleanupIterator, iterator, "next");
+ CHECK_RECEIVER(JSFinalizationRegistryCleanupIterator, iterator, "next");
- Handle<JSFinalizationGroup> finalization_group(iterator->finalization_group(),
- isolate);
- if (!finalization_group->NeedsCleanup()) {
+ Handle<JSFinalizationRegistry> finalization_registry(
+ iterator->finalization_registry(), isolate);
+ if (!finalization_registry->NeedsCleanup()) {
return *isolate->factory()->NewJSIteratorResult(
handle(ReadOnlyRoots(isolate).undefined_value(), isolate), true);
}
- Handle<Object> holdings = handle(
- JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate),
- isolate);
+ Handle<Object> holdings =
+ handle(JSFinalizationRegistry::PopClearedCellHoldings(
+ finalization_registry, isolate),
+ isolate);
return *isolate->factory()->NewJSIteratorResult(holdings, false);
}
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 955656d669..34f7ddc18a 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -8,6 +8,7 @@
#include "src/builtins/builtins-descriptors.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/diagnostics/code-tracer.h"
#include "src/execution/isolate.h"
@@ -319,6 +320,7 @@ class OffHeapTrampolineGenerator {
{
FrameScope scope(&masm_, StackFrame::NONE);
if (type == TrampolineType::kJump) {
+ masm_.CodeEntry();
masm_.JumpToInstructionStream(off_heap_entry);
} else {
masm_.Trap();
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index 7e8d08097f..cb7ff412de 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -20,7 +20,6 @@ extern macro IsContext(HeapObject): bool;
extern macro IsNativeContext(HeapObject): bool;
extern macro IsJSReceiver(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
-extern macro IsDetachedBuffer(JSArrayBuffer): bool;
extern macro IsHeapNumber(HeapObject): bool;
extern macro IsBigInt(HeapObject): bool;
extern macro IsFixedArray(HeapObject): bool;
@@ -624,8 +623,7 @@ Cast<DebugInfo>(implicit context: Context)(o: HeapObject): DebugInfo
extern macro IsCoverageInfo(HeapObject): bool;
Cast<CoverageInfo>(implicit context: Context)(o: HeapObject): CoverageInfo
labels CastError {
- // TODO(jgruber): Assign an instance type.
- if (IsFixedArray(o)) return %RawDownCast<CoverageInfo>(o);
+ if (IsCoverageInfo(o)) return %RawDownCast<CoverageInfo>(o);
goto CastError;
}
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index fe44fe1287..ee9be1d411 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -168,6 +168,12 @@ Convert<intptr, Smi>(s: Smi): intptr {
Convert<uintptr, PositiveSmi>(ps: PositiveSmi): uintptr {
return Unsigned(SmiUntag(ps));
}
+Convert<intptr, TaggedIndex>(ti: TaggedIndex): intptr {
+ return TaggedIndexToIntPtr(ti);
+}
+Convert<TaggedIndex, intptr>(i: intptr): TaggedIndex {
+ return IntPtrToTaggedIndex(i);
+}
Convert<intptr, uintptr>(ui: uintptr): intptr {
const i = Signed(ui);
assert(i >= 0);
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 739cdfdcdc..5bea93214c 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -5,17 +5,18 @@
#if V8_TARGET_ARCH_IA32
#include "src/api/api-arguments.h"
+#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
@@ -65,6 +66,24 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void CompareStackLimit(MacroAssembler* masm, Register with,
+ StackLimitKind kind) {
+ DCHECK(masm->root_array_available());
+ Isolate* isolate = masm->isolate();
+ // Address through the root register. No load is needed.
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ __ cmp(with, Operand(kRootRegister, offset));
+}
+
void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch, Label* stack_overflow,
bool include_receiver = false) {
@@ -637,7 +656,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRealStackLimit(esp);
+ CompareStackLimit(masm, esp, StackLimitKind::kRealStackLimit);
__ j(below, &stack_overflow);
// Pop return address.
@@ -856,22 +875,30 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop; instead the
+// JumpLoop is re-executed so it jumps to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register scratch1, Register scratch2,
- Label* if_return) {
+ Register scratch3, Label* if_return) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+ // will restore the original bytecode. In order to simplify the code, we have
+ // a backup of it.
+ Register original_bytecode_offset = scratch3;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Move(bytecode_size_table,
Immediate(ExternalReference::bytecode_size_table_address()));
// Load the current bytecode.
- __ movzx_b(bytecode, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
+ __ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -910,9 +937,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ cmp(bytecode,
+ Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ j(not_equal, &not_jump_loop, Label::kNear);
+ // If this is a wide or extra wide JumpLoop, we need to restore the original
+ // bytecode_offset since we might have increased it to skip the wide /
+ // extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ jmp(&end, Label::kNear);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ add(bytecode_offset,
Operand(bytecode_size_table, bytecode, times_int_size, 0));
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
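
The extra scratch register in the hunk above exists for one special case: when the current bytecode is JumpLoop, the offset must not advance, and any bump past a Wide/ExtraWide prefix has to be undone so the whole JumpLoop is re-dispatched. A standalone control-flow sketch of that decision (hypothetical types; the return-bytecode bail-out is omitted and the real code works on registers):

#include <cassert>

enum class Bytecode { kAdd, kJumpLoop, kReturn };

// Returns the offset to dispatch next. 'offset' may already have been bumped
// past a Wide/ExtraWide prefix; 'original_offset' is the backup taken before
// that bump, and 'size' is the size of the current (unprefixed) bytecode.
int AdvanceOrRestore(Bytecode bytecode, int offset, int original_offset,
                     int size) {
  if (bytecode == Bytecode::kJumpLoop) return original_offset;  // re-execute
  return offset + size;  // normal case: step over the current bytecode
}

int main() {
  assert(AdvanceOrRestore(Bytecode::kAdd, 11, 10, 2) == 13);
  assert(AdvanceOrRestore(Bytecode::kJumpLoop, 11, 10, 3) == 10);  // restored
}
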
@@ -1028,7 +1070,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ mov(eax, esp);
__ sub(eax, frame_size);
- __ CompareRealStackLimit(eax);
+ CompareStackLimit(masm, eax, StackLimitKind::kRealStackLimit);
__ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1056,8 +1098,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(Operand(ebp, ecx, times_system_pointer_size, 0), edx);
__ bind(&no_incoming_new_target_or_generator_register);
- // Load accumulator and bytecode offset into registers.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ CompareStackLimit(masm, esp, StackLimitKind::kInterruptStackLimit);
+ __ j(below, &stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
+ // The accumulator is already loaded with undefined.
+
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1088,16 +1137,45 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
+ __ Push(eax);
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ecx,
- kInterpreterDispatchTableRegister, &do_return);
+ kInterpreterDispatchTableRegister, eax,
+ &do_return);
+ __ Pop(eax);
__ jmp(&do_dispatch);
__ bind(&do_return);
+ __ Pop(eax);
// The return value is in eax.
LeaveInterpreterFrame(masm, edx, ecx);
__ ret(0);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ mov(kInterpreterBytecodeArrayRegister,
+ Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ // It's ok to clobber kInterpreterBytecodeOffsetRegister since we are setting
+ // it again after continuing.
+ __ SmiTag(kInterpreterBytecodeOffsetRegister);
+ __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ kInterpreterBytecodeOffsetRegister);
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1408,6 +1486,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ j(greater_equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ movzx_b(scratch, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
@@ -1425,12 +1512,21 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ j(equal, &function_entry_bytecode);
+
// Advance to the next bytecode.
Label if_return;
+ __ Push(eax);
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ecx, esi,
- &if_return);
+ eax, &if_return);
+ __ Pop(eax);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ mov(ecx, kInterpreterBytecodeOffsetRegister);
__ SmiTag(ecx);
@@ -1438,8 +1534,18 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
+ // No need to pop eax here since we will be aborting anyway.
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
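
The sentinel handling above works because kFunctionEntryBytecodeOffset yields an offset no real bytecode can have: the frame records it while the implicit function-entry stack check runs, and code that later reads the offset back maps the sentinel to the first actual bytecode instead of advancing from it. A standalone sketch with assumed placeholder constants:

#include <cassert>

// Stand-in constants (assumed for illustration only).
constexpr int kFirstBytecodeOffset = 40;
constexpr int kFunctionEntrySentinel = kFirstBytecodeOffset - 1;

// Conceptual behaviour of InterpreterEnterBytecodeAdvance: a frame whose
// saved offset is the function-entry sentinel resumes at the first real
// bytecode; any other frame advances past its current bytecode as usual.
int NextDispatchOffset(int saved_offset, int current_bytecode_size) {
  if (saved_offset == kFunctionEntrySentinel) return kFirstBytecodeOffset;
  return saved_offset + current_bytecode_size;
}

int main() {
  assert(NextDispatchOffset(kFunctionEntrySentinel, 2) == kFirstBytecodeOffset);
  assert(NextDispatchOffset(kFirstBytecodeOffset, 2) ==
         kFirstBytecodeOffset + 2);
}
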
@@ -1524,9 +1630,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // -- esp[4] : argArray
- // -- esp[8] : thisArg
- // -- esp[12] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : thisArg
+ // -- args[2] : argArray
// -----------------------------------
// 1. Load receiver into xmm0, argArray into edx (if present), remove all
@@ -1534,20 +1641,19 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// present) instead.
{
Label no_arg_array, no_this_arg;
+ StackArgumentsAccessor args(eax);
// Spill receiver to allow the usage of edi as a scratch register.
- __ movd(xmm0,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
+ __ movd(xmm0, args[0]);
__ LoadRoot(edx, RootIndex::kUndefinedValue);
__ mov(edi, edx);
__ test(eax, eax);
__ j(zero, &no_this_arg, Label::kNear);
{
- __ mov(edi, Operand(esp, eax, times_system_pointer_size, 0));
+ __ mov(edi, args[1]);
__ cmp(eax, Immediate(1));
__ j(equal, &no_arg_array, Label::kNear);
- __ mov(edx,
- Operand(esp, eax, times_system_pointer_size, -kSystemPointerSize));
+ __ mov(edx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
@@ -1615,7 +1721,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
}
// 2. Get the callable to call (passed as receiver) from the stack.
- __ mov(edi, Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
+ {
+ StackArgumentsAccessor args(eax);
+ __ mov(edi, args.GetReceiverOperand());
+ }
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -1641,10 +1750,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // -- esp[4] : argumentsList
- // -- esp[8] : thisArgument
- // -- esp[12] : target
- // -- esp[16] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : target
+ // -- args[2] : thisArgument
+ // -- args[3] : argumentsList
// -----------------------------------
// 1. Load target into edi (if present), argumentsList into edx (if present),
@@ -1652,20 +1762,18 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// thisArgument (if present) instead.
{
Label done;
+ StackArgumentsAccessor args(eax);
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_system_pointer_size,
- -0 * kSystemPointerSize));
+ __ mov(edi, args[1]); // target
__ j(equal, &done, Label::kNear);
- __ mov(ecx, Operand(esp, eax, times_system_pointer_size,
- -1 * kSystemPointerSize));
+ __ mov(ecx, args[2]); // thisArgument
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_system_pointer_size,
- -2 * kSystemPointerSize));
+ __ mov(edx, args[3]); // argumentsList
__ bind(&done);
// Spill argumentsList to use edx as a scratch register.
@@ -1701,10 +1809,11 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // -- esp[4] : new.target (optional)
- // -- esp[8] : argumentsList
- // -- esp[12] : target
- // -- esp[16] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : target
+ // -- args[2] : argumentsList
+ // -- args[3] : new.target (optional)
// -----------------------------------
// 1. Load target into edi (if present), argumentsList into ecx (if present),
@@ -1713,21 +1822,19 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// (if present) instead.
{
Label done;
+ StackArgumentsAccessor args(eax);
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_system_pointer_size,
- -0 * kSystemPointerSize));
+ __ mov(edi, args[1]); // target
__ mov(edx, edi);
__ j(equal, &done, Label::kNear);
- __ mov(ecx, Operand(esp, eax, times_system_pointer_size,
- -1 * kSystemPointerSize));
+ __ mov(ecx, args[2]); // argumentsList
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_system_pointer_size,
- -2 * kSystemPointerSize));
+ __ mov(edx, args[3]); // new.target
__ bind(&done);
// Spill argumentsList to use ecx as a scratch register.
@@ -1989,6 +2096,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
+ StackArgumentsAccessor args(eax);
__ AssertFunction(edi);
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
@@ -2022,15 +2130,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(ecx);
} else {
Label convert_to_object, convert_receiver;
- __ mov(ecx,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
+ __ mov(ecx, args.GetReceiverOperand());
__ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx); // Clobbers ecx.
__ j(above_equal, &done_convert);
// Reload the receiver (it was clobbered by CmpObjectType).
- __ mov(ecx,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
+ __ mov(ecx, args.GetReceiverOperand());
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
__ JumpIfRoot(ecx, RootIndex::kUndefinedValue, &convert_global_proxy,
@@ -2066,8 +2172,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize),
- ecx);
+ __ mov(args.GetReceiverOperand(), ecx);
}
__ bind(&done_convert);
@@ -2125,7 +2230,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRealStackLimit(esp);
+ CompareStackLimit(masm, esp, StackLimitKind::kRealStackLimit);
__ j(above_equal, &done, Label::kNear);
// Restore the stack pointer.
__ lea(esp, Operand(esp, edx, times_system_pointer_size, 0));
@@ -2202,8 +2307,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(edi);
// Patch the receiver to [[BoundThis]].
+ StackArgumentsAccessor args(eax);
__ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
- __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize), ecx);
+ __ mov(args.GetReceiverOperand(), ecx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2220,6 +2326,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
// -----------------------------------
+ StackArgumentsAccessor args(eax);
Label non_callable, non_function, non_smi, non_jsfunction,
non_jsboundfunction;
@@ -2251,7 +2358,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// not we raise an exception).
__ bind(&non_function);
// Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize), edi);
+ __ mov(args.GetReceiverOperand(), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(edi, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
@@ -2334,6 +2441,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
// -----------------------------------
+ StackArgumentsAccessor args(eax);
// Check if target is a Smi.
Label non_constructor, non_proxy, non_jsfunction, non_jsboundfunction;
@@ -2370,8 +2478,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize),
- edi);
+ __ mov(args.GetReceiverOperand(), edi);
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(edi, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
@@ -2396,8 +2503,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
const Register kExpectedNumberOfArgumentsRegister = ecx;
Label invoke, dont_adapt_arguments, stack_overflow, enough, too_few;
- __ cmp(kExpectedNumberOfArgumentsRegister,
- SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ cmp(kExpectedNumberOfArgumentsRegister, kDontAdaptArgumentsSentinel);
__ j(equal, &dont_adapt_arguments);
__ cmp(eax, kExpectedNumberOfArgumentsRegister);
__ j(less, &too_few);
@@ -2564,7 +2670,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
offset += kSimd128Size;
}
- // Push the WASM instance as an explicit argument to WasmCompileLazy.
+ // Push the Wasm instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
// Push the function index as second argument.
__ Push(kWasmCompileLazyFuncIndexRegister);
@@ -2594,6 +2700,49 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ jmp(edi);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values; we restore
+ // them after the runtime call.
+ for (int reg_code : base::bits::IterateBitsBackwards(
+ WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Push(Register::from_code(reg_code));
+ }
+
+ constexpr int kFpStackSize =
+ kSimd128Size * WasmDebugBreakFrameConstants::kNumPushedFpRegisters;
+ __ AllocateStackSpace(kFpStackSize);
+ int offset = kFpStackSize;
+ for (int reg_code : base::bits::IterateBitsBackwards(
+ WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ offset -= kSimd128Size;
+ __ movdqu(Operand(esp, offset), DoubleRegister::from_code(reg_code));
+ }
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ for (int reg_code :
+ base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ __ movdqu(DoubleRegister::from_code(reg_code), Operand(esp, offset));
+ offset += kSimd128Size;
+ }
+ __ add(esp, Immediate(kFpStackSize));
+ for (int reg_code :
+ base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Pop(Register::from_code(reg_code));
+ }
+ }
+
+ __ ret(0);
+}
+
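The Generate_WasmDebugBreak builtin added here (and in the mips, mips64 and ppc ports further down) follows one shape: spill every register that may hold live Wasm values, call Runtime::kWasmDebugBreak with a zeroed context, then reload the registers in the opposite order. A minimal sketch of that invariant in plain C++ (not V8 code; names are illustrative):

    #include <stack>
    #include <vector>

    // Values pushed before the runtime call must be popped in exactly the
    // reverse order afterwards so every register gets its own value back.
    void CallRuntimePreservingRegisters(std::vector<int>& regs,
                                        void (*runtime_call)()) {
      std::stack<int> saved;
      for (int value : regs) saved.push(value);  // save in list order
      runtime_call();                            // may clobber everything
      for (auto it = regs.rbegin(); it != regs.rend(); ++it) {
        *it = saved.top();                       // restore in reverse order
        saved.pop();
      }
    }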
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq
index ebedbdce75..65cb207eaa 100644
--- a/deps/v8/src/builtins/internal-coverage.tq
+++ b/deps/v8/src/builtins/internal-coverage.tq
@@ -6,46 +6,20 @@
namespace internal_coverage {
- const kHasCoverageInfo:
- constexpr int31 generates 'DebugInfo::kHasCoverageInfo';
-
- const kFirstSlotIndex:
- constexpr int31 generates 'CoverageInfo::kFirstSlotIndex';
- const kSlotBlockCountIndex:
- constexpr int31 generates 'CoverageInfo::kSlotBlockCountIndex';
- const kSlotIndexCountLog2:
- constexpr int31 generates 'CoverageInfo::kSlotIndexCountLog2';
- const kSlotIndexCountMask:
- constexpr int31 generates 'CoverageInfo::kSlotIndexCountMask';
-
macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
CoverageInfo labels IfNoCoverageInfo {
const shared: SharedFunctionInfo = function.shared_function_info;
const debugInfo = Cast<DebugInfo>(shared.script_or_debug_info)
otherwise goto IfNoCoverageInfo;
- if ((debugInfo.flags & kHasCoverageInfo) == 0) goto IfNoCoverageInfo;
+ if (!SmiUntag(debugInfo.flags).has_coverage_info) goto IfNoCoverageInfo;
return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
}
- macro SlotCount(coverageInfo: CoverageInfo): Smi {
- assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
- assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask));
- return coverageInfo.length >> kSlotIndexCountLog2;
- }
-
- macro FirstIndexForSlot(implicit context: Context)(slot: Smi): Smi {
- assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
- return slot << kSlotIndexCountLog2;
- }
-
macro IncrementBlockCount(implicit context: Context)(
coverageInfo: CoverageInfo, slot: Smi) {
- assert(slot < SlotCount(coverageInfo));
- const slotStart: Smi = FirstIndexForSlot(slot);
- const index: Smi = slotStart + kSlotBlockCountIndex;
- coverageInfo.objects[index] =
- UnsafeCast<Smi>(coverageInfo.objects[index]) + 1;
+ assert(Convert<int32>(slot) < coverageInfo.slot_count);
+ ++coverageInfo.slots[slot].block_count;
}
builtin IncBlockCounter(implicit context: Context)(
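The Torque change above replaces hand-maintained index constants with a typed slot view of CoverageInfo. As a rough analogy only (the field names are assumptions, not V8's exact layout), the increment now amounts to:

    #include <cstdint>
    #include <vector>

    struct CoverageSlot {
      int32_t start_source_position;  // illustrative fields
      int32_t end_source_position;
      int32_t block_count;
    };

    void IncrementBlockCount(std::vector<CoverageSlot>& slots, size_t slot) {
      // The Torque version asserts slot < coverageInfo.slot_count.
      ++slots.at(slot).block_count;
    }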
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 51a9564850..272a2a7db8 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -52,8 +52,8 @@ namespace iterator {
Context)(JSAny);
transitioning builtin GetIteratorWithFeedback(
- context: Context, receiver: JSAny, loadSlot: Smi, callSlot: Smi,
- feedback: Undefined|FeedbackVector): JSAny {
+ context: Context, receiver: JSAny, loadSlot: TaggedIndex,
+ callSlot: TaggedIndex, feedback: Undefined|FeedbackVector): JSAny {
let iteratorMethod: JSAny;
typeswitch (feedback) {
case (Undefined): {
@@ -64,8 +64,10 @@ namespace iterator {
context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
}
}
+ // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
+ const callSlotSmi: Smi = TaggedIndexToSmi(callSlot);
return CallIteratorWithFeedback(
- context, receiver, iteratorMethod, callSlot, feedback);
+ context, receiver, iteratorMethod, callSlotSmi, feedback);
}
transitioning builtin CallIteratorWithFeedback(
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 2561177cd6..cb1a86db2f 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -23,6 +23,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -61,10 +62,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -143,7 +150,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ subu(scratch1, sp, scratch1);
@@ -354,7 +361,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause a2 to become negative.
__ Subu(scratch1, sp, scratch1);
@@ -701,7 +708,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
@@ -1058,7 +1065,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ Subu(t1, sp, Operand(t0));
- LoadRealStackLimit(masm, a2);
+ LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, t1, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
@@ -1087,6 +1094,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sw(a3, MemOperand(t1));
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, a2, StackLimitKind::kInterruptStackLimit);
+ __ Branch(&stack_check_interrupt, lo, sp, Operand(a2));
+ __ bind(&after_stack_check_interrupt);
+
// Load accumulator with undefined.
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1128,6 +1142,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ Sw(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ Lw(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
+ __ Sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
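The interrupt check added here (and mirrored in the other ports) distinguishes the two limits that LoadStackLimit can now return. A self-contained sketch of the decision, using assumed names rather than V8's API:

    #include <cstdint>

    struct StackLimits {
      uintptr_t real_jslimit;       // below this: genuine stack overflow
      uintptr_t interrupt_jslimit;  // below this: run Runtime::kStackGuard
    };

    enum class StackCheck { kOk, kInterrupt, kOverflow };

    StackCheck CheckOnFunctionEntry(uintptr_t sp, const StackLimits& limits) {
      if (sp < limits.real_jslimit) return StackCheck::kOverflow;
      if (sp < limits.interrupt_jslimit) return StackCheck::kInterrupt;
      return StackCheck::kOk;
    }

Crossing only the interrupt limit is recoverable: the trampoline calls the stack guard and then resumes at the first bytecode, which is why the added block saves and restores the bytecode offset around the runtime call.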
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1332,6 +1370,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Unreachable code.
+ __ break_(0xCC);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
@@ -1351,6 +1398,11 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+
// Load the current bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
@@ -1362,12 +1414,22 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, a1, a2, a3,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
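The function_entry_bytecode path added above maps the sentinel offset stored for the implicit entry stack check back to the first real bytecode before dispatching. A sketch with placeholder constants (V8 derives the real values from the BytecodeArray header and kFunctionEntryBytecodeOffset):

    constexpr int kFirstBytecodeOffset = 40;                          // placeholder
    constexpr int kFunctionEntrySentinel = kFirstBytecodeOffset - 1;  // assumed

    int NormalizeOffsetForDispatch(int stored_offset) {
      // The sentinel is not a valid bytecode position; advance to the first
      // actual bytecode instead of dispatching from it.
      return stored_offset == kFunctionEntrySentinel ? kFirstBytecodeOffset
                                                     : stored_offset;
    }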
@@ -1982,7 +2044,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
@@ -2139,7 +2201,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
@@ -2264,7 +2326,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label enough, too_few;
__ Branch(&dont_adapt_arguments, eq, a2,
- Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ Operand(kDontAdaptArgumentsSentinel));
// We use Uless as the number of argument should always be greater than 0.
__ Branch(&too_few, Uless, a0, Operand(a2));
@@ -2397,7 +2459,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf(a0, a1, a2, a3);
+ constexpr RegList gp_regs = Register::ListOf(a0, a2, a3);
constexpr RegList fp_regs =
DoubleRegister::ListOf(f2, f4, f6, f8, f10, f12, f14);
__ MultiPush(gp_regs);
@@ -2419,6 +2481,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(kScratchReg, v0, 0);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values; we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 8e91f8840f..baf2d5bfec 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -23,6 +23,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -60,10 +61,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -144,7 +151,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ dsubu(scratch1, sp, scratch1);
@@ -394,7 +401,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
@@ -501,7 +508,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause r2 to become negative.
__ dsubu(scratch1, sp, scratch1);
@@ -1076,7 +1083,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ Dsubu(a5, sp, Operand(a4));
- LoadRealStackLimit(masm, a2);
+ LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, a5, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
@@ -1105,6 +1112,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sd(a3, MemOperand(a5));
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, a5, StackLimitKind::kInterruptStackLimit);
+ __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
+ __ bind(&after_stack_check_interrupt);
+
// Load accumulator as undefined.
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1147,6 +1161,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ Sd(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
+ __ Sd(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1350,6 +1388,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ if (FLAG_debug_code) {
+ Label okay;
+ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Unreachable code.
+ __ break_(0xCC);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
@@ -1369,6 +1416,11 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+
// Load the current bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
@@ -1380,12 +1432,22 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, a1, a2, a3,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -2023,7 +2085,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
@@ -2176,7 +2238,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
@@ -2300,7 +2362,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label enough, too_few;
__ Branch(&dont_adapt_arguments, eq, a2,
- Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ Operand(kDontAdaptArgumentsSentinel));
// We use Uless as the number of argument should always be greater than 0.
__ Branch(&too_few, Uless, a0, Operand(a2));
@@ -2436,7 +2498,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
constexpr RegList gp_regs =
- Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7);
+ Register::ListOf(a0, a2, a3, a4, a5, a6, a7);
constexpr RegList fp_regs =
DoubleRegister::ListOf(f2, f4, f6, f8, f10, f12, f14);
__ MultiPush(gp_regs);
@@ -2458,6 +2520,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(v0);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values; we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
diff --git a/deps/v8/src/builtins/number.tq b/deps/v8/src/builtins/number.tq
index 37d51812a9..958cd5f5f6 100644
--- a/deps/v8/src/builtins/number.tq
+++ b/deps/v8/src/builtins/number.tq
@@ -34,7 +34,7 @@ namespace number {
// 4. Else, let radixNumber be ? ToInteger(radix).
const radix: JSAny = arguments[0];
const radixNumber: Number =
- radix == Undefined ? 10 : ToInteger_Inline(radix, kTruncateMinusZero);
+ radix == Undefined ? 10 : ToInteger_Inline(radix);
// 5. If radixNumber < 2 or radixNumber > 36, throw a RangeError exception.
if (radixNumber < 2 || radixNumber > 36) {
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 97e870959a..460d749297 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -2,25 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -58,10 +59,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -75,7 +82,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
@@ -424,7 +431,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
__ cmpl(sp, scratch);
__ blt(&stack_overflow);
@@ -717,7 +724,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ sub(scratch1, sp, scratch1);
@@ -950,18 +957,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop; instead, the
+// JumpLoop is re-executed to perform the jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
- Register scratch2 = bytecode;
+ Register scratch3 = bytecode;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of a wide or extra wide JumpLoop bytecode, we need to
+ // restore the original bytecode offset, so we keep a backup of it to
+ // simplify the code.
+ Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
+ __ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -992,7 +1008,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the size of the current bytecode.
__ bind(&process_bytecode);
-// Bailout to the return label if this is a return bytecode.
+ // Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \
__ cmpi(bytecode, \
Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
@@ -1000,10 +1016,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ cmpi(bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ bne(&not_jump_loop);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ b(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ ShiftLeftImm(scratch2, bytecode, Operand(2));
- __ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2));
- __ add(bytecode_offset, bytecode_offset, scratch2);
+ __ ShiftLeftImm(scratch3, bytecode, Operand(2));
+ __ lwzx(scratch3, MemOperand(bytecode_size_table, scratch3));
+ __ add(bytecode_offset, bytecode_offset, scratch3);
+
+ __ bind(&end);
}
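To summarize the control flow AdvanceBytecodeOffsetOrReturn implements after this change, a hedged C++ sketch (placeholder types; the real code compares interpreter::Bytecode values and branches to an if_return label instead of returning -1):

    enum class Bc { kReturn, kJumpLoop, kOther };

    // `offset` may already have been advanced past a Wide/ExtraWide prefix;
    // `original_offset` has not, which is why a backup is kept.
    int AdvanceOrReexecute(Bc bytecode, int offset, int original_offset,
                           int bytecode_size) {
      if (bytecode == Bc::kReturn) return -1;                 // bail out
      if (bytecode == Bc::kJumpLoop) return original_offset;  // re-run the jump
      return offset + bytecode_size;                          // step over it
    }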
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -1118,7 +1148,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ sub(r8, sp, r5);
- LoadRealStackLimit(masm, r0);
+ LoadStackLimit(masm, r0, StackLimitKind::kRealStackLimit);
__ cmpl(r8, r0);
__ blt(&stack_overflow);
@@ -1148,6 +1178,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StorePX(r6, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, r6, StackLimitKind::kInterruptStackLimit);
+ __ cmpl(sp, r6);
+ __ blt(&stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1181,7 +1219,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5,
+ kInterpreterBytecodeOffsetRegister, r4, r5, r6,
&do_return);
__ b(&do_dispatch);
@@ -1190,6 +1228,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r5);
__ blr();
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ StoreP(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(r6, kInterpreterBytecodeOffsetRegister);
+ __ StoreP(r6,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1394,6 +1457,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmpi(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ bge(&okay);
+ __ bkpt(0);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
@@ -1413,6 +1486,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmpi(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ beq(&function_entry_bytecode);
+
// Load the current bytecode.
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1420,9 +1499,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5,
+ kInterpreterBytecodeOffsetRegister, r4, r5, r6,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
__ StoreP(r5,
@@ -1430,6 +1510,15 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ b(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -2083,7 +2172,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
__ cmpl(sp, scratch);
}
__ bgt(&done); // Signed comparison.
@@ -2325,7 +2414,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -----------------------------------
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
- __ cmpli(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpli(r5, Operand(kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
@@ -2532,6 +2621,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(r11);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values; we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ LoadSmiLiteral(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -3192,4 +3303,4 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq
index 8828ab84d2..95ca356a0c 100644
--- a/deps/v8/src/builtins/promise-abstract-operations.tq
+++ b/deps/v8/src/builtins/promise-abstract-operations.tq
@@ -40,7 +40,8 @@ namespace promise {
EnqueueMicrotask(Context, Microtask): Undefined;
macro
- ExtractHandlerContext(implicit context: Context)(handler: Callable|Undefined):
+ ExtractHandlerContextInternal(implicit context: Context)(handler: Callable|
+ Undefined):
Context labels NotFound {
let iter: JSAny = handler;
while (true) {
@@ -62,16 +63,25 @@ namespace promise {
goto NotFound;
}
- // According to the HTML specification, we use the handler's context to
- // EnqueueJob for Promise resolution.
+ macro
+ ExtractHandlerContext(implicit context: Context)(handler: Callable|
+ Undefined): Context {
+ try {
+ return ExtractHandlerContextInternal(handler) otherwise NotFound;
+ }
+ label NotFound deferred {
+ return context;
+ }
+ }
+
macro
ExtractHandlerContext(implicit context: Context)(
primary: Callable|Undefined, secondary: Callable|Undefined): Context {
try {
- return ExtractHandlerContext(primary) otherwise NotFound;
+ return ExtractHandlerContextInternal(primary) otherwise NotFound;
}
label NotFound deferred {
- return ExtractHandlerContext(secondary) otherwise Default;
+ return ExtractHandlerContextInternal(secondary) otherwise Default;
}
label Default deferred {
return context;
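The split into ExtractHandlerContextInternal plus two wrappers boils down to a primary/secondary/default fallback. A minimal sketch with illustrative types (V8 additionally walks bound-function and proxy chains to find the handler's creation context):

    struct Context {};
    struct Handler {
      Context* creation_context;  // null if no context could be found
    };

    Context* ExtractHandlerContext(const Handler* primary,
                                   const Handler* secondary, Context* current) {
      if (primary && primary->creation_context) return primary->creation_context;
      if (secondary && secondary->creation_context)
        return secondary->creation_context;
      return current;  // default: the context currently running
    }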
@@ -92,6 +102,9 @@ namespace promise {
secondaryHandler = promiseReaction.fulfill_handler;
}
+ // According to HTML, we use the context of the appropriate handler as the
+ // context of the microtask. See step 3 of HTML's EnqueueJob:
+ // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
const handlerContext: Context =
ExtractHandlerContext(primaryHandler, secondaryHandler);
@@ -102,7 +115,8 @@ namespace promise {
kPromiseReactionSize ==
kPromiseReactionJobTaskSizeOfAllPromiseReactionJobTasks);
if constexpr (reactionType == kPromiseReactionFulfill) {
- promiseReaction.map = PromiseFulfillReactionJobTaskMapConstant();
+ * UnsafeConstCast(& promiseReaction.map) =
+ PromiseFulfillReactionJobTaskMapConstant();
const promiseReactionJobTask =
UnsafeCast<PromiseFulfillReactionJobTask>(promiseReaction);
promiseReactionJobTask.argument = argument;
@@ -116,7 +130,8 @@ namespace promise {
kPromiseReactionJobTaskPromiseOrCapabilityOffset);
} else {
StaticAssert(reactionType == kPromiseReactionReject);
- promiseReaction.map = PromiseRejectReactionJobTaskMapConstant();
+ * UnsafeConstCast(& promiseReaction.map) =
+ PromiseRejectReactionJobTaskMapConstant();
const promiseReactionJobTask =
UnsafeCast<PromiseRejectReactionJobTask>(promiseReaction);
promiseReactionJobTask.argument = argument;
@@ -416,34 +431,33 @@ namespace promise {
// PromiseReaction holding both the onFulfilled and onRejected callbacks.
// Once the {promise} is resolved we decide on the concrete handler to
// push onto the microtask queue.
+ const handlerContext = ExtractHandlerContext(onFulfilled, onRejected);
const promiseReactions =
UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
const reaction = NewPromiseReaction(
- promiseReactions, resultPromiseOrCapability, onFulfilled, onRejected);
+ handlerContext, promiseReactions, resultPromiseOrCapability,
+ onFulfilled, onRejected);
promise.reactions_or_result = reaction;
} else {
- let map: Map;
- let handler: Callable|Undefined = Undefined;
+ const reactionsOrResult = promise.reactions_or_result;
+ let microtask: PromiseReactionJobTask;
let handlerContext: Context;
if (promise.Status() == PromiseState::kFulfilled) {
- map = PromiseFulfillReactionJobTaskMapConstant();
- handler = onFulfilled;
handlerContext = ExtractHandlerContext(onFulfilled, onRejected);
+ microtask = NewPromiseFulfillReactionJobTask(
+ handlerContext, reactionsOrResult, onFulfilled,
+ resultPromiseOrCapability);
} else
deferred {
assert(promise.Status() == PromiseState::kRejected);
- map = PromiseRejectReactionJobTaskMapConstant();
- handler = onRejected;
handlerContext = ExtractHandlerContext(onRejected, onFulfilled);
+ microtask = NewPromiseRejectReactionJobTask(
+ handlerContext, reactionsOrResult, onRejected,
+ resultPromiseOrCapability);
if (!promise.HasHandler()) {
runtime::PromiseRevokeReject(promise);
}
}
-
- const reactionsOrResult = promise.reactions_or_result;
- const microtask = NewPromiseReactionJobTask(
- map, handlerContext, reactionsOrResult, handler,
- resultPromiseOrCapability);
EnqueueMicrotask(handlerContext, microtask);
}
promise.SetHasHandler();
diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq
index 7996cc5b3d..61461de29f 100644
--- a/deps/v8/src/builtins/promise-misc.tq
+++ b/deps/v8/src/builtins/promise-misc.tq
@@ -16,10 +16,6 @@ namespace promise_internal {
void;
extern macro PromiseBuiltinsAssembler::AllocateJSPromise(Context): HeapObject;
-
- extern macro PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
- Context): HeapObject;
-
}
namespace promise {
@@ -45,7 +41,7 @@ namespace promise {
assert(IsFunctionWithPrototypeSlotMap(promiseFun.map));
const promiseMap = UnsafeCast<Map>(promiseFun.prototype_or_initial_map);
const promiseHeapObject = promise_internal::AllocateJSPromise(context);
- promiseHeapObject.map = promiseMap;
+ * UnsafeConstCast(& promiseHeapObject.map) = promiseMap;
const promise = UnsafeCast<JSPromise>(promiseHeapObject);
promise.properties_or_hash = kEmptyFixedArray;
promise.elements = kEmptyFixedArray;
@@ -54,20 +50,36 @@ namespace promise {
return promise;
}
- macro NewPromiseReactionJobTask(implicit context: Context)(
- map: Map, handlerContext: Context, argument: Object,
- handler: Callable|Undefined,
+ macro NewPromiseFulfillReactionJobTask(implicit context: Context)(
+ handlerContext: Context, argument: Object, handler: Callable|Undefined,
promiseOrCapability: JSPromise|PromiseCapability|
- Undefined): PromiseReactionJobTask {
- const taskHeapObject =
- promise_internal::AllocatePromiseReactionJobTask(context);
- taskHeapObject.map = map;
- const jobTask = UnsafeCast<PromiseReactionJobTask>(taskHeapObject);
- jobTask.argument = argument;
- jobTask.context = handlerContext;
- jobTask.handler = handler;
- jobTask.promise_or_capability = promiseOrCapability;
- return jobTask;
+ Undefined): PromiseFulfillReactionJobTask {
+ const nativeContext = LoadNativeContext(handlerContext);
+ return new PromiseFulfillReactionJobTask{
+ map: PromiseFulfillReactionJobTaskMapConstant(),
+ argument,
+ context: handlerContext,
+ handler,
+ promise_or_capability: promiseOrCapability,
+ continuation_preserved_embedder_data: nativeContext
+ [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
+ };
+ }
+
+ macro NewPromiseRejectReactionJobTask(implicit context: Context)(
+ handlerContext: Context, argument: Object, handler: Callable|Undefined,
+ promiseOrCapability: JSPromise|PromiseCapability|
+ Undefined): PromiseRejectReactionJobTask {
+ const nativeContext = LoadNativeContext(handlerContext);
+ return new PromiseRejectReactionJobTask{
+ map: PromiseRejectReactionJobTaskMapConstant(),
+ argument,
+ context: handlerContext,
+ handler,
+ promise_or_capability: promiseOrCapability,
+ continuation_preserved_embedder_data: nativeContext
+ [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
+ };
}
// These allocate and initialize a promise with pending state and
@@ -113,16 +125,19 @@ namespace promise {
}
macro NewPromiseReaction(implicit context: Context)(
- next: Zero|PromiseReaction,
+ handlerContext: Context, next: Zero|PromiseReaction,
promiseOrCapability: JSPromise|PromiseCapability|Undefined,
fulfillHandler: Callable|Undefined,
rejectHandler: Callable|Undefined): PromiseReaction {
+ const nativeContext = LoadNativeContext(handlerContext);
return new PromiseReaction{
map: PromiseReactionMapConstant(),
next: next,
reject_handler: rejectHandler,
fulfill_handler: fulfillHandler,
- promise_or_capability: promiseOrCapability
+ promise_or_capability: promiseOrCapability,
+ continuation_preserved_embedder_data: nativeContext
+ [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
};
}
@@ -130,10 +145,10 @@ namespace promise {
macro NewPromiseResolveThenableJobTask(implicit context: Context)(
promiseToResolve: JSPromise, then: JSReceiver, thenable: JSReceiver,
- thenableContext: Context): PromiseResolveThenableJobTask {
+ thenContext: Context): PromiseResolveThenableJobTask {
return new PromiseResolveThenableJobTask{
map: PromiseResolveThenableJobTaskMapConstant(),
- context: thenableContext,
+ context: thenContext,
promise_to_resolve: promiseToResolve,
then: then,
thenable: thenable
diff --git a/deps/v8/src/builtins/promise-reaction-job.tq b/deps/v8/src/builtins/promise-reaction-job.tq
index 1db33f4a7a..1d20d22efb 100644
--- a/deps/v8/src/builtins/promise-reaction-job.tq
+++ b/deps/v8/src/builtins/promise-reaction-job.tq
@@ -17,27 +17,28 @@ namespace promise {
promiseOrCapability: JSPromise|PromiseCapability|Undefined, reason: JSAny,
reactionType: constexpr PromiseReactionType): JSAny {
if constexpr (reactionType == kPromiseReactionReject) {
- if (IsJSPromise(promiseOrCapability)) {
- // For fast native promises we can skip the indirection via the
- // promiseCapability.[[Reject]] function and run the resolve logic
- // directly from here.
- return RejectPromise(
- UnsafeCast<JSPromise>(promiseOrCapability), reason, False);
- } else
- deferred {
- assert(IsPromiseCapability(promiseOrCapability));
+ typeswitch (promiseOrCapability) {
+ case (promise: JSPromise): {
+ // For fast native promises we can skip the indirection via the
+ // promiseCapability.[[Reject]] function and run the resolve logic
+ // directly from here.
+ return RejectPromise(promise, reason, False);
+ }
+ case (Undefined): {
+ return Undefined;
+ }
+ case (capability: PromiseCapability): {
// In the general case we need to call the (user provided)
// promiseCapability.[[Reject]] function.
try {
- const promiseCapability =
- UnsafeCast<PromiseCapability>(promiseOrCapability);
- const reject = UnsafeCast<Callable>(promiseCapability.reject);
+ const reject = UnsafeCast<Callable>(capability.reject);
return Call(context, reject, Undefined, reason);
} catch (e) {
// Swallow the exception here.
return runtime::ReportMessage(e);
}
}
+ }
} else {
StaticAssert(reactionType == kPromiseReactionFulfill);
// We have to call out to the dedicated PromiseRejectReactionJob
@@ -53,20 +54,20 @@ namespace promise {
context: Context,
promiseOrCapability: JSPromise|PromiseCapability|Undefined, result: JSAny,
reactionType: constexpr PromiseReactionType): JSAny {
- if (IsJSPromise(promiseOrCapability)) {
- // For fast native promises we can skip the indirection via the
- // promiseCapability.[[Resolve]] function and run the resolve logic
- // directly from here.
- return ResolvePromise(
- context, UnsafeCast<JSPromise>(promiseOrCapability), result);
- } else
- deferred {
- assert(IsPromiseCapability(promiseOrCapability));
+ typeswitch (promiseOrCapability) {
+ case (promise: JSPromise): {
+ // For fast native promises we can skip the indirection via the
+ // promiseCapability.[[Resolve]] function and run the resolve logic
+ // directly from here.
+ return ResolvePromise(context, promise, result);
+ }
+ case (Undefined): {
+ return Undefined;
+ }
+ case (capability: PromiseCapability): {
// In the general case we need to call the (user provided)
// promiseCapability.[[Resolve]] function.
- const promiseCapability =
- UnsafeCast<PromiseCapability>(promiseOrCapability);
- const resolve = UnsafeCast<Callable>(promiseCapability.resolve);
+ const resolve = UnsafeCast<Callable>(capability.resolve);
try {
return Call(context, resolve, Undefined, result);
} catch (e) {
@@ -74,6 +75,7 @@ namespace promise {
context, promiseOrCapability, e, reactionType);
}
}
+ }
}
// https://tc39.es/ecma262/#sec-promisereactionjob
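The typeswitch rewrite above adds an explicit Undefined case for reactions whose result promise was elided. Roughly, in hedged C++ form (illustrative types, not V8's object model):

    #include <functional>
    #include <variant>

    struct JSPromise {};
    struct PromiseCapability { std::function<void(int)> resolve; };
    struct Undefined {};
    using ResultTarget = std::variant<JSPromise, PromiseCapability, Undefined>;

    void FulfillReaction(ResultTarget& target, int result,
                         const std::function<void(JSPromise&, int)>& resolve_native) {
      if (auto* promise = std::get_if<JSPromise>(&target)) {
        resolve_native(*promise, result);  // fast path: native promise
      } else if (std::get_if<Undefined>(&target)) {
        // Result promise was elided (e.g. by an await fast path): nothing to do.
      } else {
        std::get<PromiseCapability>(target).resolve(result);  // user capability
      }
    }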
diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq
index af7dd7afa0..0fc98b556b 100644
--- a/deps/v8/src/builtins/promise-resolve.tq
+++ b/deps/v8/src/builtins/promise-resolve.tq
@@ -177,7 +177,14 @@ namespace promise {
label Enqueue {
// 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
// «promise, resolution, thenAction»).
- const nativeContext = LoadNativeContext(context);
+
+ // According to HTML, we use the context of the then function
+ // (|thenAction|) as the context of the microtask. See step 3 of HTML's
+ // EnqueueJob:
+ // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
+ const thenContext: Context =
+ ExtractHandlerContext(UnsafeCast<Callable>(then));
+ const nativeContext = LoadNativeContext(thenContext);
const task = NewPromiseResolveThenableJobTask(
promise, UnsafeCast<JSReceiver>(then),
UnsafeCast<JSReceiver>(resolution), nativeContext);
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index b9e56659f7..88bb80891e 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -6,21 +6,22 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -58,10 +59,15 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-MemOperand RealStackLimitAsMemOperand(MacroAssembler* masm) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+MemOperand StackLimitAsMemOperand(MacroAssembler* masm, StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -75,7 +81,8 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- __ LoadP(scratch, RealStackLimitAsMemOperand(masm));
+ __ LoadP(scratch,
+ StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ SubP(scratch, sp, scratch);
@@ -196,7 +203,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
__ JumpIfIsInRange(r6, kDefaultDerivedConstructor, kDerivedConstructor,
@@ -366,8 +374,9 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
__ bne(&done, Label::kNear);
- __ LoadP(sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ LoadTaggedPointerField(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ bind(&done);
}
@@ -381,14 +390,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r3);
// Store input value into generator object.
- __ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
- r0);
+ __ StoreTaggedField(
+ r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset), r0);
__ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
- __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
- __ LoadP(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(r6, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -416,12 +427,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ LoadP(scratch, RealStackLimitAsMemOperand(masm));
+ __ LoadP(scratch,
+ StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
__ CmpLogicalP(sp, scratch);
__ blt(&stack_overflow);
// Push receiver.
- __ LoadP(scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
// ----------- S t a t e -------------
@@ -433,28 +446,32 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
- __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadLogicalHalfWordP(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadP(r4, FieldMemOperand(
- r3, JSGeneratorObject::kParametersAndRegistersOffset));
+ __ LoadTaggedPointerField(
+ r4,
+ FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done_loop;
- __ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2));
- __ SubP(sp, r5);
+ __ ShiftLeftP(r1, r5, Operand(kSystemPointerSizeLog2));
+ __ SubP(sp, r1);
+
+ __ ShiftLeftP(r5, r5, Operand(kTaggedSizeLog2));
// ip = stack offset
// r5 = parameter array offset
__ LoadImmP(ip, Operand::Zero());
- __ SubP(r5, Operand(kSystemPointerSize));
+ __ SubP(r5, Operand(kTaggedSize));
__ blt(&done_loop);
- __ lgfi(r1, Operand(-kSystemPointerSize));
+ __ lghi(r1, Operand(-kTaggedSize));
__ bind(&loop);
// parameter copy loop
- __ LoadP(r0, FieldMemOperand(r4, r5, FixedArray::kHeaderSize));
+ __ LoadAnyTaggedField(r0, FieldMemOperand(r4, r5, FixedArray::kHeaderSize));
__ StoreP(r0, MemOperand(sp, ip));
// update offsets
@@ -467,8 +484,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
- __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, r5, ip);
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
@@ -482,7 +501,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadRR(r5, r3);
__ LoadRR(r3, r6);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ JumpCodeObject(r4);
}
@@ -494,7 +513,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r3);
- __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -504,7 +524,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r3);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r3);
- __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -761,7 +782,8 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadP(scratch1, RealStackLimitAsMemOperand(masm));
+ __ LoadP(scratch1,
+ StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ SubP(scratch1, sp, scratch1);
@@ -902,8 +924,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register scratch1,
Register scratch2) {
// Store code entry in the closure.
- __ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
- r0);
+ __ StoreTaggedField(optimized_code,
+ FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
__ LoadRR(scratch1,
optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
@@ -952,8 +974,9 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
- __ LoadP(scratch, FieldMemOperand(optimized_code_entry,
- Code::kCodeDataContainerOffset));
+ __ LoadTaggedPointerField(
+ scratch,
+ FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ LoadW(scratch, FieldMemOperand(
scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
@@ -1008,18 +1031,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
- Register scratch2 = bytecode;
+ Register scratch3 = bytecode;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+  // will restore the original bytecode offset. In order to simplify the code,
+  // we keep a backup of it.
+ Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
+ __ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -1050,7 +1082,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the size of the current bytecode.
__ bind(&process_bytecode);
-// Bailout to the return label if this is a return bytecode.
+ // Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \
__ CmpP(bytecode, \
Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
@@ -1058,10 +1090,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ CmpP(bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ bne(&not_jump_loop);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ b(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ ShiftLeftP(scratch2, bytecode, Operand(2));
- __ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
- __ AddP(bytecode_offset, bytecode_offset, scratch2);
+ __ ShiftLeftP(scratch3, bytecode, Operand(2));
+ __ LoadlW(scratch3, MemOperand(bytecode_size_table, scratch3));
+ __ AddP(bytecode_offset, bytecode_offset, scratch3);
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
@@ -1086,10 +1132,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
// The bytecode array could have been flushed from the shared function info,
@@ -1100,15 +1148,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&compile_lazy);
// Load the feedback vector from the closure.
- __ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadP(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ LoadP(r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadLogicalHalfWordP(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
__ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
@@ -1116,9 +1166,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register optimized_code_entry = r6;
// Read off the optimized code slot in the feedback vector.
- __ LoadP(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the optimized code slot is not empty.
Label optimized_code_slot_not_empty;
@@ -1172,7 +1223,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ SubP(r8, sp, r4);
- __ CmpLogicalP(r8, RealStackLimitAsMemOperand(masm));
+ __ CmpLogicalP(
+ r8, StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
__ blt(&stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1202,6 +1254,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StoreP(r5, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ __ LoadP(r5,
+ StackLimitAsMemOperand(masm, StackLimitKind::kInterruptStackLimit));
+ __ CmpLogicalP(sp, r5);
+ __ blt(&stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1236,7 +1297,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4,
+ kInterpreterBytecodeOffsetRegister, r3, r4, r5,
&do_return);
__ b(&do_dispatch);
@@ -1254,6 +1315,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ StoreP(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
+ __ StoreP(r5,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&maybe_has_optimized_code);
// Load code entry from the weak reference, if it was cleared, resume
// execution of unoptimized code.
@@ -1401,15 +1487,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r4, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
- __ LoadP(r4,
- FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
__ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ b(&trampoline_loaded);
@@ -1447,6 +1535,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ CmpP(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ bge(&okay);
+ __ bkpt(0);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
@@ -1466,6 +1563,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ CmpP(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ beq(&function_entry_bytecode);
+
// Load the current bytecode.
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1473,9 +1576,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4,
+ kInterpreterBytecodeOffsetRegister, r3, r4, r5,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
__ StoreP(r4,
@@ -1483,6 +1587,15 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ b(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -1580,13 +1693,14 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadP(r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
+ __ LoadTaggedPointerField(
+ r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ LoadP(r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex)));
- __ SmiUntag(r3);
+ __ SmiUntagField(
+ r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1865,7 +1979,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
Label ok, fail;
__ AssertNotSmi(r4);
- __ LoadP(scratch, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(scratch,
+ FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadHalfWordP(scratch,
FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ CmpP(scratch, Operand(FIXED_ARRAY_TYPE));
@@ -1890,13 +2005,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label loop, no_args, skip;
__ CmpP(r6, Operand::Zero());
__ beq(&no_args);
- __ AddP(
- r4, r4,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kSystemPointerSize));
+ __ AddP(r4, r4,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(scratch, MemOperand(r4, kSystemPointerSize));
- __ la(r4, MemOperand(r4, kSystemPointerSize));
+ __ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
+ __ la(r4, MemOperand(r4, kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -1930,7 +2044,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r5, &new_target_not_constructor);
- __ LoadP(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(scratch,
+ FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift));
__ bne(&new_target_constructor);
@@ -1954,7 +2069,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ beq(&arguments_adaptor);
{
__ LoadP(r7, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadLogicalHalfWordP(
r7,
FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2014,7 +2130,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
Label class_constructor;
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor);
@@ -2022,7 +2139,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ AndP(r0, r5,
@@ -2076,7 +2194,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r2, r3);
__ SmiUntag(r2);
}
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ ShiftLeftP(r6, r2, Operand(kSystemPointerSizeLog2));
@@ -2115,9 +2234,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into r4 and length of that into r6.
Label no_bound_arguments;
- __ LoadP(r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
- __ LoadP(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ SmiUntag(r6);
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ LoadAndTestP(r6, r6);
__ beq(&no_bound_arguments);
{
@@ -2133,16 +2252,15 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ LoadRR(scratch, sp); // preserve previous stack pointer
__ ShiftLeftP(r9, r6, Operand(kSystemPointerSizeLog2));
- __ SubP(sp, sp, r9);
+ __ SubP(r1, sp, r9);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CmpLogicalP(sp, RealStackLimitAsMemOperand(masm));
+ __ CmpLogicalP(
+ r1, StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
- __ LoadRR(sp, scratch);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2151,10 +2269,12 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+ __ LoadRR(scratch, sp);
+ __ LoadRR(sp, r1);
+
// Relocate arguments down the stack.
// -- r2 : the number of arguments (not including the receiver)
// -- r8 : the previous stack pointer
- // -- r9: the size of the [[BoundArguments]]
{
Label skip, loop;
__ LoadImmP(r7, Operand::Zero());
@@ -2164,7 +2284,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ LoadP(r0, MemOperand(scratch, r7));
__ StoreP(r0, MemOperand(sp, r7));
- __ AddP(r7, r7, Operand(kSystemPointerSize));
+ __ lay(r7, MemOperand(r7, kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ bind(&skip);
}
@@ -2172,14 +2292,14 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop;
- __ AddP(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(r4, r4, r9);
+ __ ShiftLeftP(r9, r6, Operand(kTaggedSizeLog2));
+ __ lay(r4, MemOperand(r4, r9, FixedArray::kHeaderSize - kHeapObjectTag));
__ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(r0, MemOperand(r4, -kSystemPointerSize));
- __ lay(r4, MemOperand(r4, -kSystemPointerSize));
- __ StoreP(r0, MemOperand(sp, r7));
- __ AddP(r7, r7, Operand(kSystemPointerSize));
+ __ LoadAnyTaggedField(ip, MemOperand(r4, -kTaggedSize), r0);
+ __ lay(r4, MemOperand(r4, -kTaggedSize));
+ __ StoreP(ip, MemOperand(sp, r7));
+ __ lay(r7, MemOperand(r7, kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ AddP(r2, r2, r6);
}
@@ -2198,7 +2318,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r3);
// Patch the receiver to [[BoundThis]].
- __ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+ __ LoadAnyTaggedField(r5,
+ FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r5, MemOperand(sp, r1));
@@ -2206,8 +2327,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ LoadP(r3,
- FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
@@ -2275,7 +2396,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ beq(&call_generic_stub);
@@ -2303,15 +2425,15 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
Label skip;
- __ CmpP(r3, r5);
+ __ CompareTagged(r3, r5);
__ bne(&skip);
- __ LoadP(r5,
- FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ LoadP(r3,
- FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2329,7 +2451,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(r3, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r4, Map::Bits1::IsConstructorBit::kShift);
__ beq(&non_constructor);
@@ -2379,9 +2501,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -----------------------------------
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
- __ tmll(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ tmll(r4, Operand(kDontAdaptArgumentsSentinel));
__ b(Condition(1), &dont_adapt_arguments);
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ tmlh(r6,
Operand(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask >>
@@ -2485,7 +2608,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3 : function (passed through to callee)
// r5 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ CallCodeObject(r4);
// Store offset of return address for deoptimizer.
@@ -2537,8 +2660,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ __ RecordComment("-- Call without adapting args --");
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ JumpCodeObject(r4);
__ bind(&stack_overflow);
@@ -2588,6 +2712,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(ip);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+    // Save all parameter registers. They might hold live values; we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ LoadSmiLiteral(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -3144,14 +3290,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(receiver);
// Push data from AccessorInfo.
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadAnyTaggedField(
+ scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
@@ -3200,7 +3348,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
__ LoadP(api_function_address,
FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
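
Several hunks above thread an extra scratch register through AdvanceBytecodeOffsetOrReturn so that the original offset can be restored when the current bytecode is JumpLoop: rather than stepping over it, the dispatcher re-executes the jump, including any Wide/ExtraWide prefix whose skip had already bumped the offset. A compact C++ model of that control flow, using a made-up bytecode set and size function rather than V8's real tables:

#include <iostream>
#include <vector>

enum class Bytecode { kWide, kExtraWide, kJumpLoop, kAdd, kReturn };

// Toy per-bytecode sizes; V8 uses a real size table indexed by operand scale.
int SizeOf(Bytecode bc) { return bc == Bytecode::kJumpLoop ? 3 : 1; }

// Returns the next offset to dispatch, or -1 if a return bytecode was hit.
int AdvanceBytecodeOffsetOrReturn(const std::vector<Bytecode>& array,
                                  int offset) {
  const int original_offset = offset;  // backup, as in the builtin
  Bytecode bc = array[offset];
  if (bc == Bytecode::kWide || bc == Bytecode::kExtraWide) {
    ++offset;  // skip the prefix to look at the real bytecode
    bc = array[offset];
  }
  if (bc == Bytecode::kReturn) return -1;  // bail out to the return label
  if (bc == Bytecode::kJumpLoop) {
    return original_offset;  // re-execute the (possibly prefixed) JumpLoop
  }
  return offset + SizeOf(bc);  // otherwise advance past the current bytecode
}

int main() {
  const std::vector<Bytecode> array = {Bytecode::kAdd, Bytecode::kWide,
                                       Bytecode::kJumpLoop, Bytecode::kReturn};
  std::cout << AdvanceBytecodeOffsetOrReturn(array, 0) << "\n";  // 1
  std::cout << AdvanceBytecodeOffsetOrReturn(array, 1) << "\n";  // 1 (JumpLoop)
  std::cout << AdvanceBytecodeOffsetOrReturn(array, 3) << "\n";  // -1 (return)
}
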
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index e3f39a0906..4739e18c57 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/compiler/code-assembler.h"
#include "src/execution/isolate.h"
@@ -42,8 +43,7 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
return options;
}
- const base::AddressRegion& code_range =
- isolate->heap()->memory_allocator()->code_range();
+ const base::AddressRegion& code_range = isolate->heap()->code_range();
bool pc_relative_calls_fit_in_code_range =
!code_range.is_empty() &&
std::ceil(static_cast<float>(code_range.size() / MB)) <=
@@ -97,6 +97,7 @@ Code BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
ExternalAssemblerBuffer(buffer, kBufferSize));
masm.set_builtin_index(builtin_index);
DCHECK(!masm.has_frame());
+ masm.CodeEntry();
generator(&masm);
int handler_table_offset = 0;
@@ -159,7 +160,7 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
Zone zone(isolate->allocator(), ZONE_NAME);
const int argc_with_recv =
- (argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
+ (argc == kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
compiler::CodeAssemblerState state(
isolate, &zone, argc_with_recv, Code::BUILTIN, name,
PoisoningMitigationLevel::kDontPoison, builtin_index);
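
The argument-count adjustment above is unchanged apart from the renamed sentinel: a builtin declared with kDontAdaptArgumentsSentinel gets an argc of zero, and everything else gets one extra slot for the receiver. A tiny sketch of that computation, with an arbitrary placeholder value for the sentinel:

#include <iostream>

// Placeholder value; the real sentinel lives in V8's globals.
constexpr int kDontAdaptArgumentsSentinel = -1;

int ArgcWithReceiver(int argc) {
  return (argc == kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
}

int main() {
  std::cout << ArgcWithReceiver(2) << "\n";                            // 3
  std::cout << ArgcWithReceiver(kDontAdaptArgumentsSentinel) << "\n";  // 0
}
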
diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq
index 1c14be6cb4..e3e72ae7b5 100644
--- a/deps/v8/src/builtins/string-repeat.tq
+++ b/deps/v8/src/builtins/string-repeat.tq
@@ -36,7 +36,7 @@ namespace string {
try {
// 3. Let n be ? ToInteger(count).
- typeswitch (ToInteger_Inline(count, kTruncateMinusZero)) {
+ typeswitch (ToInteger_Inline(count)) {
case (n: Smi): {
// 4. If n < 0, throw a RangeError exception.
if (n < 0) goto InvalidCount;
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index 47e91c93dc..85c43342cf 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -17,13 +17,12 @@ namespace torque_internal {
const offset: intptr;
unsafeMarker: Unsafe;
}
+ type ConstReference<T: type> extends Reference<T>;
+ type MutableReference<T: type> extends ConstReference<T>;
macro UnsafeNewReference<T: type>(object: HeapObject, offset: intptr):&T {
- return Reference<T>{
- object: object,
- offset: offset,
- unsafeMarker: Unsafe {}
- };
+ return %RawDownCast<&T>(
+ Reference<T>{object: object, offset: offset, unsafeMarker: Unsafe {}});
}
struct Slice<T: type> {
@@ -148,7 +147,7 @@ namespace torque_internal {
type UninitializedHeapObject extends HeapObject;
extern macro AllocateAllowLOS(intptr): UninitializedHeapObject;
- extern macro GetStructMap(constexpr InstanceType): Map;
+ extern macro GetInstanceTypeMap(constexpr InstanceType): Map;
macro Allocate(sizeInBytes: intptr, map: Map): UninitializedHeapObject {
assert(ValidAllocationSize(sizeInBytes, map));
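
The ConstReference/MutableReference split layers mutability on top of the same (object, offset) pair, and UnsafeNewReference now produces whichever flavor the requested &T denotes via %RawDownCast. A small C++ analogue of the idea; the class names and API are invented for illustration and do not mirror Torque's actual reference semantics:

#include <cstddef>
#include <cstring>
#include <iostream>

struct HeapObject { unsigned char payload[16]; };

// Read-only view of a field located at (object, offset).
template <typename T>
class ConstReference {
 public:
  ConstReference(HeapObject* object, std::ptrdiff_t offset)
      : object_(object), offset_(offset) {}
  T Load() const {
    T value;
    std::memcpy(&value, object_->payload + offset_, sizeof(T));
    return value;
  }

 protected:
  HeapObject* object_;
  std::ptrdiff_t offset_;
};

// Mutable view: the same location, plus a store operation.
template <typename T>
class MutableReference : public ConstReference<T> {
 public:
  using ConstReference<T>::ConstReference;
  void Store(const T& value) {
    std::memcpy(this->object_->payload + this->offset_, &value, sizeof(T));
  }
};

int main() {
  HeapObject obj{};
  MutableReference<int> field(&obj, 4);
  field.Store(42);
  ConstReference<int> view(&obj, 4);
  std::cout << view.Load() << "\n";  // 42
}
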
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 77a0b404be..e5398fc50a 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -13,8 +13,6 @@ namespace typed_array {
extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray;
extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
implicit context: Context)(JSTypedArray): JSFunction;
- extern macro TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(JSArrayBuffer):
- bool;
extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
JSTypedArray): void;
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 99f052d695..8d028c88f0 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -5,17 +5,18 @@
#if V8_TARGET_ARCH_X64
#include "src/api/api-arguments.h"
+#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/debug-objects.h"
#include "src/objects/foreign.h"
@@ -23,6 +24,7 @@
#include "src/objects/js-generator.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
@@ -65,10 +67,15 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-Operand RealStackLimitAsOperand(MacroAssembler* masm) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+Operand StackLimitAsOperand(MacroAssembler* masm, StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -84,7 +91,8 @@ void Generate_StackOverflowCheck(
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- __ movq(kScratchRegister, RealStackLimitAsOperand(masm));
+ __ movq(kScratchRegister,
+ StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
__ movq(scratch, rsp);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
@@ -116,31 +124,22 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(rsi);
__ Push(rcx);
+#ifdef V8_REVERSE_JSARGS
+ // Set up pointer to first argument (skip receiver).
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
+ // Copy arguments to the expression stack.
+ __ PushArray(rbx, rax, rcx);
+ // The receiver for the builtin/api call.
+ __ PushRoot(RootIndex::kTheHoleValue);
+#else
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-
// Set up pointer to last argument.
__ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- // ----------- S t a t e -------------
- // -- rax: number of arguments (untagged)
- // -- rdi: constructor function
- // -- rdx: new target
- // -- rbx: pointer to last argument
- // -- rcx: counter
- // -- sp[0*kSystemPointerSize]: the hole (receiver)
- // -- sp[1*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[2*kSystemPointerSize]: context
- // -----------------------------------
- __ jmp(&entry);
- __ bind(&loop);
- __ Push(Operand(rbx, rcx, times_system_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop, Label::kNear);
+ // Copy arguments to the expression stack.
+ __ PushArray(rbx, rax, rcx);
+#endif
// Call the function.
// rax: number of arguments (untagged)
@@ -238,28 +237,34 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(rdx);
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
+ // Push the allocated receiver to the stack.
__ Push(rax);
+
+#ifdef V8_REVERSE_JSARGS
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments; we save it in r8
+  // since rax needs to hold the number of arguments before invoking the
+  // function.
+ __ movq(r8, rax);
+
+ // Set up pointer to first argument (skip receiver).
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
+#else
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver.
__ Push(rax);
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize] implicit receiver
- // -- sp[1*kSystemPointerSize] implicit receiver
- // -- sp[2*kSystemPointerSize] padding
- // -- sp[3*kSystemPointerSize] constructor function
- // -- sp[4*kSystemPointerSize] number of arguments (tagged)
- // -- sp[5*kSystemPointerSize] context
- // -----------------------------------
+ // Set up pointer to last argument.
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+#endif
// Restore constructor function and argument count.
__ movq(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
- // Set up pointer to last argument.
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
// Check if we have enough stack space to push all arguments.
// Argument count in rax. Clobbers rcx.
Label enough_stack_space, stack_overflow;
@@ -275,27 +280,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&enough_stack_space);
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- // ----------- S t a t e -------------
- // -- rax: number of arguments (untagged)
- // -- rdx: new target
- // -- rbx: pointer to last argument
- // -- rcx: counter (tagged)
- // -- sp[0*kSystemPointerSize]: implicit receiver
- // -- sp[1*kSystemPointerSize]: implicit receiver
- // -- sp[2*kSystemPointerSize]: padding
- // -- rdi and sp[3*kSystemPointerSize]: constructor function
- // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[5*kSystemPointerSize]: context
- // -----------------------------------
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ Push(Operand(rbx, rcx, times_system_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop, Label::kNear);
+ // Copy arguments to the expression stack.
+ __ PushArray(rbx, rax, rcx);
+
+#ifdef V8_REVERSE_JSARGS
+ // Push implicit receiver.
+ __ Push(r8);
+#endif
// Call the function.
__ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
@@ -606,9 +597,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
IsolateAddressId::kContextAddress, masm->isolate());
__ movq(rsi, masm->ExternalReferenceAsOperand(context_address));
- // Push the function and the receiver onto the stack.
+ // Push the function onto the stack.
__ Push(rdi);
+
+#ifndef V8_REVERSE_JSARGS
+ // Push the receiver onto the stack.
__ Push(arg_reg_4);
+#endif
#ifdef V8_TARGET_OS_WIN
// Load the previous frame pointer to access C arguments on stack
@@ -620,18 +615,25 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, r8);
__ movq(rbx, r9);
+#ifdef V8_REVERSE_JSARGS
+ __ movq(r9, arg_reg_4); // Temporarily saving the receiver.
+#endif
#endif // V8_TARGET_OS_WIN
- // Current stack contents:
- // [rsp + 2 * kSystemPointerSize ... ] : Internal frame
- // [rsp + kSystemPointerSize] : function
- // [rsp] : receiver
+ // Current stack contents if V8_REVERSE_JSARGS:
+ // [rsp + kSystemPointerSize] : Internal frame
+ // [rsp] : function
+ // Current stack contents if not V8_REVERSE_JSARGS:
+ // [rsp + 2 * kSystemPointerSize] : Internal frame
+ // [rsp + kSystemPointerSize] : function
+ // [rsp] : receiver
// Current register contents:
// rax : argc
// rbx : argv
// rsi : context
// rdi : function
// rdx : new.target
+ // r9 : receiver, if V8_REVERSE_JSARGS
// Check if we have enough stack space to push all arguments.
// Argument count in rax. Clobbers rcx.
@@ -649,6 +651,20 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
+#ifdef V8_REVERSE_JSARGS
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry, Label::kNear);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rbx, rcx, times_system_pointer_size, 0));
+ __ Push(Operand(kScratchRegister, 0)); // dereference handle
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop, Label::kNear);
+
+ // Push the receiver.
+ __ Push(r9);
+#else
Label loop, entry;
__ Set(rcx, 0); // Set loop variable to 0.
__ jmp(&entry, Label::kNear);
@@ -659,6 +675,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&entry);
__ cmpq(rcx, rax);
__ j(not_equal, &loop, Label::kNear);
+#endif
// Invoke the builtin code.
Handle<Code> builtin = is_construct
@@ -745,22 +762,24 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ cmpq(rsp, RealStackLimitAsOperand(masm));
+ __ cmpq(rsp, StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
__ j(below, &stack_overflow);
// Pop return address.
__ PopReturnAddressTo(rax);
+#ifndef V8_REVERSE_JSARGS
// Push receiver.
__ PushTaggedPointerField(
FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), decompr_scratch1);
+#endif
// ----------- S t a t e -------------
// -- rax : return address
// -- rdx : the JSGeneratorObject to resume
// -- rdi : generator function
// -- rsi : generator context
- // -- rsp[0] : generator receiver
+ // -- rsp[0] : generator receiver, if V8_REVERSE_JSARGS is not set
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -773,6 +792,27 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
{
+#ifdef V8_REVERSE_JSARGS
+ {
+ Label done_loop, loop;
+ __ movq(r9, rcx);
+
+ __ bind(&loop);
+ __ decq(r9);
+ __ j(less, &done_loop, Label::kNear);
+ __ PushTaggedAnyField(
+ FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
+ decompr_scratch1);
+ __ jmp(&loop);
+
+ __ bind(&done_loop);
+ }
+
+ // Push the receiver.
+ __ PushTaggedPointerField(
+ FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
+ decompr_scratch1);
+#else
Label done_loop, loop;
__ Set(r9, 0);
@@ -786,6 +826,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(&loop);
__ bind(&done_loop);
+#endif
}
// Underlying function needs to have bytecode available.
@@ -963,15 +1004,25 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
- DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+  // will restore the original bytecode offset. In order to simplify the code,
+  // we keep a backup of it.
+ Register original_bytecode_offset = scratch2;
+ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+ bytecode_size_table, original_bytecode_offset));
+
+ __ movq(original_bytecode_offset, bytecode_offset);
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
@@ -1013,9 +1064,23 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ cmpb(bytecode,
+ Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ j(not_equal, &not_jump_loop, Label::kNear);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ movq(bytecode_offset, original_bytecode_offset);
+ __ jmp(&end, Label::kNear);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ addl(bytecode_offset,
Operand(bytecode_size_table, bytecode, times_int_size, 0));
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
@@ -1127,7 +1192,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ movq(rax, rsp);
__ subq(rax, rcx);
- __ cmpq(rax, RealStackLimitAsOperand(masm));
+ __ cmpq(rax, StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
__ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1156,6 +1221,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movq(Operand(rbp, rcx, times_system_pointer_size, 0), rdx);
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ __ cmpq(rsp, StackLimitAsOperand(masm, StackLimitKind::kInterruptStackLimit));
+ __ j(below, &stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1188,7 +1260,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx,
- &do_return);
+ r11, &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1196,6 +1268,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ Move(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ movq(kInterpreterBytecodeArrayRegister,
+ Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ movq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+  __ SmiTag(rcx, kInterpreterBytecodeOffsetRegister);
+ __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rcx);
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
@@ -1224,21 +1318,19 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register num_args,
Register start_address,
Register scratch) {
- // Find the address of the last argument.
- __ Move(scratch, num_args);
- __ shlq(scratch, Immediate(kSystemPointerSizeLog2));
+ // Find the argument with lowest address.
+ __ movq(scratch, num_args);
__ negq(scratch);
- __ addq(scratch, start_address);
-
+ __ leaq(start_address,
+ Operand(start_address, scratch, times_system_pointer_size,
+ kSystemPointerSize));
// Push the arguments.
- Label loop_header, loop_check;
- __ j(always, &loop_check, Label::kNear);
- __ bind(&loop_header);
- __ Push(Operand(start_address, 0));
- __ subq(start_address, Immediate(kSystemPointerSize));
- __ bind(&loop_check);
- __ cmpq(start_address, scratch);
- __ j(above, &loop_header, Label::kNear);
+#ifdef V8_REVERSE_JSARGS
+ __ PushArray(start_address, num_args, scratch,
+ TurboAssembler::PushArrayOrder::kReverse);
+#else
+ __ PushArray(start_address, num_args, scratch);
+#endif
}
// static
@@ -1255,7 +1347,13 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -----------------------------------
Label stack_overflow;
- // Number of values to be pushed.
+#ifdef V8_REVERSE_JSARGS
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ decl(rax);
+ }
+#endif
+
__ leal(rcx, Operand(rax, 1)); // Add one for receiver.
// Add a stack check before pushing arguments.
@@ -1264,6 +1362,27 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
+#ifdef V8_REVERSE_JSARGS
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Don't copy receiver.
+ __ decq(rcx);
+ }
+
+ // rbx and rdx will be modified.
+ Generate_InterpreterPushArgs(masm, rcx, rbx, rdx);
+
+ // Push "undefined" as the receiver arg if we need to.
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ __ PushRoot(RootIndex::kUndefinedValue);
+ }
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register rbx.
+    // rbx already points to the penultimate argument, the spread
+ // is below that.
+ __ movq(rbx, Operand(rbx, -kSystemPointerSize));
+ }
+#else
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ PushRoot(RootIndex::kUndefinedValue);
@@ -1277,6 +1396,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ Pop(rbx); // Pass the spread in a register
__ decl(rax); // Subtract one for spread
}
+#endif
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
@@ -1319,16 +1439,33 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
+#ifdef V8_REVERSE_JSARGS
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ decl(rax);
+ }
+
+ // rcx and r8 will be modified.
+ Generate_InterpreterPushArgs(masm, rax, rcx, r8);
+
+ // Push slot for the receiver to be constructed.
+ __ Push(Immediate(0));
+#else
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
// rcx and r8 will be modified.
Generate_InterpreterPushArgs(masm, rax, rcx, r8);
+#endif
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+#ifdef V8_REVERSE_JSARGS
+ // Pass the spread in the register rbx.
+ __ movq(rbx, Operand(rcx, -kSystemPointerSize));
+#else
__ Pop(rbx); // Pass the spread in a register
__ decl(rax); // Subtract one for spread
-
+#endif
// Push return address in preparation for the tail-call.
__ PushReturnAddressFrom(kScratchRegister);
} else {
@@ -1423,6 +1560,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmpq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ j(greater_equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
@@ -1439,6 +1585,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmpq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ j(equal, &function_entry_bytecode);
+
// Load the current bytecode.
__ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
@@ -1447,8 +1599,9 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx,
- &if_return);
+ r11, &if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(kInterpreterBytecodeOffsetRegister);
__ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
@@ -1456,6 +1609,15 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ movq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
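A minimal sketch of the sentinel handling added in the hunk above, with placeholder constants standing in for the real values of BytecodeArray::kHeaderSize - kHeapObjectTag and kFunctionEntryBytecodeOffset:

#include <iostream>

constexpr int kHeaderOffset = 100;  // placeholder for kHeaderSize - kHeapObjectTag
constexpr int kEntryDelta = -1;     // placeholder for kFunctionEntryBytecodeOffset
constexpr int kEntrySentinel = kHeaderOffset + kEntryDelta;

int NextOffset(int current_offset, int bytecode_length) {
  if (current_offset == kEntrySentinel) {
    // Deopt during the implicit function-entry stack check: there is no real
    // bytecode at this offset, so resume at the first bytecode instead of
    // advancing past anything.
    return kHeaderOffset;
  }
  return current_offset + bytecode_length;  // normal advance
}

int main() {
  std::cout << NextOffset(kEntrySentinel, 3) << '\n';  // 100
  std::cout << NextOffset(kHeaderOffset, 3) << '\n';   // 103
}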
@@ -1544,9 +1706,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // -- rsp[8] : argArray
- // -- rsp[16] : thisArg
- // -- rsp[24] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : thisArg
+ // -- args[2] : argArray
// -----------------------------------
// 1. Load receiver into rdi, argArray into rbx (if present), remove all
@@ -1554,17 +1717,17 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// present) instead.
{
Label no_arg_array, no_this_arg;
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ LoadRoot(rdx, RootIndex::kUndefinedValue);
__ movq(rbx, rdx);
- __ movq(rdi, args.GetReceiverOperand());
+ __ movq(rdi, args[0]);
__ testq(rax, rax);
__ j(zero, &no_this_arg, Label::kNear);
{
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movq(rdx, args[1]);
__ cmpq(rax, Immediate(1));
__ j(equal, &no_arg_array, Label::kNear);
- __ movq(rbx, args.GetArgumentOperand(2));
+ __ movq(rbx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
@@ -1606,6 +1769,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
// static
+// TODO(victor): merge steps 1, 2 and 3 when V8_REVERSE_JSARGS is set.
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
@@ -1613,17 +1777,23 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// rsp[16] : Argument n-1
// ...
// rsp[8 * n] : Argument 1
- // rsp[8 * (n + 1)] : Receiver (callable to call)
- //
+ // rsp[8 * (n + 1)] : Argument 0 (receiver: callable to call)
+ // NOTE: The order of args is reversed if V8_REVERSE_JSARGS is set.
// rax contains the number of arguments, n, not counting the receiver.
- //
+
// 1. Make sure we have at least one argument.
{
Label done;
__ testq(rax, rax);
__ j(not_zero, &done, Label::kNear);
__ PopReturnAddressTo(rbx);
+#ifdef V8_REVERSE_JSARGS
+ __ Pop(kScratchRegister); // Pop the receiver.
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Push(kScratchRegister);
+#else
__ PushRoot(RootIndex::kUndefinedValue);
+#endif
__ PushReturnAddressFrom(rbx);
__ incq(rax);
__ bind(&done);
@@ -1631,25 +1801,34 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the callable to call (passed as receiver) from the stack.
{
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ movq(rdi, args.GetReceiverOperand());
}
+#ifdef V8_REVERSE_JSARGS
+ // 3. Shift return address one slot down on the stack (overwriting the
+ // original receiver), making the original first argument the new receiver.
+ {
+ __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
+ __ decq(rax); // One fewer argument (first argument is new receiver).
+ }
+#else
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
{
Label loop;
__ movq(rcx, rax);
- StackArgumentsAccessor args(rsp, rcx);
+ StackArgumentsAccessor args(rcx);
__ bind(&loop);
- __ movq(rbx, args.GetArgumentOperand(1));
- __ movq(args.GetArgumentOperand(0), rbx);
+ __ movq(rbx, args[1]);
+ __ movq(args[0], rbx);
__ decq(rcx);
__ j(not_zero, &loop); // While non-zero.
__ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
__ decq(rax); // One fewer argument (first argument is new receiver).
}
+#endif
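A minimal stack model of step 3 under both layouts (plain C++ with a std::vector whose index 0 is the top of the stack; not the V8 macros), showing why the reversed layout only needs to drop one slot under the return address while the old layout has to slide every argument:

#include <iostream>
#include <string>
#include <vector>

using Stack = std::vector<std::string>;  // index 0 = top of stack

void Print(const Stack& s) {
  for (const auto& slot : s) std::cout << slot << ' ';
  std::cout << '\n';
}

// Reversed-args layout: [ret, callable(receiver), arg1, arg2, ...].
// Making arg1 the receiver is just DropUnderReturnAddress(1).
Stack ShuffleReversed(Stack s) {
  s.erase(s.begin() + 1);
  return s;
}

// Old layout: [ret, argN, ..., arg1, callable(receiver)].
// Each argument slides one slot toward the receiver, then one slot under the
// return address is dropped.
Stack ShuffleOld(Stack s) {
  for (int i = static_cast<int>(s.size()) - 1; i > 1; --i) s[i] = s[i - 1];
  s.erase(s.begin() + 1);
  return s;
}

int main() {
  Print(ShuffleReversed({"ret", "callable", "a1", "a2"}));  // ret a1 a2
  Print(ShuffleOld({"ret", "a2", "a1", "callable"}));       // ret a2 a1
}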
// 4. Call the callable.
// Since we did not create a frame for Function.prototype.call() yet,
@@ -1661,10 +1840,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // -- rsp[8] : argumentsList
- // -- rsp[16] : thisArgument
- // -- rsp[24] : target
- // -- rsp[32] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : target
+ // -- args[2] : thisArgument
+ // -- args[3] : argumentsList
// -----------------------------------
// 1. Load target into rdi (if present), argumentsList into rbx (if present),
@@ -1672,18 +1852,18 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// thisArgument (if present) instead.
{
Label done;
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
__ cmpq(rax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ movq(rdi, args.GetArgumentOperand(1)); // target
+ __ movq(rdi, args[1]); // target
__ j(equal, &done, Label::kNear);
- __ movq(rdx, args.GetArgumentOperand(2)); // thisArgument
+ __ movq(rdx, args[2]); // thisArgument
__ cmpq(rax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ movq(rbx, args.GetArgumentOperand(3)); // argumentsList
+ __ movq(rbx, args[3]); // argumentsList
__ bind(&done);
__ PopReturnAddressTo(rcx);
__ leaq(rsp,
@@ -1712,10 +1892,11 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // -- rsp[8] : new.target (optional)
- // -- rsp[16] : argumentsList
- // -- rsp[24] : target
- // -- rsp[32] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : target
+ // -- args[2] : argumentsList
+ // -- args[3] : new.target (optional)
// -----------------------------------
// 1. Load target into rdi (if present), argumentsList into rbx (if present),
@@ -1724,19 +1905,19 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// (if present) instead.
{
Label done;
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
__ cmpq(rax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ movq(rdi, args.GetArgumentOperand(1)); // target
+ __ movq(rdi, args[1]); // target
__ movq(rdx, rdi); // new.target defaults to target
__ j(equal, &done, Label::kNear);
- __ movq(rbx, args.GetArgumentOperand(2)); // argumentsList
+ __ movq(rbx, args[2]); // argumentsList
__ cmpq(rax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ movq(rdx, args.GetArgumentOperand(3)); // new.target
+ __ movq(rdx, args[3]); // new.target
__ bind(&done);
__ PopReturnAddressTo(rcx);
__ leaq(rsp,
@@ -1809,14 +1990,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -----------------------------------
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpq(rbx, Immediate(kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+
+#ifndef V8_REVERSE_JSARGS
+ // This optimization is disabled when the arguments are reversed.
__ testl(
FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask));
__ j(not_zero, &skip_adapt_arguments);
+#endif
// -------------------------------------------
// Adapt arguments.
@@ -1834,7 +2019,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
+#ifdef V8_REVERSE_JSARGS
+ __ leaq(r8, Operand(rbp, rbx, times_system_pointer_size, offset));
+#else
__ leaq(r8, Operand(rbp, rax, times_system_pointer_size, offset));
+#endif
__ Set(rax, -1); // account for receiver
Label copy;
@@ -1850,6 +2039,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Too few parameters: Actual < expected.
__ bind(&under_application);
{
+#ifdef V8_REVERSE_JSARGS
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
+ __ movq(r8, rbx);
+ __ subq(r8, rax);
+ __ bind(&fill);
+ __ Push(kScratchRegister);
+ __ decq(r8);
+ __ j(greater, &fill);
+
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ leaq(r9, Operand(rbp, rax, times_system_pointer_size, offset));
@@ -1863,6 +2063,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmpq(r8, rax);
__ j(less, &copy);
+ // Update actual number of arguments.
+ __ movq(rax, rbx);
+#else // !V8_REVERSE_JSARGS
+ // Copy receiver and all actual arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ leaq(r9, Operand(rbp, rax, times_system_pointer_size, offset));
+ __ Set(r8, -1); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ incq(r8);
+ __ Push(Operand(r9, 0));
+ __ subq(r9, Immediate(kSystemPointerSize));
+ __ cmpq(r8, rax);
+ __ j(less, &copy);
+
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
@@ -1871,6 +2087,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Push(kScratchRegister);
__ cmpq(rax, rbx);
__ j(less, &fill);
+#endif // !V8_REVERSE_JSARGS
}
// Call the entry point.
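A push-order model of the under-application path above (plain C++, not the V8 macros): "caller[k]" stands for the caller-frame word at CallerSP + k * kSystemPointerSize, and which of those words is the receiver depends on the argument order in use.

#include <iostream>
#include <string>
#include <vector>

// expected > actual; the missing parameters are padded with undefined.
std::vector<std::string> Adapt(int actual, int expected, bool reverse_jsargs) {
  std::vector<std::string> pushes;
  auto fill = [&] {  // pad the missing trailing parameters
    for (int i = actual; i < expected; ++i) pushes.push_back("undefined");
  };
  auto copy = [&] {  // receiver + actual arguments, walking the frame downwards
    for (int k = actual; k >= 0; --k)
      pushes.push_back("caller[" + std::to_string(k) + "]");
  };
  if (reverse_jsargs) { fill(); copy(); } else { copy(); fill(); }
  return pushes;
}

int main() {
  for (bool reverse : {false, true}) {
    std::cout << (reverse ? "reversed: " : "default:  ");
    for (const auto& p : Adapt(/*actual=*/1, /*expected=*/3, reverse))
      std::cout << p << ' ';
    std::cout << '\n';
  }
}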
@@ -1981,6 +2198,56 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Generate_StackOverflowCheck(masm, rcx, r8, &stack_overflow, Label::kNear);
// Push additional arguments onto the stack.
+#ifdef V8_REVERSE_JSARGS
+ // Move the arguments already on the stack,
+ // including the receiver and the return address.
+ {
+ Label copy, check;
+ Register src = r8, dest = rsp, num = r9, current = r11;
+ __ movq(src, rsp);
+ __ movq(kScratchRegister, rcx);
+ __ negq(kScratchRegister);
+ __ leaq(rsp, Operand(rsp, kScratchRegister, times_system_pointer_size,
+ 0)); // Update stack pointer.
+ __ leaq(num, Operand(rax, 2)); // Number of words to copy.
+ // +2 for receiver and return address.
+ __ Set(current, 0);
+ __ jmp(&check);
+ __ bind(&copy);
+ __ movq(kScratchRegister,
+ Operand(src, current, times_system_pointer_size, 0));
+ __ movq(Operand(dest, current, times_system_pointer_size, 0),
+ kScratchRegister);
+ __ incq(current);
+ __ bind(&check);
+ __ cmpq(current, num);
+ __ j(less, &copy);
+ __ leaq(r8, Operand(rsp, num, times_system_pointer_size, 0));
+ }
+
+ // Copy the additional arguments onto the stack.
+ {
+ Register value = scratch;
+ Register src = rbx, dest = r8, num = rcx, current = r9;
+ __ Set(current, 0);
+ Label done, push, loop;
+ __ bind(&loop);
+ __ cmpl(current, num);
+ __ j(equal, &done, Label::kNear);
+ // Turn the hole into undefined as we go.
+ __ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(value, RootIndex::kTheHoleValue);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(value, RootIndex::kUndefinedValue);
+ __ bind(&push);
+ __ movq(Operand(dest, current, times_system_pointer_size, 0), value);
+ __ incl(current);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ addq(rax, current);
+ }
+#else // !V8_REVERSE_JSARGS
{
Register value = scratch;
__ PopReturnAddressTo(r8);
@@ -2003,6 +2270,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ PushReturnAddressFrom(r8);
__ addq(rax, r9);
}
+#endif
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
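A minimal model of the reversed-args copy in the hunk above (plain C++, index 0 as the top of the stack; not the V8 macros): the return address, the receiver and the arguments already pushed are slid down to open a gap, and the array elements are written into that gap with holes turned into undefined.

#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> AppendVarargs(std::vector<std::string> stack,
                                       int argc,  // not counting the receiver
                                       const std::vector<std::string>& extra) {
  std::vector<std::string> converted;
  for (const auto& e : extra)
    converted.push_back(e == "hole" ? "undefined" : e);  // hole -> undefined
  // The moved block is return address + receiver + argc arguments.
  stack.insert(stack.begin() + argc + 2, converted.begin(), converted.end());
  return stack;
}

int main() {
  auto result = AppendVarargs({"ret", "recv", "a1", "a2", "caller-frame"},
                              /*argc=*/2, {"x", "hole", "y"});
  for (const auto& s : result) std::cout << s << ' ';
  std::cout << '\n';  // ret recv a1 a2 x undefined y caller-frame
}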
@@ -2076,9 +2344,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ PopReturnAddressTo(rcx);
__ bind(&loop);
{
- StackArgumentsAccessor args(rbx, r8, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ Push(args.GetArgumentOperand(0));
__ decl(r8);
+ __ Push(Operand(rbx, r8, times_system_pointer_size,
+ kFPOnStackSize + kPCOnStackSize));
__ j(not_zero, &loop);
}
__ PushReturnAddressFrom(rcx);
@@ -2101,7 +2369,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ AssertFunction(rdi);
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
@@ -2233,6 +2501,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- rbx : the number of [[BoundArguments]] (checked to be non-zero)
// -----------------------------------
+ // TODO(victor): Use Generate_StackOverflowCheck here.
// Check the stack for overflow.
{
Label done;
@@ -2242,7 +2511,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// We are not trying to catch interruptions (i.e. debug break and
// preemption) here, so check the "real stack limit".
- __ cmpq(kScratchRegister, RealStackLimitAsOperand(masm));
+ __ cmpq(kScratchRegister,
+ StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
__ j(above_equal, &done, Label::kNear);
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2252,6 +2522,35 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+#ifdef V8_REVERSE_JSARGS
+ // Save Return Address and Receiver into registers.
+ __ Pop(r8);
+ __ Pop(r10);
+
+ // Push [[BoundArguments]] to the stack.
+ {
+ Label loop;
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ addq(rax, rbx); // Adjust effective number of arguments.
+ __ bind(&loop);
+ // Instead of doing decl(rbx) here subtract kTaggedSize from the header
+ // offset in order to be able to move decl(rbx) right before the loop
+ // condition. This is necessary in order to avoid flags corruption by
+ // pointer decompression code.
+ __ LoadAnyTaggedField(
+ r12, FieldOperand(rcx, rbx, times_tagged_size,
+ FixedArray::kHeaderSize - kTaggedSize));
+ __ Push(r12);
+ __ decl(rbx);
+ __ j(greater, &loop);
+ }
+
+ // Recover Receiver and Return Address.
+ __ Push(r10);
+ __ Push(r8);
+#else // !V8_REVERSE_JSARGS
// Reserve stack space for the [[BoundArguments]].
__ movq(kScratchRegister, rbx);
__ AllocateStackSpace(kScratchRegister);
@@ -2282,7 +2581,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ bind(&loop);
// Instead of doing decl(rbx) here subtract kTaggedSize from the header
- // offset in order to move be able to move decl(rbx) right before the loop
+ // offset in order be able to move decl(rbx) right before the loop
// condition. This is necessary in order to avoid flags corruption by
// pointer decompression code.
__ LoadAnyTaggedField(
@@ -2298,6 +2597,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// arguments from the call plus return address plus the number of
// [[BoundArguments]]), so we need to subtract one for the return address.
__ decl(rax);
+#endif // !V8_REVERSE_JSARGS
}
__ bind(&no_bound_arguments);
}
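A minimal model of the reversed-args bound-arguments push above (plain C++, index 0 as the top of the stack; not the V8 macros): return address and receiver are popped, the [[BoundArguments]] are pushed from the last element to the first, and receiver and return address go back on top.

#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> PushBound(std::vector<std::string> stack,
                                   const std::vector<std::string>& bound) {
  std::string ret = stack.front();  stack.erase(stack.begin());  // Pop(r8)
  std::string recv = stack.front(); stack.erase(stack.begin());  // Pop(r10)
  for (auto it = bound.rbegin(); it != bound.rend(); ++it)       // push loop
    stack.insert(stack.begin(), *it);
  stack.insert(stack.begin(), recv);                             // Push(r10)
  stack.insert(stack.begin(), ret);                              // Push(r8)
  return stack;
}

int main() {
  auto s = PushBound({"ret", "recv", "a1", "a2"}, {"b0", "b1"});
  for (const auto& x : s) std::cout << x << ' ';
  std::cout << '\n';  // ret recv b0 b1 a1 a2
}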
@@ -2313,7 +2613,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(rdi);
// Patch the receiver to [[BoundThis]].
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ LoadAnyTaggedField(rbx,
FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
__ movq(args.GetReceiverOperand(), rbx);
@@ -2334,7 +2634,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the target to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
Label non_callable;
__ JumpIfSmi(rdi, &non_callable);
@@ -2439,7 +2739,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
// Check if target is a Smi.
Label non_constructor;
@@ -2549,7 +2849,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
offset += kSimd128Size;
}
- // Push the WASM instance as an explicit argument to WasmCompileLazy.
+ // Push the Wasm instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
// Push the function index as second argument.
__ Push(r11);
@@ -2575,6 +2875,49 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ jmp(r11);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values; we restore
+ // them after the runtime call.
+ for (int reg_code : base::bits::IterateBitsBackwards(
+ WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Push(Register::from_code(reg_code));
+ }
+
+ constexpr int kFpStackSize =
+ kSimd128Size * WasmDebugBreakFrameConstants::kNumPushedFpRegisters;
+ __ AllocateStackSpace(kFpStackSize);
+ int offset = kFpStackSize;
+ for (int reg_code : base::bits::IterateBitsBackwards(
+ WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ offset -= kSimd128Size;
+ __ movdqu(Operand(rsp, offset), DoubleRegister::from_code(reg_code));
+ }
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ for (int reg_code :
+ base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ __ movdqu(DoubleRegister::from_code(reg_code), Operand(rsp, offset));
+ offset += kSimd128Size;
+ }
+ __ addq(rsp, Immediate(kFpStackSize));
+ for (int reg_code :
+ base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Pop(Register::from_code(reg_code));
+ }
+ }
+
+ __ ret(0);
+}
+
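A minimal sketch of the save/restore symmetry used above for the GP registers (plain C++; the bit mask and register values are made up): pushing while iterating the mask bits backwards and popping while iterating them forwards brings every register back from the slot it was saved to.

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

int main() {
  const uint32_t mask = 0b101101;  // arbitrary register set (codes 0, 2, 3, 5)
  std::map<int, int> values = {{0, 10}, {2, 12}, {3, 13}, {5, 15}};

  std::vector<int> stack;                 // back() = top of stack
  for (int code = 31; code >= 0; --code)  // IterateBitsBackwards
    if (mask & (1u << code)) stack.push_back(values[code]);

  std::map<int, int> restored;
  for (int code = 0; code < 32; ++code)   // IterateBits (forwards)
    if (mask & (1u << code)) { restored[code] = stack.back(); stack.pop_back(); }

  std::cout << (restored == values ? "round-trip ok" : "mismatch") << '\n';
}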
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -2979,11 +3322,12 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- rbx : call data
// -- rdi : holder
// -- rsp[0] : return address
- // -- rsp[8] : last argument
+ // -- rsp[8] : argument argc
// -- ...
- // -- rsp[argc * 8] : first argument
- // -- rsp[(argc + 1) * 8] : receiver
+ // -- rsp[argc * 8] : argument 1
+ // -- rsp[(argc + 1) * 8] : argument 0 (receiver)
// -----------------------------------
+ // NOTE: The order of args is reversed if V8_REVERSE_JSARGS is set.
Register api_function_address = rdx;
Register argc = rcx;
@@ -3042,8 +3386,13 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
+#ifdef V8_REVERSE_JSARGS
+ __ leaq(scratch,
+ Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
+#else
__ leaq(scratch, Operand(scratch, argc, times_system_pointer_size,
(FCA::kArgsLength - 1) * kSystemPointerSize));
+#endif
__ movq(StackSpaceOperand(1), scratch);
// FunctionCallbackInfo::length_.