From 12478684aab233942e0d5dc24f195930c8a5e59d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?=
Date: Mon, 13 Jul 2020 10:39:42 +0200
Subject: deps: update V8 to 8.4.371.19

PR-URL: https://github.com/nodejs/node/pull/33579
Reviewed-By: Matteo Collina
Reviewed-By: Jiawen Geng
Reviewed-By: Michael Dawson
Reviewed-By: Rich Trott
Reviewed-By: Shelley Vohr
---
 deps/v8/src/builtins/mips64/builtins-mips64.cc | 35 +++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 6 deletions(-)

(limited to 'deps/v8/src/builtins/mips64')

diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index baf2d5bfec..babe084bb0 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -930,15 +930,25 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
 
 // Advance the current bytecode offset. This simulates what all bytecode
 // handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                           Register bytecode_array,
                                           Register bytecode_offset,
                                           Register bytecode, Register scratch1,
-                                          Register scratch2, Label* if_return) {
+                                          Register scratch2, Register scratch3,
+                                          Label* if_return) {
   Register bytecode_size_table = scratch1;
-  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
-                     bytecode));
+
+  // The bytecode offset value will be increased by one in wide and extra wide
+  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+  // will restore the original bytecode. In order to simplify the code, we have
+  // a backup of it.
+  Register original_bytecode_offset = scratch3;
+  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+                     bytecode_size_table, original_bytecode_offset));
+  __ Move(original_bytecode_offset, bytecode_offset);
   __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
@@ -977,10 +987,23 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
   RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
 #undef JUMP_IF_EQUAL
 
+  // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+  // of the loop.
+  Label end, not_jump_loop;
+  __ Branch(&not_jump_loop, ne, bytecode,
+            Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+  // We need to restore the original bytecode_offset since we might have
+  // increased it to skip the wide / extra-wide prefix bytecode.
+  __ Move(bytecode_offset, original_bytecode_offset);
+  __ jmp(&end);
+
+  __ bind(&not_jump_loop);
   // Otherwise, load the size of the current bytecode and advance the offset.
   __ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
   __ Lw(scratch2, MemOperand(scratch2));
   __ Daddu(bytecode_offset, bytecode_offset, scratch2);
+
+  __ bind(&end);
 }
 
 // Generate code for entering a JS function with the interpreter.
@@ -1153,7 +1176,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Lbu(a1, MemOperand(a1));
   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
-                                &do_return);
+                                a4, &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1430,7 +1453,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
   Label if_return;
   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
-                                &if_return);
+                                a4, &if_return);
 
   __ bind(&enter_bytecode);
   // Convert new bytecode offset to a Smi and save in the stackframe.
--
cgit v1.2.1
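Note (reading aid, not part of the patch): the following is a minimal plain-C++ sketch of the control flow that the patched AdvanceBytecodeOffsetOrReturn stub emits. The Bytecode enum and the one-entry-per-bytecode size table below are simplified stand-ins invented for illustration; V8's real interpreter::Bytecode set is much larger, keeps separate size-table sections per operand scale, and this logic is generated as MIPS64 machine code through the MacroAssembler rather than run as C++.

// Illustrative sketch only -- not V8 code. Models the decision the stub makes:
// bail out for return bytecodes, keep the offset in place for JumpLoop, and
// otherwise advance by the current bytecode's size.
#include <cstddef>
#include <cstdint>

namespace sketch {

// Hypothetical, simplified bytecode set (stand-in for interpreter::Bytecode).
enum class Bytecode : uint8_t { kWide, kExtraWide, kJumpLoop, kReturn, kAdd };

// Stand-in for the table behind ExternalReference::bytecode_size_table_address()
// (bytecode sizes in bytes, one entry per enumerator above).
constexpr int kBytecodeSize[] = {1, 1, 3, 1, 2};

// Returns true when control should go to the if_return label, i.e. the
// interpreter frame is being left rather than dispatching the next bytecode.
inline bool AdvanceBytecodeOffsetOrReturn(const uint8_t* bytecode_array,
                                          size_t& bytecode_offset) {
  // Backup of the offset, playing the role of scratch3 / original_bytecode_offset.
  const size_t original_bytecode_offset = bytecode_offset;
  Bytecode bytecode = static_cast<Bytecode>(bytecode_array[bytecode_offset]);

  // Skip a Wide / ExtraWide prefix and load the real bytecode behind it.
  if (bytecode == Bytecode::kWide || bytecode == Bytecode::kExtraWide) {
    bytecode_offset += 1;
    bytecode = static_cast<Bytecode>(bytecode_array[bytecode_offset]);
  }

  // RETURN_BYTECODE_LIST check: return bytecodes bail out to the caller.
  if (bytecode == Bytecode::kReturn) {
    return true;
  }

  // New in this patch: a JumpLoop is not advanced past. The original offset
  // (from before the prefix skip) is restored so the JumpLoop is re-executed
  // and itself performs the jump back to the start of the loop.
  if (bytecode == Bytecode::kJumpLoop) {
    bytecode_offset = original_bytecode_offset;
    return false;
  }

  // Otherwise, advance the offset by the size of the current bytecode.
  bytecode_offset += static_cast<size_t>(kBytecodeSize[static_cast<int>(bytecode)]);
  return false;
}

}  // namespace sketch

The JumpLoop branch is the behavioural change these hunks introduce: instead of stepping over the loop back-edge, the stub restores the pre-prefix offset (hence the extra scratch3/a4 register threaded through both call sites) so that the JumpLoop bytecode is re-executed and performs the backward jump itself.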