author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/execution
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download   qtwebengine-chromium-85-based.tar.gz
BASELINE: Update Chromium to 85.0.4183.140
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/execution')
24 files changed, 746 insertions, 366 deletions
diff --git a/chromium/v8/src/execution/arm/simulator-arm.cc b/chromium/v8/src/execution/arm/simulator-arm.cc index 019542b12d4..ddfc5650b56 100644 --- a/chromium/v8/src/execution/arm/simulator-arm.cc +++ b/chromium/v8/src/execution/arm/simulator-arm.cc @@ -1567,7 +1567,7 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int32_t arg0, int32_t arg1); using SimulatorRuntimeProfilingGetterCall = void (*)(int32_t arg0, int32_t arg1, void* arg2); -// Separate for fine-grained UBSan blacklisting. Casting any given C++ +// Separate for fine-grained UBSan blocklisting. Casting any given C++ // function to {SimulatorRuntimeCall} is undefined behavior; but since // the target function can indeed be any function that's exposed via // the "fast C call" mechanism, we can't reconstruct its signature here. @@ -5375,7 +5375,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) { } else { UNIMPLEMENTED(); } - } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) { + } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(17, 16) == 0x3 && + instr->Bits(11, 8) == 0x5) { // vrecpe/vrsqrte.f32 Qd, Qm. int Vd = instr->VFPDRegValue(kSimd128Precision); int Vm = instr->VFPMRegValue(kSimd128Precision); @@ -5442,6 +5443,39 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) { UNIMPLEMENTED(); break; } + } else if (instr->Bits(17, 16) == 0x2 && instr->Bit(10) == 1) { + // vrint<q>.<dt> <Dd>, <Dm> + // vrint<q>.<dt> <Qd>, <Qm> + // See F6.1.205 + int regs = instr->Bit(6) + 1; + int rounding_mode = instr->Bits(9, 7); + float (*fproundint)(float) = nullptr; + switch (rounding_mode) { + case 3: + fproundint = &truncf; + break; + case 5: + fproundint = &floorf; + break; + case 7: + fproundint = &ceilf; + break; + default: + UNIMPLEMENTED(); + } + int vm = instr->VFPMRegValue(kDoublePrecision); + int vd = instr->VFPDRegValue(kDoublePrecision); + + float floats[2]; + for (int r = 0; r < regs; r++) { + // We cannot simply use GetVFPSingleValue since our Q registers + // might not map to any S registers at all. 
+ get_neon_register<float, kDoubleSize>(vm + r, floats); + for (int e = 0; e < 2; e++) { + floats[e] = canonicalizeNaN(fproundint(floats[e])); + } + set_neon_register<float, kDoubleSize>(vd + r, floats); + } } else { UNIMPLEMENTED(); } @@ -5658,12 +5692,12 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) { int32_t address = get_register(Rn); int regs = instr->Bit(5) + 1; int size = instr->Bits(7, 6); - uint32_t q_data[4]; + uint32_t q_data[2]; switch (size) { case Neon8: { uint8_t data = ReadBU(address); uint8_t* dst = reinterpret_cast<uint8_t*>(q_data); - for (int i = 0; i < 16; i++) { + for (int i = 0; i < 8; i++) { dst[i] = data; } break; @@ -5671,21 +5705,21 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) { case Neon16: { uint16_t data = ReadHU(address); uint16_t* dst = reinterpret_cast<uint16_t*>(q_data); - for (int i = 0; i < 8; i++) { + for (int i = 0; i < 4; i++) { dst[i] = data; } break; } case Neon32: { uint32_t data = ReadW(address); - for (int i = 0; i < 4; i++) { + for (int i = 0; i < 2; i++) { q_data[i] = data; } break; } } for (int r = 0; r < regs; r++) { - set_neon_register(Vd + r, q_data); + set_neon_register<uint32_t, kDoubleSize>(Vd + r, q_data); } if (Rm != 15) { if (Rm == 13) { diff --git a/chromium/v8/src/execution/arm64/pointer-auth-arm64.cc b/chromium/v8/src/execution/arm64/pointer-auth-arm64.cc index 36e792b752e..eaa88445ec2 100644 --- a/chromium/v8/src/execution/arm64/pointer-auth-arm64.cc +++ b/chromium/v8/src/execution/arm64/pointer-auth-arm64.cc @@ -10,8 +10,8 @@ namespace v8 { namespace internal { // Randomly generated example key for simulating only. -const Simulator::PACKey Simulator::kPACKeyIA = {0xc31718727de20f71, - 0xab9fd4e14b2fec51, 0}; +const Simulator::PACKey Simulator::kPACKeyIB = {0xeebb163b474e04c8, + 0x5267ac6fc280fb7c, 1}; namespace { diff --git a/chromium/v8/src/execution/arm64/pointer-authentication-arm64.h b/chromium/v8/src/execution/arm64/pointer-authentication-arm64.h index c54a59f29c7..e4bc476b3d0 100644 --- a/chromium/v8/src/execution/arm64/pointer-authentication-arm64.h +++ b/chromium/v8/src/execution/arm64/pointer-authentication-arm64.h @@ -10,11 +10,6 @@ #include "src/common/globals.h" #include "src/execution/arm64/simulator-arm64.h" -// TODO(v8:10026): Replace hints with instruction aliases, when supported. -#define AUTIA1716 "hint #12" -#define PACIA1716 "hint #8" -#define XPACLRI "hint #7" - namespace v8 { namespace internal { @@ -31,13 +26,13 @@ V8_INLINE Address PointerAuthentication::AuthenticatePC( uint64_t sp = reinterpret_cast<uint64_t>(pc_address) + offset_from_sp; uint64_t pc = reinterpret_cast<uint64_t>(*pc_address); #ifdef USE_SIMULATOR - pc = Simulator::AuthPAC(pc, sp, Simulator::kPACKeyIA, + pc = Simulator::AuthPAC(pc, sp, Simulator::kPACKeyIB, Simulator::kInstructionPointer); #else asm volatile( " mov x17, %[pc]\n" " mov x16, %[stack_ptr]\n" - " " AUTIA1716 "\n" + " autib1716\n" " ldr xzr, [x17]\n" " mov %[pc], x17\n" : [pc] "+r"(pc) @@ -55,7 +50,7 @@ V8_INLINE Address PointerAuthentication::StripPAC(Address pc) { asm volatile( " mov x16, lr\n" " mov lr, %[pc]\n" - " " XPACLRI "\n" + " xpaclri\n" " mov %[pc], lr\n" " mov lr, x16\n" : [pc] "+r"(pc) @@ -68,13 +63,13 @@ V8_INLINE Address PointerAuthentication::StripPAC(Address pc) { // Sign {pc} using {sp}. 
V8_INLINE Address PointerAuthentication::SignPCWithSP(Address pc, Address sp) { #ifdef USE_SIMULATOR - return Simulator::AddPAC(pc, sp, Simulator::kPACKeyIA, + return Simulator::AddPAC(pc, sp, Simulator::kPACKeyIB, Simulator::kInstructionPointer); #else asm volatile( " mov x17, %[pc]\n" " mov x16, %[sp]\n" - " " PACIA1716 "\n" + " pacib1716\n" " mov %[pc], x17\n" : [pc] "+r"(pc) : [sp] "r"(sp) @@ -92,13 +87,13 @@ V8_INLINE void PointerAuthentication::ReplacePC(Address* pc_address, uint64_t sp = reinterpret_cast<uint64_t>(pc_address) + offset_from_sp; uint64_t old_pc = reinterpret_cast<uint64_t>(*pc_address); #ifdef USE_SIMULATOR - uint64_t auth_old_pc = Simulator::AuthPAC(old_pc, sp, Simulator::kPACKeyIA, + uint64_t auth_old_pc = Simulator::AuthPAC(old_pc, sp, Simulator::kPACKeyIB, Simulator::kInstructionPointer); uint64_t raw_old_pc = Simulator::StripPAC(old_pc, Simulator::kInstructionPointer); // Verify that the old address is authenticated. CHECK_EQ(auth_old_pc, raw_old_pc); - new_pc = Simulator::AddPAC(new_pc, sp, Simulator::kPACKeyIA, + new_pc = Simulator::AddPAC(new_pc, sp, Simulator::kPACKeyIB, Simulator::kInstructionPointer); #else // Only store newly signed address after we have verified that the old @@ -106,10 +101,10 @@ V8_INLINE void PointerAuthentication::ReplacePC(Address* pc_address, asm volatile( " mov x17, %[new_pc]\n" " mov x16, %[sp]\n" - " " PACIA1716 "\n" + " pacib1716\n" " mov %[new_pc], x17\n" " mov x17, %[old_pc]\n" - " " AUTIA1716 "\n" + " autib1716\n" " ldr xzr, [x17]\n" : [new_pc] "+&r"(new_pc) : [sp] "r"(sp), [old_pc] "r"(old_pc) @@ -127,13 +122,13 @@ V8_INLINE void PointerAuthentication::ReplaceContext(Address* pc_address, uint64_t new_pc; #ifdef USE_SIMULATOR uint64_t auth_pc = - Simulator::AuthPAC(old_signed_pc, old_context, Simulator::kPACKeyIA, + Simulator::AuthPAC(old_signed_pc, old_context, Simulator::kPACKeyIB, Simulator::kInstructionPointer); uint64_t raw_pc = Simulator::StripPAC(auth_pc, Simulator::kInstructionPointer); // Verify that the old address is authenticated. CHECK_EQ(raw_pc, auth_pc); - new_pc = Simulator::AddPAC(raw_pc, new_context, Simulator::kPACKeyIA, + new_pc = Simulator::AddPAC(raw_pc, new_context, Simulator::kPACKeyIB, Simulator::kInstructionPointer); #else // Only store newly signed address after we have verified that the old @@ -141,13 +136,13 @@ V8_INLINE void PointerAuthentication::ReplaceContext(Address* pc_address, asm volatile( " mov x17, %[old_pc]\n" " mov x16, %[old_ctx]\n" - " " AUTIA1716 "\n" + " autib1716\n" " mov x16, %[new_ctx]\n" - " " PACIA1716 "\n" + " pacib1716\n" " mov %[new_pc], x17\n" " mov x17, %[old_pc]\n" " mov x16, %[old_ctx]\n" - " " AUTIA1716 "\n" + " autib1716\n" " ldr xzr, [x17]\n" : [new_pc] "=&r"(new_pc) : [old_pc] "r"(old_signed_pc), [old_ctx] "r"(old_context), diff --git a/chromium/v8/src/execution/arm64/simulator-arm64.cc b/chromium/v8/src/execution/arm64/simulator-arm64.cc index adc856a6066..4d9205f0537 100644 --- a/chromium/v8/src/execution/arm64/simulator-arm64.cc +++ b/chromium/v8/src/execution/arm64/simulator-arm64.cc @@ -445,7 +445,7 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1); using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1, void* arg2); -// Separate for fine-grained UBSan blacklisting. Casting any given C++ +// Separate for fine-grained UBSan blocklisting. 
Casting any given C++ // function to {SimulatorRuntimeCall} is undefined behavior; but since // the target function can indeed be any function that's exposed via // the "fast C call" mechanism, we can't reconstruct its signature here. @@ -2756,6 +2756,9 @@ void Simulator::VisitFPIntegerConvert(Instruction* instr) { case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break; + case FJCVTZS: + set_wreg(dst, FPToFixedJS(dreg(src))); + break; case FMOV_ws: set_wreg(dst, sreg_bits(src)); break; @@ -3125,8 +3128,8 @@ bool Simulator::FPProcessNaNs(Instruction* instr) { // clang-format off #define PAUTH_SYSTEM_MODES(V) \ - V(A1716, 17, xreg(16), kPACKeyIA) \ - V(ASP, 30, xreg(31, Reg31IsStackPointer), kPACKeyIA) + V(B1716, 17, xreg(16), kPACKeyIB) \ + V(BSP, 30, xreg(31, Reg31IsStackPointer), kPACKeyIB) // clang-format on void Simulator::VisitSystem(Instruction* instr) { @@ -3134,7 +3137,7 @@ void Simulator::VisitSystem(Instruction* instr) { // range of immediates instead of indicating a different instruction. This // makes the decoding tricky. if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) { - // The BType check for PACIASP happens in CheckBType(). + // The BType check for PACIBSP happens in CheckBType(). switch (instr->Mask(SystemPAuthMask)) { #define DEFINE_PAUTH_FUNCS(SUFFIX, DST, MOD, KEY) \ case PACI##SUFFIX: \ diff --git a/chromium/v8/src/execution/arm64/simulator-arm64.h b/chromium/v8/src/execution/arm64/simulator-arm64.h index cd4137c8e51..ee6d6341825 100644 --- a/chromium/v8/src/execution/arm64/simulator-arm64.h +++ b/chromium/v8/src/execution/arm64/simulator-arm64.h @@ -828,8 +828,8 @@ class Simulator : public DecoderVisitor, public SimulatorBase { void CheckBTypeForPAuth() { DCHECK(pc_->IsPAuth()); Instr instr = pc_->Mask(SystemPAuthMask); - // Only PACI[AB]SP allowed here, but we don't currently support PACIBSP. - CHECK_EQ(instr, PACIASP); + // Only PACI[AB]SP allowed here, and we only support PACIBSP. + CHECK(instr == PACIBSP); // Check BType allows PACI[AB]SP instructions. switch (btype()) { case BranchFromGuardedNotToIP: @@ -837,7 +837,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase { // here to be set. This makes PACI[AB]SP behave like "BTI c", // disallowing its execution when BTYPE is BranchFromGuardedNotToIP // (0b11). - FATAL("Executing PACIASP with wrong BType."); + FATAL("Executing PACIBSP with wrong BType."); case BranchFromUnguardedOrToIP: case BranchAndLink: break; @@ -1397,7 +1397,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase { int number; }; - static const PACKey kPACKeyIA; + static const PACKey kPACKeyIB; // Current implementation is that all pointers are tagged. 
static bool HasTBI(uint64_t ptr, PointerType type) { @@ -2179,6 +2179,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase { int64_t FPToInt64(double value, FPRounding rmode); uint32_t FPToUInt32(double value, FPRounding rmode); uint64_t FPToUInt64(double value, FPRounding rmode); + int32_t FPToFixedJS(double value); template <typename T> T FPAdd(T op1, T op2); diff --git a/chromium/v8/src/execution/arm64/simulator-logic-arm64.cc b/chromium/v8/src/execution/arm64/simulator-logic-arm64.cc index d855c8b7084..db39408a49e 100644 --- a/chromium/v8/src/execution/arm64/simulator-logic-arm64.cc +++ b/chromium/v8/src/execution/arm64/simulator-logic-arm64.cc @@ -3342,6 +3342,65 @@ LogicVRegister Simulator::frsqrts(VectorFormat vform, LogicVRegister dst, return dst; } +int32_t Simulator::FPToFixedJS(double value) { + // The Z-flag is set when the conversion from double precision floating-point + // to 32-bit integer is exact. If the source value is +/-Infinity, -0.0, NaN, + // outside the bounds of a 32-bit integer, or isn't an exact integer then the + // Z-flag is unset. + int Z = 1; + int32_t result; + if ((value == 0.0) || (value == kFP64PositiveInfinity) || + (value == kFP64NegativeInfinity)) { + // +/- zero and infinity all return zero, however -0 and +/- Infinity also + // unset the Z-flag. + result = 0.0; + if ((value != 0.0) || std::signbit(value)) { + Z = 0; + } + } else if (std::isnan(value)) { + // NaN values unset the Z-flag and set the result to 0. + result = 0; + Z = 0; + } else { + // All other values are converted to an integer representation, rounded + // toward zero. + double int_result = std::floor(value); + double error = value - int_result; + if ((error != 0.0) && (int_result < 0.0)) { + int_result++; + } + // Constrain the value into the range [INT32_MIN, INT32_MAX]. We can almost + // write a one-liner with std::round, but the behaviour on ties is incorrect + // for our purposes. + double mod_const = static_cast<double>(UINT64_C(1) << 32); + double mod_error = + (int_result / mod_const) - std::floor(int_result / mod_const); + double constrained; + if (mod_error == 0.5) { + constrained = INT32_MIN; + } else { + constrained = int_result - mod_const * round(int_result / mod_const); + } + DCHECK(std::floor(constrained) == constrained); + DCHECK(constrained >= INT32_MIN); + DCHECK(constrained <= INT32_MAX); + // Take the bottom 32 bits of the result as a 32-bit integer. + result = static_cast<int32_t>(constrained); + if ((int_result < INT32_MIN) || (int_result > INT32_MAX) || + (error != 0.0)) { + // If the integer result is out of range or the conversion isn't exact, + // take exception and unset the Z-flag. 
+ FPProcessException(); + Z = 0; + } + } + nzcv().SetN(0); + nzcv().SetZ(Z); + nzcv().SetC(0); + nzcv().SetV(0); + return result; +} + LogicVRegister Simulator::frsqrts(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2) { diff --git a/chromium/v8/src/execution/frames-inl.h b/chromium/v8/src/execution/frames-inl.h index ecd45abeb19..70db742a718 100644 --- a/chromium/v8/src/execution/frames-inl.h +++ b/chromium/v8/src/execution/frames-inl.h @@ -77,6 +77,10 @@ inline Address StackFrame::callee_pc() const { inline Address StackFrame::pc() const { return ReadPC(pc_address()); } +inline Address StackFrame::unauthenticated_pc() const { + return PointerAuthentication::StripPAC(*pc_address()); +} + inline Address StackFrame::ReadPC(Address* pc_address) { return PointerAuthentication::AuthenticatePC(pc_address, kSystemPointerSize); } diff --git a/chromium/v8/src/execution/frames.cc b/chromium/v8/src/execution/frames.cc index b6fc4cb7540..e714a514c8a 100644 --- a/chromium/v8/src/execution/frames.cc +++ b/chromium/v8/src/execution/frames.cc @@ -315,6 +315,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc, // return address into the interpreter entry trampoline, then we are likely // in a bytecode handler with elided frame. In that case, set the PC // properly and make sure we do not drop the frame. + bool is_no_frame_bytecode_handler = false; if (IsNoFrameBytecodeHandlerPc(isolate, pc, fp)) { Address* tos_location = nullptr; if (top_link_register_) { @@ -326,6 +327,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc, if (IsInterpreterFramePc(isolate, *tos_location, &state)) { state.pc_address = tos_location; + is_no_frame_bytecode_handler = true; advance_frame = false; } } @@ -338,12 +340,12 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc, StandardFrameConstants::kContextOffset); Address frame_marker = fp + StandardFrameConstants::kFunctionOffset; if (IsValidStackAddress(frame_marker)) { - type = StackFrame::ComputeType(this, &state); - top_frame_type_ = type; - // We only keep the top frame if we believe it to be interpreted frame. - if (type != StackFrame::INTERPRETED) { - advance_frame = true; + if (is_no_frame_bytecode_handler) { + type = StackFrame::INTERPRETED; + } else { + type = StackFrame::ComputeType(this, &state); } + top_frame_type_ = type; MSAN_MEMORY_IS_INITIALIZED( fp + CommonFrameConstants::kContextOrFrameTypeOffset, kSystemPointerSize); diff --git a/chromium/v8/src/execution/frames.h b/chromium/v8/src/execution/frames.h index cd0156a8877..8186ab8641b 100644 --- a/chromium/v8/src/execution/frames.h +++ b/chromium/v8/src/execution/frames.h @@ -29,7 +29,6 @@ class RootVisitor; class StackFrameIteratorBase; class StringStream; class ThreadLocalTop; -class WasmDebugInfo; class WasmInstanceObject; class WasmModuleObject; @@ -221,6 +220,11 @@ class StackFrame { inline Address pc() const; + // Skip authentication of the PC, when using CFI. Used in the profiler, where + // in certain corner-cases we do not use an address on the stack, which would + // be signed, as the PC of the frame. 
+ inline Address unauthenticated_pc() const; + Address constant_pool() const { return *constant_pool_address(); } void set_constant_pool(Address constant_pool) { *constant_pool_address() = constant_pool; diff --git a/chromium/v8/src/execution/futex-emulation.cc b/chromium/v8/src/execution/futex-emulation.cc index 3f815e24ca1..6804f473091 100644 --- a/chromium/v8/src/execution/futex-emulation.cc +++ b/chromium/v8/src/execution/futex-emulation.cc @@ -89,11 +89,11 @@ Object WaitJsTranslateReturn(Isolate* isolate, Object res) { int val = Smi::ToInt(res); switch (val) { case WaitReturnValue::kOk: - return ReadOnlyRoots(isolate).ok(); + return ReadOnlyRoots(isolate).ok_string(); case WaitReturnValue::kNotEqual: - return ReadOnlyRoots(isolate).not_equal(); + return ReadOnlyRoots(isolate).not_equal_string(); case WaitReturnValue::kTimedOut: - return ReadOnlyRoots(isolate).timed_out(); + return ReadOnlyRoots(isolate).timed_out_string(); default: UNREACHABLE(); } @@ -193,8 +193,9 @@ Object FutexEmulation::Wait(Isolate* isolate, do { // Not really a loop, just makes it easier to break out early. base::MutexGuard lock_guard(mutex_.Pointer()); - void* backing_store = array_buffer->backing_store(); - + std::shared_ptr<BackingStore> backing_store = + array_buffer->GetBackingStore(); + DCHECK(backing_store); FutexWaitListNode* node = isolate->futex_wait_list_node(); node->backing_store_ = backing_store; node->wait_addr_ = addr; @@ -204,7 +205,8 @@ Object FutexEmulation::Wait(Isolate* isolate, // still holding the lock). ResetWaitingOnScopeExit reset_waiting(node); - T* p = reinterpret_cast<T*>(static_cast<int8_t*>(backing_store) + addr); + T* p = reinterpret_cast<T*>( + static_cast<int8_t*>(backing_store->buffer_start()) + addr); if (*p != value) { result = handle(Smi::FromInt(WaitReturnValue::kNotEqual), isolate); callback_result = AtomicsWaitEvent::kNotEqual; @@ -308,13 +310,16 @@ Object FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr, DCHECK_LT(addr, array_buffer->byte_length()); int waiters_woken = 0; - void* backing_store = array_buffer->backing_store(); + std::shared_ptr<BackingStore> backing_store = array_buffer->GetBackingStore(); base::MutexGuard lock_guard(mutex_.Pointer()); FutexWaitListNode* node = wait_list_.Pointer()->head_; while (node && num_waiters_to_wake > 0) { - if (backing_store == node->backing_store_ && addr == node->wait_addr_ && - node->waiting_) { + std::shared_ptr<BackingStore> node_backing_store = + node->backing_store_.lock(); + DCHECK(node_backing_store); + if (backing_store.get() == node_backing_store.get() && + addr == node->wait_addr_ && node->waiting_) { node->waiting_ = false; node->cond_.NotifyOne(); if (num_waiters_to_wake != kWakeAll) { @@ -332,15 +337,18 @@ Object FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr, Object FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer, size_t addr) { DCHECK_LT(addr, array_buffer->byte_length()); - void* backing_store = array_buffer->backing_store(); + std::shared_ptr<BackingStore> backing_store = array_buffer->GetBackingStore(); base::MutexGuard lock_guard(mutex_.Pointer()); int waiters = 0; FutexWaitListNode* node = wait_list_.Pointer()->head_; while (node) { - if (backing_store == node->backing_store_ && addr == node->wait_addr_ && - node->waiting_) { + std::shared_ptr<BackingStore> node_backing_store = + node->backing_store_.lock(); + DCHECK(node_backing_store); + if (backing_store.get() == node_backing_store.get() && + addr == node->wait_addr_ && node->waiting_) { 
waiters++; } diff --git a/chromium/v8/src/execution/futex-emulation.h b/chromium/v8/src/execution/futex-emulation.h index 2d005bcfd19..03ad310fd21 100644 --- a/chromium/v8/src/execution/futex-emulation.h +++ b/chromium/v8/src/execution/futex-emulation.h @@ -30,6 +30,7 @@ class TimeDelta; namespace internal { +class BackingStore; template <typename T> class Handle; class Isolate; @@ -52,7 +53,6 @@ class FutexWaitListNode { FutexWaitListNode() : prev_(nullptr), next_(nullptr), - backing_store_(nullptr), wait_addr_(0), waiting_(false), interrupted_(false) {} @@ -68,7 +68,7 @@ class FutexWaitListNode { // prev_ and next_ are protected by FutexEmulation::mutex_. FutexWaitListNode* prev_; FutexWaitListNode* next_; - void* backing_store_; + std::weak_ptr<BackingStore> backing_store_; size_t wait_addr_; // waiting_ and interrupted_ are protected by FutexEmulation::mutex_ // if this node is currently contained in FutexEmulation::wait_list_ @@ -126,20 +126,25 @@ class FutexEmulation : public AllStatic { // Same as WaitJs above except it returns 0 (ok), 1 (not equal) and 2 (timed // out) as expected by Wasm. - static Object WaitWasm32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer, - size_t addr, int32_t value, int64_t rel_timeout_ns); + V8_EXPORT_PRIVATE static Object WaitWasm32(Isolate* isolate, + Handle<JSArrayBuffer> array_buffer, + size_t addr, int32_t value, + int64_t rel_timeout_ns); // Same as Wait32 above except it checks for an int64_t value in the // array_buffer. - static Object WaitWasm64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer, - size_t addr, int64_t value, int64_t rel_timeout_ns); + V8_EXPORT_PRIVATE static Object WaitWasm64(Isolate* isolate, + Handle<JSArrayBuffer> array_buffer, + size_t addr, int64_t value, + int64_t rel_timeout_ns); // Wake |num_waiters_to_wake| threads that are waiting on the given |addr|. // |num_waiters_to_wake| can be kWakeAll, in which case all waiters are // woken. The rest of the waiters will continue to wait. The return value is // the number of woken waiters. - static Object Wake(Handle<JSArrayBuffer> array_buffer, size_t addr, - uint32_t num_waiters_to_wake); + V8_EXPORT_PRIVATE static Object Wake(Handle<JSArrayBuffer> array_buffer, + size_t addr, + uint32_t num_waiters_to_wake); // Return the number of threads waiting on |addr|. Should only be used for // testing. 
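
The futex-emulation hunks above replace the raw `void* backing_store_` in FutexWaitListNode with a `std::weak_ptr<BackingStore>`, and Wake()/NumWaitersForTesting() now lock that weak pointer and compare it with the waking buffer's shared_ptr before signalling. A minimal standalone sketch of that pattern follows — it uses a hypothetical Buffer/Waiter pair rather than the V8 types, and omits the mutex and condition variable of the real code:

```cpp
// Sketch (not V8 code): waiters hold a weak_ptr so they do not keep the
// buffer alive, and the waker locks it to check liveness and identity.
#include <cstddef>
#include <memory>
#include <vector>

struct Buffer {};  // stands in for v8::internal::BackingStore (assumption)

struct Waiter {
  std::weak_ptr<Buffer> backing_store;  // does not extend the buffer's lifetime
  std::size_t wait_addr = 0;
  bool waiting = false;
};

// Returns how many waiters on (buffer, addr) would be woken.
int WakeMatchingWaiters(const std::shared_ptr<Buffer>& buffer,
                        std::size_t addr, std::vector<Waiter>& waiters) {
  int woken = 0;
  for (Waiter& w : waiters) {
    std::shared_ptr<Buffer> alive = w.backing_store.lock();
    // Identity is compared on the raw pointer, as in the patched Wake().
    if (alive && alive.get() == buffer.get() && w.wait_addr == addr &&
        w.waiting) {
      w.waiting = false;  // the real code also notifies a condition variable
      ++woken;
    }
  }
  return woken;
}
```

The weak_ptr lets a wait-list node outlive a detached ArrayBuffer backing store without dangling: a stale entry simply fails to lock and is skipped.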
diff --git a/chromium/v8/src/execution/isolate.cc b/chromium/v8/src/execution/isolate.cc index bea08a16b83..98b98d5bea7 100644 --- a/chromium/v8/src/execution/isolate.cc +++ b/chromium/v8/src/execution/isolate.cc @@ -32,6 +32,7 @@ #include "src/debug/debug-frames.h" #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" +#include "src/diagnostics/basic-block-profiler.h" #include "src/diagnostics/compilation-statistics.h" #include "src/execution/frames-inl.h" #include "src/execution/isolate-inl.h" @@ -588,21 +589,28 @@ class FrameArrayBuilder { offset, flags, parameters); } - void AppendPromiseAllFrame(Handle<Context> context, int offset) { + void AppendPromiseCombinatorFrame(Handle<JSFunction> element_function, + Handle<JSFunction> combinator, + FrameArray::Flag combinator_flag, + Handle<Context> context) { if (full()) return; - int flags = FrameArray::kIsAsync | FrameArray::kIsPromiseAll; + int flags = FrameArray::kIsAsync | combinator_flag; Handle<Context> native_context(context->native_context(), isolate_); - Handle<JSFunction> function(native_context->promise_all(), isolate_); - if (!IsVisibleInStackTrace(function)) return; + if (!IsVisibleInStackTrace(combinator)) return; Handle<Object> receiver(native_context->promise_function(), isolate_); - Handle<AbstractCode> code(AbstractCode::cast(function->code()), isolate_); + Handle<AbstractCode> code(AbstractCode::cast(combinator->code()), isolate_); - // TODO(mmarchini) save Promises list from Promise.all() + // TODO(mmarchini) save Promises list from the Promise combinator Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array(); - elements_ = FrameArray::AppendJSFrame(elements_, receiver, function, code, + // We store the offset of the promise into the element function's + // hash field for element callbacks. + int const offset = + Smi::ToInt(Smi::cast(element_function->GetIdentityHash())) - 1; + + elements_ = FrameArray::AppendJSFrame(elements_, receiver, combinator, code, offset, flags, parameters); } @@ -861,11 +869,10 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise, Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()), isolate); Handle<Context> context(function->context(), isolate); - - // We store the offset of the promise into the {function}'s - // hash field for promise resolve element callbacks. 
- int const offset = Smi::ToInt(Smi::cast(function->GetIdentityHash())) - 1; - builder->AppendPromiseAllFrame(context, offset); + Handle<JSFunction> combinator(context->native_context().promise_all(), + isolate); + builder->AppendPromiseCombinatorFrame(function, combinator, + FrameArray::kIsPromiseAll, context); // Now peak into the Promise.all() resolve element context to // find the promise capability that's being resolved when all @@ -876,6 +883,24 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise, PromiseCapability::cast(context->get(index)), isolate); if (!capability->promise().IsJSPromise()) return; promise = handle(JSPromise::cast(capability->promise()), isolate); + } else if (IsBuiltinFunction(isolate, reaction->reject_handler(), + Builtins::kPromiseAnyRejectElementClosure)) { + Handle<JSFunction> function(JSFunction::cast(reaction->reject_handler()), + isolate); + Handle<Context> context(function->context(), isolate); + Handle<JSFunction> combinator(context->native_context().promise_any(), + isolate); + builder->AppendPromiseCombinatorFrame(function, combinator, + FrameArray::kIsPromiseAny, context); + + // Now peak into the Promise.any() reject element context to + // find the promise capability that's being resolved when any of + // the concurrent promises resolve. + int const index = PromiseBuiltins::kPromiseAnyRejectElementCapabilitySlot; + Handle<PromiseCapability> capability( + PromiseCapability::cast(context->get(index)), isolate); + if (!capability->promise().IsJSPromise()) return; + promise = handle(JSPromise::cast(capability->promise()), isolate); } else if (IsBuiltinFunction(isolate, reaction->fulfill_handler(), Builtins::kPromiseCapabilityDefaultResolve)) { Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()), @@ -2491,6 +2516,10 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions( stack_trace_for_uncaught_exceptions_options_ = options; } +bool Isolate::get_capture_stack_trace_for_uncaught_exceptions() const { + return capture_stack_trace_for_uncaught_exceptions_; +} + void Isolate::SetAbortOnUncaughtExceptionCallback( v8::Isolate::AbortOnUncaughtExceptionCallback callback) { abort_on_uncaught_exception_callback_ = callback; @@ -2632,77 +2661,110 @@ void Isolate::ThreadDataTable::RemoveAllThreads() { table_.clear(); } -class VerboseAccountingAllocator : public AccountingAllocator { +class TracingAccountingAllocator : public AccountingAllocator { public: - VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes) - : heap_(heap), allocation_sample_bytes_(allocation_sample_bytes) {} + explicit TracingAccountingAllocator(Isolate* isolate) : isolate_(isolate) {} - v8::internal::Segment* AllocateSegment(size_t size) override { - v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size); - if (!memory) return nullptr; - size_t malloced_current = GetCurrentMemoryUsage(); + protected: + void TraceAllocateSegmentImpl(v8::internal::Segment* segment) override { + base::MutexGuard lock(&mutex_); + UpdateMemoryTrafficAndReportMemoryUsage(segment->total_size()); + } - if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current) { - PrintMemoryJSON(malloced_current); - last_memory_usage_ = malloced_current; - } - return memory; + void TraceZoneCreationImpl(const Zone* zone) override { + base::MutexGuard lock(&mutex_); + active_zones_.insert(zone); + nesting_depth_++; } - void ReturnSegment(v8::internal::Segment* memory) override { - AccountingAllocator::ReturnSegment(memory); - size_t 
malloced_current = GetCurrentMemoryUsage(); + void TraceZoneDestructionImpl(const Zone* zone) override { + base::MutexGuard lock(&mutex_); + UpdateMemoryTrafficAndReportMemoryUsage(zone->segment_bytes_allocated()); + active_zones_.erase(zone); + nesting_depth_--; + } - if (malloced_current + allocation_sample_bytes_ < last_memory_usage_) { - PrintMemoryJSON(malloced_current); - last_memory_usage_ = malloced_current; + private: + void UpdateMemoryTrafficAndReportMemoryUsage(size_t memory_traffic_delta) { + memory_traffic_since_last_report_ += memory_traffic_delta; + if (memory_traffic_since_last_report_ < FLAG_zone_stats_tolerance) return; + memory_traffic_since_last_report_ = 0; + + Dump(buffer_, true); + + { + std::string trace_str = buffer_.str(); + + if (FLAG_trace_zone_stats) { + PrintF( + "{" + "\"type\": \"v8-zone-trace\", " + "\"stats\": %s" + "}\n", + trace_str.c_str()); + } + if (V8_UNLIKELY( + TracingFlags::zone_stats.load(std::memory_order_relaxed) & + v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) { + TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.zone_stats"), + "V8.Zone_Stats", TRACE_EVENT_SCOPE_THREAD, "stats", + TRACE_STR_COPY(trace_str.c_str())); + } } - } - void ZoneCreation(const Zone* zone) override { - PrintZoneModificationSample(zone, "zonecreation"); - nesting_deepth_++; + // Clear the buffer. + buffer_.str(std::string()); } - void ZoneDestruction(const Zone* zone) override { - nesting_deepth_--; - PrintZoneModificationSample(zone, "zonedestruction"); + void Dump(std::ostringstream& out, bool dump_details) { + // Note: Neither isolate nor zones are locked, so be careful with accesses + // as the allocator is potentially used on a concurrent thread. + double time = isolate_->time_millis_since_init(); + out << "{" + << "\"isolate\": \"" << reinterpret_cast<void*>(isolate_) << "\", " + << "\"time\": " << time << ", "; + size_t total_segment_bytes_allocated = 0; + size_t total_zone_allocation_size = 0; + + if (dump_details) { + // Print detailed zone stats if memory usage changes direction. + out << "\"zones\": ["; + bool first = true; + for (const Zone* zone : active_zones_) { + size_t zone_segment_bytes_allocated = zone->segment_bytes_allocated(); + size_t zone_allocation_size = zone->allocation_size_for_tracing(); + if (first) { + first = false; + } else { + out << ", "; + } + out << "{" + << "\"name\": \"" << zone->name() << "\", " + << "\"allocated\": " << zone_segment_bytes_allocated << ", " + << "\"used\": " << zone_allocation_size << "}"; + total_segment_bytes_allocated += zone_segment_bytes_allocated; + total_zone_allocation_size += zone_allocation_size; + } + out << "], "; + } else { + // Just calculate total allocated/used memory values. 
+ for (const Zone* zone : active_zones_) { + total_segment_bytes_allocated += zone->segment_bytes_allocated(); + total_zone_allocation_size += zone->allocation_size_for_tracing(); + } + } + out << "\"allocated\": " << total_segment_bytes_allocated << ", " + << "\"used\": " << total_zone_allocation_size << "}"; } - private: - void PrintZoneModificationSample(const Zone* zone, const char* type) { - PrintF( - "{" - "\"type\": \"%s\", " - "\"isolate\": \"%p\", " - "\"time\": %f, " - "\"ptr\": \"%p\", " - "\"name\": \"%s\", " - "\"size\": %zu," - "\"nesting\": %zu}\n", - type, reinterpret_cast<void*>(heap_->isolate()), - heap_->isolate()->time_millis_since_init(), - reinterpret_cast<const void*>(zone), zone->name(), - zone->allocation_size(), nesting_deepth_.load()); - } - - void PrintMemoryJSON(size_t malloced) { - // Note: Neither isolate, nor heap is locked, so be careful with accesses - // as the allocator is potentially used on a concurrent thread. - double time = heap_->isolate()->time_millis_since_init(); - PrintF( - "{" - "\"type\": \"zone\", " - "\"isolate\": \"%p\", " - "\"time\": %f, " - "\"allocated\": %zu}\n", - reinterpret_cast<void*>(heap_->isolate()), time, malloced); - } - - Heap* heap_; - std::atomic<size_t> last_memory_usage_{0}; - std::atomic<size_t> nesting_deepth_{0}; - size_t allocation_sample_bytes_; + Isolate* const isolate_; + std::atomic<size_t> nesting_depth_{0}; + + base::Mutex mutex_; + std::unordered_set<const Zone*> active_zones_; + std::ostringstream buffer_; + // This value is increased on both allocations and deallocations. + size_t memory_traffic_since_last_report_ = 0; }; #ifdef DEBUG @@ -2781,9 +2843,7 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator) : isolate_data_(this), isolate_allocator_(std::move(isolate_allocator)), id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)), - allocator_(FLAG_trace_zone_stats - ? 
new VerboseAccountingAllocator(&heap_, 256 * KB) - : new AccountingAllocator()), + allocator_(new TracingAccountingAllocator(this)), builtins_(this), rail_mode_(PERFORMANCE_ANIMATION), code_event_dispatcher_(new CodeEventDispatcher()), @@ -3235,15 +3295,15 @@ void Isolate::AddCrashKeysForIsolateAndHeapPointers() { AddressToString(isolate_address)); const uintptr_t ro_space_firstpage_address = - reinterpret_cast<uintptr_t>(heap()->read_only_space()->first_page()); + heap()->read_only_space()->FirstPageAddress(); add_crash_key_callback_(v8::CrashKeyId::kReadonlySpaceFirstPageAddress, AddressToString(ro_space_firstpage_address)); const uintptr_t map_space_firstpage_address = - reinterpret_cast<uintptr_t>(heap()->map_space()->first_page()); + heap()->map_space()->FirstPageAddress(); add_crash_key_callback_(v8::CrashKeyId::kMapSpaceFirstPageAddress, AddressToString(map_space_firstpage_address)); const uintptr_t code_space_firstpage_address = - reinterpret_cast<uintptr_t>(heap()->code_space()->first_page()); + heap()->code_space()->FirstPageAddress(); add_crash_key_callback_(v8::CrashKeyId::kCodeSpaceFirstPageAddress, AddressToString(code_space_firstpage_address)); } @@ -3616,6 +3676,11 @@ void Isolate::DumpAndResetStats() { counters()->runtime_call_stats()->Print(); counters()->runtime_call_stats()->Reset(); } + if (BasicBlockProfiler::Get()->HasData(this)) { + StdoutStream out; + BasicBlockProfiler::Get()->Print(out, this); + BasicBlockProfiler::Get()->ResetCounts(this); + } } void Isolate::AbortConcurrentOptimization(BlockingBehavior behavior) { @@ -4081,54 +4146,57 @@ void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise, void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type, Handle<JSPromise> promise) { if (!async_event_delegate_) return; - if (type == PromiseHookType::kResolve) return; - - if (type == PromiseHookType::kBefore) { - if (!promise->async_task_id()) return; - async_event_delegate_->AsyncEventOccurred(debug::kDebugWillHandle, - promise->async_task_id(), false); - } else if (type == PromiseHookType::kAfter) { - if (!promise->async_task_id()) return; - async_event_delegate_->AsyncEventOccurred(debug::kDebugDidHandle, - promise->async_task_id(), false); - } else { - DCHECK(type == PromiseHookType::kInit); - debug::DebugAsyncActionType type = debug::kDebugPromiseThen; - bool last_frame_was_promise_builtin = false; - JavaScriptFrameIterator it(this); - while (!it.done()) { - std::vector<Handle<SharedFunctionInfo>> infos; - it.frame()->GetFunctions(&infos); - for (size_t i = 1; i <= infos.size(); ++i) { - Handle<SharedFunctionInfo> info = infos[infos.size() - i]; - if (info->IsUserJavaScript()) { - // We should not report PromiseThen and PromiseCatch which is called - // indirectly, e.g. Promise.all calls Promise.then internally. 
- if (last_frame_was_promise_builtin) { - if (!promise->async_task_id()) { - promise->set_async_task_id(++async_task_count_); + switch (type) { + case PromiseHookType::kResolve: + return; + case PromiseHookType::kBefore: + if (!promise->async_task_id()) return; + async_event_delegate_->AsyncEventOccurred( + debug::kDebugWillHandle, promise->async_task_id(), false); + break; + case PromiseHookType::kAfter: + if (!promise->async_task_id()) return; + async_event_delegate_->AsyncEventOccurred( + debug::kDebugDidHandle, promise->async_task_id(), false); + break; + case PromiseHookType::kInit: + debug::DebugAsyncActionType type = debug::kDebugPromiseThen; + bool last_frame_was_promise_builtin = false; + JavaScriptFrameIterator it(this); + while (!it.done()) { + std::vector<Handle<SharedFunctionInfo>> infos; + it.frame()->GetFunctions(&infos); + for (size_t i = 1; i <= infos.size(); ++i) { + Handle<SharedFunctionInfo> info = infos[infos.size() - i]; + if (info->IsUserJavaScript()) { + // We should not report PromiseThen and PromiseCatch which is called + // indirectly, e.g. Promise.all calls Promise.then internally. + if (last_frame_was_promise_builtin) { + if (!promise->async_task_id()) { + promise->set_async_task_id(++async_task_count_); + } + async_event_delegate_->AsyncEventOccurred( + type, promise->async_task_id(), debug()->IsBlackboxed(info)); } - async_event_delegate_->AsyncEventOccurred( - type, promise->async_task_id(), debug()->IsBlackboxed(info)); + return; } - return; - } - last_frame_was_promise_builtin = false; - if (info->HasBuiltinId()) { - if (info->builtin_id() == Builtins::kPromisePrototypeThen) { - type = debug::kDebugPromiseThen; - last_frame_was_promise_builtin = true; - } else if (info->builtin_id() == Builtins::kPromisePrototypeCatch) { - type = debug::kDebugPromiseCatch; - last_frame_was_promise_builtin = true; - } else if (info->builtin_id() == Builtins::kPromisePrototypeFinally) { - type = debug::kDebugPromiseFinally; - last_frame_was_promise_builtin = true; + last_frame_was_promise_builtin = false; + if (info->HasBuiltinId()) { + if (info->builtin_id() == Builtins::kPromisePrototypeThen) { + type = debug::kDebugPromiseThen; + last_frame_was_promise_builtin = true; + } else if (info->builtin_id() == Builtins::kPromisePrototypeCatch) { + type = debug::kDebugPromiseCatch; + last_frame_was_promise_builtin = true; + } else if (info->builtin_id() == + Builtins::kPromisePrototypeFinally) { + type = debug::kDebugPromiseFinally; + last_frame_was_promise_builtin = true; + } } } + it.Advance(); } - it.Advance(); - } } } @@ -4180,6 +4248,13 @@ void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) { int Isolate::GetNextScriptId() { return heap()->NextScriptId(); } +int Isolate::GetNextStackFrameInfoId() { + int id = last_stack_frame_info_id(); + int next_id = id == Smi::kMaxValue ? 0 : (id + 1); + set_last_stack_frame_info_id(next_id); + return next_id; +} + // static std::string Isolate::GetTurboCfgFileName(Isolate* isolate) { if (FLAG_trace_turbo_cfg_file == nullptr) { diff --git a/chromium/v8/src/execution/isolate.h b/chromium/v8/src/execution/isolate.h index de00d862a3b..bc13f53edaf 100644 --- a/chromium/v8/src/execution/isolate.h +++ b/chromium/v8/src/execution/isolate.h @@ -464,6 +464,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory { public: using HandleScopeType = HandleScope; + void* operator new(size_t) = delete; + void operator delete(void*) = delete; // A thread has a PerIsolateThreadData instance for each isolate that it has // entered. 
That instance is allocated when the isolate is initially entered @@ -731,6 +733,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory { void SetCaptureStackTraceForUncaughtExceptions( bool capture, int frame_limit, StackTrace::StackTraceOptions options); + bool get_capture_stack_trace_for_uncaught_exceptions() const; void SetAbortOnUncaughtExceptionCallback( v8::Isolate::AbortOnUncaughtExceptionCallback callback); @@ -1291,6 +1294,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory { int GetNextScriptId(); + int GetNextStackFrameInfoId(); + #if V8_SFI_HAS_UNIQUE_ID int GetNextUniqueSharedFunctionInfoId() { int current_id = next_unique_sfi_id_.load(std::memory_order_relaxed); @@ -1860,8 +1865,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory { // Delete new/delete operators to ensure that Isolate::New() and // Isolate::Delete() are used for Isolate creation and deletion. void* operator new(size_t, void* ptr) { return ptr; } - void* operator new(size_t) = delete; - void operator delete(void*) = delete; friend class heap::HeapTester; friend class TestSerializer; diff --git a/chromium/v8/src/execution/local-isolate-wrapper-inl.h b/chromium/v8/src/execution/local-isolate-wrapper-inl.h new file mode 100644 index 00000000000..2f573130491 --- /dev/null +++ b/chromium/v8/src/execution/local-isolate-wrapper-inl.h @@ -0,0 +1,148 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_INL_H_ +#define V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_INL_H_ + +#include "src/execution/isolate.h" +#include "src/execution/local-isolate-wrapper.h" +#include "src/execution/off-thread-isolate.h" +#include "src/heap/heap-inl.h" +#include "src/heap/off-thread-heap.h" +#include "src/logging/log.h" +#include "src/logging/off-thread-logger.h" + +namespace v8 { +namespace internal { + +class HeapMethodCaller { + public: + explicit HeapMethodCaller(LocalHeapWrapper* heap) : heap_(heap) {} + + ReadOnlySpace* read_only_space() { + return heap_->is_off_thread() ? heap_->off_thread()->read_only_space() + : heap_->main_thread()->read_only_space(); + } + + void OnAllocationEvent(HeapObject obj, int size) { + return heap_->is_off_thread() + ? heap_->off_thread()->OnAllocationEvent(obj, size) + : heap_->main_thread()->OnAllocationEvent(obj, size); + } + + bool Contains(HeapObject obj) { + return heap_->is_off_thread() ? heap_->off_thread()->Contains(obj) + : heap_->main_thread()->Contains(obj); + } + + private: + LocalHeapWrapper* heap_; +}; + +class LoggerMethodCaller { + public: + explicit LoggerMethodCaller(LocalLoggerWrapper* logger) : logger_(logger) {} + + bool is_logging() const { + return logger_->is_off_thread() ? logger_->off_thread()->is_logging() + : logger_->main_thread()->is_logging(); + } + + void ScriptEvent(Logger::ScriptEventType type, int script_id) { + return logger_->is_off_thread() + ? logger_->off_thread()->ScriptEvent(type, script_id) + : logger_->main_thread()->ScriptEvent(type, script_id); + } + void ScriptDetails(Script script) { + return logger_->is_off_thread() + ? 
logger_->off_thread()->ScriptDetails(script) + : logger_->main_thread()->ScriptDetails(script); + } + + private: + LocalLoggerWrapper* logger_; +}; + +class IsolateMethodCaller { + public: + explicit IsolateMethodCaller(LocalIsolateWrapper* isolate) + : isolate_(isolate) {} + + LocalLoggerWrapper logger() { + return isolate_->is_off_thread() + ? LocalLoggerWrapper(isolate_->off_thread()->logger()) + : LocalLoggerWrapper(isolate_->main_thread()->logger()); + } + + LocalHeapWrapper heap() { + return isolate_->is_off_thread() + ? LocalHeapWrapper(isolate_->off_thread()->heap()) + : LocalHeapWrapper(isolate_->main_thread()->heap()); + } + + ReadOnlyHeap* read_only_heap() { + return isolate_->is_off_thread() + ? isolate_->off_thread()->read_only_heap() + : isolate_->main_thread()->read_only_heap(); + } + + Object root(RootIndex index) { + return isolate_->is_off_thread() ? isolate_->off_thread()->root(index) + : isolate_->main_thread()->root(index); + } + + int GetNextScriptId() { + return isolate_->is_off_thread() + ? isolate_->off_thread()->GetNextScriptId() + : isolate_->main_thread()->GetNextScriptId(); + } + + private: + LocalIsolateWrapper* isolate_; +}; + +// Helper wrapper for HandleScope behaviour with a LocalIsolateWrapper. +class LocalHandleScopeWrapper { + public: + explicit LocalHandleScopeWrapper(LocalIsolateWrapper local_isolate) + : is_off_thread_(local_isolate.is_off_thread()) { + if (is_off_thread_) { + new (off_thread()) OffThreadHandleScope(local_isolate.off_thread()); + } else { + new (main_thread()) HandleScope(local_isolate.main_thread()); + } + } + ~LocalHandleScopeWrapper() { + if (is_off_thread_) { + off_thread()->~OffThreadHandleScope(); + } else { + main_thread()->~HandleScope(); + } + } + + template <typename T> + Handle<T> CloseAndEscape(Handle<T> handle) { + if (is_off_thread_) { + return off_thread()->CloseAndEscape(handle); + } else { + return main_thread()->CloseAndEscape(handle); + } + } + + private: + HandleScope* main_thread() { + return reinterpret_cast<HandleScope*>(&scope_storage_); + } + OffThreadHandleScope* off_thread() { + return reinterpret_cast<OffThreadHandleScope*>(&scope_storage_); + } + + std::aligned_union_t<0, HandleScope, OffThreadHandleScope> scope_storage_; + bool is_off_thread_; +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_INL_H_ diff --git a/chromium/v8/src/execution/local-isolate-wrapper.h b/chromium/v8/src/execution/local-isolate-wrapper.h new file mode 100644 index 00000000000..8dbf0c23919 --- /dev/null +++ b/chromium/v8/src/execution/local-isolate-wrapper.h @@ -0,0 +1,85 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_H_ +#define V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_H_ + +#include "src/utils/pointer-with-payload.h" + +namespace v8 { +namespace internal { + +// LocalWrapperBase is the base-class for wrapper classes around a main-thread +// and off-thread type, e.g. Isolate and OffThreadIsolate, and a bit stating +// which of the two the wrapper wraps. +// +// The shared methods are defined on MethodCaller, which will dispatch to the +// right type depending on the state of the wrapper. The reason for a separate +// MethodCaller is to +// +// a) Move the method definitions into an -inl.h so that this header can have +// minimal dependencies, and +// b) To allow the type methods to be called with operator-> (e.g. 
+// isolate_wrapper->heap()), while forcing the wrapper methods to be called +// with a dot (e.g. isolate_wrapper.is_main_thread()). +template <typename MainThreadType, typename OffThreadType, + typename MethodCaller> +class LocalWrapperBase { + public: + // Helper for returning a MethodCaller* by value from operator->. + class MethodCallerRef { + public: + MethodCaller* operator->() { return &caller_; } + + private: + friend class LocalWrapperBase; + explicit MethodCallerRef(LocalWrapperBase* wrapper) : caller_(wrapper) {} + + MethodCaller caller_; + }; + + explicit LocalWrapperBase(std::nullptr_t) : pointer_and_tag_(nullptr) {} + explicit LocalWrapperBase(MainThreadType* pointer) + : pointer_and_tag_(pointer, false) {} + explicit LocalWrapperBase(OffThreadType* pointer) + : pointer_and_tag_(pointer, true) {} + + MainThreadType* main_thread() { + DCHECK(is_main_thread()); + return static_cast<MainThreadType*>( + pointer_and_tag_.GetPointerWithKnownPayload(false)); + } + OffThreadType* off_thread() { + DCHECK(is_off_thread()); + return static_cast<OffThreadType*>( + pointer_and_tag_.GetPointerWithKnownPayload(true)); + } + + bool is_main_thread() const { + return !is_null() && !pointer_and_tag_.GetPayload(); + } + bool is_off_thread() const { + return !is_null() && pointer_and_tag_.GetPayload(); + } + bool is_null() const { return pointer_and_tag_.GetPointer() == nullptr; } + + // Access the methods via wrapper->Method. + MethodCallerRef operator->() { return MethodCallerRef(this); } + + private: + PointerWithPayload<void, bool, 1> pointer_and_tag_; +}; + +using LocalHeapWrapper = + LocalWrapperBase<class Heap, class OffThreadHeap, class HeapMethodCaller>; +using LocalLoggerWrapper = LocalWrapperBase<class Logger, class OffThreadLogger, + class LoggerMethodCaller>; +using LocalIsolateWrapper = + LocalWrapperBase<class Isolate, class OffThreadIsolate, + class IsolateMethodCaller>; + +} // namespace internal +} // namespace v8 + +#endif // V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_H_ diff --git a/chromium/v8/src/execution/messages.cc b/chromium/v8/src/execution/messages.cc index 33a2fa99ba6..86e3d48882d 100644 --- a/chromium/v8/src/execution/messages.cc +++ b/chromium/v8/src/execution/messages.cc @@ -311,18 +311,18 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) { } // namespace -Handle<Object> StackFrameBase::GetEvalOrigin() { +Handle<PrimitiveHeapObject> StackFrameBase::GetEvalOrigin() { if (!HasScript() || !IsEval()) return isolate_->factory()->undefined_value(); return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked(); } -Handle<Object> StackFrameBase::GetWasmModuleName() { +Handle<PrimitiveHeapObject> StackFrameBase::GetWasmModuleName() { return isolate_->factory()->undefined_value(); } int StackFrameBase::GetWasmFunctionIndex() { return StackFrameBase::kNone; } -Handle<Object> StackFrameBase::GetWasmInstance() { +Handle<HeapObject> StackFrameBase::GetWasmInstance() { return isolate_->factory()->undefined_value(); } @@ -351,6 +351,7 @@ void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array, is_strict_ = (flags & FrameArray::kIsStrict) != 0; is_async_ = (flags & FrameArray::kIsAsync) != 0; is_promise_all_ = (flags & FrameArray::kIsPromiseAll) != 0; + is_promise_any_ = (flags & FrameArray::kIsPromiseAny) != 0; } JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver, @@ -375,7 +376,7 @@ Handle<Object> JSStackFrame::GetFileName() { return handle(GetScript()->name(), isolate_); } -Handle<Object> 
JSStackFrame::GetFunctionName() { +Handle<PrimitiveHeapObject> JSStackFrame::GetFunctionName() { Handle<String> result = JSFunction::GetDebugName(function_); if (result->length() != 0) return result; @@ -418,7 +419,7 @@ Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() { return ScriptNameOrSourceUrl(GetScript(), isolate_); } -Handle<Object> JSStackFrame::GetMethodName() { +Handle<PrimitiveHeapObject> JSStackFrame::GetMethodName() { if (receiver_->IsNullOrUndefined(isolate_)) { return isolate_->factory()->null_value(); } @@ -452,7 +453,7 @@ Handle<Object> JSStackFrame::GetMethodName() { } HandleScope outer_scope(isolate_); - Handle<Object> result; + Handle<PrimitiveHeapObject> result; for (PrototypeIterator iter(isolate_, receiver, kStartAtReceiver); !iter.IsAtEnd(); iter.Advance()) { Handle<Object> current = PrototypeIterator::GetCurrent(iter); @@ -478,7 +479,7 @@ Handle<Object> JSStackFrame::GetMethodName() { return isolate_->factory()->null_value(); } -Handle<Object> JSStackFrame::GetTypeName() { +Handle<PrimitiveHeapObject> JSStackFrame::GetTypeName() { // TODO(jgruber): Check for strict/constructor here as in // CallSitePrototypeGetThis. @@ -514,7 +515,7 @@ int JSStackFrame::GetColumnNumber() { } int JSStackFrame::GetPromiseIndex() const { - return is_promise_all_ ? offset_ : kNone; + return (is_promise_all_ || is_promise_any_) ? offset_ : kNone; } bool JSStackFrame::IsNative() { @@ -564,8 +565,8 @@ Handle<Object> WasmStackFrame::GetFunction() const { return handle(Smi::FromInt(wasm_func_index_), isolate_); } -Handle<Object> WasmStackFrame::GetFunctionName() { - Handle<Object> name; +Handle<PrimitiveHeapObject> WasmStackFrame::GetFunctionName() { + Handle<PrimitiveHeapObject> name; Handle<WasmModuleObject> module_object(wasm_instance_->module_object(), isolate_); if (!WasmModuleObject::GetFunctionNameOrNull(isolate_, module_object, @@ -582,8 +583,8 @@ Handle<Object> WasmStackFrame::GetScriptNameOrSourceUrl() { return ScriptNameOrSourceUrl(script, isolate_); } -Handle<Object> WasmStackFrame::GetWasmModuleName() { - Handle<Object> module_name; +Handle<PrimitiveHeapObject> WasmStackFrame::GetWasmModuleName() { + Handle<PrimitiveHeapObject> module_name; Handle<WasmModuleObject> module_object(wasm_instance_->module_object(), isolate_); if (!WasmModuleObject::GetModuleNameOrNull(isolate_, module_object) @@ -593,7 +594,7 @@ Handle<Object> WasmStackFrame::GetWasmModuleName() { return module_name; } -Handle<Object> WasmStackFrame::GetWasmInstance() { return wasm_instance_; } +Handle<HeapObject> WasmStackFrame::GetWasmInstance() { return wasm_instance_; } int WasmStackFrame::GetPosition() const { return IsInterpreted() ? 
offset_ : code_->GetSourcePositionBefore(offset_); @@ -607,7 +608,9 @@ int WasmStackFrame::GetModuleOffset() const { return function_offset + GetPosition(); } -Handle<Object> WasmStackFrame::Null() const { +Handle<Object> WasmStackFrame::GetFileName() { return Null(); } + +Handle<PrimitiveHeapObject> WasmStackFrame::Null() const { return isolate_->factory()->null_value(); } @@ -1258,14 +1261,13 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object, isolate, *location->shared()); UnoptimizedCompileState compile_state(isolate); ParseInfo info(isolate, flags, &compile_state); - if (parsing::ParseAny(&info, location->shared(), isolate)) { + if (parsing::ParseAny(&info, location->shared(), isolate, + parsing::ReportStatisticsMode::kNo)) { info.ast_value_factory()->Internalize(isolate); CallPrinter printer(isolate, location->shared()->IsUserJavaScript()); Handle<String> str = printer.Print(info.literal(), location->start_pos()); *hint = printer.GetErrorHint(); if (str->length() > 0) return str; - } else { - isolate->clear_pending_exception(); } } return BuildDefaultCallSite(isolate, object); @@ -1319,7 +1321,8 @@ Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id, isolate, *location.shared()); UnoptimizedCompileState compile_state(isolate); ParseInfo info(isolate, flags, &compile_state); - if (parsing::ParseAny(&info, location.shared(), isolate)) { + if (parsing::ParseAny(&info, location.shared(), isolate, + parsing::ReportStatisticsMode::kNo)) { info.ast_value_factory()->Internalize(isolate); CallPrinter printer(isolate, location.shared()->IsUserJavaScript(), CallPrinter::SpreadErrorInArgsHint::kErrorInArgs); @@ -1334,7 +1337,6 @@ Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id, MessageLocation(location.script(), pos, pos + 1, location.shared()); } } else { - isolate->clear_pending_exception(); callsite = BuildDefaultCallSite(isolate, object); } } @@ -1396,7 +1398,8 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate, isolate, *location.shared()); UnoptimizedCompileState compile_state(isolate); ParseInfo info(isolate, flags, &compile_state); - if (parsing::ParseAny(&info, location.shared(), isolate)) { + if (parsing::ParseAny(&info, location.shared(), isolate, + parsing::ReportStatisticsMode::kNo)) { info.ast_value_factory()->Internalize(isolate); CallPrinter printer(isolate, location.shared()->IsUserJavaScript()); Handle<String> str = printer.Print(info.literal(), location.start_pos()); @@ -1431,8 +1434,6 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate, } if (str->length() > 0) callsite = str; - } else { - isolate->clear_pending_exception(); } } diff --git a/chromium/v8/src/execution/messages.h b/chromium/v8/src/execution/messages.h index 963796c7fe6..1fe79031766 100644 --- a/chromium/v8/src/execution/messages.h +++ b/chromium/v8/src/execution/messages.h @@ -27,6 +27,7 @@ class AbstractCode; class FrameArray; class JSMessageObject; class LookupIterator; +class PrimitiveHeapObject; class SharedFunctionInfo; class SourceInfo; class WasmInstanceObject; @@ -67,13 +68,13 @@ class StackFrameBase { virtual Handle<Object> GetFunction() const = 0; virtual Handle<Object> GetFileName() = 0; - virtual Handle<Object> GetFunctionName() = 0; + virtual Handle<PrimitiveHeapObject> GetFunctionName() = 0; virtual Handle<Object> GetScriptNameOrSourceUrl() = 0; - virtual Handle<Object> GetMethodName() = 0; - virtual Handle<Object> GetTypeName() = 0; - virtual Handle<Object> GetEvalOrigin(); - virtual 
-  virtual Handle<Object> GetWasmModuleName();
-  virtual Handle<Object> GetWasmInstance();
+  virtual Handle<PrimitiveHeapObject> GetMethodName() = 0;
+  virtual Handle<PrimitiveHeapObject> GetTypeName() = 0;
+  virtual Handle<PrimitiveHeapObject> GetEvalOrigin();
+  virtual Handle<PrimitiveHeapObject> GetWasmModuleName();
+  virtual Handle<HeapObject> GetWasmInstance();
 
   // Returns the script ID if one is attached, -1 otherwise.
   int GetScriptId() const;
@@ -86,7 +87,8 @@ class StackFrameBase {
   // Return 0-based Wasm function index. Returns -1 for non-Wasm frames.
   virtual int GetWasmFunctionIndex();
 
-  // Returns index for Promise.all() async frames, or -1 for other frames.
+  // Returns the index of the rejected promise in the Promise combinator input,
+  // or -1 if this frame is not a Promise combinator frame.
   virtual int GetPromiseIndex() const = 0;
 
   virtual bool IsNative() = 0;
@@ -94,6 +96,7 @@ class StackFrameBase {
   virtual bool IsEval();
   virtual bool IsAsync() const = 0;
   virtual bool IsPromiseAll() const = 0;
+  virtual bool IsPromiseAny() const = 0;
   virtual bool IsConstructor() = 0;
   virtual bool IsStrict() const = 0;
 
@@ -121,10 +124,10 @@ class JSStackFrame : public StackFrameBase {
   Handle<Object> GetFunction() const override;
 
   Handle<Object> GetFileName() override;
-  Handle<Object> GetFunctionName() override;
+  Handle<PrimitiveHeapObject> GetFunctionName() override;
   Handle<Object> GetScriptNameOrSourceUrl() override;
-  Handle<Object> GetMethodName() override;
-  Handle<Object> GetTypeName() override;
+  Handle<PrimitiveHeapObject> GetMethodName() override;
+  Handle<PrimitiveHeapObject> GetTypeName() override;
 
   int GetPosition() const override;
   int GetLineNumber() override;
@@ -136,6 +139,7 @@ class JSStackFrame : public StackFrameBase {
   bool IsToplevel() override;
   bool IsAsync() const override { return is_async_; }
   bool IsPromiseAll() const override { return is_promise_all_; }
+  bool IsPromiseAny() const override { return is_promise_any_; }
   bool IsConstructor() override { return is_constructor_; }
   bool IsStrict() const override { return is_strict_; }
 
@@ -155,6 +159,7 @@ class JSStackFrame : public StackFrameBase {
   bool is_async_ : 1;
   bool is_constructor_ : 1;
   bool is_promise_all_ : 1;
+  bool is_promise_any_ : 1;
   bool is_strict_ : 1;
 
   friend class FrameArrayIterator;
@@ -167,13 +172,13 @@ class WasmStackFrame : public StackFrameBase {
   Handle<Object> GetReceiver() const override;
   Handle<Object> GetFunction() const override;
 
-  Handle<Object> GetFileName() override { return Null(); }
-  Handle<Object> GetFunctionName() override;
+  Handle<Object> GetFileName() override;
+  Handle<PrimitiveHeapObject> GetFunctionName() override;
   Handle<Object> GetScriptNameOrSourceUrl() override;
-  Handle<Object> GetMethodName() override { return Null(); }
-  Handle<Object> GetTypeName() override { return Null(); }
-  Handle<Object> GetWasmModuleName() override;
-  Handle<Object> GetWasmInstance() override;
+  Handle<PrimitiveHeapObject> GetMethodName() override { return Null(); }
+  Handle<PrimitiveHeapObject> GetTypeName() override { return Null(); }
+  Handle<PrimitiveHeapObject> GetWasmModuleName() override;
+  Handle<HeapObject> GetWasmInstance() override;
 
   int GetPosition() const override;
   int GetLineNumber() override { return 0; }
@@ -186,12 +191,13 @@ class WasmStackFrame : public StackFrameBase {
   bool IsToplevel() override { return false; }
   bool IsAsync() const override { return false; }
   bool IsPromiseAll() const override { return false; }
+  bool IsPromiseAny() const override { return false; }
   bool IsConstructor() override { return false; }
   bool IsStrict() const override { return false; }
   bool IsInterpreted() const { return code_ == nullptr; }
 
 protected:
-  Handle<Object> Null() const;
+  Handle<PrimitiveHeapObject> Null() const;
 
   bool HasScript() const override;
   Handle<Script> GetScript() const override;
@@ -308,7 +314,7 @@ class ErrorUtils : public AllStatic {
 
 class MessageFormatter {
  public:
-  static const char* TemplateString(MessageTemplate index);
+  V8_EXPORT_PRIVATE static const char* TemplateString(MessageTemplate index);
 
   V8_EXPORT_PRIVATE static MaybeHandle<String> Format(Isolate* isolate,
                                                       MessageTemplate index,
diff --git a/chromium/v8/src/execution/microtask-queue.h b/chromium/v8/src/execution/microtask-queue.h
index 4ce1498279c..82840c2bed5 100644
--- a/chromium/v8/src/execution/microtask-queue.h
+++ b/chromium/v8/src/execution/microtask-queue.h
@@ -26,7 +26,7 @@ class V8_EXPORT_PRIVATE MicrotaskQueue final : public v8::MicrotaskQueue {
   static void SetUpDefaultMicrotaskQueue(Isolate* isolate);
   static std::unique_ptr<MicrotaskQueue> New(Isolate* isolate);
 
-  ~MicrotaskQueue();
+  ~MicrotaskQueue() override;
 
   // Uses raw Address values because it's called via ExternalReference.
   // {raw_microtask} is a tagged Microtask pointer.
diff --git a/chromium/v8/src/execution/off-thread-isolate-inl.h b/chromium/v8/src/execution/off-thread-isolate-inl.h
index 13dfebd47f5..9e82ad9b73b 100644
--- a/chromium/v8/src/execution/off-thread-isolate-inl.h
+++ b/chromium/v8/src/execution/off-thread-isolate-inl.h
@@ -15,6 +15,14 @@ namespace internal {
 Address OffThreadIsolate::isolate_root() const {
   return isolate_->isolate_root();
 }
+
+ReadOnlyHeap* OffThreadIsolate::read_only_heap() {
+  return isolate_->read_only_heap();
+}
+
+Object OffThreadIsolate::root(RootIndex index) {
+  DCHECK(RootsTable::IsImmortalImmovable(index));
+  return isolate_->root(index);
+}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/chromium/v8/src/execution/off-thread-isolate.cc b/chromium/v8/src/execution/off-thread-isolate.cc
index 3a4c39052f2..08675493723 100644
--- a/chromium/v8/src/execution/off-thread-isolate.cc
+++ b/chromium/v8/src/execution/off-thread-isolate.cc
@@ -7,88 +7,12 @@
 #include "src/execution/isolate.h"
 #include "src/execution/thread-id.h"
 #include "src/handles/handles-inl.h"
+#include "src/handles/off-thread-transfer-handle-storage-inl.h"
 #include "src/logging/off-thread-logger.h"
 
 namespace v8 {
 namespace internal {
 
-class OffThreadTransferHandleStorage {
- public:
-  enum State { kOffThreadHandle, kRawObject, kHandle };
-
-  explicit OffThreadTransferHandleStorage(
-      Address* off_thread_handle_location,
-      std::unique_ptr<OffThreadTransferHandleStorage> next)
-      : handle_location_(off_thread_handle_location),
-        next_(std::move(next)),
-        state_(kOffThreadHandle) {
-    CheckValid();
-  }
-
-  void ConvertFromOffThreadHandleOnFinish() {
-    CheckValid();
-    DCHECK_EQ(state_, kOffThreadHandle);
-    raw_obj_ptr_ = *handle_location_;
-    state_ = kRawObject;
-    CheckValid();
-  }
-
-  void ConvertToHandleOnPublish(Isolate* isolate) {
-    CheckValid();
-    DCHECK_EQ(state_, kRawObject);
-    handle_location_ = handle(Object(raw_obj_ptr_), isolate).location();
-    state_ = kHandle;
-    CheckValid();
-  }
-
-  Address* handle_location() const {
-    DCHECK_EQ(state_, kHandle);
-    DCHECK(
-        Object(*handle_location_).IsSmi() ||
-        !Heap::InOffThreadSpace(HeapObject::cast(Object(*handle_location_))));
-    return handle_location_;
-  }
-
-  OffThreadTransferHandleStorage* next() { return next_.get(); }
-
-  State state() const { return state_; }
-
- private:
-  void CheckValid() {
-#ifdef DEBUG
-    Object obj;
-
-    switch (state_) {
-      case kHandle:
-      case kOffThreadHandle:
-        DCHECK_NOT_NULL(handle_location_);
-        obj = Object(*handle_location_);
-        break;
-      case kRawObject:
-        obj = Object(raw_obj_ptr_);
-        break;
-    }
-
-    // Smis are always fine.
-    if (obj.IsSmi()) return;
-
-    // The object that is not yet in a main-thread handle should be in
-    // off-thread space. Main-thread handles can still point to off-thread space
-    // during Publish, so that invariant is taken care of on main-thread handle
-    // access.
-    DCHECK_IMPLIES(state_ != kHandle,
-                   Heap::InOffThreadSpace(HeapObject::cast(obj)));
-#endif
-  }
-
-  union {
-    Address* handle_location_;
-    Address raw_obj_ptr_;
-  };
-  std::unique_ptr<OffThreadTransferHandleStorage> next_;
-  State state_;
-};
-
 Address* OffThreadTransferHandleBase::ToHandleLocation() const {
   return storage_ == nullptr ? nullptr : storage_->handle_location();
 }
@@ -98,32 +22,16 @@ OffThreadIsolate::OffThreadIsolate(Isolate* isolate, Zone* zone)
       heap_(isolate->heap()),
       isolate_(isolate),
       logger_(new OffThreadLogger()),
-      handle_zone_(zone),
-      off_thread_transfer_handles_head_(nullptr) {}
+      handle_zone_(zone) {}
 
 OffThreadIsolate::~OffThreadIsolate() = default;
 
 void OffThreadIsolate::FinishOffThread() {
   heap()->FinishOffThread();
-
-  OffThreadTransferHandleStorage* storage =
-      off_thread_transfer_handles_head_.get();
-  while (storage != nullptr) {
-    storage->ConvertFromOffThreadHandleOnFinish();
-    storage = storage->next();
-  }
-
   handle_zone_ = nullptr;
 }
 
 void OffThreadIsolate::Publish(Isolate* isolate) {
-  OffThreadTransferHandleStorage* storage =
-      off_thread_transfer_handles_head_.get();
-  while (storage != nullptr) {
-    storage->ConvertToHandleOnPublish(isolate);
-    storage = storage->next();
-  }
-
   heap()->Publish(isolate->heap());
 }
 
@@ -145,16 +53,5 @@ void OffThreadIsolate::PinToCurrentThread() {
   thread_id_ = ThreadId::Current();
 }
 
-OffThreadTransferHandleStorage* OffThreadIsolate::AddTransferHandleStorage(
-    HandleBase handle) {
-  DCHECK_IMPLIES(off_thread_transfer_handles_head_ != nullptr,
-                 off_thread_transfer_handles_head_->state() ==
-                     OffThreadTransferHandleStorage::kOffThreadHandle);
-  off_thread_transfer_handles_head_ =
-      std::make_unique<OffThreadTransferHandleStorage>(
-          handle.location(), std::move(off_thread_transfer_handles_head_));
-  return off_thread_transfer_handles_head_.get();
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/chromium/v8/src/execution/off-thread-isolate.h b/chromium/v8/src/execution/off-thread-isolate.h
index 80fea9bc4c9..e5217ef3bf7 100644
--- a/chromium/v8/src/execution/off-thread-isolate.h
+++ b/chromium/v8/src/execution/off-thread-isolate.h
@@ -86,6 +86,8 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
   OffThreadHeap* heap() { return &heap_; }
 
   inline Address isolate_root() const;
+  inline ReadOnlyHeap* read_only_heap();
+  inline Object root(RootIndex index);
 
   v8::internal::OffThreadFactory* factory() {
     // Upcast to the privately inherited base-class using c-style casts to avoid
@@ -129,7 +131,7 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
     if (handle.is_null()) {
       return OffThreadTransferHandle<T>();
     }
-    return OffThreadTransferHandle<T>(AddTransferHandleStorage(handle));
+    return OffThreadTransferHandle<T>(heap()->AddTransferHandleStorage(handle));
   }
 
   template <typename T>
@@ -139,7 +141,8 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
     if (!maybe_handle.ToHandle(&handle)) {
       return OffThreadTransferMaybeHandle<T>();
     }
-    return OffThreadTransferMaybeHandle<T>(AddTransferHandleStorage(handle));
+    return OffThreadTransferMaybeHandle<T>(
+        heap()->AddTransferHandleStorage(handle));
   }
 
   int GetNextScriptId();
@@ -157,8 +160,6 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
  private:
   friend class v8::internal::OffThreadFactory;
 
-  OffThreadTransferHandleStorage* AddTransferHandleStorage(HandleBase handle);
-
   OffThreadHeap heap_;
 
   // TODO(leszeks): Extract out the fields of the Isolate we want and store
@@ -168,8 +169,6 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
   std::unique_ptr<OffThreadLogger> logger_;
   ThreadId thread_id_;
   Zone* handle_zone_;
-  std::unique_ptr<OffThreadTransferHandleStorage>
-      off_thread_transfer_handles_head_;
 };
 
 }  // namespace internal
diff --git a/chromium/v8/src/execution/s390/simulator-s390.cc b/chromium/v8/src/execution/s390/simulator-s390.cc
index f41288f6a96..85688f861ce 100644
--- a/chromium/v8/src/execution/s390/simulator-s390.cc
+++ b/chromium/v8/src/execution/s390/simulator-s390.cc
@@ -785,9 +785,10 @@ void Simulator::EvalTableInit() {
   V(vlc, VLC, 0xE7DE)     /* type = VRR_A VECTOR LOAD COMPLEMENT */ \
   V(vsel, VSEL, 0xE78D)   /* type = VRR_E VECTOR SELECT */ \
   V(vperm, VPERM, 0xE78C) /* type = VRR_E VECTOR PERMUTE */ \
-  V(vtm, VTM, 0xE7D8)     /* type = VRR_A VECTOR TEST UNDER MASK */ \
-  V(vesl, VESL, 0xE730)   /* type = VRS_A VECTOR ELEMENT SHIFT LEFT */ \
-  V(veslv, VESLV, 0xE770) /* type = VRR_C VECTOR ELEMENT SHIFT LEFT */ \
+  V(vbperm, VBPERM, 0xE785) /* type = VRR_C VECTOR BIT PERMUTE */ \
+  V(vtm, VTM, 0xE7D8)     /* type = VRR_A VECTOR TEST UNDER MASK */ \
+  V(vesl, VESL, 0xE730)   /* type = VRS_A VECTOR ELEMENT SHIFT LEFT */ \
+  V(veslv, VESLV, 0xE770) /* type = VRR_C VECTOR ELEMENT SHIFT LEFT */ \
   V(vesrl, VESRL, \
     0xE738) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT LOGICAL */ \
   V(vesrlv, VESRLV, \
@@ -3702,6 +3703,34 @@ EVALUATE(VPERM) {
   return length;
 }
 
+EVALUATE(VBPERM) {
+  DCHECK_OPCODE(VBPERM);
+  DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+  USE(m4);
+  USE(m5);
+  USE(m6);
+  uint16_t result_bits = 0;
+  for (int i = 0; i < kSimd128Size; i++) {
+    result_bits <<= 1;
+    uint8_t selected_bit_index = get_simd_register_by_lane<uint8_t>(r3, i);
+    unsigned __int128 src_bits =
+        *(reinterpret_cast<__int128*>(get_simd_register(r2).int8));
+    if (selected_bit_index < (kSimd128Size * kBitsPerByte)) {
+      unsigned __int128 bit_value =
+          (src_bits << selected_bit_index) >> (kSimd128Size * kBitsPerByte - 1);
+      result_bits |= bit_value;
+    }
+  }
+  set_simd_register_by_lane<uint64_t>(r1, 0, 0);
+  set_simd_register_by_lane<uint64_t>(r1, 1, 0);
+  // Write back in bytes to avoid endianness problems.
+  set_simd_register_by_lane<uint8_t>(r1, 6,
+                                     static_cast<uint8_t>(result_bits >> 8));
+  set_simd_register_by_lane<uint8_t>(
+      r1, 7, static_cast<uint8_t>((result_bits << 8) >> 8));
+  return length;
+}
+
 EVALUATE(VSEL) {
   DCHECK_OPCODE(VSEL);
   DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5);
@@ -3977,33 +4006,35 @@ EVALUATE(VFNMS) {
 #undef VECTOR_FP_MULTIPLY_QFMS_OPERATION
 
 template <class T, class Operation>
-void VectorFPMaxMin(void* dst, void* src1, void* src2, Operation op) {
+void VectorFPMaxMin(void* dst, void* src1, void* src2, int mode, Operation op) {
   T* dst_ptr = reinterpret_cast<T*>(dst);
   T* src1_ptr = reinterpret_cast<T*>(src1);
   T* src2_ptr = reinterpret_cast<T*>(src2);
   for (size_t i = 0; i < kSimd128Size / sizeof(T); i++) {
     T src1_val = *(src1_ptr + i);
     T src2_val = *(src2_ptr + i);
-    T value = op(src1_val, src2_val);
-    // using Java's Max Min functions
-    if (isnan(src1_val) || isnan(src2_val)) {
-      value = NAN;
-    }
+    T value = op(src1_val, src2_val, mode);
     memcpy(dst_ptr + i, &value, sizeof(T));
   }
 }
 
-#define VECTOR_FP_MAX_MIN_FOR_TYPE(type, op) \
+#define VECTOR_FP_MAX_MIN_FOR_TYPE(type, op, std_op) \
   VectorFPMaxMin<type>(&get_simd_register(r1), &get_simd_register(r2), \
-                       &get_simd_register(r3), [](type a, type b) { \
-                         if (signbit(b) op signbit(a)) \
+                       &get_simd_register(r3), m6, \
+                       [](type a, type b, int mode) { \
+                         if (mode == 3) { \
+                           return std::std_op(a, b); \
+                         } \
+                         if (isnan(a) || isnan(b)) \
+                           return static_cast<type>(NAN); \
+                         else if (signbit(b) op signbit(a)) \
                            return a; \
                          else if (signbit(b) != signbit(a)) \
                            return b; \
                          return (a op b) ? a : b; \
                        });
 
-#define VECTOR_FP_MAX_MIN(op) \
+#define VECTOR_FP_MAX_MIN(op, std_op) \
   switch (m4) { \
     case 2: \
       if (m5 == 8) { \
@@ -4012,8 +4043,7 @@ void VectorFPMaxMin(void* dst, void* src1, void* src2, Operation op) {
         set_simd_register_by_lane<float>(r1, 0, (src1 op src2) ? src1 : src2); \
       } else { \
         DCHECK_EQ(m5, 0); \
-        DCHECK_EQ(m6, 1); \
-        VECTOR_FP_MAX_MIN_FOR_TYPE(float, op) \
+        VECTOR_FP_MAX_MIN_FOR_TYPE(float, op, std_op) \
       } \
       break; \
     case 3: \
@@ -4024,8 +4054,7 @@ void VectorFPMaxMin(void* dst, void* src1, void* src2, Operation op) {
                                          (src1 op src2) ? src1 : src2); \
       } else { \
         DCHECK_EQ(m5, 0); \
-        DCHECK_EQ(m6, 1); \
-        VECTOR_FP_MAX_MIN_FOR_TYPE(double, op) \
+        VECTOR_FP_MAX_MIN_FOR_TYPE(double, op, std_op) \
       } \
       break; \
     default: \
@@ -4037,8 +4066,7 @@ EVALUATE(VFMIN) {
   DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
   DCHECK_OPCODE(VFMIN);
   DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
-  USE(m6);
-  VECTOR_FP_MAX_MIN(<)  // NOLINT
+  VECTOR_FP_MAX_MIN(<, min)  // NOLINT
   return length;
 }
 
@@ -4047,7 +4075,7 @@ EVALUATE(VFMAX) {
   DCHECK_OPCODE(VFMAX);
   DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
   USE(m6);
-  VECTOR_FP_MAX_MIN(>)  // NOLINT
+  VECTOR_FP_MAX_MIN(>, max)  // NOLINT
   return length;
 }
 
@@ -4224,24 +4252,39 @@ EVALUATE(VFSQ) {
   return length;
 }
 
+#define ROUNDING_SWITCH(type) \
+  switch (m5) { \
+    case 4: \
+      set_simd_register_by_lane<type>(r1, i, nearbyint(value)); \
+      break; \
+    case 5: \
+      set_simd_register_by_lane<type>(r1, i, trunc(value)); \
+      break; \
+    case 6: \
+      set_simd_register_by_lane<type>(r1, i, ceil(value)); \
+      break; \
+    case 7: \
+      set_simd_register_by_lane<type>(r1, i, floor(value)); \
+      break; \
+    default: \
+      UNREACHABLE(); \
+  }
 EVALUATE(VFI) {
   DCHECK_OPCODE(VFI);
   DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
   USE(m4);
-  USE(m5);
-  DCHECK_EQ(m5, 5);
   switch (m3) {
     case 2:
       DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
       for (int i = 0; i < 4; i++) {
         float value = get_simd_register_by_lane<float>(r2, i);
-        set_simd_register_by_lane<float>(r1, i, trunc(value));
+        ROUNDING_SWITCH(float)
       }
       break;
     case 3:
       for (int i = 0; i < 2; i++) {
         double value = get_simd_register_by_lane<double>(r2, i);
-        set_simd_register_by_lane<double>(r1, i, trunc(value));
+        ROUNDING_SWITCH(double)
       }
       break;
     default:
@@ -4249,6 +4292,7 @@ EVALUATE(VFI) {
   }
   return length;
 }
+#undef ROUNDING_SWITCH
 
 EVALUATE(DUMY) {
   DCHECK_OPCODE(DUMY);
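For reference, the selection rule implemented by the new EVALUATE(VBPERM) handler above can be restated as a minimal standalone sketch, separate from the patch itself. It uses plain byte arrays instead of the simulator's register helpers, assumes the z/Architecture bit numbering in which bit 0 is the most significant bit of byte 0 (the same ordering the __int128 shift in the handler relies on), and the function name VectorBitPermuteSketch is illustrative only.

    // Standalone restatement of VECTOR BIT PERMUTE: each of the 16 selector
    // bytes picks one bit out of the 128-bit source (indices >= 128 contribute
    // a 0 bit), and the 16 collected bits form the value that the handler
    // above writes into bytes 6 and 7 of the otherwise-zeroed destination.
    #include <cstdint>

    uint16_t VectorBitPermuteSketch(const uint8_t src[16],
                                    const uint8_t selectors[16]) {
      uint16_t result = 0;
      for (int i = 0; i < 16; i++) {
        result <<= 1;
        uint8_t bit_index = selectors[i];
        if (bit_index < 128) {
          // Bit 0 is the MSB of src[0]; bit 127 is the LSB of src[15].
          uint8_t bit = (src[bit_index / 8] >> (7 - (bit_index % 8))) & 1;
          result |= bit;
        }
      }
      return result;
    }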
diff --git a/chromium/v8/src/execution/simulator.h b/chromium/v8/src/execution/simulator.h
index a4e07b235b4..74763474c61 100644
--- a/chromium/v8/src/execution/simulator.h
+++ b/chromium/v8/src/execution/simulator.h
@@ -128,7 +128,7 @@ class GeneratedCode {
 #if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
     FATAL("Generated code execution not possible during cross-compilation.");
 #endif  // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
-#if V8_OS_AIX
+#if ABI_USES_FUNCTION_DESCRIPTORS
     // AIX ABI requires function descriptors (FD). Artificially create a pseudo
     // FD to ensure correct dispatch to generated code. The 'volatile'
     // declaration is required to avoid the compiler from not observing the
@@ -140,7 +140,7 @@ class GeneratedCode {
     return fn(args...);
 #else
     return fn_ptr_(args...);
-#endif  // V8_OS_AIX
+#endif  // ABI_USES_FUNCTION_DESCRIPTORS
   }
 #endif  // USE_SIMULATOR
diff --git a/chromium/v8/src/execution/stack-guard.cc b/chromium/v8/src/execution/stack-guard.cc
index d37327f1c3d..90689556673 100644
--- a/chromium/v8/src/execution/stack-guard.cc
+++ b/chromium/v8/src/execution/stack-guard.cc
@@ -272,8 +272,7 @@ Object StackGuard::HandleInterrupts() {
   }
 
   if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
-    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
-                 "V8.WasmGrowSharedMemory");
+    TRACE_EVENT0("v8.wasm", "V8.WasmGrowSharedMemory");
     BackingStore::UpdateSharedWasmMemoryObjects(isolate_);
   }
 
@@ -297,12 +296,12 @@ Object StackGuard::HandleInterrupts() {
   }
 
   if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
-    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
+    TRACE_EVENT0("v8.wasm", "V8.LogCode");
     isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
   }
 
   if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
-    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
+    TRACE_EVENT0("v8.wasm", "V8.WasmCodeGC");
     isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
   }
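The per-lane rule that the reworked VECTOR_FP_MAX_MIN_FOR_TYPE macro in the simulator-s390.cc hunk above now encodes can likewise be restated as an ordinary function, here for the minimum case (VFMAX mirrors it with > and std::max). This is an illustrative restatement rather than part of the patch; the name SimulatedFPMinSketch is made up, and mode corresponds to the m6 field that the macro receives, with m6 == 3 falling through to a plain std::min.

    #include <algorithm>
    #include <cmath>

    // Per-lane "min" as encoded by the lambda passed to VectorFPMaxMin:
    // mode 3 uses the plain C++ minimum; every other mode propagates NaN and
    // orders by sign bit first, so -0.0 is treated as smaller than +0.0.
    template <typename T>
    T SimulatedFPMinSketch(T a, T b, int mode) {
      if (mode == 3) return std::min(a, b);
      if (std::isnan(a) || std::isnan(b)) return static_cast<T>(NAN);
      if (std::signbit(b) < std::signbit(a)) return a;   // a negative, b not.
      if (std::signbit(b) != std::signbit(a)) return b;  // b negative, a not.
      return (a < b) ? a : b;
    }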