author    Michaël Zasso <targos@protonmail.com>  2021-07-14 11:30:07 +0200
committer Michaël Zasso <targos@protonmail.com>  2021-07-20 15:24:51 +0200
commit    6cdd310275bb0f8056aa0ae6d95614e9ca5b70c7 (patch)
tree      9ed37b19cd668894854b7f469010f7621e63ef81 /deps/v8/src/base
parent    c0f10006c82d2d9896a552de98ed146f9542720d (diff)
download  node-new-6cdd310275bb0f8056aa0ae6d95614e9ca5b70c7.tar.gz
deps: update V8 to 9.2.230.21
PR-URL: https://github.com/nodejs/node/pull/38990
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Robert Nagy <ronagy@icloud.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/base')
-rw-r--r--  deps/v8/src/base/atomicops.h | 298
-rw-r--r--  deps/v8/src/base/atomicops_internals_portable.h | 216
-rw-r--r--  deps/v8/src/base/atomicops_internals_std.h | 224
-rw-r--r--  deps/v8/src/base/bit-field.h | 2
-rw-r--r--  deps/v8/src/base/bits.h | 4
-rw-r--r--  deps/v8/src/base/bounds.h | 2
-rw-r--r--  deps/v8/src/base/cpu.cc | 2
-rw-r--r--  deps/v8/src/base/enum-set.h | 2
-rw-r--r--  deps/v8/src/base/hashmap.h | 4
-rw-r--r--  deps/v8/src/base/logging.h | 6
-rw-r--r--  deps/v8/src/base/once.h | 16
-rw-r--r--  deps/v8/src/base/optional.h | 12
-rw-r--r--  deps/v8/src/base/platform/mutex.h | 4
-rw-r--r--  deps/v8/src/base/platform/platform-aix.cc | 2
-rw-r--r--  deps/v8/src/base/platform/platform-freebsd.cc | 2
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc | 11
-rw-r--r--  deps/v8/src/base/platform/platform-win32.cc | 2
-rw-r--r--  deps/v8/src/base/platform/semaphore.h | 4
-rw-r--r--  deps/v8/src/base/sanitizer/asan.h | 37
-rw-r--r--  deps/v8/src/base/sanitizer/lsan-page-allocator.cc | 75
-rw-r--r--  deps/v8/src/base/sanitizer/lsan-page-allocator.h | 60
-rw-r--r--  deps/v8/src/base/sanitizer/lsan.h (renamed from deps/v8/src/base/lsan.h) | 12
-rw-r--r--  deps/v8/src/base/sanitizer/msan.h | 40
-rw-r--r--  deps/v8/src/base/small-vector.h | 7
-rw-r--r--  deps/v8/src/base/template-utils.h | 9
-rw-r--r--  deps/v8/src/base/v8-fallthrough.h | 2
-rw-r--r--  deps/v8/src/base/vlq.h | 83
27 files changed, 565 insertions, 573 deletions
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 5d6422be52..cb6940ea70 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -27,6 +27,8 @@
#include <stdint.h>
+#include <atomic>
+
// Small C++ header which defines implementation specific macros used to
// identify the STL implementation.
// - libc++: captures __config for _LIBCPP_VERSION
@@ -35,6 +37,7 @@
#include "src/base/base-export.h"
#include "src/base/build_config.h"
+#include "src/base/macros.h"
#if defined(V8_OS_STARBOARD)
#include "starboard/atomic.h"
@@ -77,6 +80,21 @@ using AtomicWord = SbAtomicPtr;
using AtomicWord = intptr_t;
#endif
+namespace helper {
+template <typename T>
+volatile std::atomic<T>* to_std_atomic(volatile T* ptr) {
+ return reinterpret_cast<volatile std::atomic<T>*>(ptr);
+}
+template <typename T>
+volatile const std::atomic<T>* to_std_atomic_const(volatile const T* ptr) {
+ return reinterpret_cast<volatile const std::atomic<T>*>(ptr);
+}
+} // namespace helper
+
+inline void SeqCst_MemoryFence() {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+}
+
// Atomically execute:
// result = *ptr;
// if (result == old_value)
@@ -86,75 +104,225 @@ using AtomicWord = intptr_t;
// I.e. replace |*ptr| with |new_value| if |*ptr| used to be |old_value|.
// Always return the value of |*ptr| before the operation.
// Acquire, Relaxed, Release correspond to standard C++ memory orders.
-Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value);
-Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, Atomic16 old_value,
- Atomic16 new_value);
-Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
- Atomic32 new_value);
-Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value);
-Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
- Atomic32 new_value);
-Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
- Atomic32 new_value);
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value);
-Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
- Atomic64 new_value);
-#endif // V8_HOST_ARCH_64_BIT
-
-// Atomically store new_value into |*ptr|, returning the previous value held in
-// |*ptr|.
-Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
-#endif // V8_HOST_ARCH_64_BIT
-
-// Atomically increment |*ptr| by |increment|. Returns the new value of
-// |*ptr| with the increment applied.
-Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
-
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
-#endif // V8_HOST_ARCH_64_BIT
-
-void SeqCst_MemoryFence();
-
-void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
-void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value);
-void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value);
-void Release_Store(volatile Atomic32* ptr, Atomic32 value);
-#ifdef V8_HOST_ARCH_64_BIT
-void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
-void Release_Store(volatile Atomic64* ptr, Atomic64 value);
-#endif // V8_HOST_ARCH_64_BIT
-
-Atomic8 Relaxed_Load(volatile const Atomic8* ptr);
-Atomic16 Relaxed_Load(volatile const Atomic16* ptr);
-Atomic32 Relaxed_Load(volatile const Atomic32* ptr);
-Atomic32 Acquire_Load(volatile const Atomic32* ptr);
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 Relaxed_Load(volatile const Atomic64* ptr);
-Atomic64 Acquire_Load(volatile const Atomic64* ptr);
-#endif // V8_HOST_ARCH_64_BIT
+inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
+ Atomic8 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_relaxed, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
+ Atomic16 old_value, Atomic16 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_relaxed, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_relaxed, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
+ std::memory_order_relaxed);
+}
+
+inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
+ increment,
+ std::memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_acquire, std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
+ Atomic8 new_value) {
+ bool result = atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_release, std::memory_order_relaxed);
+ USE(result); // Make gcc compiler happy.
+ return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_release, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_acq_rel, std::memory_order_acquire);
+ return old_value;
+}
+
+inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_relaxed);
+}
+
+inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_relaxed);
+}
+
+inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_relaxed);
+}
+
+inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_release);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_release);
+}
+
+inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_relaxed);
+}
+
+inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_relaxed);
+}
+
+inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_relaxed);
+}
+
+inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_acquire);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_acquire);
+}
+
+#if defined(V8_HOST_ARCH_64_BIT)
+
+inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_relaxed, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
+ std::memory_order_relaxed);
+}
+
+inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
+ increment,
+ std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_acquire, std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_release, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_acq_rel, std::memory_order_acquire);
+ return old_value;
+}
+
+inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_relaxed);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_release);
+}
+
+inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_acquire);
+}
+
+#endif // defined(V8_HOST_ARCH_64_BIT)
+
+inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src,
+ size_t bytes) {
+ constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
+ while (bytes > 0 &&
+ !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
+ Relaxed_Store(dst++, Relaxed_Load(src++));
+ --bytes;
+ }
+ if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
+ IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
+ while (bytes >= kAtomicWordSize) {
+ Relaxed_Store(
+ reinterpret_cast<volatile AtomicWord*>(dst),
+ Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
+ dst += kAtomicWordSize;
+ src += kAtomicWordSize;
+ bytes -= kAtomicWordSize;
+ }
+ }
+ while (bytes > 0) {
+ Relaxed_Store(dst++, Relaxed_Load(src++));
+ --bytes;
+ }
+}
} // namespace base
} // namespace v8
-#if defined(V8_OS_WIN) || defined(V8_OS_STARBOARD)
-#include "src/base/atomicops_internals_std.h"
-#else
-// TODO(ulan): Switch to std version after performance regression with Wheezy
-// sysroot is no longer relevant. Debian Wheezy LTS ends on 31st of May 2018.
-#include "src/base/atomicops_internals_portable.h"
-#endif
-
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
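
The CAS, load, and store helpers documented above are now defined inline on top of std::atomic instead of being forwarded to a separate internals header. As a hedged sketch (not part of the patch), the usual publish/consume pairing they support looks like this; the function and variable names are illustrative:

// Sketch: release/acquire handoff using the inline helpers defined above.
static v8::base::Atomic32 g_payload = 0;
static v8::base::Atomic32 g_ready = 0;

void Publish() {
  v8::base::Relaxed_Store(&g_payload, 42);  // plain data, relaxed suffices
  v8::base::Release_Store(&g_ready, 1);     // release pairs with the acquire below
}

bool TryConsume(v8::base::Atomic32* out) {
  if (v8::base::Acquire_Load(&g_ready) == 0) return false;
  *out = v8::base::Relaxed_Load(&g_payload);  // ordered by the acquire/release pair
  return true;
}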
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
deleted file mode 100644
index ac162e2a82..0000000000
--- a/deps/v8/src/base/atomicops_internals_portable.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-// This implementation uses C++11 atomics' member functions. The code base is
-// currently written assuming atomicity revolves around accesses instead of
-// C++11's memory locations. The burden is on the programmer to ensure that all
-// memory locations accessed atomically are never accessed non-atomically (tsan
-// should help with this).
-//
-// Of note in this implementation:
-// * All NoBarrier variants are implemented as relaxed.
-// * All Barrier variants are implemented as sequentially-consistent.
-// * Compare exchange's failure ordering is always the same as the success one
-// (except for release, which fails as relaxed): using a weaker ordering is
-// only valid under certain uses of compare exchange.
-// * Acquire store doesn't exist in the C11 memory model, it is instead
-// implemented as a relaxed store followed by a sequentially consistent
-// fence.
-// * Release load doesn't exist in the C11 memory model, it is instead
-// implemented as sequentially consistent fence followed by a relaxed load.
-// * Atomic increment is expected to return the post-incremented value, whereas
-// C11 fetch add returns the previous value. The implementation therefore
-// needs to increment twice (which the compiler should be able to detect and
-// optimize).
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
-
-#include <atomic>
-
-#include "src/base/build_config.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace base {
-
-// This implementation is transitional and maintains the original API for
-// atomicops.h.
-
-inline void SeqCst_MemoryFence() {
-#if defined(__GLIBCXX__)
- // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
- // not defined, leading to the linker complaining about undefined references.
- __atomic_thread_fence(std::memory_order_seq_cst);
-#else
- std::atomic_thread_fence(std::memory_order_seq_cst);
-#endif
-}
-
-inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value) {
- bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- USE(result); // Make gcc compiler happy.
- return old_value;
-}
-
-inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
- Atomic16 old_value, Atomic16 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
-}
-
-inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
- return old_value;
-}
-
-inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value) {
- bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELEASE, __ATOMIC_RELAXED);
- USE(result); // Make gcc compiler happy.
- return old_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELEASE, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
- return old_value;
-}
-
-inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
-}
-
-inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
-}
-
-inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
-}
-
-inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
-}
-
-inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_RELAXED);
-}
-
-inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_RELAXED);
-}
-
-inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_RELAXED);
-}
-
-inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
-}
-
-#if defined(V8_HOST_ARCH_64_BIT)
-
-inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
-}
-
-inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
- return old_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELEASE, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
- return old_value;
-}
-
-inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
-}
-
-inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_RELAXED);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
-}
-
-#endif // defined(V8_HOST_ARCH_64_BIT)
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
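
One detail worth keeping from the deleted header's notes: the atomicops increment returns the post-incremented value, while C++11 fetch_add returns the value before the addition, which is why both the old and new implementations add the increment back onto the fetch_add result. A small illustrative sketch (not from the patch):

void IncrementExample() {
  v8::base::Atomic32 counter = 5;
  // fetch_add returns the old value (5); the helper adds the increment back,
  // so the caller observes the post-increment value.
  v8::base::Atomic32 now = v8::base::Relaxed_AtomicIncrement(&counter, 3);
  // now == 8, counter == 8
}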
diff --git a/deps/v8/src/base/atomicops_internals_std.h b/deps/v8/src/base/atomicops_internals_std.h
deleted file mode 100644
index 1638b8b52a..0000000000
--- a/deps/v8/src/base/atomicops_internals_std.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_STD_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_STD_H_
-
-#include <atomic>
-
-#include "src/base/build_config.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace base {
-
-namespace helper {
-template <typename T>
-volatile std::atomic<T>* to_std_atomic(volatile T* ptr) {
- return reinterpret_cast<volatile std::atomic<T>*>(ptr);
-}
-template <typename T>
-volatile const std::atomic<T>* to_std_atomic_const(volatile const T* ptr) {
- return reinterpret_cast<volatile const std::atomic<T>*>(ptr);
-}
-} // namespace helper
-
-inline void SeqCst_MemoryFence() {
- std::atomic_thread_fence(std::memory_order_seq_cst);
-}
-
-inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_relaxed, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
- Atomic16 old_value, Atomic16 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_relaxed, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_relaxed, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
- std::memory_order_relaxed);
-}
-
-inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
- increment,
- std::memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_acquire, std::memory_order_acquire);
- return old_value;
-}
-
-inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value) {
- bool result = atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_release, std::memory_order_relaxed);
- USE(result); // Make gcc compiler happy.
- return old_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_release, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_acq_rel, std::memory_order_acquire);
- return old_value;
-}
-
-inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_relaxed);
-}
-
-inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_relaxed);
-}
-
-inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_relaxed);
-}
-
-inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_release);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_release);
-}
-
-inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_relaxed);
-}
-
-inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_relaxed);
-}
-
-inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_relaxed);
-}
-
-inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_acquire);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_acquire);
-}
-
-#if defined(V8_HOST_ARCH_64_BIT)
-
-inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_relaxed, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
- std::memory_order_relaxed);
-}
-
-inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
- increment,
- std::memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_acquire, std::memory_order_acquire);
- return old_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_release, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_acq_rel, std::memory_order_acquire);
- return old_value;
-}
-
-inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_relaxed);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_release);
-}
-
-inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_acquire);
-}
-
-#endif // defined(V8_HOST_ARCH_64_BIT)
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_STD_H_
diff --git a/deps/v8/src/base/bit-field.h b/deps/v8/src/base/bit-field.h
index ca5fb45921..7b2796e3df 100644
--- a/deps/v8/src/base/bit-field.h
+++ b/deps/v8/src/base/bit-field.h
@@ -52,7 +52,7 @@ class BitField final {
// Returns a type U with the bit field value encoded.
static constexpr U encode(T value) {
- CONSTEXPR_DCHECK(is_valid(value));
+ DCHECK(is_valid(value));
return static_cast<U>(value) << kShift;
}
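
For context on the encode() change above, a usage sketch of BitField; the field layout and the Next<> alias used here are assumptions about the surrounding header, not part of the diff:

// Hypothetical layout: a 3-bit count in bits 0..2 and a flag in bit 3.
using CountField = v8::base::BitField<int, 0, 3>;
using FlagField = CountField::Next<bool, 1>;

uint32_t Pack(int count, bool flag) {
  // encode() now DCHECKs is_valid(value) whenever DCHECKs are enabled.
  return CountField::encode(count) | FlagField::encode(flag);
}
int UnpackCount(uint32_t word) { return CountField::decode(word); }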
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index b137f73936..f790dfaab4 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -144,7 +144,7 @@ inline constexpr
typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
unsigned>::type
CountTrailingZerosNonZero(T value) {
- CONSTEXPR_DCHECK(value != 0);
+ DCHECK_NE(0, value);
#if V8_HAS_BUILTIN_CTZ
return bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
: __builtin_ctz(static_cast<uint32_t>(value));
@@ -165,7 +165,7 @@ constexpr inline bool IsPowerOfTwo(T value) {
template <typename T,
typename = typename std::enable_if<std::is_integral<T>::value>::type>
inline constexpr int WhichPowerOfTwo(T value) {
- CONSTEXPR_DCHECK(IsPowerOfTwo(value));
+ DCHECK(IsPowerOfTwo(value));
#if V8_HAS_BUILTIN_CTZ
STATIC_ASSERT(sizeof(T) <= 8);
return sizeof(T) == 8 ? __builtin_ctzll(static_cast<uint64_t>(value))
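
A quick sketch of the two helpers touched above (illustrative only); both now guard their preconditions with ordinary DCHECKs rather than CONSTEXPR_DCHECK:

void BitsExample() {
  unsigned tz = v8::base::bits::CountTrailingZerosNonZero(8u);  // 3; input must be non-zero
  int log2 = v8::base::bits::WhichPowerOfTwo(8);                // 3; input must be a power of two
  USE(tz, log2);
}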
diff --git a/deps/v8/src/base/bounds.h b/deps/v8/src/base/bounds.h
index fb8c968d66..0fe141b309 100644
--- a/deps/v8/src/base/bounds.h
+++ b/deps/v8/src/base/bounds.h
@@ -15,7 +15,7 @@ namespace base {
// branch.
template <typename T, typename U>
inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) {
- CONSTEXPR_DCHECK(lower_limit <= higher_limit);
+ DCHECK_LE(lower_limit, higher_limit);
STATIC_ASSERT(sizeof(U) <= sizeof(T));
using unsigned_T = typename std::make_unsigned<T>::type;
// Use static_cast to support enum classes.
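
IsInRange performs both bound checks with a single unsigned comparison; a sketch of the trick (illustrative, not from the patch):

void RangeExample() {
  // value is in [lower, higher] iff unsigned(value - lower) <= unsigned(higher - lower);
  // values below lower wrap around to large unsigned numbers and fail the test.
  bool inside = v8::base::IsInRange(7, 0, 9);   // true
  bool below = v8::base::IsInRange(-1, 0, 9);   // false: -1 wraps past 9
  USE(inside, below);
}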
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 17ef42a299..9ddf8939be 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -50,7 +50,7 @@
#include "src/base/logging.h"
#include "src/base/platform/wrappers.h"
#if V8_OS_WIN
-#include "src/base/win32-headers.h" // NOLINT
+#include "src/base/win32-headers.h"
#endif
namespace v8 {
diff --git a/deps/v8/src/base/enum-set.h b/deps/v8/src/base/enum-set.h
index f623198c2d..ce49b3996e 100644
--- a/deps/v8/src/base/enum-set.h
+++ b/deps/v8/src/base/enum-set.h
@@ -79,7 +79,7 @@ class EnumSet {
explicit constexpr EnumSet(T bits) : bits_(bits) {}
static constexpr T Mask(E element) {
- CONSTEXPR_DCHECK(sizeof(T) * 8 > static_cast<size_t>(element));
+ DCHECK_GT(sizeof(T) * 8, static_cast<size_t>(element));
return T{1} << static_cast<typename std::underlying_type<E>::type>(element);
}
diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h
index 179da5ecba..819d589a81 100644
--- a/deps/v8/src/base/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -530,8 +530,8 @@ class TemplateHashMap
AllocationPolicy>;
public:
- STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT
- STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); // NOLINT
+ STATIC_ASSERT(sizeof(Key*) == sizeof(void*));
+ STATIC_ASSERT(sizeof(Value*) == sizeof(void*));
struct value_type {
Key* first;
Value* second;
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 2c4c536cf3..08db24a947 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -134,12 +134,6 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
#endif
-#if V8_HAS_CXX14_CONSTEXPR
-#define CONSTEXPR_DCHECK(cond) DCHECK(cond)
-#else
-#define CONSTEXPR_DCHECK(cond)
-#endif
-
namespace detail {
template <typename... Ts>
std::string PrintToString(Ts&&... ts) {
diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h
index dd8b6be621..c4224e84e3 100644
--- a/deps/v8/src/base/once.h
+++ b/deps/v8/src/base/once.h
@@ -53,10 +53,12 @@
#define V8_BASE_ONCE_H_
#include <stddef.h>
+
#include <atomic>
#include <functional>
#include "src/base/base-export.h"
+#include "src/base/template-utils.h"
namespace v8 {
namespace base {
@@ -76,9 +78,9 @@ enum : uint8_t {
using PointerArgFunction = void (*)(void* arg);
-template <typename T>
-struct OneArgFunction {
- using type = void (*)(T);
+template <typename... Args>
+struct FunctionWithArgs {
+ using type = void (*)(Args...);
};
V8_BASE_EXPORT void CallOnceImpl(OnceType* once,
@@ -90,11 +92,13 @@ inline void CallOnce(OnceType* once, std::function<void()> init_func) {
}
}
-template <typename Arg>
+template <typename... Args, typename = std::enable_if_t<
+ conjunction<std::is_scalar<Args>...>::value>>
inline void CallOnce(OnceType* once,
- typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
+ typename FunctionWithArgs<Args...>::type init_func,
+ Args... args) {
if (once->load(std::memory_order_acquire) != ONCE_STATE_DONE) {
- CallOnceImpl(once, [=]() { init_func(arg); });
+ CallOnceImpl(once, [=]() { init_func(args...); });
}
}
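
The new variadic CallOnce forwards any number of scalar arguments to the init function, enforced via base::conjunction over std::is_scalar. A hedged usage sketch; V8_DECLARE_ONCE and the names below follow the usual once.h conventions and are assumed rather than shown in the diff:

V8_DECLARE_ONCE(g_table_once);
static int* g_table = nullptr;

void InitTable(int* storage, size_t size) {
  for (size_t i = 0; i < size; ++i) storage[i] = static_cast<int>(i);
  g_table = storage;
}

void EnsureTable(int* storage, size_t size) {
  // Both arguments are scalar, so they satisfy the new enable_if constraint.
  v8::base::CallOnce(&g_table_once, &InitTable, storage, size);
}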
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index 3c13e654c8..77e9bb896e 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -558,32 +558,32 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
}
constexpr const T* operator->() const {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return &storage_.value_;
}
constexpr T* operator->() {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return &storage_.value_;
}
constexpr const T& operator*() const& {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr T& operator*() & {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr const T&& operator*() const&& {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
constexpr T&& operator*() && {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 5685797f4e..328c593a30 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -14,7 +14,7 @@
#include "src/base/logging.h"
#if V8_OS_POSIX
-#include <pthread.h> // NOLINT
+#include <pthread.h>
#endif
#if V8_OS_STARBOARD
@@ -164,6 +164,8 @@ class V8_BASE_EXPORT RecursiveMutex final {
// successfully locked.
bool TryLock() V8_WARN_UNUSED_RESULT;
+ V8_INLINE void AssertHeld() const { DCHECK_LT(0, level_); }
+
private:
// The implementation-defined native handle type.
#if V8_OS_POSIX
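
AssertHeld() gives internal helpers a debug-only way to document that the caller already owns the recursive mutex; a sketch under assumptions (names illustrative):

static v8::base::RecursiveMutex g_state_mutex;

void MutateStateLocked() {
  g_state_mutex.AssertHeld();  // DCHECK_LT(0, level_) in debug builds, free otherwise
  // ... touch shared state ...
}

void MutateState() {
  v8::base::LockGuard<v8::base::RecursiveMutex> guard(&g_state_mutex);
  MutateStateLocked();
}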
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index 6b6a870370..e5a5305d48 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -82,7 +82,7 @@ double AIXTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT
+ return static_cast<unsigned>(strtol(buffer, nullptr, 16));
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index edc793c662..ac36b0527e 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -44,7 +44,7 @@ TimezoneCache* OS::CreateTimezoneCache() {
}
static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT
+ return static_cast<unsigned>(strtol(buffer, nullptr, 16));
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index ee787f7d9a..9f61a0aeb5 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -23,12 +23,12 @@
#include <sys/types.h>
#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
defined(__NetBSD__) || defined(__OpenBSD__)
-#include <sys/sysctl.h> // NOLINT, for sysctl
+#include <sys/sysctl.h> // for sysctl
#endif
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
#define LOG_TAG "v8"
-#include <android/log.h> // NOLINT
+#include <android/log.h>
#endif
#include <cmath>
@@ -52,7 +52,7 @@
#endif
#if V8_OS_LINUX
-#include <sys/prctl.h> // NOLINT, for prctl
+#include <sys/prctl.h> // for prctl
#endif
#if defined(V8_OS_FUCHSIA)
@@ -82,7 +82,7 @@ extern int madvise(caddr_t, size_t, int);
#endif
#if defined(V8_LIBC_GLIBC)
-extern "C" void* __libc_stack_end; // NOLINT
+extern "C" void* __libc_stack_end;
#endif
namespace v8 {
@@ -936,8 +936,7 @@ static void InitializeTlsBaseOffset() {
buffer[kBufferSize - 1] = '\0';
char* period_pos = strchr(buffer, '.');
*period_pos = '\0';
- int kernel_version_major =
- static_cast<int>(strtol(buffer, nullptr, 10)); // NOLINT
+ int kernel_version_major = static_cast<int>(strtol(buffer, nullptr, 10));
// The constants below are taken from pthreads.s from the XNU kernel
// sources archive at www.opensource.apple.com.
if (kernel_version_major < 11) {
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 50da60c72f..9fbb257076 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -30,7 +30,7 @@
#include <VersionHelpers.h>
#if defined(_MSC_VER)
-#include <crtdbg.h> // NOLINT
+#include <crtdbg.h>
#endif // defined(_MSC_VER)
// Extra functions for MinGW. Most of these are the _s functions which are in
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index 83a7a3392f..ec107bd290 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -12,9 +12,9 @@
#endif
#if V8_OS_MACOSX
-#include <dispatch/dispatch.h> // NOLINT
+#include <dispatch/dispatch.h>
#elif V8_OS_POSIX
-#include <semaphore.h> // NOLINT
+#include <semaphore.h>
#endif
#if V8_OS_STARBOARD
diff --git a/deps/v8/src/base/sanitizer/asan.h b/deps/v8/src/base/sanitizer/asan.h
new file mode 100644
index 0000000000..291006d58c
--- /dev/null
+++ b/deps/v8/src/base/sanitizer/asan.h
@@ -0,0 +1,37 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AddressSanitizer support.
+
+#ifndef V8_BASE_SANITIZER_ASAN_H_
+#define V8_BASE_SANITIZER_ASAN_H_
+
+#include <type_traits>
+
+#include "src/base/macros.h"
+
+#ifdef V8_USE_ADDRESS_SANITIZER
+
+#include <sanitizer/asan_interface.h>
+
+#if !defined(ASAN_POISON_MEMORY_REGION) || !defined(ASAN_UNPOISON_MEMORY_REGION)
+#error \
+ "ASAN_POISON_MEMORY_REGION and ASAN_UNPOISON_MEMORY_REGION must be defined"
+#endif
+
+#else // !V8_USE_ADDRESS_SANITIZER
+
+#define ASAN_POISON_MEMORY_REGION(start, size) \
+ static_assert(std::is_pointer<decltype(start)>::value, \
+ "static type violation"); \
+ static_assert(std::is_convertible<decltype(size), size_t>::value, \
+ "static type violation"); \
+ USE(start, size)
+
+#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
+ ASAN_POISON_MEMORY_REGION(start, size)
+
+#endif // !V8_USE_ADDRESS_SANITIZER
+
+#endif // V8_BASE_SANITIZER_ASAN_H_
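
With ASan disabled, the two macros above still static_assert that the arguments have pointer and size-like types, then compile to nothing. A hypothetical call pattern (names are illustrative):

// Poison retired blocks so stray reads are caught under ASan; unpoison on reuse.
void RetireBlock(void* block, size_t size) {
  ASAN_POISON_MEMORY_REGION(block, size);
}

void* ReuseBlock(void* block, size_t size) {
  ASAN_UNPOISON_MEMORY_REGION(block, size);
  return block;
}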
diff --git a/deps/v8/src/base/sanitizer/lsan-page-allocator.cc b/deps/v8/src/base/sanitizer/lsan-page-allocator.cc
new file mode 100644
index 0000000000..bb52eb368f
--- /dev/null
+++ b/deps/v8/src/base/sanitizer/lsan-page-allocator.cc
@@ -0,0 +1,75 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/sanitizer/lsan-page-allocator.h"
+
+#include "include/v8-platform.h"
+#include "src/base/logging.h"
+
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+LsanPageAllocator::LsanPageAllocator(v8::PageAllocator* page_allocator)
+ : page_allocator_(page_allocator),
+ allocate_page_size_(page_allocator_->AllocatePageSize()),
+ commit_page_size_(page_allocator_->CommitPageSize()) {
+ DCHECK_NOT_NULL(page_allocator);
+}
+
+void* LsanPageAllocator::AllocatePages(void* hint, size_t size,
+ size_t alignment,
+ PageAllocator::Permission access) {
+ void* result = page_allocator_->AllocatePages(hint, size, alignment, access);
+#if defined(LEAK_SANITIZER)
+ if (result != nullptr) {
+ __lsan_register_root_region(result, size);
+ }
+#endif
+ return result;
+}
+
+std::unique_ptr<v8::PageAllocator::SharedMemory>
+LsanPageAllocator::AllocateSharedPages(size_t size,
+ const void* original_address) {
+ auto result = page_allocator_->AllocateSharedPages(size, original_address);
+#if defined(LEAK_SANITIZER)
+ if (result != nullptr) {
+ __lsan_register_root_region(result->GetMemory(), size);
+ }
+#endif
+ return result;
+}
+
+bool LsanPageAllocator::CanAllocateSharedPages() {
+ return page_allocator_->CanAllocateSharedPages();
+}
+
+bool LsanPageAllocator::FreePages(void* address, size_t size) {
+ bool result = page_allocator_->FreePages(address, size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ }
+#endif
+ return result;
+}
+
+bool LsanPageAllocator::ReleasePages(void* address, size_t size,
+ size_t new_size) {
+ bool result = page_allocator_->ReleasePages(address, size, new_size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ __lsan_register_root_region(address, new_size);
+ }
+#endif
+ return result;
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/sanitizer/lsan-page-allocator.h b/deps/v8/src/base/sanitizer/lsan-page-allocator.h
new file mode 100644
index 0000000000..4c8a1f04a0
--- /dev/null
+++ b/deps/v8/src/base/sanitizer/lsan-page-allocator.h
@@ -0,0 +1,60 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
+#define V8_BASE_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/base-export.h"
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+// This is a v8::PageAllocator implementation that decorates provided page
+// allocator object with leak sanitizer notifications when LEAK_SANITIZER
+// is defined.
+class V8_BASE_EXPORT LsanPageAllocator : public v8::PageAllocator {
+ public:
+ explicit LsanPageAllocator(v8::PageAllocator* page_allocator);
+ ~LsanPageAllocator() override = default;
+
+ size_t AllocatePageSize() override { return allocate_page_size_; }
+
+ size_t CommitPageSize() override { return commit_page_size_; }
+
+ void SetRandomMmapSeed(int64_t seed) override {
+ return page_allocator_->SetRandomMmapSeed(seed);
+ }
+
+ void* GetRandomMmapAddr() override {
+ return page_allocator_->GetRandomMmapAddr();
+ }
+
+ void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) override;
+
+ std::unique_ptr<SharedMemory> AllocateSharedPages(
+ size_t size, const void* original_address) override;
+
+ bool CanAllocateSharedPages() override;
+
+ bool FreePages(void* address, size_t size) override;
+
+ bool ReleasePages(void* address, size_t size, size_t new_size) override;
+
+ bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) override {
+ return page_allocator_->SetPermissions(address, size, access);
+ }
+
+ private:
+ v8::PageAllocator* const page_allocator_;
+ const size_t allocate_page_size_;
+ const size_t commit_page_size_;
+};
+
+} // namespace base
+} // namespace v8
+#endif // V8_BASE_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
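
LsanPageAllocator is a plain decorator: every call forwards to the wrapped allocator, and under LEAK_SANITIZER the allocate/free/release paths also register and unregister the pages as LSan root regions so pointers stored in them are not reported as leaks. A hedged usage sketch; the helper name and the rounding logic are illustrative, not part of the patch:

#include "src/base/sanitizer/lsan-page-allocator.h"

void* AllocateTrackedPages(v8::PageAllocator* platform_allocator, size_t size) {
  v8::base::LsanPageAllocator lsan(platform_allocator);
  size_t page = lsan.AllocatePageSize();
  size_t rounded = ((size + page - 1) / page) * page;  // round up to page size
  return lsan.AllocatePages(lsan.GetRandomMmapAddr(), rounded, page,
                            v8::PageAllocator::kReadWrite);
}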
diff --git a/deps/v8/src/base/lsan.h b/deps/v8/src/base/sanitizer/lsan.h
index fd9bbd21c1..2d7dcd7f68 100644
--- a/deps/v8/src/base/lsan.h
+++ b/deps/v8/src/base/sanitizer/lsan.h
@@ -4,14 +4,16 @@
// LeakSanitizer support.
-#ifndef V8_BASE_LSAN_H_
-#define V8_BASE_LSAN_H_
+#ifndef V8_BASE_SANITIZER_LSAN_H_
+#define V8_BASE_SANITIZER_LSAN_H_
#include <type_traits>
-// There is no compile time flag for LSan, to enable this whenever ASan is
+#include "src/base/macros.h"
+
+// There is no compile time flag for LSan, so enable this whenever ASan is
// enabled. Note that LSan can be used as part of ASan with 'detect_leaks=1'.
-// On windows, LSan is not implemented yet, so disable it there.
+// On Windows, LSan is not implemented yet, so disable it there.
#if defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
#include <sanitizer/lsan_interface.h>
@@ -26,4 +28,4 @@
#endif // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
-#endif // V8_BASE_LSAN_H_
+#endif // V8_BASE_SANITIZER_LSAN_H_
diff --git a/deps/v8/src/base/sanitizer/msan.h b/deps/v8/src/base/sanitizer/msan.h
new file mode 100644
index 0000000000..e15208efaf
--- /dev/null
+++ b/deps/v8/src/base/sanitizer/msan.h
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MemorySanitizer support.
+
+#ifndef V8_BASE_SANITIZER_MSAN_H_
+#define V8_BASE_SANITIZER_MSAN_H_
+
+#include "src/base/macros.h"
+#include "src/base/memory.h"
+
+#ifdef V8_USE_MEMORY_SANITIZER
+
+#include <sanitizer/msan_interface.h>
+
+// Marks a memory range as uninitialized, as if it was allocated here.
+#define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size) \
+ __msan_allocated_memory(reinterpret_cast<const void*>(start), (size))
+
+// Marks a memory range as initialized.
+#define MSAN_MEMORY_IS_INITIALIZED(start, size) \
+ __msan_unpoison(reinterpret_cast<const void*>(start), (size))
+
+#else // !V8_USE_MEMORY_SANITIZER
+
+#define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size) \
+ static_assert((std::is_pointer<decltype(start)>::value || \
+ std::is_same<v8::base::Address, decltype(start)>::value), \
+ "static type violation"); \
+ static_assert(std::is_convertible<decltype(size), size_t>::value, \
+ "static type violation"); \
+ USE(start, size)
+
+#define MSAN_MEMORY_IS_INITIALIZED(start, size) \
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size)
+
+#endif // V8_USE_MEMORY_SANITIZER
+
+#endif // V8_BASE_SANITIZER_MSAN_H_
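
The MSan header mirrors the ASan one: when MSan is disabled, both macros reduce to the static_asserts shown above. A hypothetical call site (names illustrative):

void FillAndPublish(v8::base::Address start, size_t size) {
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size);  // treat as freshly allocated
  // ... write every byte in [start, start + size) ...
  MSAN_MEMORY_IS_INITIALIZED(start, size);           // now safe to read under MSan
}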
diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h
index c337b9052d..b087d44be4 100644
--- a/deps/v8/src/base/small-vector.h
+++ b/deps/v8/src/base/small-vector.h
@@ -154,6 +154,13 @@ class SmallVector {
base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity()));
T* new_storage =
reinterpret_cast<T*>(base::Malloc(sizeof(T) * new_capacity));
+ if (new_storage == nullptr) {
+ // Should be: V8::FatalProcessOutOfMemory, but we don't include V8 from
+ // base. The message is intentionally the same as FatalProcessOutOfMemory
+ // since that will help fuzzers and chromecrash to categorize such
+ // crashes appropriately.
+ FATAL("Fatal process out of memory: base::SmallVector::Grow");
+ }
base::Memcpy(new_storage, begin_, sizeof(T) * in_use);
if (is_big()) base::Free(begin_);
begin_ = new_storage;
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index 4f082845d9..f222593e2d 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -98,6 +98,15 @@ struct make_void {
template <class... Ts>
using void_t = typename make_void<Ts...>::type;
+// Corresponds to C++17's std::conjunction
+template <class...>
+struct conjunction : std::true_type {};
+template <class B>
+struct conjunction<B> : B {};
+template <class B, class... Bn>
+struct conjunction<B, Bn...>
+ : std::conditional_t<bool(B::value), conjunction<Bn...>, B> {};
+
} // namespace base
} // namespace v8
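
base::conjunction backfills C++17's std::conjunction for the C++14 build and is what the new variadic CallOnce uses to require that every argument is scalar. A compile-time sketch (illustrative only):

static_assert(
    v8::base::conjunction<std::is_scalar<int>, std::is_scalar<char*>>::value,
    "all traits are true");
static_assert(
    !v8::base::conjunction<std::is_scalar<int>, std::is_scalar<int[4]>>::value,
    "short-circuits on the first false trait");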
diff --git a/deps/v8/src/base/v8-fallthrough.h b/deps/v8/src/base/v8-fallthrough.h
index f61238de06..a6dc6972d6 100644
--- a/deps/v8/src/base/v8-fallthrough.h
+++ b/deps/v8/src/base/v8-fallthrough.h
@@ -13,7 +13,7 @@
// So do not include this header in any of v8's public headers -- only
// use it in src/, not in include/.
#if defined(__clang__)
-#define V8_FALLTHROUGH [[clang::fallthrough]] // NOLINT(whitespace/braces)
+#define V8_FALLTHROUGH [[clang::fallthrough]]
#else
#define V8_FALLTHROUGH
#endif
diff --git a/deps/v8/src/base/vlq.h b/deps/v8/src/base/vlq.h
index baeb5b9430..96ee42cf6e 100644
--- a/deps/v8/src/base/vlq.h
+++ b/deps/v8/src/base/vlq.h
@@ -14,60 +14,95 @@ namespace v8 {
namespace base {
static constexpr uint32_t kContinueShift = 7;
-static constexpr uint32_t kContinueMask = 1 << kContinueShift;
-static constexpr uint32_t kDataMask = kContinueMask - 1;
+static constexpr uint32_t kContinueBit = 1 << kContinueShift;
+static constexpr uint32_t kDataMask = kContinueBit - 1;
// Encodes an unsigned value using variable-length encoding and stores it using
-// the passed process_byte function.
-inline void VLQEncodeUnsigned(const std::function<void(byte)>& process_byte,
- uint32_t value) {
- bool has_next;
+// the passed process_byte function. The function should return a pointer to
+// the byte that was written, so that VLQEncodeUnsigned can mutate it after
+// writing it.
+template <typename Function>
+inline typename std::enable_if<
+ std::is_same<decltype(std::declval<Function>()(0)), byte*>::value,
+ void>::type
+VLQEncodeUnsigned(Function&& process_byte, uint32_t value) {
+ byte* written_byte = process_byte(value);
+ if (value <= kDataMask) {
+ // Value fits in first byte, early return.
+ return;
+ }
do {
- byte cur_byte = value & kDataMask;
+ // Turn on continuation bit in the byte we just wrote.
+ *written_byte |= kContinueBit;
value >>= kContinueShift;
- has_next = value != 0;
- // The most significant bit is set when we are not done with the value yet.
- cur_byte |= static_cast<uint32_t>(has_next) << kContinueShift;
- process_byte(cur_byte);
- } while (has_next);
+ written_byte = process_byte(value);
+ } while (value > kDataMask);
}
// Encodes value using variable-length encoding and stores it using the passed
// process_byte function.
-inline void VLQEncode(const std::function<void(byte)>& process_byte,
- int32_t value) {
+template <typename Function>
+inline typename std::enable_if<
+ std::is_same<decltype(std::declval<Function>()(0)), byte*>::value,
+ void>::type
+VLQEncode(Function&& process_byte, int32_t value) {
// This wouldn't handle kMinInt correctly if it ever encountered it.
DCHECK_NE(value, std::numeric_limits<int32_t>::min());
bool is_negative = value < 0;
// Encode sign in least significant bit.
uint32_t bits = static_cast<uint32_t>((is_negative ? -value : value) << 1) |
static_cast<uint32_t>(is_negative);
- VLQEncodeUnsigned(process_byte, bits);
+ VLQEncodeUnsigned(std::forward<Function>(process_byte), bits);
}
// Wrapper of VLQEncode for std::vector backed storage containers.
template <typename A>
inline void VLQEncode(std::vector<byte, A>* data, int32_t value) {
- VLQEncode([data](byte value) { data->push_back(value); }, value);
+ VLQEncode(
+ [data](byte value) {
+ data->push_back(value);
+ return &data->back();
+ },
+ value);
}
// Wrapper of VLQEncodeUnsigned for std::vector backed storage containers.
template <typename A>
inline void VLQEncodeUnsigned(std::vector<byte, A>* data, uint32_t value) {
- VLQEncodeUnsigned([data](byte value) { data->push_back(value); }, value);
+ VLQEncodeUnsigned(
+ [data](byte value) {
+ data->push_back(value);
+ return &data->back();
+ },
+ value);
+}
+
+// Decodes a variable-length encoded unsigned value from bytes returned by
+// successive calls to the given function.
+template <typename GetNextFunction>
+inline typename std::enable_if<
+ std::is_same<decltype(std::declval<GetNextFunction>()()), byte>::value,
+ uint32_t>::type
+VLQDecodeUnsigned(GetNextFunction&& get_next) {
+ byte cur_byte = get_next();
+ // Single byte fast path; no need to mask.
+ if (cur_byte <= kDataMask) {
+ return cur_byte;
+ }
+ uint32_t bits = cur_byte & kDataMask;
+ for (int shift = kContinueShift; shift <= 32; shift += kContinueShift) {
+ byte cur_byte = get_next();
+ bits |= (cur_byte & kDataMask) << shift;
+ if (cur_byte <= kDataMask) break;
+ }
+ return bits;
}
// Decodes a variable-length encoded unsigned value stored in contiguous memory
// starting at data_start + index, updating index to where the next encoded
// value starts.
inline uint32_t VLQDecodeUnsigned(byte* data_start, int* index) {
- uint32_t bits = 0;
- for (int shift = 0; true; shift += kContinueShift) {
- byte cur_byte = data_start[(*index)++];
- bits += (cur_byte & kDataMask) << shift;
- if ((cur_byte & kContinueMask) == 0) break;
- }
- return bits;
+ return VLQDecodeUnsigned([&] { return data_start[(*index)++]; });
}
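
Taken together, the encoder now writes the low 7 bits first and flips the continuation bit of the previously written byte in place when more bits follow, while the decoder strips that bit and reassembles the 7-bit groups. A round-trip sketch using the std::vector wrappers above (assuming byte is uint8_t in base; not part of the patch):

void VLQRoundTrip() {
  std::vector<uint8_t> buffer;
  v8::base::VLQEncodeUnsigned(&buffer, 300u);  // 300 = 0b10'0101100 -> bytes 0xAC 0x02
  int index = 0;
  uint32_t decoded = v8::base::VLQDecodeUnsigned(buffer.data(), &index);
  // decoded == 300, index == 2
  v8::base::VLQEncode(&buffer, -5);  // signed: magnitude shifted left, sign stored in bit 0
  USE(decoded);
}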
// Decodes a variable-length encoded value stored in contiguous memory starting