path: root/deps/v8/src/base
author     Michaël Zasso <targos@protonmail.com>  2017-02-14 11:27:26 +0100
committer  Michaël Zasso <targos@protonmail.com>  2017-02-22 15:55:42 +0100
commit     7a77daf24344db7942e34c962b0f1ee729ab7af5 (patch)
tree       e7cbe7bf4e2f4b802a8f5bc18336c546cd6a0d7f /deps/v8/src/base
parent     5f08871ee93ea739148cc49e0f7679e33c70295a (diff)
download   node-new-7a77daf24344db7942e34c962b0f1ee729ab7af5.tar.gz
deps: update V8 to 5.6.326.55
PR-URL: https://github.com/nodejs/node/pull/10992
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8/src/base')
-rw-r--r--  deps/v8/src/base/atomic-utils.h  56
-rw-r--r--  deps/v8/src/base/atomicops.h  41
-rw-r--r--  deps/v8/src/base/atomicops_internals_arm64_gcc.h  317
-rw-r--r--  deps/v8/src/base/atomicops_internals_arm_gcc.h  304
-rw-r--r--  deps/v8/src/base/atomicops_internals_atomicword_compat.h  10
-rw-r--r--  deps/v8/src/base/atomicops_internals_mac.h  216
-rw-r--r--  deps/v8/src/base/atomicops_internals_mips64_gcc.h  310
-rw-r--r--  deps/v8/src/base/atomicops_internals_mips_gcc.h  161
-rw-r--r--  deps/v8/src/base/atomicops_internals_portable.h  172
-rw-r--r--  deps/v8/src/base/atomicops_internals_ppc_gcc.h  168
-rw-r--r--  deps/v8/src/base/atomicops_internals_s390_gcc.h  152
-rw-r--r--  deps/v8/src/base/atomicops_internals_tsan.h  363
-rw-r--r--  deps/v8/src/base/atomicops_internals_x86_gcc.cc  116
-rw-r--r--  deps/v8/src/base/atomicops_internals_x86_gcc.h  275
-rw-r--r--  deps/v8/src/base/atomicops_internals_x86_msvc.h  41
-rw-r--r--  deps/v8/src/base/base-export.h  31
-rw-r--r--  deps/v8/src/base/bits.h  32
-rw-r--r--  deps/v8/src/base/compiler-specific.h  42
-rw-r--r--  deps/v8/src/base/cpu.cc  8
-rw-r--r--  deps/v8/src/base/cpu.h  3
-rw-r--r--  deps/v8/src/base/debug/stack_trace.h  5
-rw-r--r--  deps/v8/src/base/division-by-constant.cc  11
-rw-r--r--  deps/v8/src/base/division-by-constant.h  25
-rw-r--r--  deps/v8/src/base/file-utils.cc  6
-rw-r--r--  deps/v8/src/base/file-utils.h  13
-rw-r--r--  deps/v8/src/base/functional.h  9
-rw-r--r--  deps/v8/src/base/hashmap.h  19
-rw-r--r--  deps/v8/src/base/ieee754.h  42
-rw-r--r--  deps/v8/src/base/logging.h  14
-rw-r--r--  deps/v8/src/base/once.h  4
-rw-r--r--  deps/v8/src/base/platform/condition-variable.h  3
-rw-r--r--  deps/v8/src/base/platform/mutex.h  5
-rw-r--r--  deps/v8/src/base/platform/platform.h  11
-rw-r--r--  deps/v8/src/base/platform/semaphore.h  3
-rw-r--r--  deps/v8/src/base/platform/time.h  16
-rw-r--r--  deps/v8/src/base/ring-buffer.h  54
-rw-r--r--  deps/v8/src/base/sys-info.h  4
-rw-r--r--  deps/v8/src/base/utils/random-number-generator.h  3
38 files changed, 506 insertions, 2559 deletions
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index 31db603bf9..f40853c587 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -51,6 +51,60 @@ class AtomicNumber {
base::AtomicWord value_;
};
+// This type uses no barrier accessors to change atomic word. Be careful with
+// data races.
+template <typename T>
+class NoBarrierAtomicValue {
+ public:
+ NoBarrierAtomicValue() : value_(0) {}
+
+ explicit NoBarrierAtomicValue(T initial)
+ : value_(cast_helper<T>::to_storage_type(initial)) {}
+
+ static NoBarrierAtomicValue* FromAddress(void* address) {
+ return reinterpret_cast<base::NoBarrierAtomicValue<T>*>(address);
+ }
+
+ V8_INLINE bool TrySetValue(T old_value, T new_value) {
+ return base::NoBarrier_CompareAndSwap(
+ &value_, cast_helper<T>::to_storage_type(old_value),
+ cast_helper<T>::to_storage_type(new_value)) ==
+ cast_helper<T>::to_storage_type(old_value);
+ }
+
+ V8_INLINE T Value() const {
+ return cast_helper<T>::to_return_type(base::NoBarrier_Load(&value_));
+ }
+
+ V8_INLINE void SetValue(T new_value) {
+ base::NoBarrier_Store(&value_, cast_helper<T>::to_storage_type(new_value));
+ }
+
+ private:
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+
+ template <typename S>
+ struct cast_helper {
+ static base::AtomicWord to_storage_type(S value) {
+ return static_cast<base::AtomicWord>(value);
+ }
+ static S to_return_type(base::AtomicWord value) {
+ return static_cast<S>(value);
+ }
+ };
+
+ template <typename S>
+ struct cast_helper<S*> {
+ static base::AtomicWord to_storage_type(S* value) {
+ return reinterpret_cast<base::AtomicWord>(value);
+ }
+ static S* to_return_type(base::AtomicWord value) {
+ return reinterpret_cast<S*>(value);
+ }
+ };
+
+ base::AtomicWord value_;
+};
// Flag using T atomically. Also accepts void* as T.
template <typename T>
@@ -73,7 +127,7 @@ class AtomicValue {
}
V8_INLINE void SetBits(T bits, T mask) {
- DCHECK_EQ(bits & ~mask, 0);
+ DCHECK_EQ(bits & ~mask, static_cast<T>(0));
T old_value;
T new_value;
do {
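
The NoBarrierAtomicValue<T> wrapper added in the hunk above exposes relaxed (no-barrier) accessors over a base::AtomicWord. Below is a minimal standalone sketch of the same interface, using C++11 std::atomic as a stand-in for V8's NoBarrier_* primitives; the class name and demo values are illustrative, not V8 code.

// Standalone sketch of the NoBarrierAtomicValue<T> interface using relaxed
// std::atomic operations. Illustrative only; the V8 class wraps
// base::AtomicWord and base::NoBarrier_* instead.
#include <atomic>
#include <cassert>
#include <cstdint>

template <typename T>
class RelaxedValue {
 public:
  explicit RelaxedValue(T initial) : value_(initial) {}

  // Relaxed load: no ordering guarantees; callers must manage data races.
  T Value() const { return value_.load(std::memory_order_relaxed); }

  // Relaxed store.
  void SetValue(T new_value) {
    value_.store(new_value, std::memory_order_relaxed);
  }

  // Relaxed compare-and-swap; true iff the stored value was old_value.
  bool TrySetValue(T old_value, T new_value) {
    return value_.compare_exchange_strong(old_value, new_value,
                                          std::memory_order_relaxed);
  }

 private:
  std::atomic<T> value_;
};

int main() {
  RelaxedValue<intptr_t> v(1);
  assert(v.TrySetValue(1, 2));   // succeeds: current value is 1
  assert(!v.TrySetValue(1, 3));  // fails: current value is now 2
  assert(v.Value() == 2);
  v.SetValue(7);
  assert(v.Value() == 7);
  return 0;
}
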
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 973e96b9ea..927ebbee11 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -26,9 +26,17 @@
#define V8_BASE_ATOMICOPS_H_
#include <stdint.h>
+
+// Small C++ header which defines implementation specific macros used to
+// identify the STL implementation.
+// - libc++: captures __config for _LIBCPP_VERSION
+// - libstdc++: captures bits/c++config.h for __GLIBCXX__
+#include <cstddef>
+
+#include "src/base/base-export.h"
#include "src/base/build_config.h"
-#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
+#if defined(V8_OS_WIN) && defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier at the public name for this fence. So, on
// X64, undef it, and call its documented
@@ -100,13 +108,11 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
void MemoryBarrier();
void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
-void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
-Atomic32 Release_Load(volatile const Atomic32* ptr);
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
@@ -124,44 +130,25 @@ Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
-void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
-Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
} // namespace base
} // namespace v8
-// Include our platform specific implementation.
-#if defined(THREAD_SANITIZER)
-#include "src/base/atomicops_internals_tsan.h"
-#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#if defined(V8_OS_WIN)
+// TODO(hpayer): The MSVC header includes windows.h, which other files end up
+// relying on. Fix this as part of crbug.com/559247.
#include "src/base/atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__)
-#include "src/base/atomicops_internals_mac.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
-#include "src/base/atomicops_internals_arm64_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
-#include "src/base/atomicops_internals_arm_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_PPC
-#include "src/base/atomicops_internals_ppc_gcc.h"
-#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "src/base/atomicops_internals_x86_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
-#include "src/base/atomicops_internals_mips_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
-#include "src/base/atomicops_internals_mips64_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_S390
-#include "src/base/atomicops_internals_s390_gcc.h"
#else
-#error "Atomic operations are not supported on your platform"
+#include "src/base/atomicops_internals_portable.h"
#endif
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
-#if defined(__APPLE__) || defined(__OpenBSD__) || defined(V8_OS_AIX)
+#if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
#include "src/base/atomicops_internals_atomicword_compat.h"
#endif
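
With Acquire_Store and Release_Load dropped from the public atomicops interface above, the remaining pairing for publishing data between threads is Release_Store on the writer side and Acquire_Load on the reader side. Here is a minimal standalone sketch of that pattern, modeled with C++11 std::atomic rather than V8's Atomic32 functions; the payload, flag, and thread setup are illustrative assumptions.

// Release/acquire publication pattern, modeled with std::atomic.
// Illustrative only; V8 code would call Release_Store / Acquire_Load.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

static int32_t payload = 0;            // plain data being published
static std::atomic<int32_t> ready{0};  // flag written with release semantics

void Producer() {
  payload = 42;                               // ordinary store
  ready.store(1, std::memory_order_release);  // ~ Release_Store(&ready, 1)
}

void Consumer() {
  // ~ while (Acquire_Load(&ready) == 0) {}
  while (ready.load(std::memory_order_acquire) == 0) {
  }
  assert(payload == 42);  // release/acquire ordering makes the payload visible
}

int main() {
  std::thread producer(Producer);
  std::thread consumer(Consumer);
  producer.join();
  consumer.join();
  return 0;
}
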
diff --git a/deps/v8/src/base/atomicops_internals_arm64_gcc.h b/deps/v8/src/base/atomicops_internals_arm64_gcc.h
deleted file mode 100644
index f24050a3e6..0000000000
--- a/deps/v8/src/base/atomicops_internals_arm64_gcc.h
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-namespace v8 {
-namespace base {
-
-inline void MemoryBarrier() {
- __asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
-}
-
-// NoBarrier versions of the operation include "memory" in the clobber list.
-// This is not required for direct usage of the NoBarrier versions of the
-// operations. However this is required for correctness when they are used as
-// part of the Acquire or Release versions, to ensure that nothing from outside
-// the call is reordered between the operation and the memory barrier. This does
-// not change the code generated, so has no or minimal impact on the
-// NoBarrier operations.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
- "cmp %w[prev], %w[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
- "1: \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [old_value]"IJr" (old_value),
- [new_value]"r" (new_value)
- : "cc", "memory"
- ); // NOLINT
-
- return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
- "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
- : [result]"=&r" (result),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [new_value]"r" (new_value)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
- "add %w[result], %w[result], %w[increment]\n\t"
- "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
- "cbnz %w[temp], 0b \n\t" // Retry on failure.
- : [result]"=&r" (result),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [increment]"IJr" (increment)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 result;
-
- MemoryBarrier();
- result = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
-
- return result;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
-
- prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
-
- return prev;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
-
- MemoryBarrier();
- prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-
- return prev;
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __asm__ __volatile__ ( // NOLINT
- "stlr %w[value], %[ptr] \n\t"
- : [ptr]"=Q" (*ptr)
- : [value]"r" (value)
- : "memory"
- ); // NOLINT
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value;
-
- __asm__ __volatile__ ( // NOLINT
- "ldar %w[value], %[ptr] \n\t"
- : [value]"=r" (value)
- : [ptr]"Q" (*ptr)
- : "memory"
- ); // NOLINT
-
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-// 64-bit versions of the operations.
-// See the 32-bit versions for comments.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[prev], %[ptr] \n\t"
- "cmp %[prev], %[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %[new_value], %[ptr] \n\t"
- "cbnz %w[temp], 0b \n\t"
- "1: \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [old_value]"IJr" (old_value),
- [new_value]"r" (new_value)
- : "cc", "memory"
- ); // NOLINT
-
- return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[result], %[ptr] \n\t"
- "stxr %w[temp], %[new_value], %[ptr] \n\t"
- "cbnz %w[temp], 0b \n\t"
- : [result]"=&r" (result),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [new_value]"r" (new_value)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[result], %[ptr] \n\t"
- "add %[result], %[result], %[increment] \n\t"
- "stxr %w[temp], %[result], %[ptr] \n\t"
- "cbnz %w[temp], 0b \n\t"
- : [result]"=&r" (result),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [increment]"IJr" (increment)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 result;
-
- MemoryBarrier();
- result = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
-
- return result;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
-
- prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
-
- return prev;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
-
- MemoryBarrier();
- prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-
- return prev;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __asm__ __volatile__ ( // NOLINT
- "stlr %x[value], %[ptr] \n\t"
- : [ptr]"=Q" (*ptr)
- : [value]"r" (value)
- : "memory"
- ); // NOLINT
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value;
-
- __asm__ __volatile__ ( // NOLINT
- "ldar %x[value], %[ptr] \n\t"
- : [value]"=r" (value)
- : [ptr]"Q" (*ptr)
- : "memory"
- ); // NOLINT
-
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_arm_gcc.h b/deps/v8/src/base/atomicops_internals_arm_gcc.h
deleted file mode 100644
index 8d049e04b4..0000000000
--- a/deps/v8/src/base/atomicops_internals_arm_gcc.h
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-#if defined(__QNXNTO__)
-#include <sys/cpuinline.h>
-#endif
-
-namespace v8 {
-namespace base {
-
-// Memory barriers on ARM are funky, but the kernel is here to help:
-//
-// * ARMv5 didn't support SMP, there is no memory barrier instruction at
-// all on this architecture, or when targeting its machine code.
-//
-// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
-// writing a random value to a very specific coprocessor register.
-//
-// * On ARMv7, the "dmb" instruction is used to perform a full memory
-// barrier (though writing to the co-processor will still work).
-// However, on single core devices (e.g. Nexus One, or Nexus S),
-// this instruction will take up to 200 ns, which is huge, even though
-// it's completely un-needed on these devices.
-//
-// * There is no easy way to determine at runtime if the device is
-// single or multi-core. However, the kernel provides a useful helper
-// function at a fixed memory address (0xffff0fa0), which will always
-// perform a memory barrier in the most efficient way. I.e. on single
-// core devices, this is an empty function that exits immediately.
-// On multi-core devices, it implements a full memory barrier.
-//
-// * This source could be compiled to ARMv5 machine code that runs on a
-// multi-core ARMv6 or ARMv7 device. In this case, memory barriers
-// are needed for correct execution. Always call the kernel helper, even
-// when targeting ARMv5TE.
-//
-
-inline void MemoryBarrier() {
-#if defined(__ANDROID__)
- // Note: This is a function call, which is also an implicit compiler barrier.
- typedef void (*KernelMemoryBarrierFunc)();
- ((KernelMemoryBarrierFunc)0xffff0fa0)();
-#elif defined(__QNXNTO__)
- __cpu_membarrier();
-#else
- // Fallback to GCC built-in function
- __sync_synchronize();
-#endif
-}
-
-// An ARM toolchain would only define one of these depending on which
-// variant of the target architecture is being used. This tests against
-// any known ARMv6 or ARMv7 variant, where it is possible to directly
-// use ldrex/strex instructions to implement fast atomic operations.
-#if defined(__ARM_ARCH_8A__) || \
- defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
- defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
- defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
- defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- int reloop;
- do {
- // The following is equivalent to:
- //
- // prev_value = LDREX(ptr)
- // reloop = 0
- // if (prev_value != old_value)
- // reloop = STREX(ptr, new_value)
- __asm__ __volatile__(" ldrex %0, [%3]\n"
- " mov %1, #0\n"
- " cmp %0, %4\n"
-#ifdef __thumb2__
- " it eq\n"
-#endif
- " strexeq %1, %5, [%3]\n"
- : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
- : "r"(ptr), "r"(old_value), "r"(new_value)
- : "cc", "memory");
- } while (reloop != 0);
- return prev_value;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
- return result;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- MemoryBarrier();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 value;
- int reloop;
- do {
- // Equivalent to:
- //
- // value = LDREX(ptr)
- // value += increment
- // reloop = STREX(ptr, value)
- //
- __asm__ __volatile__(" ldrex %0, [%3]\n"
- " add %0, %0, %4\n"
- " strex %1, %0, [%3]\n"
- : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
- : "r"(ptr), "r"(increment)
- : "cc", "memory");
- } while (reloop);
- return value;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- // TODO(digit): Investigate if it's possible to implement this with
- // a single MemoryBarrier() operation between the LDREX and STREX.
- // See http://crbug.com/246514
- MemoryBarrier();
- Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return result;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- int reloop;
- do {
- // old_value = LDREX(ptr)
- // reloop = STREX(ptr, new_value)
- __asm__ __volatile__(" ldrex %0, [%3]\n"
- " strex %1, %4, [%3]\n"
- : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
- : "r"(ptr), "r"(new_value)
- : "cc", "memory");
- } while (reloop != 0);
- return old_value;
-}
-
-// This tests against any known ARMv5 variant.
-#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
-
-// The kernel also provides a helper function to perform an atomic
-// compare-and-swap operation at the hard-wired address 0xffff0fc0.
-// On ARMv5, this is implemented by a special code path that the kernel
-// detects and treats specially when thread pre-emption happens.
-// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
-//
-// Note that this always perform a full memory barrier, there is no
-// need to add calls MemoryBarrier() before or after it. It also
-// returns 0 on success, and 1 on exit.
-//
-// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
-// use newer kernel revisions, so this should not be a concern.
-namespace {
-
-inline int LinuxKernelCmpxchg(Atomic32 old_value,
- Atomic32 new_value,
- volatile Atomic32* ptr) {
- typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
- return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
-}
-
-} // namespace
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- for (;;) {
- prev_value = *ptr;
- if (prev_value != old_value)
- return prev_value;
- if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
- return old_value;
- }
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- // Atomic exchange the old value with an incremented one.
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
- // The exchange took place as expected.
- return new_value;
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- for (;;) {
- prev_value = *ptr;
- if (prev_value != old_value) {
- // Always ensure acquire semantics.
- MemoryBarrier();
- return prev_value;
- }
- if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
- return old_value;
- }
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- // This could be implemented as:
- // MemoryBarrier();
- // return NoBarrier_CompareAndSwap();
- //
- // But would use 3 barriers per succesful CAS. To save performance,
- // use Acquire_CompareAndSwap(). Its implementation guarantees that:
- // - A succesful swap uses only 2 barriers (in the kernel helper).
- // - An early return due to (prev_value != old_value) performs
- // a memory barrier with no store, which is equivalent to the
- // generic implementation above.
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-#else
-# error "Your CPU's ARM architecture is not supported yet"
-#endif
-
-// NOTE: Atomicity of the following load and store operations is only
-// guaranteed in case of 32-bit alignement of |ptr| values.
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-// Byte accessors.
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_atomicword_compat.h b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
index 4f758a7299..5071f442b4 100644
--- a/deps/v8/src/base/atomicops_internals_atomicword_compat.h
+++ b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
@@ -67,11 +67,6 @@ inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::base::Acquire_Store(
- reinterpret_cast<volatile Atomic32*>(ptr), value);
-}
-
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::base::Release_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
@@ -87,11 +82,6 @@ inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
reinterpret_cast<volatile const Atomic32*>(ptr));
}
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
- return v8::base::Release_Load(
- reinterpret_cast<volatile const Atomic32*>(ptr));
-}
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/atomicops_internals_mac.h b/deps/v8/src/base/atomicops_internals_mac.h
deleted file mode 100644
index c112506238..0000000000
--- a/deps/v8/src/base/atomicops_internals_mac.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
-
-#include <libkern/OSAtomic.h>
-
-namespace v8 {
-namespace base {
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-inline void MemoryBarrier() { OSMemoryBarrier(); }
-
-inline void AcquireMemoryBarrier() {
-// On x86 processors, loads already have acquire semantics, so
-// there is no need to put a full barrier here.
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- ATOMICOPS_COMPILER_BARRIER();
-#else
- MemoryBarrier();
-#endif
-}
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- AcquireMemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#ifdef __LP64__
-
-// 64-bit implementation on 64-bit platform
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64(old_value, new_value,
- reinterpret_cast<volatile int64_t*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap64(old_value, new_value,
- reinterpret_cast<volatile int64_t*>(ptr)));
- return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return OSAtomicAdd64Barrier(increment,
- reinterpret_cast<volatile int64_t*>(ptr));
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64Barrier(
- old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- // The lib kern interface does not distinguish between
- // Acquire and Release memory barriers; they are equivalent.
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- AcquireMemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#endif // defined(__LP64__)
-
-#undef ATOMICOPS_COMPILER_BARRIER
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
diff --git a/deps/v8/src/base/atomicops_internals_mips64_gcc.h b/deps/v8/src/base/atomicops_internals_mips64_gcc.h
deleted file mode 100644
index cf2e194e50..0000000000
--- a/deps/v8/src/base/atomicops_internals_mips64_gcc.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-
-namespace v8 {
-namespace base {
-
-// Atomically execute:
-// result = *ptr;
-// if (*ptr == old_value)
-// *ptr = new_value;
-// return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev, tmp;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, %5\n" // prev = *ptr
- "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "move %2, %4\n" // tmp = new_value
- "sc %2, %1\n" // *ptr = tmp (with atomic check)
- "beqz %2, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- "2:\n"
- ".set pop\n"
- : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
- : "r" (old_value), "r" (new_value), "m" (*ptr)
- : "memory");
- return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 temp, old;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %1, %2\n" // old = *ptr
- "move %0, %3\n" // temp = new_value
- "sc %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- ".set pop\n"
- : "=&r" (temp), "=&r" (old), "=m" (*ptr)
- : "r" (new_value), "m" (*ptr)
- : "memory");
-
- return old;
-}
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp, temp2;
-
- __asm__ __volatile__(
- ".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, %2\n" // temp = *ptr
- "addu %1, %0, %3\n" // temp2 = temp + increment
- "sc %1, %2\n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "addu %1, %0, %3\n" // temp2 = temp + increment
- ".set pop\n"
- : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
- : "Ir"(increment), "m"(*ptr)
- : "memory");
- // temp2 now holds the final value.
- return temp2;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- MemoryBarrier();
- Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
- return res;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- MemoryBarrier();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-
-// 64-bit versions of the atomic ops.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev, tmp;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "lld %0, %5\n" // prev = *ptr
- "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "move %2, %4\n" // tmp = new_value
- "scd %2, %1\n" // *ptr = tmp (with atomic check)
- "beqz %2, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- "2:\n"
- ".set pop\n"
- : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
- : "r" (old_value), "r" (new_value), "m" (*ptr)
- : "memory");
- return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 temp, old;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "lld %1, %2\n" // old = *ptr
- "move %0, %3\n" // temp = new_value
- "scd %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- ".set pop\n"
- : "=&r" (temp), "=&r" (old), "=m" (*ptr)
- : "r" (new_value), "m" (*ptr)
- : "memory");
-
- return old;
-}
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp, temp2;
-
- __asm__ __volatile__(
- ".set push\n"
- ".set noreorder\n"
- "1:\n"
- "lld %0, %2\n" // temp = *ptr
- "daddu %1, %0, %3\n" // temp2 = temp + increment
- "scd %1, %2\n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "daddu %1, %0, %3\n" // temp2 = temp + increment
- ".set pop\n"
- : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
- : "Ir"(increment), "m"(*ptr)
- : "memory");
- // temp2 now holds the final value.
- return temp2;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- MemoryBarrier();
- Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
- return res;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- MemoryBarrier();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_mips_gcc.h b/deps/v8/src/base/atomicops_internals_mips_gcc.h
deleted file mode 100644
index 8d65db2127..0000000000
--- a/deps/v8/src/base/atomicops_internals_mips_gcc.h
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-
-namespace v8 {
-namespace base {
-
-// Atomically execute:
-// result = *ptr;
-// if (*ptr == old_value)
-// *ptr = new_value;
-// return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev, tmp;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, 0(%4)\n" // prev = *ptr
- "bne %0, %2, 2f\n" // if (prev != old_value) goto 2
- "move %1, %3\n" // tmp = new_value
- "sc %1, 0(%4)\n" // *ptr = tmp (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- "2:\n"
- ".set pop\n"
- : "=&r" (prev), "=&r" (tmp)
- : "r" (old_value), "r" (new_value), "r" (ptr)
- : "memory");
- return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 temp, old;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- ".set at\n"
- "1:\n"
- "ll %1, 0(%3)\n" // old = *ptr
- "move %0, %2\n" // temp = new_value
- "sc %0, 0(%3)\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- ".set pop\n"
- : "=&r" (temp), "=&r" (old)
- : "r" (new_value), "r" (ptr)
- : "memory");
-
- return old;
-}
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp, temp2;
-
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, 0(%3)\n" // temp = *ptr
- "addu %1, %0, %2\n" // temp2 = temp + increment
- "sc %1, 0(%3)\n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "addu %1, %0, %2\n" // temp2 = temp + increment
- ".set pop\n"
- : "=&r" (temp), "=&r" (temp2)
- : "Ir" (increment), "r" (ptr)
- : "memory");
- // temp2 now holds the final value.
- return temp2;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- MemoryBarrier();
- Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
- return res;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- MemoryBarrier();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
new file mode 100644
index 0000000000..72c1d9a328
--- /dev/null
+++ b/deps/v8/src/base/atomicops_internals_portable.h
@@ -0,0 +1,172 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// This implementation uses C++11 atomics' member functions. The code base is
+// currently written assuming atomicity revolves around accesses instead of
+// C++11's memory locations. The burden is on the programmer to ensure that all
+// memory locations accessed atomically are never accessed non-atomically (tsan
+// should help with this).
+//
+// Of note in this implementation:
+// * All NoBarrier variants are implemented as relaxed.
+// * All Barrier variants are implemented as sequentially-consistent.
+// * Compare exchange's failure ordering is always the same as the success one
+// (except for release, which fails as relaxed): using a weaker ordering is
+// only valid under certain uses of compare exchange.
+// * Acquire store doesn't exist in the C11 memory model, it is instead
+// implemented as a relaxed store followed by a sequentially consistent
+// fence.
+// * Release load doesn't exist in the C11 memory model, it is instead
+// implemented as sequentially consistent fence followed by a relaxed load.
+// * Atomic increment is expected to return the post-incremented value, whereas
+// C11 fetch add returns the previous value. The implementation therefore
+// needs to increment twice (which the compiler should be able to detect and
+// optimize).
+
+#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+
+#include <atomic>
+
+#include "src/base/build_config.h"
+
+namespace v8 {
+namespace base {
+
+// This implementation is transitional and maintains the original API for
+// atomicops.h.
+
+inline void MemoryBarrier() {
+#if defined(__GLIBCXX__)
+ // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
+ // not defined, leading to the linker complaining about undefined references.
+ __atomic_thread_fence(std::memory_order_seq_cst);
+#else
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+#endif
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+#if defined(V8_HOST_ARCH_64_BIT)
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+#endif // defined(V8_HOST_ARCH_64_BIT)
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
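
One detail worth calling out from the header comment of the new portable implementation above: __atomic_fetch_add (like std::atomic::fetch_add) returns the value before the addition, while the *_AtomicIncrement contract returns the value after it, hence the "increment + ..." in the code. A small standalone check of that equivalence, using std::atomic as a stand-in for the GCC builtins; the helper function and values are illustrative.

// fetch_add returns the pre-increment value; the AtomicIncrement contract
// wants the post-increment value, so the increment is added back once more.
#include <atomic>
#include <cassert>
#include <cstdint>

int32_t RelaxedAtomicIncrement(std::atomic<int32_t>* ptr, int32_t increment) {
  // Mirrors: return increment + __atomic_fetch_add(ptr, increment, RELAXED);
  return increment + ptr->fetch_add(increment, std::memory_order_relaxed);
}

int main() {
  std::atomic<int32_t> counter{10};
  int32_t post = RelaxedAtomicIncrement(&counter, 5);
  assert(post == 15);            // post-incremented value is returned
  assert(counter.load() == 15);  // stored value matches
  return 0;
}
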
diff --git a/deps/v8/src/base/atomicops_internals_ppc_gcc.h b/deps/v8/src/base/atomicops_internals_ppc_gcc.h
deleted file mode 100644
index 0d16500d1b..0000000000
--- a/deps/v8/src/base/atomicops_internals_ppc_gcc.h
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
-
-namespace v8 {
-namespace base {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
- return new_value;
- // The exchange took place as expected.
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory"); }
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#ifdef V8_TARGET_ARCH_PPC64
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 old_value;
- do {
- old_value = *ptr;
- } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
- return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- for (;;) {
- Atomic64 old_value = *ptr;
- Atomic64 new_value = old_value + increment;
- if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
- return new_value;
- // The exchange took place as expected.
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#endif
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
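The deleted PPC implementation builds NoBarrier_AtomicExchange out of a __sync_bool_compare_and_swap retry loop. With the __atomic builtins the same operation is a single relaxed exchange; a sketch of the equivalent (not the literal replacement code):

#include <stdint.h>

typedef int32_t Atomic32;  // illustrative stand-in

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  // One relaxed exchange replaces the compare-and-swap retry loop above.
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}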
diff --git a/deps/v8/src/base/atomicops_internals_s390_gcc.h b/deps/v8/src/base/atomicops_internals_s390_gcc.h
deleted file mode 100644
index 6e34f305e3..0000000000
--- a/deps/v8/src/base/atomicops_internals_s390_gcc.h
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_S390_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_S390_H_
-
-namespace v8 {
-namespace base {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return __sync_add_and_fetch(ptr, increment);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() { __sync_synchronize(); }
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#ifdef V8_TARGET_ARCH_S390X
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 old_value;
- do {
- old_value = *ptr;
- } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
- return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return __sync_add_and_fetch(ptr, increment);
-}
-
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#endif
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_S390_H_
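The s390 file implements Barrier_AtomicIncrement with __sync_add_and_fetch, a full-barrier operation. Expressed with the newer builtins (a sketch, assuming sequentially consistent ordering to preserve the __sync_* semantics):

#include <stdint.h>

typedef int32_t Atomic32;  // illustrative stand-in

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  // __ATOMIC_SEQ_CST keeps the full-barrier behaviour of __sync_add_and_fetch.
  return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
}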
diff --git a/deps/v8/src/base/atomicops_internals_tsan.h b/deps/v8/src/base/atomicops_internals_tsan.h
deleted file mode 100644
index 646e5bd4b7..0000000000
--- a/deps/v8/src/base/atomicops_internals_tsan.h
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-// This file is an internal atomic implementation for compiler-based
-// ThreadSanitizer. Use base/atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
-
-namespace v8 {
-namespace base {
-
-#ifndef TSAN_INTERFACE_ATOMIC_H
-#define TSAN_INTERFACE_ATOMIC_H
-
-
-extern "C" {
-typedef char __tsan_atomic8;
-typedef short __tsan_atomic16; // NOLINT
-typedef int __tsan_atomic32;
-typedef long __tsan_atomic64; // NOLINT
-
-#if defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302)
-typedef __int128 __tsan_atomic128;
-#define __TSAN_HAS_INT128 1
-#else
-typedef char __tsan_atomic128;
-#define __TSAN_HAS_INT128 0
-#endif
-
-typedef enum {
- __tsan_memory_order_relaxed,
- __tsan_memory_order_consume,
- __tsan_memory_order_acquire,
- __tsan_memory_order_release,
- __tsan_memory_order_acq_rel,
- __tsan_memory_order_seq_cst,
-} __tsan_memory_order;
-
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
- __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
- __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
- __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
- __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
- __tsan_memory_order mo);
-
-void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
- __tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
- __tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
- __tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
- __tsan_memory_order mo);
-void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
- __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
- __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
- __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-
-__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
- volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-
-void __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void __tsan_atomic_signal_fence(__tsan_memory_order mo);
-} // extern "C"
-
-#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire, __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire, __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline void MemoryBarrier() {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
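The dedicated ThreadSanitizer shim can go away because TSan instruments the __atomic builtins natively, so an operation written with them is already visible to the race detector. For example, what used to be routed through __tsan_atomic32_load reduces to the following sketch:

#include <stdint.h>

typedef int32_t Atomic32;  // illustrative stand-in

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  // Under TSan the builtin itself is intercepted; no __tsan_* wrapper is needed.
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}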
diff --git a/deps/v8/src/base/atomicops_internals_x86_gcc.cc b/deps/v8/src/base/atomicops_internals_x86_gcc.cc
deleted file mode 100644
index c0310300a1..0000000000
--- a/deps/v8/src/base/atomicops_internals_x86_gcc.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This module gets enough CPU information to optimize the
-// atomicops module on x86.
-
-#include <string.h> // NOLINT(build/include)
-
-#include "src/base/atomicops.h"
-
-// This file only makes sense with atomicops_internals_x86_gcc.h -- it
-// depends on structs that are defined in that file. If atomicops.h
-// doesn't sub-include that file, then we aren't needed, and shouldn't
-// try to do anything.
-#ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-// Inline cpuid instruction. In PIC compilations, %ebx contains the address
-// of the global offset table. To avoid breaking such executables, this code
-// must preserve that register's value across cpuid instructions.
-#if defined(__i386__)
-#define cpuid(a, b, c, d, inp) \
- asm("mov %%ebx, %%edi\n" \
- "cpuid\n" \
- "xchg %%edi, %%ebx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#elif defined(__x86_64__)
-#define cpuid(a, b, c, d, inp) \
- asm("mov %%rbx, %%rdi\n" \
- "cpuid\n" \
- "xchg %%rdi, %%rbx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#endif
-
-#if defined(cpuid) // initialize the struct only on x86
-
-namespace v8 {
-namespace base {
-
-// Set the flags so that code will run correctly and conservatively, so even
-// if we haven't been initialized yet, we're probably single threaded, and our
-// default values should hopefully be pretty safe.
-struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
- false, // bug can't exist before process spawns multiple threads
-#if !defined(__SSE2__)
- false, // no SSE2
-#endif
-};
-
-} // namespace base
-} // namespace v8
-
-namespace {
-
-// Initialize the AtomicOps_Internalx86CPUFeatures struct.
-void AtomicOps_Internalx86CPUFeaturesInit() {
- using v8::base::AtomicOps_Internalx86CPUFeatures;
-
- uint32_t eax = 0;
- uint32_t ebx = 0;
- uint32_t ecx = 0;
- uint32_t edx = 0;
-
- // Get vendor string (issue CPUID with eax = 0)
- cpuid(eax, ebx, ecx, edx, 0);
- char vendor[13];
- memcpy(vendor, &ebx, 4);
- memcpy(vendor + 4, &edx, 4);
- memcpy(vendor + 8, &ecx, 4);
- vendor[12] = 0;
-
- // get feature flags in ecx/edx, and family/model in eax
- cpuid(eax, ebx, ecx, edx, 1);
-
- int family = (eax >> 8) & 0xf; // family and model fields
- int model = (eax >> 4) & 0xf;
- if (family == 0xf) { // use extended family and model fields
- family += (eax >> 20) & 0xff;
- model += ((eax >> 16) & 0xf) << 4;
- }
-
- // Opteron Rev E has a bug in which on very rare occasions a locked
- // instruction doesn't act as a read-acquire barrier if followed by a
- // non-locked read-modify-write instruction. Rev F has this bug in
- // pre-release versions, but not in versions released to customers,
- // so we test only for Rev E, which is family 15, model 32..63 inclusive.
- if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
- family == 15 &&
- 32 <= model && model <= 63) {
- AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
- } else {
- AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
- }
-
-#if !defined(__SSE2__)
- // edx bit 26 is SSE2 which we use to tell us whether we can use mfence
- AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
-#endif
-}
-
-class AtomicOpsx86Initializer {
- public:
- AtomicOpsx86Initializer() {
- AtomicOps_Internalx86CPUFeaturesInit();
- }
-};
-
-
-// A global to get us initialized on startup via static initialization :/
-AtomicOpsx86Initializer g_initer;
-
-} // namespace
-
-#endif // if x86
-
-#endif // ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
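This deleted file existed only to run cpuid once at startup, via a hand-written asm macro that preserves %ebx for PIC builds, to detect the Opteron Rev E lock/mfence erratum and SSE2. If similar detection were needed today, the compiler-provided helper avoids the inline assembly; a sketch that is not part of this change:

#include <cpuid.h>

static bool HasSSE2() {
  unsigned int eax, ebx, ecx, edx;
  // __get_cpuid handles the PIC %ebx bookkeeping internally.
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
  return (edx >> 26) & 1;  // EDX bit 26 = SSE2, as in the deleted code above
}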
diff --git a/deps/v8/src/base/atomicops_internals_x86_gcc.h b/deps/v8/src/base/atomicops_internals_x86_gcc.h
deleted file mode 100644
index 55bc44cd8b..0000000000
--- a/deps/v8/src/base/atomicops_internals_x86_gcc.h
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-namespace v8 {
-namespace base {
-
-// This struct is not part of the public API of this module; clients may not
-// use it.
-// Features of this x86. Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
- bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
- // after acquire compare-and-swap.
-#if !defined(__SSE2__)
- bool has_sse2; // Processor has SSE2.
-#endif
-};
-extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-// 32-bit low-level operations on any platform.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- __asm__ __volatile__("lock; cmpxchgl %1,%2"
- : "=a" (prev)
- : "q" (new_value), "m" (*ptr), "0" (old_value)
- : "memory");
- return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
- : "=r" (new_value)
- : "m" (*ptr), "0" (new_value)
- : "memory");
- return new_value; // Now it's the previous value.
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- return temp + increment;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return x;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-#if defined(__x86_64__) || defined(__SSE2__)
-
-// 64-bit implementations of memory barrier can be simpler, because the
-// "mfence" instruction is guaranteed to exist.
-inline void MemoryBarrier() {
- __asm__ __volatile__("mfence" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-#else
-
-inline void MemoryBarrier() {
- if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
- __asm__ __volatile__("mfence" : : : "memory");
- } else { // mfence is faster but not present on PIII
- Atomic32 x = 0;
- NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
- }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
- *ptr = value;
- __asm__ __volatile__("mfence" : : : "memory");
- } else {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier on PIII
- }
-}
-#endif
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- ATOMICOPS_COMPILER_BARRIER();
- *ptr = value; // An x86 store acts as a release barrier.
- // See comments in Atomic64 version of Release_Store(), below.
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
- // See comments in Atomic64 version of Release_Store(), below.
- ATOMICOPS_COMPILER_BARRIER();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
-
-// 64-bit low-level operations on 64-bit platform.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
- __asm__ __volatile__("lock; cmpxchgq %1,%2"
- : "=a" (prev)
- : "q" (new_value), "m" (*ptr), "0" (old_value)
- : "memory");
- return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
- : "=r" (new_value)
- : "m" (*ptr), "0" (new_value)
- : "memory");
- return new_value; // Now it's the previous value.
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- return temp + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- ATOMICOPS_COMPILER_BARRIER();
-
- *ptr = value; // An x86 store acts as a release barrier
- // for current AMD/Intel chips as of Jan 2008.
- // See also Acquire_Load(), below.
-
- // When new chips come out, check:
- // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
- // System Programming Guide, Chapter 7: Multiple-processor management,
- // Section 7.2, Memory Ordering.
- // Last seen at:
- // http://developer.intel.com/design/pentium4/manuals/index_new.htm
- //
- // x86 stores/loads fail to act as barriers for a few instructions (clflush
- // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
- // not generated by the compiler, and are rare. Users of these instructions
- // need to know about cache behaviour in any case since all of these involve
- // either flushing cache lines or non-temporal cache hints.
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
- // for current AMD/Intel chips as of Jan 2008.
- // See also Release_Store(), above.
- ATOMICOPS_COMPILER_BARRIER();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return x;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-#endif // defined(__x86_64__)
-
-} // namespace base
-} // namespace v8
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
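With this header gone, the x86-specific lfence fix-up for the AMD erratum disappears as well; the required ordering is instead requested explicitly at each operation. A sketch of an acquire compare-and-swap written with the builtins (illustrative only):

#include <stdint.h>

typedef int32_t Atomic32;  // illustrative stand-in

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  // On failure the builtin writes the current value back into old_value,
  // so old_value holds the previous value of *ptr in either case.
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}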
diff --git a/deps/v8/src/base/atomicops_internals_x86_msvc.h b/deps/v8/src/base/atomicops_internals_x86_msvc.h
index c37bc78df6..0d2068e9f0 100644
--- a/deps/v8/src/base/atomicops_internals_x86_msvc.h
+++ b/deps/v8/src/base/atomicops_internals_x86_msvc.h
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// This file is an internal atomic implementation, use atomicops.h instead.
+// This file is an internal atomic implementation, use base/atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
@@ -26,25 +26,23 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = InterlockedCompareExchange(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(new_value),
+ reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
- LONG result = InterlockedExchange(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(new_value));
+ LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
- return InterlockedExchangeAdd(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(increment)) + increment;
+ return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(increment)) +
+ increment;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
@@ -52,9 +50,6 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
return Barrier_AtomicIncrement(ptr, increment);
}
-#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
-#error "We require at least vs2005 for MemoryBarrier"
-#endif
inline void MemoryBarrier() {
#if defined(V8_HOST_ARCH_64_BIT)
// See #undef and note at the top of this file.
@@ -85,11 +80,6 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
-}
-
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
@@ -108,16 +98,11 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return value;
}
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
-STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
+static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
@@ -152,11 +137,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
-}
-
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
@@ -177,11 +157,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return value;
}
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
diff --git a/deps/v8/src/base/base-export.h b/deps/v8/src/base/base-export.h
new file mode 100644
index 0000000000..a2b3dacaf7
--- /dev/null
+++ b/deps/v8/src/base/base-export.h
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BASE_EXPORT_H_
+#define V8_BASE_BASE_EXPORT_H_
+
+#include "include/v8config.h"
+
+#if V8_OS_WIN
+
+#ifdef BUILDING_V8_BASE_SHARED
+#define V8_BASE_EXPORT __declspec(dllexport)
+#elif USING_V8_BASE_SHARED
+#define V8_BASE_EXPORT __declspec(dllimport)
+#else
+#define V8_BASE_EXPORT
+#endif // BUILDING_V8_BASE_SHARED
+
+#else // !V8_OS_WIN
+
+// Setup for Linux shared library export.
+#ifdef BUILDING_V8_BASE_SHARED
+#define V8_BASE_EXPORT __attribute__((visibility("default")))
+#else
+#define V8_BASE_EXPORT
+#endif
+
+#endif // V8_OS_WIN
+
+#endif // V8_BASE_BASE_EXPORT_H_
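base-export.h introduces V8_BASE_EXPORT, which the rest of this change attaches to declarations that must remain visible when v8_base is built as a shared library: dllexport/dllimport on Windows, default ELF visibility elsewhere, and a no-op for static builds. A hypothetical usage sketch (DoSomething and Widget are invented names, not part of this diff):

#include "src/base/base-export.h"

namespace v8 {
namespace base {

// Exported free function: callable across the DLL/.so boundary.
V8_BASE_EXPORT int DoSomething(int x);

// Exported class: its methods are visible to users of v8_base.
class V8_BASE_EXPORT Widget {
 public:
  void Frob();
};

}  // namespace base
}  // namespace v8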
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index da12ee60fe..b1864940b8 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -6,6 +6,8 @@
#define V8_BASE_BITS_H_
#include <stdint.h>
+
+#include "src/base/base-export.h"
#include "src/base/macros.h"
#if V8_CC_MSVC
#include <intrin.h>
@@ -172,8 +174,7 @@ inline bool IsPowerOfTwo64(uint64_t value) {
// power of two, it is returned as is. |value| must be less than or equal to
// 0x80000000u. Implementation is from "Hacker's Delight" by Henry S. Warren,
// Jr., figure 3-3, page 48, where the function is called clp2.
-uint32_t RoundUpToPowerOfTwo32(uint32_t value);
-
+V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
// less than or equal to |value|. If you pass in a |value| that is already a
@@ -241,7 +242,7 @@ inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
// SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed multiplication resulted in an overflow.
-bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);
+V8_BASE_EXPORT bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);
// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
@@ -265,31 +266,28 @@ inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
// SignedMulOverflow64(lhs,rhs,val) performs a signed multiplication of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed multiplication resulted in an overflow.
-bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val);
+V8_BASE_EXPORT bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val);
// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
// |rhs|, extracts the most significant 32 bits of the result, and returns
// those.
-int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
-
+V8_BASE_EXPORT int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
// SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values
// |lhs| and |rhs|, extracts the most significant 32 bits of the result, and
// adds the accumulate value |acc|.
-int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc);
-
+V8_BASE_EXPORT int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs,
+ int32_t acc);
// SignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to int32. If |rhs| is zero, then zero is returned. If |lhs|
// is minint and |rhs| is -1, it returns minint.
-int32_t SignedDiv32(int32_t lhs, int32_t rhs);
-
+V8_BASE_EXPORT int32_t SignedDiv32(int32_t lhs, int32_t rhs);
// SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs|
// is -1, it returns zero.
-int32_t SignedMod32(int32_t lhs, int32_t rhs);
-
+V8_BASE_EXPORT int32_t SignedMod32(int32_t lhs, int32_t rhs);
// UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
@@ -319,18 +317,16 @@ inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
// Clamp |value| on overflow and underflow conditions.
-int64_t FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
-
+V8_BASE_EXPORT int64_t
+FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
// SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs|,
// checks and returns the result.
-int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
-
+V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
// SignedSaturatedSub64(lhs, rhs) subtracts |rhs| from |lhs|,
// checks and returns the result.
-int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
-
+V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
} // namespace bits
} // namespace base
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index 822893ffec..1858caa047 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -60,4 +60,46 @@
#define STATIC_CONST_MEMBER_DEFINITION
#endif
+#if V8_CC_MSVC
+
+#include <sal.h>
+
+// Macros for suppressing and disabling warnings on MSVC.
+//
+// Warning numbers are enumerated at:
+// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
+//
+// The warning pragma:
+// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
+//
+// Using __pragma instead of #pragma inside macros:
+// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx
+
+// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
+// for the next line of the source file.
+#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress : n))
+
+// Allows exporting a class that inherits from a non-exported base class.
+// This uses suppress instead of push/pop because the delimiter after the
+// declaration (either "," or "{") has to be placed before the pop macro.
+//
+// Example usage:
+// class EXPORT_API Foo : NON_EXPORTED_BASE(public Bar) {
+//
+// MSVC Compiler warning C4275:
+// non dll-interface class 'Bar' used as base for dll-interface class 'Foo'.
+// Note that this is intended to be used only when no access to the base class'
+// static data is done through derived classes or inline methods. For more info,
+// see http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+#define NON_EXPORTED_BASE(code) \
+ MSVC_SUPPRESS_WARNING(4275) \
+ code
+
+#else // Not MSVC
+
+#define MSVC_SUPPRESS_WARNING(n)
+#define NON_EXPORTED_BASE(code) code
+
+#endif // V8_CC_MSVC
+
#endif // V8_BASE_COMPILER_SPECIFIC_H_
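NON_EXPORTED_BASE addresses MSVC warning C4275, which fires whenever a dll-exported class derives from a base class that is not itself exported; the macro suppresses the warning only for that base-clause. A hypothetical sketch (NonExportedHelper and Frobber are invented names):

#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"

class NonExportedHelper {  // deliberately not exported
 public:
  int value() const { return 42; }
};

class V8_BASE_EXPORT Frobber : NON_EXPORTED_BASE(public NonExportedHelper) {
 public:
  int Twice() const { return 2 * value(); }
};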
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 7757192920..cf1f9c399d 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -415,7 +415,7 @@ CPU::CPU()
}
// Check if CPU has non stoppable time stamp counter.
- const int parameter_containing_non_stop_time_stamp_counter = 0x80000007;
+ const unsigned parameter_containing_non_stop_time_stamp_counter = 0x80000007;
if (num_ext_ids >= parameter_containing_non_stop_time_stamp_counter) {
__cpuid(cpu_info, parameter_containing_non_stop_time_stamp_counter);
has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
@@ -607,7 +607,7 @@ CPU::CPU()
char* implementer = cpu_info.ExtractField("CPU implementer");
if (implementer != NULL) {
char* end;
- implementer_ = strtol(implementer, &end, 0);
+ implementer_ = static_cast<int>(strtol(implementer, &end, 0));
if (end == implementer) {
implementer_ = 0;
}
@@ -617,7 +617,7 @@ CPU::CPU()
char* variant = cpu_info.ExtractField("CPU variant");
if (variant != NULL) {
char* end;
- variant_ = strtol(variant, &end, 0);
+ variant_ = static_cast<int>(strtol(variant, &end, 0));
if (end == variant) {
variant_ = -1;
}
@@ -628,7 +628,7 @@ CPU::CPU()
char* part = cpu_info.ExtractField("CPU part");
if (part != NULL) {
char* end;
- part_ = strtol(part, &end, 0);
+ part_ = static_cast<int>(strtol(part, &end, 0));
if (end == part) {
part_ = 0;
}
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index 19d4102f5b..e0fcea1ca0 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -13,6 +13,7 @@
#ifndef V8_BASE_CPU_H_
#define V8_BASE_CPU_H_
+#include "src/base/base-export.h"
#include "src/base/macros.h"
namespace v8 {
@@ -28,7 +29,7 @@ namespace base {
// architectures. For each architecture the file cpu_<arch>.cc contains the
// implementation of these static functions.
-class CPU final {
+class V8_BASE_EXPORT CPU final {
public:
CPU();
diff --git a/deps/v8/src/base/debug/stack_trace.h b/deps/v8/src/base/debug/stack_trace.h
index e938ef2868..1361bb545a 100644
--- a/deps/v8/src/base/debug/stack_trace.h
+++ b/deps/v8/src/base/debug/stack_trace.h
@@ -13,6 +13,7 @@
#include <iosfwd>
#include <string>
+#include "src/base/base-export.h"
#include "src/base/build_config.h"
#if V8_OS_POSIX
@@ -31,8 +32,8 @@ namespace debug {
// Enables stack dump to console output on exception and signals.
// When enabled, the process will quit immediately. This is meant to be used in
// tests only!
-bool EnableInProcessStackDumping();
-void DisableSignalStackDump();
+V8_BASE_EXPORT bool EnableInProcessStackDumping();
+V8_BASE_EXPORT void DisableSignalStackDump();
// A stacktrace can be helpful in debugging. For example, you can include a
// stacktrace member in an object (probably around #ifndef NDEBUG) so that you
diff --git a/deps/v8/src/base/division-by-constant.cc b/deps/v8/src/base/division-by-constant.cc
index 5167b7a60c..03d198e9bf 100644
--- a/deps/v8/src/base/division-by-constant.cc
+++ b/deps/v8/src/base/division-by-constant.cc
@@ -13,13 +13,6 @@ namespace v8 {
namespace base {
template <class T>
-bool MagicNumbersForDivision<T>::operator==(
- const MagicNumbersForDivision& rhs) const {
- return multiplier == rhs.multiplier && shift == rhs.shift && add == rhs.add;
-}
-
-
-template <class T>
MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
DCHECK(d != static_cast<T>(-1) && d != 0 && d != 1);
@@ -100,8 +93,8 @@ MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
// -----------------------------------------------------------------------------
// Instantiations.
-template struct MagicNumbersForDivision<uint32_t>;
-template struct MagicNumbersForDivision<uint64_t>;
+template struct V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>;
+template struct V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>;
template MagicNumbersForDivision<uint32_t> SignedDivisionByConstant(uint32_t d);
template MagicNumbersForDivision<uint64_t> SignedDivisionByConstant(uint64_t d);
diff --git a/deps/v8/src/base/division-by-constant.h b/deps/v8/src/base/division-by-constant.h
index 02e7e14b01..5d063f8bd5 100644
--- a/deps/v8/src/base/division-by-constant.h
+++ b/deps/v8/src/base/division-by-constant.h
@@ -5,6 +5,10 @@
#ifndef V8_BASE_DIVISION_BY_CONSTANT_H_
#define V8_BASE_DIVISION_BY_CONSTANT_H_
+#include <stdint.h>
+
+#include "src/base/base-export.h"
+
namespace v8 {
namespace base {
@@ -14,10 +18,12 @@ namespace base {
// Delight", chapter 10. The template parameter must be one of the unsigned
// integral types.
template <class T>
-struct MagicNumbersForDivision {
+struct V8_BASE_EXPORT MagicNumbersForDivision {
MagicNumbersForDivision(T m, unsigned s, bool a)
: multiplier(m), shift(s), add(a) {}
- bool operator==(const MagicNumbersForDivision& rhs) const;
+ bool operator==(const MagicNumbersForDivision& rhs) const {
+ return multiplier == rhs.multiplier && shift == rhs.shift && add == rhs.add;
+ }
T multiplier;
unsigned shift;
@@ -28,17 +34,26 @@ struct MagicNumbersForDivision {
// Calculate the multiplier and shift for signed division via multiplication.
// The divisor must not be -1, 0 or 1 when interpreted as a signed value.
template <class T>
-MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
-
+V8_BASE_EXPORT MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
// Calculate the multiplier and shift for unsigned division via multiplication,
// see Warren's "Hacker's Delight", chapter 10. The divisor must not be 0 and
// leading_zeros can be used to speed up the calculation if the given number of
// upper bits of the dividend value are known to be zero.
template <class T>
-MagicNumbersForDivision<T> UnsignedDivisionByConstant(
+V8_BASE_EXPORT MagicNumbersForDivision<T> UnsignedDivisionByConstant(
T d, unsigned leading_zeros = 0);
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>
+SignedDivisionByConstant(uint32_t d);
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>
+SignedDivisionByConstant(uint64_t d);
+
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>
+UnsignedDivisionByConstant(uint32_t d, unsigned leading_zeros);
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>
+UnsignedDivisionByConstant(uint64_t d, unsigned leading_zeros);
+
} // namespace base
} // namespace v8
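MagicNumbersForDivision packages the multiplier, shift amount, and add flag from the "Hacker's Delight" chapter 10 algorithms, so a division by a constant can be emitted as a multiply-high plus shift. A hedged usage sketch of the exported API (variable names are illustrative; the instruction sequence actually emitted is backend-specific and not shown):

#include <stdint.h>

#include "src/base/division-by-constant.h"

void Example() {
  v8::base::MagicNumbersForDivision<uint32_t> mag =
      v8::base::SignedDivisionByConstant<uint32_t>(7);
  uint32_t multiplier = mag.multiplier;  // operand of the multiply-high
  unsigned shift = mag.shift;            // post-multiplication shift amount
  bool add = mag.add;                    // whether an extra add is required
  (void)multiplier;
  (void)shift;
  (void)add;
}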
diff --git a/deps/v8/src/base/file-utils.cc b/deps/v8/src/base/file-utils.cc
index 2262df97d0..31b1b41190 100644
--- a/deps/v8/src/base/file-utils.cc
+++ b/deps/v8/src/base/file-utils.cc
@@ -10,13 +10,13 @@
#include "src/base/platform/platform.h"
namespace v8 {
-namespace internal {
+namespace base {
char* RelativePath(char** buffer, const char* exec_path, const char* name) {
DCHECK(exec_path);
int path_separator = static_cast<int>(strlen(exec_path)) - 1;
while (path_separator >= 0 &&
- !base::OS::isDirectorySeparator(exec_path[path_separator])) {
+ !OS::isDirectorySeparator(exec_path[path_separator])) {
path_separator--;
}
if (path_separator >= 0) {
@@ -32,5 +32,5 @@ char* RelativePath(char** buffer, const char* exec_path, const char* name) {
return *buffer;
}
-} // namespace internal
+} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/file-utils.h b/deps/v8/src/base/file-utils.h
index ce9e9a1c41..271f0ffb05 100644
--- a/deps/v8/src/base/file-utils.h
+++ b/deps/v8/src/base/file-utils.h
@@ -2,17 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FILE_UTILS_H_
-#define V8_FILE_UTILS_H_
+#ifndef V8_BASE_FILE_UTILS_H_
+#define V8_BASE_FILE_UTILS_H_
+
+#include "src/base/base-export.h"
namespace v8 {
-namespace internal {
+namespace base {
// Helper functions to manipulate file paths.
-char* RelativePath(char** buffer, const char* exec_path, const char* name);
+V8_BASE_EXPORT char* RelativePath(char** buffer, const char* exec_path,
+ const char* name);
-} // namespace internal
+} // namespace base
} // namespace v8
#endif // V8_FILE_UTILS_H_
diff --git a/deps/v8/src/base/functional.h b/deps/v8/src/base/functional.h
index ff0d8075b9..634e7bac85 100644
--- a/deps/v8/src/base/functional.h
+++ b/deps/v8/src/base/functional.h
@@ -13,6 +13,7 @@
#include <functional>
#include <utility>
+#include "src/base/base-export.h"
#include "src/base/macros.h"
namespace v8 {
@@ -67,7 +68,7 @@ struct hash;
V8_INLINE size_t hash_combine() { return 0u; }
V8_INLINE size_t hash_combine(size_t seed) { return seed; }
-size_t hash_combine(size_t seed, size_t value);
+V8_BASE_EXPORT size_t hash_combine(size_t seed, size_t value);
template <typename T, typename... Ts>
V8_INLINE size_t hash_combine(T const& v, Ts const&... vs) {
return hash_combine(hash_combine(vs...), hash<T>()(v));
@@ -91,9 +92,9 @@ V8_BASE_HASH_VALUE_TRIVIAL(unsigned char)
V8_BASE_HASH_VALUE_TRIVIAL(unsigned short) // NOLINT(runtime/int)
#undef V8_BASE_HASH_VALUE_TRIVIAL
-size_t hash_value(unsigned int);
-size_t hash_value(unsigned long); // NOLINT(runtime/int)
-size_t hash_value(unsigned long long); // NOLINT(runtime/int)
+V8_BASE_EXPORT size_t hash_value(unsigned int);
+V8_BASE_EXPORT size_t hash_value(unsigned long); // NOLINT(runtime/int)
+V8_BASE_EXPORT size_t hash_value(unsigned long long); // NOLINT(runtime/int)
#define V8_BASE_HASH_VALUE_SIGNED(type) \
V8_INLINE size_t hash_value(signed type v) { \
diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h
index 54038c5ef3..d2fc1337a6 100644
--- a/deps/v8/src/base/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -229,9 +229,8 @@ template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Clear() {
// Mark all entries as empty.
- const Entry* end = map_end();
- for (Entry* entry = map_; entry < end; entry++) {
- entry->clear();
+ for (size_t i = 0; i < capacity_; ++i) {
+ map_[i].clear();
}
occupancy_ = 0;
}
@@ -264,19 +263,15 @@ typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Probe(
const Key& key, uint32_t hash) const {
DCHECK(base::bits::IsPowerOfTwo32(capacity_));
- Entry* entry = map_ + (hash & (capacity_ - 1));
- const Entry* end = map_end();
- DCHECK(map_ <= entry && entry < end);
+ size_t i = hash & (capacity_ - 1);
+ DCHECK(i < capacity_);
DCHECK(occupancy_ < capacity_); // Guarantees loop termination.
- while (entry->exists() && !match_(hash, entry->hash, key, entry->key)) {
- entry++;
- if (entry >= end) {
- entry = map_;
- }
+ while (map_[i].exists() && !match_(hash, map_[i].hash, key, map_[i].key)) {
+ i = (i + 1) & (capacity_ - 1);
}
- return entry;
+ return &map_[i];
}
template <typename Key, typename Value, typename MatchFun,
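The rewritten Probe walks the table by index and wraps with a mask rather than comparing against map_end(), which works because capacity_ is kept a power of two (see the DCHECK above). A standalone sketch of the same open-addressing probe, with occupied and matches standing in for Entry::exists() and match_():

#include <stddef.h>
#include <stdint.h>

// Sketch only: linear probing over a power-of-two table.
size_t Probe(uint32_t hash, size_t capacity, const bool* occupied,
             bool (*matches)(size_t index)) {
  size_t i = hash & (capacity - 1);  // capacity must be a power of two
  while (occupied[i] && !matches(i)) {
    i = (i + 1) & (capacity - 1);    // wrap around without a branch
  }
  return i;
}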
diff --git a/deps/v8/src/base/ieee754.h b/deps/v8/src/base/ieee754.h
index 80523a1414..72f3db15ef 100644
--- a/deps/v8/src/base/ieee754.h
+++ b/deps/v8/src/base/ieee754.h
@@ -5,73 +5,75 @@
#ifndef V8_BASE_IEEE754_H_
#define V8_BASE_IEEE754_H_
+#include "src/base/base-export.h"
+
namespace v8 {
namespace base {
namespace ieee754 {
// Returns the arc cosine of |x|; that is the value whose cosine is |x|.
-double acos(double x);
+V8_BASE_EXPORT double acos(double x);
// Returns the inverse hyperbolic cosine of |x|; that is the value whose
// hyperbolic cosine is |x|.
-double acosh(double x);
+V8_BASE_EXPORT double acosh(double x);
// Returns the arc sine of |x|; that is the value whose sine is |x|.
-double asin(double x);
+V8_BASE_EXPORT double asin(double x);
// Returns the inverse hyperbolic sine of |x|; that is the value whose
// hyperbolic sine is |x|.
-double asinh(double x);
+V8_BASE_EXPORT double asinh(double x);
// Returns the principal value of the arc tangent of |x|; that is the value
// whose tangent is |x|.
-double atan(double x);
+V8_BASE_EXPORT double atan(double x);
// Returns the principal value of the arc tangent of |y/x|, using the signs of
// the two arguments to determine the quadrant of the result.
-double atan2(double y, double x);
+V8_BASE_EXPORT double atan2(double y, double x);
// Returns the cosine of |x|, where |x| is given in radians.
-double cos(double x);
+V8_BASE_EXPORT double cos(double x);
// Returns the base-e exponential of |x|.
-double exp(double x);
+V8_BASE_EXPORT double exp(double x);
-double atanh(double x);
+V8_BASE_EXPORT double atanh(double x);
// Returns the natural logarithm of |x|.
-double log(double x);
+V8_BASE_EXPORT double log(double x);
// Returns a value equivalent to |log(1+x)|, but computed in a way that is
// accurate even if the value of |x| is near zero.
-double log1p(double x);
+V8_BASE_EXPORT double log1p(double x);
// Returns the base 2 logarithm of |x|.
-double log2(double x);
+V8_BASE_EXPORT double log2(double x);
// Returns the base 10 logarithm of |x|.
-double log10(double x);
+V8_BASE_EXPORT double log10(double x);
// Returns the cube root of |x|.
-double cbrt(double x);
+V8_BASE_EXPORT double cbrt(double x);
// Returns exp(x)-1, the exponential of |x| minus 1.
-double expm1(double x);
+V8_BASE_EXPORT double expm1(double x);
// Returns the sine of |x|, where |x| is given in radians.
-double sin(double x);
+V8_BASE_EXPORT double sin(double x);
// Returns the tangent of |x|, where |x| is given in radians.
-double tan(double x);
+V8_BASE_EXPORT double tan(double x);
// Returns the hyperbolic cosine of |x|, where |x| is given radians.
-double cosh(double x);
+V8_BASE_EXPORT double cosh(double x);
// Returns the hyperbolic sine of |x|, where |x| is given radians.
-double sinh(double x);
+V8_BASE_EXPORT double sinh(double x);
// Returns the hyperbolic tangent of |x|, where |x| is given in radians.
-double tanh(double x);
+V8_BASE_EXPORT double tanh(double x);
} // namespace ieee754
} // namespace base
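Editor's note: the log1p contract documented above matters mostly for small inputs, because computing log(1 + x) directly loses all significant digits once x falls below the rounding granularity of 1.0. A quick illustration using the standard <cmath> equivalents (the v8::base::ieee754 functions follow the same contract):

#include <cmath>
#include <cstdio>

int main() {
  double x = 1e-17;
  // 1.0 + 1e-17 rounds to exactly 1.0 in double precision, so the naive
  // form returns 0.0, while log1p keeps the leading term of x - x*x/2 + ...
  std::printf("naive : %.17g\n", std::log(1.0 + x));  // prints 0
  std::printf("log1p : %.17g\n", std::log1p(x));      // prints ~1e-17
  return 0;
}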
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 50fceca88b..7bbb82a485 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -9,10 +9,11 @@
#include <sstream>
#include <string>
+#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
-extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN
+extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN V8_BASE_EXPORT
void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -87,8 +88,8 @@ std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
// in logging.cc.
-#define DEFINE_MAKE_CHECK_OP_STRING(type) \
- extern template std::string* MakeCheckOpString<type, type>( \
+#define DEFINE_MAKE_CHECK_OP_STRING(type) \
+ extern template V8_BASE_EXPORT std::string* MakeCheckOpString<type, type>( \
type const&, type const&, char const*);
DEFINE_MAKE_CHECK_OP_STRING(int)
DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
@@ -117,10 +118,11 @@ DEFINE_MAKE_CHECK_OP_STRING(void const*)
char const* msg) { \
return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
} \
- extern template std::string* Check##NAME##Impl<float, float>( \
+ extern template V8_BASE_EXPORT std::string* Check##NAME##Impl<float, float>( \
float const& lhs, float const& rhs, char const* msg); \
- extern template std::string* Check##NAME##Impl<double, double>( \
- double const& lhs, double const& rhs, char const* msg);
+ extern template V8_BASE_EXPORT std::string* \
+ Check##NAME##Impl<double, double>(double const& lhs, double const& rhs, \
+ char const* msg);
DEFINE_CHECK_OP_IMPL(EQ, ==)
DEFINE_CHECK_OP_IMPL(NE, !=)
DEFINE_CHECK_OP_IMPL(LE, <=)
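Editor's note: the annotations above put the export macro on explicit instantiation declarations, so users of the shared base library link against the instantiations compiled into it instead of emitting their own copies. A condensed, self-contained sketch of the pattern (illustrative names, with a stand-in export macro, not the actual logging code):

#include <string>

// Stand-in for the real macro; in the library it expands to a DLL
// export/import attribute depending on which side is being compiled.
#define V8_BASE_EXPORT

template <typename T>
std::string* MakeString(T const& value) {
  return new std::string(std::to_string(value));
}

// Declaration half (header): tells every translation unit that an
// instantiation for int exists and is exported from the library.
extern template V8_BASE_EXPORT std::string* MakeString<int>(int const&);

// Definition half (normally in the library's .cc file): actually emits
// the instantiation exactly once.
template std::string* MakeString<int>(int const&);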
diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h
index 790a8866e0..8008812d75 100644
--- a/deps/v8/src/base/once.h
+++ b/deps/v8/src/base/once.h
@@ -55,6 +55,7 @@
#include <stddef.h>
#include "src/base/atomicops.h"
+#include "src/base/base-export.h"
namespace v8 {
namespace base {
@@ -79,7 +80,8 @@ struct OneArgFunction {
typedef void (*type)(T);
};
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
+V8_BASE_EXPORT void CallOnceImpl(OnceType* once, PointerArgFunction init_func,
+ void* arg);
inline void CallOnce(OnceType* once, NoArgFunction init_func) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
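Editor's note: a typical call site looks like the hedged sketch below, assuming the V8_DECLARE_ONCE helper defined earlier in this header; the initializer runs exactly once even when several threads race into CallOnce (function names are illustrative):

#include "src/base/once.h"

namespace {

V8_DECLARE_ONCE(init_once);   // zero-initialized OnceType with static storage

void InitTables() {
  // ... expensive one-time setup ...
}

}  // namespace

void EnsureInitialized() {
  // All callers funnel through here; only the first executes InitTables(),
  // later callers hit the fast Acquire_Load path shown above.
  v8::base::CallOnce(&init_once, &InitTables);
}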
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index 72d6f28507..48e7c369ca 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
#define V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
+#include "src/base/base-export.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/mutex.h"
@@ -28,7 +29,7 @@ class TimeDelta;
// the mutex and suspend the execution of the calling thread. When the condition
// variable is notified, the thread is awakened, and the mutex is reacquired.
-class ConditionVariable final {
+class V8_BASE_EXPORT ConditionVariable final {
public:
ConditionVariable();
~ConditionVariable();
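Editor's note: the wait/notify protocol described in the comment is the standard one. A minimal sketch, assuming the usual Wait(Mutex*)/NotifyOne() members and the LockGuard helper from mutex.h:

#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"

struct WorkItem {
  v8::base::Mutex mutex;
  v8::base::ConditionVariable cv;
  bool has_work = false;

  void Consume() {
    v8::base::LockGuard<v8::base::Mutex> guard(&mutex);
    while (!has_work) {   // loop guards against spurious wakeups
      cv.Wait(&mutex);    // atomically releases |mutex| and suspends
    }                     // |mutex| is held again when Wait() returns
    has_work = false;
  }

  void Produce() {
    {
      v8::base::LockGuard<v8::base::Mutex> guard(&mutex);
      has_work = true;
    }
    cv.NotifyOne();       // wake one waiting consumer
  }
};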
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 61df19d66a..e7231bdd9e 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_PLATFORM_MUTEX_H_
#define V8_BASE_PLATFORM_MUTEX_H_
+#include "src/base/base-export.h"
#include "src/base/lazy-instance.h"
#if V8_OS_WIN
#include "src/base/win32-headers.h"
@@ -33,7 +34,7 @@ namespace base {
// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
// while still owned by some thread. The Mutex class is non-copyable.
-class Mutex final {
+class V8_BASE_EXPORT Mutex final {
public:
Mutex();
~Mutex();
@@ -127,7 +128,7 @@ typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
// The behavior of a program is undefined if a recursive mutex is destroyed
// while still owned by some thread. The RecursiveMutex class is non-copyable.
-class RecursiveMutex final {
+class V8_BASE_EXPORT RecursiveMutex final {
public:
RecursiveMutex();
~RecursiveMutex();
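Editor's note: Lock()/Unlock() pairs are normally expressed through the scoped LockGuard shown in the previous sketch; TryLock() is the non-blocking variant that returns false instead of waiting. A small hedged sketch of opportunistic locking:

#include "src/base/platform/mutex.h"

v8::base::Mutex stats_mutex;

void MaybeRecordSample(int sample) {
  // Opportunistic: drop the sample rather than block if another thread
  // currently holds the lock.
  if (stats_mutex.TryLock()) {
    // ... fold |sample| into the shared statistics ...
    stats_mutex.Unlock();
  }
}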
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index d3b6c9c1cf..5d570e7048 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -25,6 +25,7 @@
#include <string>
#include <vector>
+#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/mutex.h"
@@ -69,7 +70,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#define V8_FAST_TLS_SUPPORTED 1
-extern intptr_t kMacTlsBaseOffset;
+extern V8_BASE_EXPORT intptr_t kMacTlsBaseOffset;
INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
@@ -102,7 +103,7 @@ class TimezoneCache;
// functions. Add methods here to cope with differences between the
// supported platforms.
-class OS {
+class V8_BASE_EXPORT OS {
public:
// Initialize the OS class.
// - random_seed: Used for the GetRandomMmapAddress() if non-zero.
@@ -211,7 +212,7 @@ class OS {
char text[kStackWalkMaxTextLen];
};
- class MemoryMappedFile {
+ class V8_BASE_EXPORT MemoryMappedFile {
public:
virtual ~MemoryMappedFile() {}
virtual void* memory() const = 0;
@@ -286,7 +287,7 @@ class OS {
// Control of the reserved memory can be assigned to another VirtualMemory
// object by assignment or copy-constructing. This removes the reserved memory
// from the original object.
-class VirtualMemory {
+class V8_BASE_EXPORT VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
@@ -418,7 +419,7 @@ class VirtualMemory {
// thread. The Thread object should not be deallocated before the thread has
// terminated.
-class Thread {
+class V8_BASE_EXPORT Thread {
public:
// Opaque data type for thread-local storage keys.
typedef int32_t LocalStorageKey;
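Editor's note: Thread is used by deriving and overriding Run(). A hedged sketch, assuming the Start()/Join() members and the Options(name) constructor used elsewhere in the codebase:

#include "src/base/platform/platform.h"

class WorkerThread : public v8::base::Thread {
 public:
  WorkerThread() : Thread(Options("v8:Worker")) {}
  void Run() override {
    // Body executes on the new thread once Start() is called.
  }
};

void SpawnAndWait() {
  WorkerThread worker;
  worker.Start();   // creates the OS thread and invokes Run()
  worker.Join();    // blocks until Run() returns
}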
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index 39029c83fc..31aeca3d9b 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_PLATFORM_SEMAPHORE_H_
#define V8_BASE_PLATFORM_SEMAPHORE_H_
+#include "src/base/base-export.h"
#include "src/base/lazy-instance.h"
#if V8_OS_WIN
#include "src/base/win32-headers.h"
@@ -31,7 +32,7 @@ class TimeDelta;
// count reaches zero, threads waiting for the semaphore block until the
// count becomes non-zero.
-class Semaphore final {
+class V8_BASE_EXPORT Semaphore final {
public:
explicit Semaphore(int count);
~Semaphore();
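Editor's note: in practice the counter semantics read like the minimal sketch below; Signal() increments the count and Wait() decrements it, blocking while it is zero:

#include "src/base/platform/semaphore.h"

v8::base::Semaphore items_available(0);  // start with a count of zero

void Producer() {
  // ... enqueue an item ...
  items_available.Signal();  // count 0 -> 1, releases one waiting thread
}

void Consumer() {
  items_available.Wait();    // blocks until the count is non-zero, then decrements
  // ... dequeue the item ...
}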
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index be62014f91..ed1751268f 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -9,6 +9,7 @@
#include <iosfwd>
#include <limits>
+#include "src/base/base-export.h"
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/safe_math.h"
@@ -42,7 +43,7 @@ class TimeBase;
// This class represents a duration of time, internally represented in
// microseconds.
-class TimeDelta final {
+class V8_BASE_EXPORT TimeDelta final {
public:
TimeDelta() : delta_(0) {}
@@ -277,7 +278,7 @@ class TimeBase {
// This class represents an absolute point in time, internally represented as
// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
-class Time final : public time_internal::TimeBase<Time> {
+class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
public:
// Contains the NULL time. Use Time::Now() to get the current time.
Time() : TimeBase(0) {}
@@ -322,7 +323,7 @@ class Time final : public time_internal::TimeBase<Time> {
explicit Time(int64_t us) : TimeBase(us) {}
};
-std::ostream& operator<<(std::ostream&, const Time&);
+V8_BASE_EXPORT std::ostream& operator<<(std::ostream&, const Time&);
inline Time operator+(const TimeDelta& delta, const Time& time) {
return time + delta;
@@ -339,7 +340,8 @@ inline Time operator+(const TimeDelta& delta, const Time& time) {
// Time::Now() may actually decrease or jump). But note that TimeTicks may
// "stand still", for example if the computer suspended.
-class TimeTicks final : public time_internal::TimeBase<TimeTicks> {
+class V8_BASE_EXPORT TimeTicks final
+ : public time_internal::TimeBase<TimeTicks> {
public:
TimeTicks() : TimeBase(0) {}
@@ -376,7 +378,8 @@ inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
// Represents a clock, specific to a particular thread, that runs only while the
// thread is running.
-class ThreadTicks final : public time_internal::TimeBase<ThreadTicks> {
+class V8_BASE_EXPORT ThreadTicks final
+ : public time_internal::TimeBase<ThreadTicks> {
public:
ThreadTicks() : TimeBase(0) {}
@@ -408,6 +411,9 @@ class ThreadTicks final : public time_internal::TimeBase<ThreadTicks> {
#endif
private:
+ template <class TimeClass>
+ friend class time_internal::TimeBase;
+
// Please use Now() or GetForThread() to create a new object. This is for
// internal use and testing. Ticks are in microseconds.
explicit ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
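Editor's note: a typical use of the monotonic clock is interval measurement. A small sketch, assuming the usual Now() factory and TimeDelta accessors:

#include "src/base/platform/time.h"

double MeasureMilliseconds() {
  v8::base::TimeTicks start = v8::base::TimeTicks::Now();
  // ... work being timed ...
  v8::base::TimeDelta elapsed = v8::base::TimeTicks::Now() - start;
  // TimeTicks is monotonic, so |elapsed| is never negative even if the
  // wall clock (Time::Now()) is adjusted while the work runs.
  return elapsed.InMillisecondsF();
}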
diff --git a/deps/v8/src/base/ring-buffer.h b/deps/v8/src/base/ring-buffer.h
new file mode 100644
index 0000000000..b347977640
--- /dev/null
+++ b/deps/v8/src/base/ring-buffer.h
@@ -0,0 +1,54 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_RING_BUFFER_H_
+#define V8_BASE_RING_BUFFER_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+template <typename T>
+class RingBuffer {
+ public:
+ RingBuffer() { Reset(); }
+ static const int kSize = 10;
+ void Push(const T& value) {
+ if (count_ == kSize) {
+ elements_[start_++] = value;
+ if (start_ == kSize) start_ = 0;
+ } else {
+ DCHECK_EQ(start_, 0);
+ elements_[count_++] = value;
+ }
+ }
+
+ int Count() const { return count_; }
+
+ template <typename Callback>
+ T Sum(Callback callback, const T& initial) const {
+ int j = start_ + count_ - 1;
+ if (j >= kSize) j -= kSize;
+ T result = initial;
+ for (int i = 0; i < count_; i++) {
+ result = callback(result, elements_[j]);
+ if (--j == -1) j += kSize;
+ }
+ return result;
+ }
+
+ void Reset() { start_ = count_ = 0; }
+
+ private:
+ T elements_[kSize];
+ int start_;
+ int count_;
+ DISALLOW_COPY_AND_ASSIGN(RingBuffer);
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_RING_BUFFER_H_
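Editor's note: the new ring buffer keeps only the last kSize samples, overwriting the oldest, and Sum() folds the stored elements from newest to oldest. A short usage sketch based on the header added above:

#include "src/base/ring-buffer.h"

double AverageOfRecentSamples() {
  v8::base::RingBuffer<double> samples;
  for (int i = 0; i < 25; i++) {
    samples.Push(i * 1.0);  // only the last 10 values (kSize) survive
  }
  double total = samples.Sum(
      [](double acc, double value) { return acc + value; }, 0.0);
  return total / samples.Count();  // average of the values 15..24, i.e. 19.5
}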
diff --git a/deps/v8/src/base/sys-info.h b/deps/v8/src/base/sys-info.h
index 4504c82e37..772f44336f 100644
--- a/deps/v8/src/base/sys-info.h
+++ b/deps/v8/src/base/sys-info.h
@@ -6,12 +6,14 @@
#define V8_BASE_SYS_INFO_H_
#include <stdint.h>
+
+#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
namespace v8 {
namespace base {
-class SysInfo final {
+class V8_BASE_EXPORT SysInfo final {
public:
// Returns the number of logical processors/core on the current machine.
static int NumberOfProcessors();
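Editor's note: an illustrative call site for the exported API; NumberOfProcessors() is the only member shown in the hunk above.

#include "src/base/sys-info.h"

// Size a worker pool to the machine.
int WorkerPoolSize() {
  int cores = v8::base::SysInfo::NumberOfProcessors();
  return cores > 1 ? cores - 1 : 1;  // leave one core for the main thread
}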
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index cd3e6bfdc8..7a322b5332 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
#define V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#include "src/base/base-export.h"
#include "src/base/macros.h"
namespace v8 {
@@ -31,7 +32,7 @@ namespace base {
// https://code.google.com/p/v8/issues/detail?id=2905
// This class is neither reentrant nor threadsafe.
-class RandomNumberGenerator final {
+class V8_BASE_EXPORT RandomNumberGenerator final {
public:
// EntropySource is used as a callback function when V8 needs a source of
// entropy.
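Editor's note: typical use is per-thread construction, since the class is documented as neither reentrant nor threadsafe. A hedged sketch, assuming the NextInt(max) accessor used elsewhere in V8:

#include <utility>

#include "src/base/utils/random-number-generator.h"

void Shuffle(int* data, int length) {
  v8::base::RandomNumberGenerator rng;  // default ctor seeds automatically
  // Fisher-Yates shuffle; NextInt(max) returns a value in [0, max).
  for (int i = length - 1; i > 0; i--) {
    int j = rng.NextInt(i + 1);
    std::swap(data[i], data[j]);
  }
}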