author     Michaël Zasso <targos@protonmail.com>    2022-04-12 11:44:13 +0200
committer  Michaël Zasso <targos@protonmail.com>    2022-04-12 22:10:34 +0200
commit     58f3fdcccde30c115e68e7b9877f55bad1984545 (patch)
tree       6cd67ba3ea70b2f25e7a9ed5c9339986aca44d6d
parent     eba7d2db7fcb6f186e1da1327884a481d5c1d743 (diff)
download   node-new-58f3fdcccde30c115e68e7b9877f55bad1984545.tar.gz
deps: V8: cherry-pick semver-major commits from 10.2
Includes the following commits:

commit b2978927d8a96ebc814cccbc5a9f1c35910ee621

    Remove dynamic map checks and custom deoptimization kinds

    This CL removes:
    - Dynamic map checks aka minimorphic property loads (TF support,
      builtins).
    - "Bailout" deopts (= drop to the interpreter once, but don't throw
      out optimized code).
    - "EagerWithResume" deopts (= part of dynamic map check functionality,
      we call a builtin for the deopt check and deopt or resume based on
      the result).

    Fixed: v8:12552
    Change-Id: I492cf1667e0f54586690b2f72a65ea804224b840
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3401585

commit f52f934119635058b179c2359fe070b8ee0f9233

    PPC/s390: Remove dynamic map checks and custom deoptimization kinds

    Port b2978927d8a96ebc814cccbc5a9f1c35910ee621

    Original Commit Message:

        This CL removes:
        - Dynamic map checks aka minimorphic property loads (TF support,
          builtins).
        - "Bailout" deopts (= drop to the interpreter once, but don't
          throw out optimized code).
        - "EagerWithResume" deopts (= part of dynamic map check
          functionality, we call a builtin for the deopt check and deopt
          or resume based on the result).

    R=jgruber@chromium.org, joransiu@ca.ibm.com, junyan@redhat.com,
    midawson@redhat.com
    BUG=
    LOG=N
    Change-Id: I64476f73810774c2c592231d82c4a2cbfa2bf94e
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3537881

commit 38940b70986da6b43d18cb8cf8f0a3be36ca9010

    [loong64][mips] Remove dynamic map checks and custom deoptimization
    kinds

    Port commit b2978927d8a96ebc814cccbc5a9f1c35910ee621

    Fixed: v8:12552
    Change-Id: Ic2fbded9a662ed840a0350e3ce049e147fbf03a0
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3541527

commit da5b5f66a6bd27df6249602378300c6961bc62b4

    [riscv64] Remove dynamic map checks and custom deoptimization kinds

    Port b2978927d8a96ebc814cccbc5a9f1c35910ee621

    Bug: v8:12552
    Change-Id: I73e76fc5cc8905a0fbfc801b2f794735866d19e8
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3544725

commit ffae028b37991566c080c5528463f7d16017668c

    Forward deprecation for resurrecting finalizer

    Bug: v8:12672
    Change-Id: Ib4f53086436e028b4ea32fbc960f57e91709d184
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3532256

commit f6386018d472665e36d662c8b159d95325999d69

    [api] Remove TracedGlobal<>

    Remove deprecated TracedGlobal<>, greatly simplifying handling of
    traced references in general. Also saves a word per
    v8::TracedReference as there's no need to keep a possible callback
    around.

    Bug: v8:12603
    Change-Id: Ice35d7906775b912d02e97a27a722b3e1cec28d9
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3532251

commit a8beac553b0a1639bc9790c2d6f82caf6b2e150f

    Deprecate some signature checks

    Deprecate signature checks in
    * Template::SetNativeDataProperty
    * ObjectTemplate::SetAccessor

    These are not used in Chrome and require some complicated check in
    the IC code, which we want to remove.

    Change-Id: I413fafc8658e922fd590e7fe200600a624f019a6
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3557253

commit cff2b5000a1aa417a9c4499bcfa3ffda4542f4f1

    Deprecate signature checks in Set{Accessor,NativeDataProperty}

    Change from V8_DEPRECATE_SOON to V8_DEPRECATED. It turned out that we
    don't have to make changes in chrome code, so we can go to deprecated
    right away.

    Bug: chromium:1310790
    Change-Id: I1bd529536d3a0098f11f13b3e44fe3dbc80eed04
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3571897

commit 9238afb0c0ee52c9111a7e9f2f055137628771ad

    Allow embedder to set global OOM handler

    Embedders can currently specify a callback for OOM errors during
    Isolate initialization. However, there are cases where an OOM error
    can be thrown in a context where we don't have access to an Isolate,
    for example on a task posted to a worker thread. This CL introduces
    an initialization API to allow the embedder to specify a process-wide
    OOM callback.

    Bug: chromium:614440
    Change-Id: I326753d80767679f677e85104d9edeef92e19086
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3561916

commit ca51ae3ac8b468509603633adb6ee3b3be9306ec

    [api][profiler] Get StartProfiling, StopProfiling to accept integer
    ID rather than string

    This CL adds support for interacting with CpuProfile with their
    integer id. A String ID is problematic because it forces an
    allocation when stopping or cancelling a Profiler which can happen
    during a GC when this is not allowed.

    Change-Id: I9a8e754bd67214be0bbc5ca051bcadf52bf71a68
    Bug: chromium:1297283
    Co-Authored-By: Nicolas Dubus <nicodubus@fb.com>
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3522896

Refs: https://github.com/v8/v8/commit/b2978927d8a96ebc814cccbc5a9f1c35910ee621
Refs: https://github.com/v8/v8/commit/f52f934119635058b179c2359fe070b8ee0f9233
Refs: https://github.com/v8/v8/commit/38940b70986da6b43d18cb8cf8f0a3be36ca9010
Refs: https://github.com/v8/v8/commit/da5b5f66a6bd27df6249602378300c6961bc62b4
Refs: https://github.com/v8/v8/commit/ffae028b37991566c080c5528463f7d16017668c
Refs: https://github.com/v8/v8/commit/f6386018d472665e36d662c8b159d95325999d69
Refs: https://github.com/v8/v8/commit/a8beac553b0a1639bc9790c2d6f82caf6b2e150f
Refs: https://github.com/v8/v8/commit/cff2b5000a1aa417a9c4499bcfa3ffda4542f4f1
Refs: https://github.com/v8/v8/commit/9238afb0c0ee52c9111a7e9f2f055137628771ad
Refs: https://github.com/v8/v8/commit/ca51ae3ac8b468509603633adb6ee3b3be9306ec
PR-URL: https://github.com/nodejs/node/pull/42657
Reviewed-By: Darshan Sen <raisinten@gmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Michael Dawson <midawson@redhat.com>
-rw-r--r--  common.gypi | 2
-rw-r--r--  deps/v8/BUILD.bazel | 1
-rw-r--r--  deps/v8/BUILD.gn | 1
-rw-r--r--  deps/v8/include/v8-embedder-heap.h | 31
-rw-r--r--  deps/v8/include/v8-initialization.h | 17
-rw-r--r--  deps/v8/include/v8-internal.h | 4
-rw-r--r--  deps/v8/include/v8-local-handle.h | 4
-rw-r--r--  deps/v8/include/v8-object.h | 2
-rw-r--r--  deps/v8/include/v8-profiler.h | 66
-rw-r--r--  deps/v8/include/v8-template.h | 35
-rw-r--r--  deps/v8/include/v8-traced-handle.h | 253
-rw-r--r--  deps/v8/include/v8-weak-callback-info.h | 4
-rw-r--r--  deps/v8/src/api/api.cc | 144
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc | 72
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 88
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h | 8
-rw-r--r--  deps/v8/src/builtins/builtins-ic-gen.cc | 18
-rw-r--r--  deps/v8/src/builtins/builtins.h | 4
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 71
-rw-r--r--  deps/v8/src/builtins/ic-dynamic-check-maps.tq | 110
-rw-r--r--  deps/v8/src/builtins/loong64/builtins-loong64.cc | 72
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc | 72
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 71
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc | 74
-rw-r--r--  deps/v8/src/builtins/riscv64/builtins-riscv64.cc | 72
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc | 74
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc | 72
-rw-r--r--  deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h | 12
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.cc | 6
-rw-r--r--  deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h | 12
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 6
-rw-r--r--  deps/v8/src/codegen/bailout-reason.h | 157
-rw-r--r--  deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h | 14
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 10
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.h | 35
-rw-r--r--  deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h | 12
-rw-r--r--  deps/v8/src/codegen/loong64/macro-assembler-loong64.cc | 6
-rw-r--r--  deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h | 12
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 5
-rw-r--r--  deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h | 12
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 6
-rw-r--r--  deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h | 12
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 5
-rw-r--r--  deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h | 12
-rw-r--r--  deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc | 5
-rw-r--r--  deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h | 12
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc | 5
-rw-r--r--  deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h | 24
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.cc | 10
-rw-r--r--  deps/v8/src/common/globals.h | 20
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 37
-rw-r--r--  deps/v8/src/compiler/access-info.h | 34
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 33
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc | 86
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h | 6
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc | 42
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 32
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 25
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 54
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 12
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 4
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 5
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 55
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 7
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 54
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 4
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 1
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 44
-rw-r--r--  deps/v8/src/compiler/processed-feedback.h | 25
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 20
-rw-r--r--  deps/v8/src/compiler/property-access-builder.h | 6
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 5
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 42
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 38
-rw-r--r--  deps/v8/src/compiler/typer.cc | 2
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 5
-rw-r--r--  deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc | 6
-rw-r--r--  deps/v8/src/deoptimizer/deoptimize-reason.h | 2
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.cc | 54
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.h | 9
-rw-r--r--  deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc | 6
-rw-r--r--  deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc | 6
-rw-r--r--  deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc | 6
-rw-r--r--  deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc | 6
-rw-r--r--  deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc | 6
-rw-r--r--  deps/v8/src/handles/global-handles.cc | 213
-rw-r--r--  deps/v8/src/handles/global-handles.h | 12
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-heap.cc | 4
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc | 8
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h | 4
-rw-r--r--  deps/v8/src/objects/code-inl.h | 2
-rw-r--r--  deps/v8/src/objects/code.h | 4
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.cc | 33
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.h | 10
-rw-r--r--  deps/v8/src/profiler/profile-generator.cc | 96
-rw-r--r--  deps/v8/src/profiler/profile-generator.h | 22
-rw-r--r--  deps/v8/test/cctest/cctest.status | 13
-rw-r--r--  deps/v8/test/cctest/heap/test-embedder-tracing.cc | 609
-rw-r--r--  deps/v8/test/cctest/test-access-checks.cc | 18
-rw-r--r--  deps/v8/test/cctest/test-api-accessors.cc | 15
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 195
-rw-r--r--  deps/v8/test/cctest/test-assembler-ia32.cc | 23
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc | 105
-rw-r--r--  deps/v8/test/cctest/test-global-handles.cc | 94
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-arm.cc | 23
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-arm64.cc | 34
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-loong64.cc | 23
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-mips.cc | 23
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-mips64.cc | 23
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-riscv64.cc | 27
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-x64.cc | 23
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc | 38
116 files changed, 935 insertions, 3418 deletions
diff --git a/common.gypi b/common.gypi
index 33cd361e67..f2493726ef 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
# Reset this number to 0 on major V8 upgrades.
# Increment by one for each non-official patch applied to deps/v8.
- 'v8_embedder_string': '-node.12',
+ 'v8_embedder_string': '-node.13',
##### V8 defaults for Node.js #####
diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel
index a632cc4fe8..bc18ab8c27 100644
--- a/deps/v8/BUILD.bazel
+++ b/deps/v8/BUILD.bazel
@@ -740,7 +740,6 @@ filegroup(
"src/builtins/function.tq",
"src/builtins/growable-fixed-array.tq",
"src/builtins/ic-callable.tq",
- "src/builtins/ic-dynamic-check-maps.tq",
"src/builtins/ic.tq",
"src/builtins/internal-coverage.tq",
"src/builtins/internal.tq",
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 7b19ee86d8..9e801d2455 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -1670,7 +1670,6 @@ torque_files = [
"src/builtins/function.tq",
"src/builtins/growable-fixed-array.tq",
"src/builtins/ic-callable.tq",
- "src/builtins/ic-dynamic-check-maps.tq",
"src/builtins/ic.tq",
"src/builtins/internal-coverage.tq",
"src/builtins/internal.tq",
diff --git a/deps/v8/include/v8-embedder-heap.h b/deps/v8/include/v8-embedder-heap.h
index 43f96d7f0a..09dbae1fd8 100644
--- a/deps/v8/include/v8-embedder-heap.h
+++ b/deps/v8/include/v8-embedder-heap.h
@@ -34,29 +34,22 @@ class V8_EXPORT EmbedderRootsHandler {
virtual ~EmbedderRootsHandler() = default;
/**
- * Returns true if the TracedGlobal handle should be considered as root for
- * the currently running non-tracing garbage collection and false otherwise.
- * The default implementation will keep all TracedGlobal references as roots.
+ * Returns true if the |TracedReference| handle should be considered as root
+ * for the currently running non-tracing garbage collection and false
+ * otherwise. The default implementation will keep all |TracedReference|
+ * references as roots.
*
* If this returns false, then V8 may decide that the object referred to by
- * such a handle is reclaimed. In that case:
- * - No action is required if handles are used with destructors, i.e., by just
- * using |TracedGlobal|.
- * - When run without destructors, i.e., by using |TracedReference|, V8 calls
- * |ResetRoot|.
+ * such a handle is reclaimed. In that case, V8 calls |ResetRoot()| for the
+ * |TracedReference|.
*
- * Note that the |handle| is different from the handle that the embedder holds
+ * Note that the `handle` is different from the handle that the embedder holds
* for retaining the object. The embedder may use |WrapperClassId()| to
* distinguish cases where it wants handles to be treated as roots from not
* being treated as roots.
*/
virtual bool IsRoot(const v8::TracedReference<v8::Value>& handle) = 0;
- V8_DEPRECATED("See v8::TracedGlobal class comment.")
- virtual bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) {
- return true;
- }
-
/**
* Used in combination with |IsRoot|. Called by V8 when an
* object that is backed by a handle is reclaimed by a non-tracing garbage
@@ -87,13 +80,11 @@ class V8_EXPORT EmbedderHeapTracer {
};
/**
- * Interface for iterating through TracedGlobal handles.
+ * Interface for iterating through |TracedReference| handles.
*/
class V8_EXPORT TracedGlobalHandleVisitor {
public:
virtual ~TracedGlobalHandleVisitor() = default;
- V8_DEPRECATED("See v8::TracedGlobal class comment.")
- virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& handle) {}
virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
};
@@ -118,8 +109,8 @@ class V8_EXPORT EmbedderHeapTracer {
virtual ~EmbedderHeapTracer() = default;
/**
- * Iterates all TracedGlobal handles created for the v8::Isolate the tracer is
- * attached to.
+ * Iterates all |TracedReference| handles created for the |v8::Isolate| the
+ * tracer is attached to.
*/
void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor);
@@ -194,8 +185,6 @@ class V8_EXPORT EmbedderHeapTracer {
*/
virtual bool IsRootForNonTracingGC(
const v8::TracedReference<v8::Value>& handle);
- V8_DEPRECATED("See v8::TracedGlobal class comment.")
- virtual bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle);
/**
* See documentation on EmbedderRootsHandler.
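With the TracedGlobal<> overloads gone, EmbedderRootsHandler becomes a pure
TracedReference interface. A minimal sketch of a handler written against this
updated contract follows; the wrapper-class-id scheme and kMyWrapperClassId
are illustrative embedder choices, not part of V8:

    #include "v8-embedder-heap.h"
    #include "v8-traced-handle.h"

    // Hypothetical handler: handles carrying our wrapper class id may be
    // dropped by a non-tracing GC; everything else stays a root.
    class MyRootsHandler final : public v8::EmbedderRootsHandler {
     public:
      static constexpr uint16_t kMyWrapperClassId = 1;  // illustrative value

      bool IsRoot(const v8::TracedReference<v8::Value>& handle) final {
        return handle.WrapperClassId() != kMyWrapperClassId;
      }

      void ResetRoot(const v8::TracedReference<v8::Value>& handle) final {
        // V8 has reclaimed the object; clear the embedder's copy of this
        // handle so it is never dereferenced again (the lookup from handle
        // to embedder object is left abstract here).
      }
    };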
diff --git a/deps/v8/include/v8-initialization.h b/deps/v8/include/v8-initialization.h
index 99022cec45..3d59c73f7c 100644
--- a/deps/v8/include/v8-initialization.h
+++ b/deps/v8/include/v8-initialization.h
@@ -8,10 +8,11 @@
#include <stddef.h>
#include <stdint.h>
-#include "v8-internal.h" // NOLINT(build/include_directory)
-#include "v8-isolate.h" // NOLINT(build/include_directory)
-#include "v8-platform.h" // NOLINT(build/include_directory)
-#include "v8config.h" // NOLINT(build/include_directory)
+#include "v8-callbacks.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-isolate.h" // NOLINT(build/include_directory)
+#include "v8-platform.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
// We reserve the V8_* prefix for macros defined in V8 public API and
// assume there are no name conflicts with the embedder's code.
@@ -276,6 +277,14 @@ class V8_EXPORT V8 {
#endif
/**
+ * Allows the host application to provide a callback that will be called when
+ * v8 has encountered a fatal failure to allocate memory and is about to
+ * terminate.
+ */
+
+ static void SetFatalMemoryErrorCallback(OOMErrorCallback oom_error_callback);
+
+ /**
* Get statistics about the shared memory usage.
*/
static void GetSharedMemoryStatistics(SharedMemoryStatistics* statistics);
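A usage sketch of the new process-wide hook, assuming the two-argument
OOMErrorCallback signature declared in v8-callbacks.h at this revision
(failure location plus a heap-vs-process flag); the function names around the
call are illustrative:

    #include <cstdio>
    #include <cstdlib>

    #include "v8-initialization.h"

    // Process-wide OOM hook; unlike the per-Isolate callback, this also
    // fires where no Isolate is available, e.g. on a worker-thread task.
    void OnFatalMemoryError(const char* location, bool is_heap_oom) {
      std::fprintf(stderr, "V8 fatal OOM at %s (heap OOM: %d)\n", location,
                   is_heap_oom);
      std::abort();  // V8 terminates anyway; fail fast with our own logging.
    }

    void InstallOomHook() {
      v8::V8::SetFatalMemoryErrorCallback(OnFatalMemoryError);
    }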
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 196518a2db..e6e9cc5f9f 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -365,8 +365,8 @@ class Internals {
static const uint32_t kNumIsolateDataSlots = 4;
static const int kStackGuardSize = 7 * kApiSystemPointerSize;
- static const int kBuiltinTier0EntryTableSize = 13 * kApiSystemPointerSize;
- static const int kBuiltinTier0TableSize = 13 * kApiSystemPointerSize;
+ static const int kBuiltinTier0EntryTableSize = 10 * kApiSystemPointerSize;
+ static const int kBuiltinTier0TableSize = 10 * kApiSystemPointerSize;
// IsolateData layout guarantees.
static const int kIsolateCageBaseOffset = 0;
diff --git a/deps/v8/include/v8-local-handle.h b/deps/v8/include/v8-local-handle.h
index 66a8e93af6..5ae974081f 100644
--- a/deps/v8/include/v8-local-handle.h
+++ b/deps/v8/include/v8-local-handle.h
@@ -46,8 +46,6 @@ class String;
template <class F>
class Traced;
template <class F>
-class TracedGlobal;
-template <class F>
class TracedReference;
class TracedReferenceBase;
class Utils;
@@ -312,8 +310,6 @@ class Local {
template <class F>
friend class Traced;
template <class F>
- friend class TracedGlobal;
- template <class F>
friend class BasicTracedReference;
template <class F>
friend class TracedReference;
diff --git a/deps/v8/include/v8-object.h b/deps/v8/include/v8-object.h
index 11ff03dd20..bad299fc42 100644
--- a/deps/v8/include/v8-object.h
+++ b/deps/v8/include/v8-object.h
@@ -493,7 +493,7 @@ class V8_EXPORT Object : public Value {
return object.val_->GetAlignedPointerFromInternalField(index);
}
- /** Same as above, but works for TracedGlobal. */
+ /** Same as above, but works for TracedReference. */
V8_INLINE static void* GetAlignedPointerFromInternalField(
const BasicTracedReference<Object>& object, int index) {
return object->GetAlignedPointerFromInternalField(index);
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index c9a2704f7b..2681040732 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -28,6 +28,7 @@ enum StateTag : int;
using NativeObject = void*;
using SnapshotObjectId = uint32_t;
+using ProfilerId = uint32_t;
struct CpuProfileDeoptFrame {
int script_id;
@@ -274,14 +275,32 @@ enum class CpuProfilingStatus {
};
/**
+ * Result from StartProfiling returning the Profiling Status, and
+ * id of the started profiler, or 0 if profiler is not started
+ */
+struct CpuProfilingResult {
+ const ProfilerId id;
+ const CpuProfilingStatus status;
+};
+
+/**
* Delegate for when max samples reached and samples are discarded.
*/
class V8_EXPORT DiscardedSamplesDelegate {
public:
- DiscardedSamplesDelegate() {}
+ DiscardedSamplesDelegate() = default;
virtual ~DiscardedSamplesDelegate() = default;
virtual void Notify() = 0;
+
+ ProfilerId GetId() const { return profiler_id_; }
+
+ private:
+ friend internal::CpuProfile;
+
+ void SetId(ProfilerId id) { profiler_id_ = id; }
+
+ ProfilerId profiler_id_;
};
/**
@@ -372,6 +391,45 @@ class V8_EXPORT CpuProfiler {
void SetUsePreciseSampling(bool);
/**
+ * Starts collecting a CPU profile. Several profiles may be collected at once.
+ * Generates an anonymous profiler, without a String identifier.
+ */
+ CpuProfilingResult Start(
+ CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
+
+ /**
+ * Starts collecting a CPU profile. Title may be an empty string. Several
+ * profiles may be collected at once. Attempts to start collecting several
+ * profiles with the same title are silently ignored.
+ */
+ CpuProfilingResult Start(
+ Local<String> title, CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
+
+ /**
+ * Starts profiling with the same semantics as above, except with expanded
+ * parameters.
+ *
+ * |record_samples| parameter controls whether individual samples should
+ * be recorded in addition to the aggregated tree.
+ *
+ * |max_samples| controls the maximum number of samples that should be
+ * recorded by the profiler. Samples obtained after this limit will be
+ * discarded.
+ */
+ CpuProfilingResult Start(
+ Local<String> title, CpuProfilingMode mode, bool record_samples = false,
+ unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
+
+ /**
+ * The same as StartProfiling above, but the CpuProfilingMode defaults to
+ * kLeafNodeLineNumbers mode, which was the previous default behavior of the
+ * profiler.
+ */
+ CpuProfilingResult Start(Local<String> title, bool record_samples = false);
+
+ /**
* Starts collecting a CPU profile. Title may be an empty string. Several
* profiles may be collected at once. Attempts to start collecting several
* profiles with the same title are silently ignored.
@@ -394,6 +452,7 @@ class V8_EXPORT CpuProfiler {
CpuProfilingStatus StartProfiling(
Local<String> title, CpuProfilingMode mode, bool record_samples = false,
unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
+
/**
* The same as StartProfiling above, but the CpuProfilingMode defaults to
* kLeafNodeLineNumbers mode, which was the previous default behavior of the
@@ -403,6 +462,11 @@ class V8_EXPORT CpuProfiler {
bool record_samples = false);
/**
+ * Stops collecting CPU profile with a given id and returns it.
+ */
+ CpuProfile* Stop(ProfilerId id);
+
+ /**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
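Taken together, a minimal sketch of the new id-based flow, which avoids
allocating a title string when stopping a profile (the motivation cited in
the upstream commit). It assumes a live v8::Isolate* isolate:

    #include "v8-profiler.h"

    // Start an anonymous profile and stop it by integer id rather than by
    // String title.
    void ProfileSomething(v8::Isolate* isolate) {
      v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
      v8::CpuProfilingResult result =
          profiler->Start(v8::CpuProfilingOptions());
      if (result.status == v8::CpuProfilingStatus::kStarted) {
        // ... run the code to be profiled ...
        v8::CpuProfile* profile = profiler->Stop(result.id);
        // ... inspect profile->GetTopDownRoot(), samples, etc. ...
        profile->Delete();
      }
      profiler->Dispose();
    }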
diff --git a/deps/v8/include/v8-template.h b/deps/v8/include/v8-template.h
index 96fcab6074..0afdccaafb 100644
--- a/deps/v8/include/v8-template.h
+++ b/deps/v8/include/v8-template.h
@@ -89,11 +89,26 @@ class V8_EXPORT Template : public Data {
* defined by FunctionTemplate::HasInstance()), an implicit TypeError is
* thrown and no callback is invoked.
*/
+ V8_DEPRECATED("Do signature check in accessor")
+ void SetNativeDataProperty(
+ Local<String> name, AccessorGetterCallback getter,
+ AccessorSetterCallback setter, Local<Value> data,
+ PropertyAttribute attribute, Local<AccessorSignature> signature,
+ AccessControl settings = DEFAULT,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+ V8_DEPRECATED("Do signature check in accessor")
+ void SetNativeDataProperty(
+ Local<Name> name, AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter, Local<Value> data,
+ PropertyAttribute attribute, Local<AccessorSignature> signature,
+ AccessControl settings = DEFAULT,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
void SetNativeDataProperty(
Local<String> name, AccessorGetterCallback getter,
AccessorSetterCallback setter = nullptr,
Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
AccessControl settings = DEFAULT,
SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
@@ -101,7 +116,6 @@ class V8_EXPORT Template : public Data {
Local<Name> name, AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter = nullptr,
Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
AccessControl settings = DEFAULT,
SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
@@ -813,12 +827,26 @@ class V8_EXPORT ObjectTemplate : public Template {
* defined by FunctionTemplate::HasInstance()), an implicit TypeError is
* thrown and no callback is invoked.
*/
+ V8_DEPRECATED("Do signature check in accessor")
+ void SetAccessor(
+ Local<String> name, AccessorGetterCallback getter,
+ AccessorSetterCallback setter, Local<Value> data, AccessControl settings,
+ PropertyAttribute attribute, Local<AccessorSignature> signature,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+ V8_DEPRECATED("Do signature check in accessor")
+ void SetAccessor(
+ Local<Name> name, AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter, Local<Value> data,
+ AccessControl settings, PropertyAttribute attribute,
+ Local<AccessorSignature> signature,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
void SetAccessor(
Local<String> name, AccessorGetterCallback getter,
AccessorSetterCallback setter = nullptr,
Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
void SetAccessor(
@@ -826,7 +854,6 @@ class V8_EXPORT ObjectTemplate : public Template {
AccessorNameSetterCallback setter = nullptr,
Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
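The deprecation message says to do the signature check in the accessor. A
hedged sketch of that replacement, with g_expected_template standing in for a
hypothetical embedder-owned v8::Global<v8::FunctionTemplate>:

    #include "v8.h"

    extern v8::Global<v8::FunctionTemplate> g_expected_template;

    // Instead of passing a Local<AccessorSignature>, verify the receiver
    // inside the accessor itself and throw on mismatch.
    void CheckedGetter(v8::Local<v8::Name> name,
                       const v8::PropertyCallbackInfo<v8::Value>& info) {
      v8::Isolate* isolate = info.GetIsolate();
      if (!g_expected_template.Get(isolate)->HasInstance(info.Holder())) {
        isolate->ThrowError("Illegal invocation");
        return;
      }
      // ... compute and set the real return value ...
      info.GetReturnValue().SetUndefined();
    }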
diff --git a/deps/v8/include/v8-traced-handle.h b/deps/v8/include/v8-traced-handle.h
index 2dcd1d1fb9..7719b9bc01 100644
--- a/deps/v8/include/v8-traced-handle.h
+++ b/deps/v8/include/v8-traced-handle.h
@@ -27,8 +27,6 @@ namespace internal {
class BasicTracedReferenceExtractor;
-enum class GlobalHandleDestructionMode { kWithDestructor, kWithoutDestructor };
-
enum class GlobalHandleStoreMode {
kInitializingStore,
kAssigningStore,
@@ -36,25 +34,15 @@ enum class GlobalHandleStoreMode {
V8_EXPORT internal::Address* GlobalizeTracedReference(
internal::Isolate* isolate, internal::Address* handle,
- internal::Address* slot, GlobalHandleDestructionMode destruction_mode,
- GlobalHandleStoreMode store_mode);
-V8_EXPORT void MoveTracedGlobalReference(internal::Address** from,
- internal::Address** to);
-V8_EXPORT void CopyTracedGlobalReference(const internal::Address* const* from,
- internal::Address** to);
-V8_EXPORT void DisposeTracedGlobal(internal::Address* global_handle);
-V8_EXPORT void SetFinalizationCallbackTraced(
- internal::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback callback);
+ internal::Address* slot, GlobalHandleStoreMode store_mode);
+V8_EXPORT void MoveTracedReference(internal::Address** from,
+ internal::Address** to);
+V8_EXPORT void CopyTracedReference(const internal::Address* const* from,
+ internal::Address** to);
+V8_EXPORT void DisposeTracedReference(internal::Address* global_handle);
} // namespace internal
-/**
- * Deprecated. Use |TracedReference<T>| instead.
- */
-template <typename T>
-struct TracedGlobalTrait {};
-
class TracedReferenceBase {
public:
/**
@@ -138,9 +126,8 @@ class TracedReferenceBase {
* |v8::EmbedderRootsHandler::IsRoot()| whether the handle should
* be treated as root or not.
*
- * Note that the base class cannot be instantiated itself. Choose from
- * - TracedGlobal
- * - TracedReference
+ * Note that the base class cannot be instantiated itself, use |TracedReference|
+ * instead.
*/
template <typename T>
class BasicTracedReference : public TracedReferenceBase {
@@ -177,7 +164,6 @@ class BasicTracedReference : public TracedReferenceBase {
V8_INLINE static internal::Address* New(
Isolate* isolate, T* that, void* slot,
- internal::GlobalHandleDestructionMode destruction_mode,
internal::GlobalHandleStoreMode store_mode);
friend class EmbedderHeapTracer;
@@ -185,8 +171,6 @@ class BasicTracedReference : public TracedReferenceBase {
friend class Local;
friend class Object;
template <typename F>
- friend class TracedGlobal;
- template <typename F>
friend class TracedReference;
template <typename F>
friend class BasicTracedReference;
@@ -195,146 +179,6 @@ class BasicTracedReference : public TracedReferenceBase {
};
/**
- * A traced handle with destructor that clears the handle. For more details see
- * BasicTracedReference.
- *
- * This type is being deprecated and embedders are encouraged to use
- * `v8::TracedReference` in combination with `v8::CppHeap`. If this is not
- * possible, the following provides feature parity:
- *
- * \code
- * template <typename T>
- * struct TracedGlobalPolyfill {
- * v8::TracedReference<T> traced_reference;
- * v8::Global<T> weak_reference_for_callback;
- * };
- * \endcode
- *
- * In this example, `weak_reference_for_callback` can be used to emulate
- * `SetFinalizationCallback()`.
- */
-template <typename T>
-class TracedGlobal : public BasicTracedReference<T> {
- public:
- using BasicTracedReference<T>::Reset;
-
- /**
- * Destructor resetting the handle.Is
- */
- ~TracedGlobal() { this->Reset(); }
-
- /**
- * An empty TracedGlobal without storage cell.
- */
- V8_DEPRECATED("See class comment.")
- TracedGlobal() : BasicTracedReference<T>() {}
-
- /**
- * Construct a TracedGlobal from a Local.
- *
- * When the Local is non-empty, a new storage cell is created
- * pointing to the same object.
- */
- template <class S>
- V8_DEPRECATED("See class comment.")
- TracedGlobal(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
- this->val_ =
- this->New(isolate, that.val_, &this->val_,
- internal::GlobalHandleDestructionMode::kWithDestructor,
- internal::GlobalHandleStoreMode::kInitializingStore);
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Move constructor initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedGlobal(TracedGlobal&& other) noexcept {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Move constructor initializing TracedGlobal from an existing one.
- */
- template <typename S>
- V8_INLINE TracedGlobal(TracedGlobal<S>&& other) noexcept {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Copy constructor initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedGlobal(const TracedGlobal& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Copy constructor initializing TracedGlobal from an existing one.
- */
- template <typename S>
- V8_INLINE TracedGlobal(const TracedGlobal<S>& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Move assignment operator initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs) noexcept;
-
- /**
- * Move assignment operator initializing TracedGlobal from an existing one.
- */
- template <class S>
- V8_INLINE TracedGlobal& operator=(TracedGlobal<S>&& rhs) noexcept;
-
- /**
- * Copy assignment operator initializing TracedGlobal from an existing one.
- *
- * Note: Prohibited when |other| has a finalization callback set through
- * |SetFinalizationCallback|.
- */
- V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs);
-
- /**
- * Copy assignment operator initializing TracedGlobal from an existing one.
- *
- * Note: Prohibited when |other| has a finalization callback set through
- * |SetFinalizationCallback|.
- */
- template <class S>
- V8_INLINE TracedGlobal& operator=(const TracedGlobal<S>& rhs);
-
- /**
- * If non-empty, destroy the underlying storage cell and create a new one with
- * the contents of other if other is non empty
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
-
- template <class S>
- V8_INLINE TracedGlobal<S>& As() const {
- return reinterpret_cast<TracedGlobal<S>&>(
- const_cast<TracedGlobal<T>&>(*this));
- }
-
- /**
- * Adds a finalization callback to the handle. The type of this callback is
- * similar to WeakCallbackType::kInternalFields, i.e., it will pass the
- * parameter and the first two internal fields of the object.
- *
- * The callback is then supposed to reset the handle in the callback. No
- * further V8 API may be called in this callback. In case additional work
- * involving V8 needs to be done, a second callback can be scheduled using
- * WeakCallbackInfo<void>::SetSecondPassCallback.
- */
- V8_INLINE void SetFinalizationCallback(
- void* parameter, WeakCallbackInfo<void>::Callback callback);
-};
-
-/**
* A traced handle without destructor that clears the handle. The embedder needs
* to ensure that the handle is not accessed once the V8 object has been
* reclaimed. This can happen when the handle is not passed through the
@@ -363,10 +207,8 @@ class TracedReference : public BasicTracedReference<T> {
*/
template <class S>
TracedReference(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
- this->val_ =
- this->New(isolate, that.val_, &this->val_,
- internal::GlobalHandleDestructionMode::kWithoutDestructor,
- internal::GlobalHandleStoreMode::kInitializingStore);
+ this->val_ = this->New(isolate, that.val_, &this->val_,
+ internal::GlobalHandleStoreMode::kInitializingStore);
static_assert(std::is_base_of<T, S>::value, "type check");
}
@@ -409,23 +251,23 @@ class TracedReference : public BasicTracedReference<T> {
}
/**
- * Move assignment operator initializing TracedGlobal from an existing one.
+ * Move assignment operator initializing TracedReference from an existing one.
*/
V8_INLINE TracedReference& operator=(TracedReference&& rhs) noexcept;
/**
- * Move assignment operator initializing TracedGlobal from an existing one.
+ * Move assignment operator initializing TracedReference from an existing one.
*/
template <class S>
V8_INLINE TracedReference& operator=(TracedReference<S>&& rhs) noexcept;
/**
- * Copy assignment operator initializing TracedGlobal from an existing one.
+ * Copy assignment operator initializing TracedReference from an existing one.
*/
V8_INLINE TracedReference& operator=(const TracedReference& rhs);
/**
- * Copy assignment operator initializing TracedGlobal from an existing one.
+ * Copy assignment operator initializing TracedReference from an existing one.
*/
template <class S>
V8_INLINE TracedReference& operator=(const TracedReference<S>& rhs);
@@ -448,18 +290,17 @@ class TracedReference : public BasicTracedReference<T> {
template <class T>
internal::Address* BasicTracedReference<T>::New(
Isolate* isolate, T* that, void* slot,
- internal::GlobalHandleDestructionMode destruction_mode,
internal::GlobalHandleStoreMode store_mode) {
if (that == nullptr) return nullptr;
internal::Address* p = reinterpret_cast<internal::Address*>(that);
return internal::GlobalizeTracedReference(
reinterpret_cast<internal::Isolate*>(isolate), p,
- reinterpret_cast<internal::Address*>(slot), destruction_mode, store_mode);
+ reinterpret_cast<internal::Address*>(slot), store_mode);
}
void TracedReferenceBase::Reset() {
if (IsEmpty()) return;
- internal::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(val_));
+ internal::DisposeTracedReference(reinterpret_cast<internal::Address*>(val_));
SetSlotThreadSafe(nullptr);
}
@@ -513,7 +354,6 @@ void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
if (other.IsEmpty()) return;
this->SetSlotThreadSafe(
this->New(isolate, other.val_, &this->val_,
- internal::GlobalHandleDestructionMode::kWithoutDestructor,
internal::GlobalHandleStoreMode::kAssigningStore));
}
@@ -539,7 +379,7 @@ template <class T>
TracedReference<T>& TracedReference<T>::operator=(
TracedReference&& rhs) noexcept {
if (this != &rhs) {
- internal::MoveTracedGlobalReference(
+ internal::MoveTracedReference(
reinterpret_cast<internal::Address**>(&rhs.val_),
reinterpret_cast<internal::Address**>(&this->val_));
}
@@ -551,7 +391,7 @@ TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
if (this != &rhs) {
this->Reset();
if (rhs.val_ != nullptr) {
- internal::CopyTracedGlobalReference(
+ internal::CopyTracedReference(
reinterpret_cast<const internal::Address* const*>(&rhs.val_),
reinterpret_cast<internal::Address**>(&this->val_));
}
@@ -575,63 +415,6 @@ uint16_t TracedReferenceBase::WrapperClassId() const {
return *reinterpret_cast<uint16_t*>(addr);
}
-template <class T>
-template <class S>
-void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- Reset();
- if (other.IsEmpty()) return;
- this->val_ = this->New(isolate, other.val_, &this->val_,
- internal::GlobalHandleDestructionMode::kWithDestructor,
- internal::GlobalHandleStoreMode::kAssigningStore);
-}
-
-template <class T>
-template <class S>
-TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) noexcept {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = std::move(rhs.template As<T>());
- return *this;
-}
-
-template <class T>
-template <class S>
-TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal<S>& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = rhs.template As<T>();
- return *this;
-}
-
-template <class T>
-TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal&& rhs) noexcept {
- if (this != &rhs) {
- internal::MoveTracedGlobalReference(
- reinterpret_cast<internal::Address**>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- return *this;
-}
-
-template <class T>
-TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
- if (this != &rhs) {
- this->Reset();
- if (rhs.val_ != nullptr) {
- internal::CopyTracedGlobalReference(
- reinterpret_cast<const internal::Address* const*>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- }
- return *this;
-}
-
-template <class T>
-void TracedGlobal<T>::SetFinalizationCallback(
- void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
- internal::SetFinalizationCallbackTraced(
- reinterpret_cast<internal::Address*>(this->val_), parameter, callback);
-}
-
} // namespace v8
#endif // INCLUDE_V8_TRACED_HANDLE_H_
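The removed class comment above gives the migration recipe. A sketch of that
TracedGlobalPolyfill in use, where a weak v8::Global emulates the removed
SetFinalizationCallback(); the EmulateFinalizer helper is illustrative, not a
V8 API:

    #include "v8.h"

    // From the removed TracedGlobal comment: feature parity via a pair of
    // handles. The weak Global drives the finalization callback that
    // TracedGlobal used to provide.
    template <typename T>
    struct TracedGlobalPolyfill {
      v8::TracedReference<T> traced_reference;
      v8::Global<T> weak_reference_for_callback;
    };

    // Illustrative wiring: a kParameter weak callback stands in for
    // SetFinalizationCallback() and clears the traced reference.
    template <typename T>
    void EmulateFinalizer(v8::Isolate* isolate, v8::Local<T> object,
                          TracedGlobalPolyfill<T>* pair) {
      pair->traced_reference.Reset(isolate, object);
      pair->weak_reference_for_callback.Reset(isolate, object);
      pair->weak_reference_for_callback.SetWeak(
          pair,
          [](const v8::WeakCallbackInfo<TracedGlobalPolyfill<T>>& info) {
            info.GetParameter()->traced_reference.Reset();
          },
          v8::WeakCallbackType::kParameter);
    }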
diff --git a/deps/v8/include/v8-weak-callback-info.h b/deps/v8/include/v8-weak-callback-info.h
index f1677e9da0..6d0fb3ac1d 100644
--- a/deps/v8/include/v8-weak-callback-info.h
+++ b/deps/v8/include/v8-weak-callback-info.h
@@ -68,8 +68,8 @@ enum class WeakCallbackType {
* before the object is actually reclaimed, allowing it to be resurrected. In
* this case it is not possible to set a second-pass callback.
*/
- kFinalizer V8_ENUM_DEPRECATE_SOON("Resurrecting finalizers are deprecated "
- "and will not be supported going forward.")
+ kFinalizer V8_ENUM_DEPRECATED("Resurrecting finalizers are deprecated "
+ "and will not be supported going forward.")
};
template <class T>
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 8aaf091280..29d4bea237 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -172,6 +172,8 @@
namespace v8 {
+static OOMErrorCallback g_oom_error_callback = nullptr;
+
static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
i::Handle<i::Script> script) {
i::Handle<i::Object> scriptName(script->GetNameOrSourceURL(), isolate);
@@ -228,8 +230,9 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
memset(last_few_messages, 0x0BADC0DE, Heap::kTraceRingBufferSize + 1);
memset(js_stacktrace, 0x0BADC0DE, Heap::kStacktraceBufferSize + 1);
memset(&heap_stats, 0xBADC0DE, sizeof(heap_stats));
- // Note that the embedder's oom handler is also not available and therefore
- // won't be called in this case. We just crash.
+ // Give the embedder a chance to handle the condition. If it doesn't,
+ // just crash.
+ if (g_oom_error_callback) g_oom_error_callback(location, is_heap_oom);
FATAL("Fatal process out of memory: %s", location);
UNREACHABLE();
}
@@ -304,6 +307,7 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
}
}
Utils::ReportOOMFailure(isolate, location, is_heap_oom);
+ if (g_oom_error_callback) g_oom_error_callback(location, is_heap_oom);
// If the fatal error handler returns, we stop execution.
FATAL("API fatal error handler returned after process out of memory");
}
@@ -807,17 +811,16 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
namespace internal {
-i::Address* GlobalizeTracedReference(
- i::Isolate* isolate, i::Address* obj, internal::Address* slot,
- GlobalHandleDestructionMode destruction_mode,
- GlobalHandleStoreMode store_mode) {
+i::Address* GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
+ internal::Address* slot,
+ GlobalHandleStoreMode store_mode) {
LOG_API(isolate, TracedGlobal, New);
#ifdef DEBUG
Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
"the address slot must be not null");
#endif
- i::Handle<i::Object> result = isolate->global_handles()->CreateTraced(
- *obj, slot, destruction_mode, store_mode);
+ i::Handle<i::Object> result =
+ isolate->global_handles()->CreateTraced(*obj, slot, store_mode);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
i::Object(*obj).ObjectVerify(isolate);
@@ -826,24 +829,17 @@ i::Address* GlobalizeTracedReference(
return result.location();
}
-void MoveTracedGlobalReference(internal::Address** from,
- internal::Address** to) {
- GlobalHandles::MoveTracedGlobal(from, to);
-}
-
-void CopyTracedGlobalReference(const internal::Address* const* from,
- internal::Address** to) {
- GlobalHandles::CopyTracedGlobal(from, to);
+void MoveTracedReference(internal::Address** from, internal::Address** to) {
+ GlobalHandles::MoveTracedReference(from, to);
}
-void DisposeTracedGlobal(internal::Address* location) {
- GlobalHandles::DestroyTraced(location);
+void CopyTracedReference(const internal::Address* const* from,
+ internal::Address** to) {
+ GlobalHandles::CopyTracedReference(from, to);
}
-void SetFinalizationCallbackTraced(internal::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback callback) {
- GlobalHandles::SetFinalizationCallbackForTraced(location, parameter,
- callback);
+void DisposeTracedReference(internal::Address* location) {
+ GlobalHandles::DestroyTracedReference(location);
}
} // namespace internal
@@ -1638,6 +1634,19 @@ static void TemplateSetAccessor(
i::ApiNatives::AddNativeDataProperty(isolate, info, accessor_info);
}
+void Template::SetNativeDataProperty(v8::Local<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ v8::Local<Value> data,
+ PropertyAttribute attribute,
+ AccessControl settings,
+ SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
+ TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+ Local<AccessorSignature>(), true, false,
+ getter_side_effect_type, setter_side_effect_type);
+}
+
void Template::SetNativeDataProperty(
v8::Local<String> name, AccessorGetterCallback getter,
AccessorSetterCallback setter, v8::Local<Value> data,
@@ -1649,6 +1658,19 @@ void Template::SetNativeDataProperty(
setter_side_effect_type);
}
+void Template::SetNativeDataProperty(v8::Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter,
+ v8::Local<Value> data,
+ PropertyAttribute attribute,
+ AccessControl settings,
+ SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
+ TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+ Local<AccessorSignature>(), true, false,
+ getter_side_effect_type, setter_side_effect_type);
+}
+
void Template::SetNativeDataProperty(
v8::Local<Name> name, AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter, v8::Local<Value> data,
@@ -1688,6 +1710,32 @@ void ObjectTemplate::SetAccessor(v8::Local<String> name,
AccessorSetterCallback setter,
v8::Local<Value> data, AccessControl settings,
PropertyAttribute attribute,
+ SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
+ TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+ Local<AccessorSignature>(),
+ i::FLAG_disable_old_api_accessors, false,
+ getter_side_effect_type, setter_side_effect_type);
+}
+
+void ObjectTemplate::SetAccessor(v8::Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter,
+ v8::Local<Value> data, AccessControl settings,
+ PropertyAttribute attribute,
+ SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
+ TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+ Local<AccessorSignature>(),
+ i::FLAG_disable_old_api_accessors, false,
+ getter_side_effect_type, setter_side_effect_type);
+}
+
+void ObjectTemplate::SetAccessor(v8::Local<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ v8::Local<Value> data, AccessControl settings,
+ PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
@@ -6062,6 +6110,11 @@ void V8::SetUnhandledExceptionCallback(
}
#endif // V8_OS_WIN
+void v8::V8::SetFatalMemoryErrorCallback(
+ v8::OOMErrorCallback oom_error_callback) {
+ g_oom_error_callback = oom_error_callback;
+}
+
void v8::V8::SetEntropySource(EntropySource entropy_source) {
base::RandomNumberGenerator::SetEntropySource(entropy_source);
}
@@ -9878,15 +9931,22 @@ void CpuProfiler::SetUsePreciseSampling(bool use_precise_sampling) {
use_precise_sampling);
}
-CpuProfilingStatus CpuProfiler::StartProfiling(
+CpuProfilingResult CpuProfiler::Start(
+ CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate) {
+ return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+ options, std::move(delegate));
+}
+
+CpuProfilingResult CpuProfiler::Start(
Local<String> title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), options, std::move(delegate));
}
-CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
- bool record_samples) {
+CpuProfilingResult CpuProfiler::Start(Local<String> title,
+ bool record_samples) {
CpuProfilingOptions options(
kLeafNodeLineNumbers,
record_samples ? CpuProfilingOptions::kNoSampleLimit : 0);
@@ -9894,13 +9954,31 @@ CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
*Utils::OpenHandle(*title), options);
}
+CpuProfilingResult CpuProfiler::Start(Local<String> title,
+ CpuProfilingMode mode,
+ bool record_samples,
+ unsigned max_samples) {
+ CpuProfilingOptions options(mode, record_samples ? max_samples : 0);
+ return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+ *Utils::OpenHandle(*title), options);
+}
+
+CpuProfilingStatus CpuProfiler::StartProfiling(
+ Local<String> title, CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate) {
+ return Start(title, options, std::move(delegate)).status;
+}
+
+CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
+ bool record_samples) {
+ return Start(title, record_samples).status;
+}
+
CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
CpuProfilingMode mode,
bool record_samples,
unsigned max_samples) {
- CpuProfilingOptions options(mode, record_samples ? max_samples : 0);
- return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
- *Utils::OpenHandle(*title), options);
+ return Start(title, mode, record_samples, max_samples).status;
}
CpuProfile* CpuProfiler::StopProfiling(Local<String> title) {
@@ -9909,6 +9987,11 @@ CpuProfile* CpuProfiler::StopProfiling(Local<String> title) {
*Utils::OpenHandle(*title)));
}
+CpuProfile* CpuProfiler::Stop(ProfilerId id) {
+ return reinterpret_cast<CpuProfile*>(
+ reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(id));
+}
+
void CpuProfiler::UseDetailedSourcePositionsForProfiling(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)
->SetDetailedSourcePositionsForProfiling(true);
@@ -10254,11 +10337,6 @@ bool EmbedderHeapTracer::IsRootForNonTracingGC(
return true;
}
-bool EmbedderHeapTracer::IsRootForNonTracingGC(
- const v8::TracedGlobal<v8::Value>& handle) {
- return true;
-}
-
void EmbedderHeapTracer::ResetHandleInNonTracingGC(
const v8::TracedReference<v8::Value>& handle) {
UNREACHABLE();
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index fe2536fa0a..b8cfcd19d8 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -3549,10 +3549,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3733,74 +3729,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // FLAG_debug_code is enabled CSA checks will call C function and so we need
- // to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kCallerSaved;
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
- __ ldr(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ ldr(slot_arg, MemOperand(handler_arg,
- Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ ldr(
- handler_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
-
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ cmp_raw_immediate(r0, static_cast<int>(DynamicCheckMapsStatus::kSuccess));
- __ b(ne, &deopt);
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ bind(&deopt);
- __ cmp_raw_immediate(r0, static_cast<int>(DynamicCheckMapsStatus::kBailout));
- __ b(eq, &bailout);
-
- if (FLAG_debug_code) {
- __ cmp_raw_immediate(r0, static_cast<int>(DynamicCheckMapsStatus::kDeopt));
- __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus);
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index e6321c614c..896115b3e7 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -4067,10 +4067,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -4251,90 +4247,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<CodeT> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // FLAG_debug_code is enabled CSA checks will call C function and so we need
- // to save all CallerSaved registers too.
- if (FLAG_debug_code) {
- registers |= RegList::FromBits(static_cast<uint32_t>(kCallerSaved.bits()));
- }
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
-
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- // Make sure we can use x16 and x17, and add slot_arg as a temp reg if needed.
- UseScratchRegisterScope temps(masm);
- temps.Exclude(x16, x17);
- temps.Include(slot_arg);
- // Load return address into x17 and decode into handler_arg.
- __ Add(x16, fp, CommonFrameConstants::kCallerSPOffset);
- __ Ldr(x17, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ Autib1716();
- __ Mov(handler_arg, x17);
-#else
- __ Ldr(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
-#endif
-
- __ Ldr(slot_arg, MemOperand(handler_arg,
- Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ Ldr(
- handler_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
-
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ CompareAndBranch(
- x0, static_cast<int32_t>(DynamicCheckMapsStatus::kSuccess), ne, &deopt);
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ Bind(&deopt);
- __ CompareAndBranch(
- x0, static_cast<int32_t>(DynamicCheckMapsStatus::kBailout), eq, &bailout);
-
- if (FLAG_debug_code) {
- __ Cmp(x0, Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
- __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus);
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<CodeT> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ Bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<CodeT> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
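
Context for the deletions in this and the following builtins files: every per-architecture Generate_DynamicCheckMapsTrampoline implemented the same three-way dispatch on the status returned by the DynamicCheckMaps builtin. A minimal stand-alone C++ sketch of that control flow (the status enum mirrors the source; the dispatch helper is illustrative, not V8 API):

#include <cstdint>

enum class DynamicCheckMapsStatus : int32_t { kSuccess, kBailout, kDeopt };

// Illustrative outcomes: kSuccess returns to the optimized code, kBailout
// jumps to DeoptimizationEntry_Bailout (drop to the interpreter once, keep
// the optimized code), and kDeopt jumps to DeoptimizationEntry_Eager
// (discard the optimized code).
enum class Outcome { kResumeOptimizedCode, kBailoutEntry, kEagerEntry };

Outcome DispatchOnStatus(DynamicCheckMapsStatus status) {
  if (status == DynamicCheckMapsStatus::kSuccess)
    return Outcome::kResumeOptimizedCode;
  if (status == DynamicCheckMapsStatus::kBailout)
    return Outcome::kBailoutEntry;
  return Outcome::kEagerEntry;  // Debug builds assert status == kDeopt here.
}
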
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 53533f5ac7..2f72e0a15a 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -46,11 +46,7 @@ namespace internal {
/* Deoptimization entries. */ \
ASM(DeoptimizationEntry_Eager, DeoptimizationEntry) \
ASM(DeoptimizationEntry_Soft, DeoptimizationEntry) \
- ASM(DeoptimizationEntry_Bailout, DeoptimizationEntry) \
ASM(DeoptimizationEntry_Lazy, DeoptimizationEntry) \
- ASM(DynamicCheckMapsTrampoline, DynamicCheckMaps) \
- ASM(DynamicCheckMapsWithFeedbackVectorTrampoline, \
- DynamicCheckMapsWithFeedbackVector) \
\
/* GC write barrier. */ \
TFC(RecordWriteEmitRememberedSetSaveFP, WriteBarrier) \
@@ -302,10 +298,6 @@ namespace internal {
TFH(KeyedHasIC_SloppyArguments, LoadWithVector) \
TFH(HasIndexedInterceptorIC, LoadWithVector) \
\
- /* Dynamic check maps */ \
- TFC(DynamicCheckMaps, DynamicCheckMaps) \
- TFC(DynamicCheckMapsWithFeedbackVector, DynamicCheckMapsWithFeedbackVector) \
- \
/* Microtask helpers */ \
TFS(EnqueueMicrotask, kMicrotask) \
ASM(RunMicrotasksTrampoline, RunMicrotasksEntry) \
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 744c057099..7b157378d9 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -254,23 +254,5 @@ void Builtins::Generate_LookupContextInsideTypeofBaseline(
assembler.GenerateLookupContextBaseline(TypeofMode::kInside);
}
-TF_BUILTIN(DynamicCheckMaps, CodeStubAssembler) {
- auto map = Parameter<Map>(Descriptor::kMap);
- auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
- auto handler = Parameter<Object>(Descriptor::kHandler);
- TNode<Int32T> status = DynamicCheckMaps(map, slot, handler);
- Return(status);
-}
-
-TF_BUILTIN(DynamicCheckMapsWithFeedbackVector, CodeStubAssembler) {
- auto map = Parameter<Map>(Descriptor::kMap);
- auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
- auto handler = Parameter<Object>(Descriptor::kHandler);
- auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
- TNode<Int32T> status =
- DynamicCheckMapsWithFeedbackVector(map, slot, handler, feedback_vector);
- Return(status);
-}
-
} // namespace internal
} // namespace v8
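
The two TF_BUILTIN wrappers deleted above were thin adapters: unpack the descriptor parameters, delegate to the shared CSA macro, and return the Int32 status to the assembly trampoline. A rough plain-C++ analogue under model types (this is not the CodeStubAssembler API):

#include <cstdint>

struct MapModel;             // stands in for Map
struct HandlerModel;         // stands in for Smi|DataHandler
struct FeedbackVectorModel;  // stands in for FeedbackVector

// Assumed to be implemented elsewhere, mirroring the shared Torque macro.
int32_t DynamicCheckMapsImpl(const MapModel* map, intptr_t slot,
                             const HandlerModel* handler,
                             const FeedbackVectorModel* feedback_vector);

// Shape of the deleted wrapper: forward the parameters, return the status.
int32_t DynamicCheckMapsWithFeedbackVectorBuiltin(
    const MapModel* map, intptr_t slot, const HandlerModel* handler,
    const FeedbackVectorModel* feedback_vector) {
  return DynamicCheckMapsImpl(map, slot, handler, feedback_vector);
}
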
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 57e09018cf..0105977c44 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -303,10 +303,6 @@ class Builtins {
static void Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode);
- template <class Descriptor>
- static void Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm,
- Handle<CodeT> builtin_target);
-
#define DECLARE_ASM(Name, ...) \
static void Generate_##Name(MacroAssembler* masm);
#define DECLARE_TF(Name, ...) \
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index c217c6c7c3..2df39166c9 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -4163,10 +4163,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -4356,73 +4352,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
- // need to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kJSCallerSaved;
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
- __ mov(handler_arg, Operand(ebp, CommonFrameConstants::kCallerPCOffset));
- __ mov(slot_arg,
- Operand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ mov(handler_arg,
- Operand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
-
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ cmp(eax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
- __ j(not_equal, &deopt);
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ bind(&deopt);
- __ cmp(eax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
- __ j(equal, &bailout);
-
- if (FLAG_debug_code) {
- __ cmp(eax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
- __ Assert(equal, AbortReason::kUnexpectedDynamicCheckMapsStatus);
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/ic-dynamic-check-maps.tq b/deps/v8/src/builtins/ic-dynamic-check-maps.tq
deleted file mode 100644
index 3e194116fd..0000000000
--- a/deps/v8/src/builtins/ic-dynamic-check-maps.tq
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-namespace ic {
-
-const kSuccess: constexpr int32
- generates 'static_cast<int>(DynamicCheckMapsStatus::kSuccess)';
-const kBailout: constexpr int32
- generates 'static_cast<int>(DynamicCheckMapsStatus::kBailout)';
-const kDeopt: constexpr int32
- generates 'static_cast<int>(DynamicCheckMapsStatus::kDeopt)';
-extern macro LoadFeedbackVectorForStubWithTrampoline(): FeedbackVector;
-
-macro PerformPolymorphicCheck(
- expectedPolymorphicArray: HeapObject, actualMap: Map,
- actualHandler: Smi|DataHandler): int32 {
- if (!Is<WeakFixedArray>(expectedPolymorphicArray)) {
- return kDeopt;
- }
-
- const polymorphicArray = UnsafeCast<WeakFixedArray>(expectedPolymorphicArray);
- const weakActualMap = MakeWeak(actualMap);
- const length = polymorphicArray.length_intptr;
- dcheck(length > 0);
-
- for (let mapIndex: intptr = 0; mapIndex < length;
- mapIndex += FeedbackIteratorEntrySize()) {
- const maybeCachedMap =
- UnsafeCast<WeakHeapObject>(polymorphicArray[mapIndex]);
- if (maybeCachedMap == weakActualMap) {
- const handlerIndex = mapIndex + FeedbackIteratorHandlerOffset();
- dcheck(handlerIndex < length);
- const maybeHandler =
- Cast<Object>(polymorphicArray[handlerIndex]) otherwise unreachable;
- if (TaggedEqual(maybeHandler, actualHandler)) {
- return kSuccess;
- } else {
- return kDeopt;
- }
- }
- }
-
- return kBailout;
-}
-
-macro PerformMonomorphicCheck(
- feedbackVector: FeedbackVector, slotIndex: intptr, expectedMap: HeapObject,
- actualMap: Map, actualHandler: Smi|DataHandler): int32 {
- if (TaggedEqual(expectedMap, actualMap)) {
- const handlerIndex = slotIndex + 1;
- dcheck(handlerIndex < feedbackVector.length_intptr);
- const maybeHandler =
- Cast<Object>(feedbackVector[handlerIndex]) otherwise unreachable;
- if (TaggedEqual(actualHandler, maybeHandler)) {
- return kSuccess;
- }
-
- return kDeopt;
- }
-
- return kBailout;
-}
-
-// This builtin performs map checks by dynamically looking at the
-// feedback in the feedback vector.
-//
-// There are two major cases handled by this builtin:
-// (a) Monomorphic check
-// (b) Polymorphic check
-//
-// For the monomorphic check, the incoming map is migrated and checked
-// against the map and handler in the feedback vector.
-//
-// For the polymorphic check, the feedback vector is iterated over and
-// each of the maps & handlers is compared against the incoming map and
-// handler.
-//
-// If any of the map and associated handler checks pass, we return kSuccess
-// status. If we have never seen the map before, we return kBailout status to
-// bail out to the interpreter and update the feedback. If we have seen the
-// map, but the associated handler check fails, we return kDeopt status.
-@export
-macro DynamicCheckMaps(
- actualMap: Map, slotIndex: intptr, actualHandler: Smi|DataHandler): int32 {
- const feedbackVector = LoadFeedbackVectorForStubWithTrampoline();
- return DynamicCheckMapsWithFeedbackVector(
- actualMap, slotIndex, actualHandler, feedbackVector);
-}
-
-@export
-macro DynamicCheckMapsWithFeedbackVector(
- actualMap: Map, slotIndex: intptr, actualHandler: Smi|DataHandler,
- feedbackVector: FeedbackVector): int32 {
- const feedback = feedbackVector[slotIndex];
- try {
- const maybePolymorphicArray =
- GetHeapObjectIfStrong(feedback) otherwise MigrateAndDoMonomorphicCheck;
- return PerformPolymorphicCheck(
- maybePolymorphicArray, actualMap, actualHandler);
- } label MigrateAndDoMonomorphicCheck {
- const expectedMap = GetHeapObjectAssumeWeak(feedback) otherwise Deopt;
- return PerformMonomorphicCheck(
- feedbackVector, slotIndex, expectedMap, actualMap, actualHandler);
- } label Deopt {
- return kDeopt;
- }
-}
-
-} // namespace ic
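
The status semantics documented in the comment block above are straightforward to model outside Torque. A self-contained C++ sketch of PerformPolymorphicCheck, using a plain vector of (map, handler) pairs in place of the WeakFixedArray (a model, not V8 code):

#include <cstdint>
#include <utility>
#include <vector>

enum class Status : int32_t { kSuccess, kBailout, kDeopt };

using Map = const void*;
using Handler = const void*;

// A matching map with a matching handler succeeds; a matching map with a
// different handler deopts; a map never seen before bails out so the
// interpreter can update the feedback.
Status PerformPolymorphicCheck(
    const std::vector<std::pair<Map, Handler>>& feedback, Map actual_map,
    Handler actual_handler) {
  for (const auto& entry : feedback) {
    if (entry.first == actual_map) {
      return entry.second == actual_handler ? Status::kSuccess
                                            : Status::kDeopt;
    }
  }
  return Status::kBailout;
}
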
diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc
index 8033944139..10849667fd 100644
--- a/deps/v8/src/builtins/loong64/builtins-loong64.cc
+++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc
@@ -3565,10 +3565,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3750,74 +3746,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
- // need to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kJSCallerSaved;
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
- __ Ld_d(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ Ld_d(
- slot_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ Ld_d(
- handler_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ Branch(&deopt, ne, a0,
- Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kSuccess)));
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ bind(&deopt);
- __ Branch(&bailout, eq, a0,
- Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kBailout)));
-
- if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
- Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kDeopt)));
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 64ecb55f23..a907e0cedf 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -4013,10 +4013,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -4196,74 +4192,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
- // need to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kJSCallerSaved;
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
- __ Lw(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ Lw(slot_arg, MemOperand(handler_arg,
- Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ Lw(
- handler_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
-
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ Branch(&deopt, ne, v0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ bind(&deopt);
- __ Branch(&bailout, eq, v0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
-
- if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, v0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 85872b3d5c..ea574acfd8 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -3591,10 +3591,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3774,73 +3770,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
- // need to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kJSCallerSaved;
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
- __ Ld(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ Uld(slot_arg, MemOperand(handler_arg,
- Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ Uld(
- handler_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ Branch(&deopt, ne, v0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ bind(&deopt);
- __ Branch(&bailout, eq, v0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
-
- if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, v0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 96322fcc4b..02421e5c21 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -3470,10 +3470,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3496,76 +3492,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
__ bkpt(0);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
- // need to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kJSCallerSaved;
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
- __ LoadU64(handler_arg,
- MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ LoadU64(
- slot_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ LoadU64(
- handler_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
-
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ cmpi(r3, Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
- __ bne(&deopt);
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ bind(&deopt);
- __ cmpi(r3, Operand(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
- __ beq(&bailout);
-
- if (FLAG_debug_code) {
- __ cmpi(r3, Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
- __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus);
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index 11a8f5156c..fd0976e1c1 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -3678,10 +3678,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3867,74 +3863,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
- // need to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kJSCallerSaved;
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
- __ Ld(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ Uld(slot_arg, MemOperand(handler_arg,
- Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ Uld(
- handler_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ Branch(&deopt, ne, a0,
- Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kSuccess)),
- Label::Distance::kNear);
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ bind(&deopt);
- __ Branch(&bailout, eq, a0,
- Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kBailout)));
-
- if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
- Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kDeopt)));
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 9b328cf3fc..60c3f60e66 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -3851,10 +3851,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3886,76 +3882,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<Code> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
- // need to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kJSCallerSaved;
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
- __ LoadU64(handler_arg,
- MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ LoadU64(
- slot_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ LoadU64(
- handler_arg,
- MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
-
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ CmpS64(r2, Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
- __ bne(&deopt);
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ bind(&deopt);
- __ CmpS64(r2, Operand(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
- __ beq(&bailout);
-
- if (FLAG_debug_code) {
- __ CmpS64(r2, Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
- __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus);
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 9ffd1ea2be..342660d21d 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -4989,10 +4989,6 @@ void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
-void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -5170,74 +5166,6 @@ void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
-void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
-}
-
-void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
- MacroAssembler* masm) {
- Generate_DynamicCheckMapsTrampoline<
- DynamicCheckMapsWithFeedbackVectorDescriptor>(
- masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
-}
-
-template <class Descriptor>
-void Builtins::Generate_DynamicCheckMapsTrampoline(
- MacroAssembler* masm, Handle<CodeT> builtin_target) {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
-
- // Only save the registers that the DynamicCheckMaps builtin can clobber.
- Descriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
- // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
- // need to save all CallerSaved registers too.
- if (FLAG_debug_code) registers |= kCallerSaved;
- __ MaybeSaveRegisters(registers);
-
- // Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
- Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
- __ movq(handler_arg, Operand(rbp, CommonFrameConstants::kCallerPCOffset));
- __ movq(slot_arg, Operand(handler_arg,
- Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ movq(
- handler_arg,
- Operand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
-
- __ Call(builtin_target, RelocInfo::CODE_TARGET);
-
- Label deopt, bailout;
- __ cmpq(rax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
- __ j(not_equal, &deopt);
-
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- __ Ret();
-
- __ bind(&deopt);
- __ cmpq(rax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
- __ j(equal, &bailout);
-
- if (FLAG_debug_code) {
- __ cmpq(rax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
- __ Assert(equal, AbortReason::kUnexpectedDynamicCheckMapsStatus);
- }
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<CodeT> deopt_eager = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
- __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
-
- __ bind(&bailout);
- __ MaybeRestoreRegisters(registers);
- __ LeaveFrame(StackFrame::INTERNAL);
- Handle<CodeT> deopt_bailout = masm->isolate()->builtins()->code_handle(
- Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
- __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
-}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
index 70e8e9f361..d5e410f1a2 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
@@ -42,18 +42,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
}
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == r0);
- return RegisterArray(r0, r1, r2, r3, cp);
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == r0);
- return RegisterArray(r0, r1, r2, r3, cp);
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return r1; }
// static
constexpr Register LoadDescriptor::NameRegister() { return r2; }
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index d0d854dc50..0c7df90dbf 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -2652,12 +2652,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
- if (kind == DeoptimizeKind::kEagerWithResume) {
- b(ret);
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- }
-
// The above code must not emit constants either.
DCHECK(!has_pending_constants());
}
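
The branch removed here (and in the analogous CallForDeoptimization hunks below) was emitted only for kEagerWithResume exits: it padded the exit to Deoptimizer::kEagerWithResumeBeforeArgsSize so that two immediate arguments could follow in the instruction stream, which the trampolines read back at fixed offsets from the caller PC. A rough model of that load pattern with hypothetical offsets (the real ones are Deoptimizer::kEagerWithResumeImmedArgs1PcOffset and Deoptimizer::kEagerWithResumeImmedArgs2PcOffset):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical stand-ins for the PC-relative immediate-argument offsets.
constexpr std::ptrdiff_t kImmedArgs1PcOffset = 0;
constexpr std::ptrdiff_t kImmedArgs2PcOffset = sizeof(intptr_t);

// Recover the slot and handler immediates embedded after the deopt exit,
// given the caller PC saved in the frame; memcpy avoids alignment traps.
void ReadImmediateArgs(const unsigned char* caller_pc, intptr_t* slot,
                       intptr_t* handler) {
  std::memcpy(slot, caller_pc + kImmedArgs1PcOffset, sizeof(intptr_t));
  std::memcpy(handler, caller_pc + kImmedArgs2PcOffset, sizeof(intptr_t));
}
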
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
index 709a01264d..227b0d9c56 100644
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
@@ -43,18 +43,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
}
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == x0);
- return RegisterArray(x0, x1, x2, x3, cp);
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == x0);
- return RegisterArray(x0, x1, x2, x3, cp);
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return x1; }
// static
constexpr Register LoadDescriptor::NameRegister() { return x2; }
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 6ea0322afe..191eb4bd20 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -2222,12 +2222,6 @@ void TurboAssembler::CallForDeoptimization(
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
-
- if (kind == DeoptimizeKind::kEagerWithResume) {
- b(ret);
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- }
}
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index c2374536f7..cf01b360d6 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -10,85 +10,84 @@
namespace v8 {
namespace internal {
-#define ABORT_MESSAGES_LIST(V) \
- V(kNoReason, "no reason") \
- \
- V(k32BitValueInRegisterIsNotZeroExtended, \
- "32 bit value in register is not zero-extended") \
- V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
- V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
- V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
- V(kExpectedOptimizationSentinel, \
- "Expected optimized code cell or optimization sentinel") \
- V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
- V(kExpectedFeedbackVector, "Expected feedback vector") \
- V(kExpectedBaselineData, "Expected baseline data") \
- V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
- "The function_data field should be a BytecodeArray on interpreter entry") \
- V(kInputStringTooLong, "Input string too long") \
- V(kInvalidBytecode, "Invalid bytecode") \
- V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
- V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
- V(kInvalidJumpTableIndex, "Invalid jump table index") \
- V(kInvalidParametersAndRegistersInGenerator, \
- "invalid parameters and registers in generator") \
- V(kMissingBytecodeArray, "Missing bytecode array from function") \
- V(kObjectNotTagged, "The object is not tagged") \
- V(kObjectTagged, "The object is tagged") \
- V(kOffsetOutOfRange, "Offset out of range") \
- V(kOperandIsASmi, "Operand is a smi") \
- V(kOperandIsASmiAndNotABoundFunction, \
- "Operand is a smi and not a bound function") \
- V(kOperandIsASmiAndNotAConstructor, \
- "Operand is a smi and not a constructor") \
- V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
- V(kOperandIsASmiAndNotAGeneratorObject, \
- "Operand is a smi and not a generator object") \
- V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
- V(kOperandIsNotAConstructor, "Operand is not a constructor") \
- V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
- V(kOperandIsNotAFunction, "Operand is not a function") \
- V(kOperandIsNotACallableFunction, "Operand is not a callable function") \
- V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
- V(kOperandIsNotACodeT, "Operand is not a CodeT") \
- V(kOperandIsNotASmi, "Operand is not a smi") \
- V(kPromiseAlreadySettled, "Promise already settled") \
- V(kReceivedInvalidReturnAddress, "Received invalid return address") \
- V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
- V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kShouldNotDirectlyEnterOsrFunction, \
- "Should not directly enter OSR-compiled function") \
- V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
- V(kStackFrameTypesMustMatch, "Stack frame types must match") \
- V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
- V(kUnexpectedAdditionalPopValue, "Unexpected additional pop value") \
- V(kUnexpectedDynamicCheckMapsStatus, "Unexpected dynamic map checks status") \
- V(kUnexpectedElementsKindInArrayConstructor, \
- "Unexpected ElementsKind in array constructor") \
- V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
- V(kUnexpectedFunctionIDForInvokeIntrinsic, \
- "Unexpected runtime function id for the InvokeIntrinsic bytecode") \
- V(kUnexpectedInitialMapForArrayFunction, \
- "Unexpected initial map for Array function") \
- V(kUnexpectedLevelAfterReturnFromApiCall, \
- "Unexpected level after return from api call") \
- V(kUnexpectedNegativeValue, "Unexpected negative value") \
- V(kUnexpectedReturnFromFrameDropper, \
- "Unexpectedly returned from dropping frames") \
- V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
- V(kUnexpectedReturnFromWasmTrap, \
- "Should not return after throwing a wasm trap") \
- V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
- V(kUnexpectedValue, "Unexpected value") \
- V(kUnsupportedModuleOperation, "Unsupported module operation") \
- V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
- V(kWrongAddressOrValuePassedToRecordWrite, \
- "Wrong address or value passed to RecordWrite") \
- V(kWrongArgumentCountForInvokeIntrinsic, \
- "Wrong number of arguments for intrinsic") \
- V(kWrongFunctionCodeStart, "Wrong value in code start register passed") \
- V(kWrongFunctionContext, "Wrong context passed to function") \
- V(kUnexpectedThreadInWasmSet, "thread_in_wasm flag was already set") \
+#define ABORT_MESSAGES_LIST(V) \
+ V(kNoReason, "no reason") \
+ \
+ V(k32BitValueInRegisterIsNotZeroExtended, \
+ "32 bit value in register is not zero-extended") \
+ V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
+ V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
+ V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
+ V(kExpectedOptimizationSentinel, \
+ "Expected optimized code cell or optimization sentinel") \
+ V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
+ V(kExpectedFeedbackVector, "Expected feedback vector") \
+ V(kExpectedBaselineData, "Expected baseline data") \
+ V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
+ "The function_data field should be a BytecodeArray on interpreter entry") \
+ V(kInputStringTooLong, "Input string too long") \
+ V(kInvalidBytecode, "Invalid bytecode") \
+ V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
+ V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
+ V(kInvalidJumpTableIndex, "Invalid jump table index") \
+ V(kInvalidParametersAndRegistersInGenerator, \
+ "invalid parameters and registers in generator") \
+ V(kMissingBytecodeArray, "Missing bytecode array from function") \
+ V(kObjectNotTagged, "The object is not tagged") \
+ V(kObjectTagged, "The object is tagged") \
+ V(kOffsetOutOfRange, "Offset out of range") \
+ V(kOperandIsASmi, "Operand is a smi") \
+ V(kOperandIsASmiAndNotABoundFunction, \
+ "Operand is a smi and not a bound function") \
+ V(kOperandIsASmiAndNotAConstructor, \
+ "Operand is a smi and not a constructor") \
+ V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
+ V(kOperandIsASmiAndNotAGeneratorObject, \
+ "Operand is a smi and not a generator object") \
+ V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
+ V(kOperandIsNotAConstructor, "Operand is not a constructor") \
+ V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
+ V(kOperandIsNotAFunction, "Operand is not a function") \
+ V(kOperandIsNotACallableFunction, "Operand is not a callable function") \
+ V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
+ V(kOperandIsNotACodeT, "Operand is not a CodeT") \
+ V(kOperandIsNotASmi, "Operand is not a smi") \
+ V(kPromiseAlreadySettled, "Promise already settled") \
+ V(kReceivedInvalidReturnAddress, "Received invalid return address") \
+ V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
+ V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
+ V(kShouldNotDirectlyEnterOsrFunction, \
+ "Should not directly enter OSR-compiled function") \
+ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
+ V(kStackFrameTypesMustMatch, "Stack frame types must match") \
+ V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
+ V(kUnexpectedAdditionalPopValue, "Unexpected additional pop value") \
+ V(kUnexpectedElementsKindInArrayConstructor, \
+ "Unexpected ElementsKind in array constructor") \
+ V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
+ V(kUnexpectedFunctionIDForInvokeIntrinsic, \
+ "Unexpected runtime function id for the InvokeIntrinsic bytecode") \
+ V(kUnexpectedInitialMapForArrayFunction, \
+ "Unexpected initial map for Array function") \
+ V(kUnexpectedLevelAfterReturnFromApiCall, \
+ "Unexpected level after return from api call") \
+ V(kUnexpectedNegativeValue, "Unexpected negative value") \
+ V(kUnexpectedReturnFromFrameDropper, \
+ "Unexpectedly returned from dropping frames") \
+ V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
+ V(kUnexpectedReturnFromWasmTrap, \
+ "Should not return after throwing a wasm trap") \
+ V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
+ V(kUnexpectedValue, "Unexpected value") \
+ V(kUnsupportedModuleOperation, "Unsupported module operation") \
+ V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
+ V(kWrongAddressOrValuePassedToRecordWrite, \
+ "Wrong address or value passed to RecordWrite") \
+ V(kWrongArgumentCountForInvokeIntrinsic, \
+ "Wrong number of arguments for intrinsic") \
+ V(kWrongFunctionCodeStart, "Wrong value in code start register passed") \
+ V(kWrongFunctionContext, "Wrong context passed to function") \
+ V(kUnexpectedThreadInWasmSet, "thread_in_wasm flag was already set") \
V(kUnexpectedThreadInWasmUnset, "thread_in_wasm flag was not set")
#define BAILOUT_MESSAGES_LIST(V) \
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
index 2da91a0f59..2ac0ee94d3 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
@@ -36,20 +36,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
}
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
- STATIC_ASSERT(esi == kContextRegister);
- STATIC_ASSERT(eax == kReturnRegister0);
- return RegisterArray(eax, ecx, edx, edi, esi);
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
- STATIC_ASSERT(esi == kContextRegister);
- STATIC_ASSERT(eax == kReturnRegister0);
- return RegisterArray(eax, ecx, edx, edi, esi);
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return edx; }
// static
constexpr Register LoadDescriptor::NameRegister() { return ecx; }
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 0678ad31c5..150ffd6608 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -2036,16 +2036,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
-
- if (kind == DeoptimizeKind::kEagerWithResume) {
- bool old_predictable_code_size = predictable_code_size();
- set_predictable_code_size(true);
-
- jmp(ret);
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- set_predictable_code_size(old_predictable_code_size);
- }
}
void TurboAssembler::Trap() { int3(); }
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 3e10c6dcd7..081614e9c4 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -69,8 +69,6 @@ namespace internal {
V(ConstructWithSpread_WithFeedback) \
V(ContextOnly) \
V(CppBuiltinAdaptor) \
- V(DynamicCheckMaps) \
- V(DynamicCheckMapsWithFeedbackVector) \
V(FastNewObject) \
V(ForInPrepare) \
V(GetIteratorStackParameter) \
@@ -1065,39 +1063,6 @@ class LoadGlobalWithVectorDescriptor
static constexpr auto registers();
};
-class DynamicCheckMapsDescriptor final
- : public StaticCallInterfaceDescriptor<DynamicCheckMapsDescriptor> {
- public:
- DEFINE_PARAMETERS(kMap, kSlot, kHandler)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int32(), // return val
- MachineType::TaggedPointer(), // kMap
- MachineType::IntPtr(), // kSlot
- MachineType::TaggedSigned()) // kHandler
-
- DECLARE_DESCRIPTOR(DynamicCheckMapsDescriptor)
-
- static constexpr auto registers();
- static constexpr bool kRestrictAllocatableRegisters = true;
-};
-
-class DynamicCheckMapsWithFeedbackVectorDescriptor final
- : public StaticCallInterfaceDescriptor<
- DynamicCheckMapsWithFeedbackVectorDescriptor> {
- public:
- DEFINE_PARAMETERS(kMap, kFeedbackVector, kSlot, kHandler)
- DEFINE_RESULT_AND_PARAMETER_TYPES(
- MachineType::Int32(), // return val
- MachineType::TaggedPointer(), // kMap
- MachineType::TaggedPointer(), // kFeedbackVector
- MachineType::IntPtr(), // kSlot
- MachineType::TaggedSigned()) // kHandler
-
- DECLARE_DESCRIPTOR(DynamicCheckMapsWithFeedbackVectorDescriptor)
-
- static constexpr auto registers();
- static constexpr bool kRestrictAllocatableRegisters = true;
-};
-
class FastNewObjectDescriptor
: public StaticCallInterfaceDescriptor<FastNewObjectDescriptor> {
public:
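
Expressed as ordinary function types (model types only; the real descriptors use V8's MachineType system), the two deleted descriptors pinned down these calling conventions:

#include <cstdint>

struct MapModel;             // MachineType::TaggedPointer()
struct HandlerModel;         // MachineType::TaggedSigned()
struct FeedbackVectorModel;  // MachineType::TaggedPointer()

// DynamicCheckMapsDescriptor: (kMap, kSlot, kHandler) -> Int32 status.
using DynamicCheckMapsFn = int32_t (*)(const MapModel*, intptr_t,
                                       const HandlerModel*);

// DynamicCheckMapsWithFeedbackVectorDescriptor:
// (kMap, kFeedbackVector, kSlot, kHandler) -> Int32 status.
using DynamicCheckMapsWithFeedbackVectorFn =
    int32_t (*)(const MapModel*, const FeedbackVectorModel*, intptr_t,
                const HandlerModel*);
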
diff --git a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
index 5b4e8c8e71..b06ce162d2 100644
--- a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
+++ b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
@@ -42,18 +42,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
}
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == a0);
- return RegisterArray(a0, a1, a2, a3, cp);
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == a0);
- return RegisterArray(a0, a1, a2, a3, cp);
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
// static
constexpr Register LoadDescriptor::NameRegister() { return a2; }
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
index 982c4b7eb8..d685aaafdd 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -4090,12 +4090,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
-
- if (kind == DeoptimizeKind::kEagerWithResume) {
- Branch(ret);
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- }
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
index 6b5a791cff..b9025b032c 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
@@ -38,18 +38,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
}
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == v0);
- return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == v0);
- return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
// static
constexpr Register LoadDescriptor::NameRegister() { return a2; }
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 53c2217d52..338c0debf6 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -5574,11 +5574,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
- if (kind == DeoptimizeKind::kEagerWithResume) {
- Branch(ret);
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- }
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
index 7d6ba8bc73..b1df0a7c62 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
@@ -42,18 +42,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
}
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == v0);
- return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == v0);
- return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
// static
constexpr Register LoadDescriptor::NameRegister() { return a2; }
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 2d13884be3..46be9ee787 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -6117,12 +6117,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
-
- if (kind == DeoptimizeKind::kEagerWithResume) {
- Branch(ret);
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- }
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
index 15e673b1db..8c41f87130 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
@@ -42,18 +42,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
}
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == r3);
- return RegisterArray(r3, r4, r5, r6, cp);
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == r3);
- return RegisterArray(r3, r4, r5, r6, cp);
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return r4; }
// static
constexpr Register LoadDescriptor::NameRegister() { return r5; }
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 2727749295..6275d14e89 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -3710,11 +3710,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
- if (kind == DeoptimizeKind::kEagerWithResume) {
- b(ret);
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- }
}
void TurboAssembler::ZeroExtByte(Register dst, Register src) {
diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
index 62587b74f9..d987269153 100644
--- a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
+++ b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
@@ -44,18 +44,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
}
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == a0);
- return RegisterArray(kReturnRegister0, a1, a2, a3, cp);
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == a0);
- return RegisterArray(kReturnRegister0, a1, a2, a3, cp);
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
// static
constexpr Register LoadDescriptor::NameRegister() { return a2; }
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 3efba1211e..52bba9f21c 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -4990,11 +4990,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
- if (kind == DeoptimizeKind::kEagerWithResume) {
- Branch(ret);
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- }
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
index 9864ff4db9..9399b289e4 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
@@ -42,18 +42,6 @@ constexpr auto WriteBarrierDescriptor::registers() {
}
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == r2);
- return RegisterArray(r2, r3, r4, r5, cp);
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
- STATIC_ASSERT(kReturnRegister0 == r2);
- return RegisterArray(r2, r3, r4, r5, cp);
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return r3; }
// static
constexpr Register LoadDescriptor::NameRegister() { return r4; }
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 79a0130de2..1037eff0cd 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -4829,11 +4829,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
- if (kind == DeoptimizeKind::kEagerWithResume) {
- bc_long(Condition::al, ret);
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- }
}
void TurboAssembler::Trap() { stop(); }
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
index af9b2e1cf2..cff15e297d 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
@@ -54,30 +54,6 @@ constexpr auto TSANLoadDescriptor::registers() {
#endif // V8_IS_TSAN
// static
-constexpr auto DynamicCheckMapsDescriptor::registers() {
-#if V8_TARGET_OS_WIN
- return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
- kRuntimeCallFunctionRegister, kContextRegister);
-#else
- STATIC_ASSERT(kContextRegister == arg_reg_2);
- return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
- kRuntimeCallFunctionRegister);
-#endif // V8_TARGET_OS_WIN
-}
-
-// static
-constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
-#if V8_TARGET_OS_WIN
- return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
- kRuntimeCallFunctionRegister, kContextRegister);
-#else
- STATIC_ASSERT(kContextRegister == arg_reg_2);
- return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
- kRuntimeCallFunctionRegister);
-#endif // V8_TARGET_OS_WIN
-}
-
-// static
constexpr Register LoadDescriptor::ReceiverRegister() { return rdx; }
// static
constexpr Register LoadDescriptor::NameRegister() { return rcx; }
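The Windows/non-Windows split in the deleted x64 descriptors comes down to calling conventions. A standalone sketch of the distinction; V8's arg_reg_2 and kContextRegister aliases are assumed here to follow the standard x64 conventions:
// Sketch only: illustrates the platform split behind the deleted
// #if V8_TARGET_OS_WIN branches above.
#include <cstdio>
int main() {
#if defined(_WIN64)
  // Windows x64: integer args in rcx, rdx, r8, r9 -> the second argument
  // register is rdx, which is not V8's context register, so the context
  // has to be listed as an extra register in the descriptor.
  std::printf("arg_reg_2 = rdx (context passed separately)\n");
#else
  // System V AMD64: integer args in rdi, rsi, rdx, ... -> the second
  // argument register is rsi, the same register V8 uses for the context,
  // hence the STATIC_ASSERT(kContextRegister == arg_reg_2) in the old code.
  std::printf("arg_reg_2 = rsi (already the context register)\n");
#endif
}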
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 4e351a2f56..4e28e4df66 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -3111,16 +3111,6 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
-
- if (kind == DeoptimizeKind::kEagerWithResume) {
- bool old_predictable_code_size = predictable_code_size();
- set_predictable_code_size(true);
- jmp(ret);
-
- DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- set_predictable_code_size(old_predictable_code_size);
- }
}
void TurboAssembler::Trap() { int3(); }
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index f26b0a06e2..cd374c8238 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -520,21 +520,13 @@ constexpr int kNoDeoptimizationId = -1;
// code is executed.
// - Soft: similar to lazy deoptimization, but does not contribute to the
// total deopt count which can lead to disabling optimization for a function.
-// - Bailout: a check failed in the optimized code but we don't
-// deoptimize the code, but try to heal the feedback and try to rerun
-// the optimized code again.
-// - EagerWithResume: a check failed in the optimized code, but we can execute
-// a more expensive check in a builtin that might either result in us resuming
-// execution in the optimized code, or deoptimizing immediately.
enum class DeoptimizeKind : uint8_t {
kEager,
kSoft,
- kBailout,
kLazy,
- kEagerWithResume,
};
constexpr DeoptimizeKind kFirstDeoptimizeKind = DeoptimizeKind::kEager;
-constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kEagerWithResume;
+constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kLazy;
STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
constexpr int kDeoptimizeKindCount = static_cast<int>(kLastDeoptimizeKind) + 1;
inline size_t hash_value(DeoptimizeKind kind) {
@@ -548,10 +540,6 @@ inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
return os << "Soft";
case DeoptimizeKind::kLazy:
return os << "Lazy";
- case DeoptimizeKind::kBailout:
- return os << "Bailout";
- case DeoptimizeKind::kEagerWithResume:
- return os << "EagerMaybeResume";
}
}
@@ -1818,12 +1806,6 @@ enum class TraceRetainingPathMode { kEnabled, kDisabled };
// can be used in Torque.
enum class VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
-enum class DynamicCheckMapsStatus : uint8_t {
- kSuccess = 0,
- kBailout = 1,
- kDeopt = 2
-};
-
#ifdef V8_COMPRESS_POINTERS
class PtrComprCageBase {
public:
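The surviving kind-count arithmetic in globals.h is worth spelling out: with a dense enum, casting the last enumerator and adding one yields the count, so dropping kBailout and kEagerWithResume only requires retargeting kLastDeoptimizeKind. A minimal standalone sketch:
#include <cstdint>
#include <iostream>

enum class DeoptimizeKind : uint8_t { kEager, kSoft, kLazy };
constexpr DeoptimizeKind kFirstDeoptimizeKind = DeoptimizeKind::kEager;
constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kLazy;
constexpr int kDeoptimizeKindCount = static_cast<int>(kLastDeoptimizeKind) + 1;
static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0, "dense enum");

int main() {
  for (int i = 0; i < kDeoptimizeKindCount; ++i) {
    std::cout << "kind " << i << "\n";  // prints kinds 0..2
  }
}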
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 67283d9da1..53cab92b8e 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -178,20 +178,6 @@ PropertyAccessInfo PropertyAccessInfo::DictionaryProtoAccessorConstant(
constant, property_name, {{receiver_map}, zone});
}
-// static
-MinimorphicLoadPropertyAccessInfo MinimorphicLoadPropertyAccessInfo::DataField(
- int offset, bool is_inobject, Representation field_representation,
- Type field_type) {
- return MinimorphicLoadPropertyAccessInfo(kDataField, offset, is_inobject,
- field_representation, field_type);
-}
-
-// static
-MinimorphicLoadPropertyAccessInfo MinimorphicLoadPropertyAccessInfo::Invalid() {
- return MinimorphicLoadPropertyAccessInfo(
- kInvalid, -1, false, Representation::None(), Type::None());
-}
-
PropertyAccessInfo::PropertyAccessInfo(Zone* zone)
: kind_(kInvalid),
lookup_start_object_maps_(zone),
@@ -262,15 +248,6 @@ PropertyAccessInfo::PropertyAccessInfo(
dictionary_index_(dictionary_index),
name_{name} {}
-MinimorphicLoadPropertyAccessInfo::MinimorphicLoadPropertyAccessInfo(
- Kind kind, int offset, bool is_inobject,
- Representation field_representation, Type field_type)
- : kind_(kind),
- is_inobject_(is_inobject),
- offset_(offset),
- field_representation_(field_representation),
- field_type_(field_type) {}
-
namespace {
template <class RefT>
@@ -682,20 +659,6 @@ PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
access_mode, get_accessors);
}
-MinimorphicLoadPropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
- MinimorphicLoadPropertyAccessFeedback const& feedback) const {
- DCHECK(feedback.handler()->IsSmi());
- int handler = Smi::cast(*feedback.handler()).value();
- bool is_inobject = LoadHandler::IsInobjectBits::decode(handler);
- bool is_double = LoadHandler::IsDoubleBits::decode(handler);
- int offset = LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize;
- Representation field_rep =
- is_double ? Representation::Double() : Representation::Tagged();
- Type field_type = is_double ? Type::Number() : Type::Any();
- return MinimorphicLoadPropertyAccessInfo::DataField(offset, is_inobject,
- field_rep, field_type);
-}
-
bool AccessInfoFactory::TryLoadPropertyDetails(
MapRef map, base::Optional<JSObjectRef> maybe_holder, NameRef name,
InternalIndex* index_out, PropertyDetails* details_out) const {
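The deleted factory method reconstructed a field load purely from the packed Smi handler. A sketch of that style of decoding; the bit layout below is assumed for illustration, not LoadHandler's real one:
#include <cstdint>

struct MinimorphicFieldSketch {
  int offset;
  bool is_inobject;
  bool is_double;
};

MinimorphicFieldSketch DecodeHandler(uint32_t handler) {
  constexpr int kTaggedSize = 8;             // assumed tagged-value size
  const bool is_inobject = handler & 1;      // assumed bit positions
  const bool is_double = (handler >> 1) & 1;
  const int field_index = handler >> 2;
  // Offset, representation and type all derive from the handler bits,
  // which is what made the access "minimorphic": no map data needed.
  return {field_index * kTaggedSize, is_inobject, is_double};
}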
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 827c253e1f..5b0b9bee2c 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -22,7 +22,6 @@ class CompilationDependencies;
class CompilationDependency;
class ElementAccessFeedback;
class JSHeapBroker;
-class MinimorphicLoadPropertyAccessFeedback;
class TypeCache;
struct ConstFieldInfo;
@@ -214,36 +213,6 @@ class PropertyAccessInfo final {
base::Optional<NameRef> name_;
};
-// This class encapsulates information required to generate load properties
-// by only using the information from handlers. This information is used with
-// dynamic map checks.
-class MinimorphicLoadPropertyAccessInfo final {
- public:
- enum Kind { kInvalid, kDataField };
- static MinimorphicLoadPropertyAccessInfo DataField(
- int offset, bool is_inobject, Representation field_representation,
- Type field_type);
- static MinimorphicLoadPropertyAccessInfo Invalid();
-
- bool IsInvalid() const { return kind_ == kInvalid; }
- bool IsDataField() const { return kind_ == kDataField; }
- int offset() const { return offset_; }
- int is_inobject() const { return is_inobject_; }
- Type field_type() const { return field_type_; }
- Representation field_representation() const { return field_representation_; }
-
- private:
- MinimorphicLoadPropertyAccessInfo(Kind kind, int offset, bool is_inobject,
- Representation field_representation,
- Type field_type);
-
- Kind kind_;
- bool is_inobject_;
- int offset_;
- Representation field_representation_;
- Type field_type_;
-};
-
// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
class AccessInfoFactory final {
public:
@@ -264,9 +233,6 @@ class AccessInfoFactory final {
InternalIndex dict_index, AccessMode access_mode,
PropertyDetails details) const;
- MinimorphicLoadPropertyAccessInfo ComputePropertyAccessInfo(
- MinimorphicLoadPropertyAccessFeedback const& feedback) const;
-
// Merge as many of the given {infos} as possible and record any dependencies.
// Return false iff any of them was invalid, in which case no dependencies are
// recorded.
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 3f1842f64e..de80d20d51 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -3298,24 +3298,19 @@ void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {
__ ForceConstantPoolEmissionWithoutJump();
- // We are conservative here, assuming all deopts are eager with resume deopts.
- DCHECK_GE(Deoptimizer::kEagerWithResumeDeoptExitSize,
- Deoptimizer::kLazyDeoptExitSize);
+ // We are conservative here, reserving sufficient space for the largest deopt
+ // kind.
DCHECK_GE(Deoptimizer::kLazyDeoptExitSize,
Deoptimizer::kNonLazyDeoptExitSize);
- __ CheckVeneerPool(false, false,
- static_cast<int>(exits->size()) *
- Deoptimizer::kEagerWithResumeDeoptExitSize);
+ __ CheckVeneerPool(
+ false, false,
+ static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
// Check which deopt kinds exist in this Code object, to avoid emitting jumps
// to unused entries.
bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
- bool saw_deopt_with_resume_reason[kDeoptimizeReasonCount] = {false};
for (auto exit : *exits) {
saw_deopt_kind[static_cast<int>(exit->kind())] = true;
- if (exit->kind() == DeoptimizeKind::kEagerWithResume) {
- saw_deopt_with_resume_reason[static_cast<int>(exit->reason())] = true;
- }
}
// Emit the jumps to deoptimization entries.
@@ -3325,21 +3320,9 @@ void CodeGenerator::PrepareForDeoptimizationExits(
for (int i = 0; i < kDeoptimizeKindCount; i++) {
if (!saw_deopt_kind[i]) continue;
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
- if (kind == DeoptimizeKind::kEagerWithResume) {
- for (int j = 0; j < kDeoptimizeReasonCount; j++) {
- if (!saw_deopt_with_resume_reason[j]) continue;
- DeoptimizeReason reason = static_cast<DeoptimizeReason>(j);
- __ bind(&jump_deoptimization_or_resume_entry_labels_[j]);
- __ LoadEntryFromBuiltin(Deoptimizer::GetDeoptWithResumeBuiltin(reason),
- scratch);
- __ Jump(scratch);
- }
- } else {
- __ bind(&jump_deoptimization_entry_labels_[i]);
- __ LoadEntryFromBuiltin(Deoptimizer::GetDeoptimizationEntry(kind),
- scratch);
- __ Jump(scratch);
- }
+ __ bind(&jump_deoptimization_entry_labels_[i]);
+ __ LoadEntryFromBuiltin(Deoptimizer::GetDeoptimizationEntry(kind), scratch);
+ __ Jump(scratch);
}
}
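With eager-with-resume exits gone, the lazy exit becomes the largest, so reserving exits * kLazyDeoptExitSize bounds every kind, which is exactly what the rewritten CheckVeneerPool call does. A sketch of the reservation logic with made-up sizes:
// Sizes below are illustrative bytes, not the real arm64 constants.
constexpr int kNonLazyDeoptExitSize = 4;  // assumed
constexpr int kLazyDeoptExitSize = 8;     // assumed; must be the maximum
static_assert(kLazyDeoptExitSize >= kNonLazyDeoptExitSize,
              "reserve for the largest exit kind");

constexpr int ReservedExitSpace(int exit_count) {
  return exit_count * kLazyDeoptExitSize;  // conservative for every exit
}
static_assert(ReservedExitSpace(3) == 24, "3 exits fit in 24 bytes");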
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index e03f6d843e..7057b47369 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -151,58 +151,6 @@ uint32_t CodeGenerator::GetStackCheckOffset() {
return std::max(frame_height_delta, max_pushed_argument_bytes);
}
-void CodeGenerator::AssembleDeoptImmediateArgs(
- const ZoneVector<ImmediateOperand*>* immediate_args, Label* deopt_exit) {
- // EagerWithResume deopts should have immediate args, and to ensure fixed
- // deopt exit sizes, currently always have two immediate arguments in the
- // deopt exit.
- constexpr int kImmediateArgCount = 2;
- DCHECK_NOT_NULL(immediate_args);
- DCHECK_EQ(kImmediateArgCount, immediate_args->size());
- const int expected_offsets[] = {
- Deoptimizer::kEagerWithResumeImmedArgs1PcOffset,
- Deoptimizer::kEagerWithResumeImmedArgs2PcOffset};
- for (int i = 0; i < kImmediateArgCount; i++) {
- ImmediateOperand* op = immediate_args->at(i);
- Constant constant = instructions()->GetImmediate(op);
-
- DCHECK_EQ(tasm()->SizeOfCodeGeneratedSince(deopt_exit),
- expected_offsets[i] + Deoptimizer::kNonLazyDeoptExitSize);
- USE(expected_offsets);
-
- switch (constant.type()) {
- case Constant::kInt32:
- tasm()->dp(constant.ToInt32(), RelocInfo::LITERAL_CONSTANT);
- break;
-#ifdef V8_TARGET_ARCH_64_BIT
- case Constant::kInt64:
- tasm()->dp(constant.ToInt64());
- break;
-#endif
- case Constant::kFloat64: {
- int smi;
- CHECK(DoubleToSmiInteger(constant.ToFloat64().value(), &smi));
- tasm()->dp(Smi::FromInt(smi).ptr(), RelocInfo::LITERAL_CONSTANT);
- break;
- }
- case Constant::kCompressedHeapObject:
- case Constant::kHeapObject:
- // Emit as a DATA_EMBEDDED_OBJECT to specify that this is a raw full
- // pointer that is fixed size.
- tasm()->dp(constant.ToHeapObject().address(),
- RelocInfo::DATA_EMBEDDED_OBJECT);
- break;
- default:
- // Currently only Smis and Ints are supported, but other immediate
- // constants can be added when required.
- UNREACHABLE();
- }
- }
-
- DCHECK_EQ(tasm()->SizeOfCodeGeneratedSince(deopt_exit),
- Deoptimizer::kEagerWithResumeDeoptExitSize);
-}
-
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizationExit* exit) {
int deoptimization_id = exit->deoptimization_id();
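The deleted emitter folded Float64 constants into inline immediates only when CHECK(DoubleToSmiInteger(...)) held. A sketch of what such a guard has to test, assuming a 31-bit Smi payload:
#include <cmath>

bool DoubleToSmiIntegerSketch(double value, int* out) {
  constexpr double kSmiMin = -(1 << 30);  // assumed 31-bit Smi range
  constexpr double kSmiMax = (1 << 30) - 1;
  if (value != std::floor(value)) return false;            // not integral
  if (value == 0.0 && std::signbit(value)) return false;   // -0 has no Smi
  if (value < kSmiMin || value > kSmiMax) return false;    // out of range
  *out = static_cast<int>(value);
  return true;
}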
@@ -212,15 +160,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizeKind deopt_kind = exit->kind();
DeoptimizeReason deoptimization_reason = exit->reason();
- Label* jump_deoptimization_entry_label;
- if (deopt_kind == DeoptimizeKind::kEagerWithResume) {
- jump_deoptimization_entry_label =
- &jump_deoptimization_or_resume_entry_labels_[static_cast<int>(
- deoptimization_reason)];
- } else {
- jump_deoptimization_entry_label =
- &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
- }
+ Label* jump_deoptimization_entry_label =
+ &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
if (info()->source_positions()) {
tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
exit->pos(), deoptimization_id);
@@ -230,22 +171,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
++lazy_deopt_count_;
tasm()->BindExceptionHandler(exit->label());
} else {
- if (deopt_kind != DeoptimizeKind::kEagerWithResume) {
- ++eager_soft_and_bailout_deopt_count_;
- }
+ ++non_lazy_deopt_count_;
tasm()->bind(exit->label());
}
- Builtin target =
- deopt_kind == DeoptimizeKind::kEagerWithResume
- ? Deoptimizer::GetDeoptWithResumeBuiltin(deoptimization_reason)
- : Deoptimizer::GetDeoptimizationEntry(deopt_kind);
+ Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind);
tasm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
deopt_kind, exit->continue_label(),
jump_deoptimization_entry_label);
- if (deopt_kind == DeoptimizeKind::kEagerWithResume) {
- AssembleDeoptImmediateArgs(exit->immediate_args(), exit->label());
- }
exit->set_emitted();
return kSuccess;
@@ -407,12 +340,10 @@ void CodeGenerator::AssembleCode() {
// lazy deopts and eagerwithresume might need additional instructions.
auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
// The deoptimization exits are sorted so that lazy deopt exits appear after
- // eager deopts, and eager with resume deopts appear last.
- static_assert(DeoptimizeKind::kEagerWithResume == kLastDeoptimizeKind,
- "eager with resume deopts are expected to be emitted last");
+ // eager deopts.
static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
- static_cast<int>(kLastDeoptimizeKind) - 1,
- "lazy deopts are expected to be emitted second from last");
+ static_cast<int>(kLastDeoptimizeKind),
+ "lazy deopts are expected to be emitted last");
if (a->kind() != b->kind()) {
return a->kind() < b->kind();
}
@@ -975,8 +906,7 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
- data->SetEagerSoftAndBailoutDeoptCount(
- Smi::FromInt(eager_soft_and_bailout_deopt_count_));
+ data->SetNonLazyDeoptCount(Smi::FromInt(non_lazy_deopt_count_));
data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count_));
if (info->has_shared_info()) {
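The reworked static_assert relies on kLazy now being the last enumerator, so ordering exits by kind pushes all lazy exits to the end of the deopt-exit block. A sketch of that invariant with simplified stand-in types:
#include <algorithm>
#include <cstdint>
#include <vector>

enum class DeoptimizeKind : uint8_t { kEager, kSoft, kLazy };
struct Exit { DeoptimizeKind kind; int id; };

void SortExits(std::vector<Exit>* exits) {
  std::stable_sort(exits->begin(), exits->end(),
                   [](const Exit& a, const Exit& b) {
                     return a.kind < b.kind;  // lazy (largest value) last
                   });
}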
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 5fd34a41f4..4bceba3025 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -236,9 +236,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
CodeGenResult AssembleDeoptimizerCall(DeoptimizationExit* exit);
- void AssembleDeoptImmediateArgs(
- const ZoneVector<ImmediateOperand*>* immediate_args, Label* deopt_exit);
-
// ===========================================================================
// ============= Architecture-specific code generation methods. ==============
// ===========================================================================
@@ -424,7 +421,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
ZoneVector<HandlerInfo> handlers_;
int next_deoptimization_id_ = 0;
int deopt_exit_start_offset_ = 0;
- int eager_soft_and_bailout_deopt_count_ = 0;
+ int non_lazy_deopt_count_ = 0;
int lazy_deopt_count_ = 0;
ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
@@ -440,7 +437,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// per Code object. All deopt exits can then near-call to this label. Note:
// not used on all architectures.
Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
- Label jump_deoptimization_or_resume_entry_labels_[kDeoptimizeReasonCount];
// The maximal combined height of all frames produced upon deoptimization, and
// the maximal number of pushed arguments for function calls. Applied as an
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 0544dd5340..12edbcb37e 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -1456,8 +1456,6 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitDeoptimizeIf(node);
case IrOpcode::kDeoptimizeUnless:
return VisitDeoptimizeUnless(node);
- case IrOpcode::kDynamicCheckMapsWithDeoptUnless:
- return VisitDynamicCheckMapsWithDeoptUnless(node);
case IrOpcode::kTrapIf:
return VisitTrapIf(node, TrapIdOf(node->op()));
case IrOpcode::kTrapUnless:
@@ -3164,46 +3162,6 @@ void InstructionSelector::VisitSelect(Node* node) {
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
-void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
- OperandGenerator g(this);
- DynamicCheckMapsWithDeoptUnlessNode n(node);
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
-
- CallDescriptor* call_descriptor;
- ZoneVector<InstructionOperand> dynamic_check_args(zone());
-
- if (p.reason() == DeoptimizeReason::kDynamicCheckMaps) {
- DynamicCheckMapsDescriptor descriptor;
- // Note: We use Operator::kNoDeopt here because this builtin does not lazy
- // deoptimize (which is the meaning of Operator::kNoDeopt), even though it
- // can eagerly deoptimize.
- call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoDeopt | Operator::kNoThrow);
- dynamic_check_args.insert(
- dynamic_check_args.end(),
- {g.UseLocation(n.map(), call_descriptor->GetInputLocation(1)),
- g.UseImmediate(n.slot()), g.UseImmediate(n.handler())});
- } else {
- DCHECK_EQ(p.reason(), DeoptimizeReason::kDynamicCheckMapsInlined);
- DynamicCheckMapsWithFeedbackVectorDescriptor descriptor;
- call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoDeopt | Operator::kNoThrow);
- dynamic_check_args.insert(
- dynamic_check_args.end(),
- {g.UseLocation(n.map(), call_descriptor->GetInputLocation(1)),
- g.UseLocation(n.feedback_vector(),
- call_descriptor->GetInputLocation(2)),
- g.UseImmediate(n.slot()), g.UseImmediate(n.handler())});
- }
-
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
- dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
- VisitWordCompareZero(node, n.condition(), &cont);
-}
-
void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
FlagsContinuation cont =
FlagsContinuation::ForTrap(kNotEqual, trap_id, node->InputAt(1));
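The removed visitor chose between two descriptors by deopt reason; only the inlined variant passed the feedback vector as an explicit argument. A sketch of that dispatch with assumed names, not the real linkage code:
#include <string>
#include <vector>

enum class Reason { kDynamicCheckMaps, kDynamicCheckMapsInlined };

std::vector<std::string> DynamicCheckArgs(Reason reason) {
  if (reason == Reason::kDynamicCheckMaps) {
    return {"map", "slot", "handler"};
  }
  // Inlined frame states carried the feedback vector as an extra input.
  return {"map", "feedback_vector", "slot", "handler"};
}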
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 329ccc7e86..587eb578ec 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -87,8 +87,7 @@ std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
DCHECK(op->opcode() == IrOpcode::kDeoptimize ||
op->opcode() == IrOpcode::kDeoptimizeIf ||
- op->opcode() == IrOpcode::kDeoptimizeUnless ||
- op->opcode() == IrOpcode::kDynamicCheckMapsWithDeoptUnless);
+ op->opcode() == IrOpcode::kDeoptimizeUnless);
return OpParameter<DeoptimizeParameters>(op);
}
@@ -501,10 +500,6 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(Eager, WrongInstanceType) \
V(Eager, WrongMap)
-#define CACHED_DYNAMIC_CHECK_MAPS_LIST(V) \
- V(DynamicCheckMaps) \
- V(DynamicCheckMapsInlined)
-
#define CACHED_TRAP_IF_LIST(V) \
V(TrapDivUnrepresentable) \
V(TrapFloatUnrepresentable)
@@ -735,22 +730,6 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
- template <DeoptimizeReason kReason>
- struct DynamicMapCheckOperator final : Operator1<DeoptimizeParameters> {
- DynamicMapCheckOperator()
- : Operator1<DeoptimizeParameters>( // --
- IrOpcode::kDynamicCheckMapsWithDeoptUnless, // opcode
- Operator::kFoldable | Operator::kNoThrow, // properties
- "DynamicCheckMapsWithDeoptUnless", // name
- 6, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(DeoptimizeKind::kEagerWithResume, kReason,
- FeedbackSource())) {}
- };
-#define CACHED_DYNAMIC_CHECK_MAPS(Reason) \
- DynamicMapCheckOperator<DeoptimizeReason::k##Reason> k##Reason##Operator;
- CACHED_DYNAMIC_CHECK_MAPS_LIST(CACHED_DYNAMIC_CHECK_MAPS)
-#undef CACHED_DYNAMIC_CHECK_MAPS
-
template <TrapId trap_id>
struct TrapIfOperator final : public Operator1<TrapId> {
TrapIfOperator()
@@ -983,15 +962,6 @@ const Operator* CommonOperatorBuilder::DeoptimizeUnless(
parameter); // parameter
}
-const Operator* CommonOperatorBuilder::DynamicCheckMapsWithDeoptUnless(
- bool is_inlined_frame_state) {
- if (is_inlined_frame_state) {
- return &cache_.kDynamicCheckMapsInlinedOperator;
- } else {
- return &cache_.kDynamicCheckMapsOperator;
- }
-}
-
const Operator* CommonOperatorBuilder::TrapIf(TrapId trap_id) {
switch (trap_id) {
#define CACHED_TRAP_IF(Trap) \
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index f691c1fbf4..58e04f9cf6 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -455,10 +455,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
FeedbackSource const& feedback);
const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback);
- // DynamicCheckMapsWithDeoptUnless will call the dynamic map check builtin if
- // the condition is false, which may then either deoptimize or resume
- // execution.
- const Operator* DynamicCheckMapsWithDeoptUnless(bool is_inlined_frame_state);
const Operator* TrapIf(TrapId trap_id);
const Operator* TrapUnless(TrapId trap_id);
const Operator* Return(int value_input_count = 1);
@@ -723,27 +719,6 @@ class StartNode final : public CommonNodeWrapperBase {
int LastOutputIndex() const { return ContextOutputIndex(); }
};
-class DynamicCheckMapsWithDeoptUnlessNode final : public CommonNodeWrapperBase {
- public:
- explicit constexpr DynamicCheckMapsWithDeoptUnlessNode(Node* node)
- : CommonNodeWrapperBase(node) {
- DCHECK_EQ(IrOpcode::kDynamicCheckMapsWithDeoptUnless, node->opcode());
- }
-
-#define INPUTS(V) \
- V(Condition, condition, 0, BoolT) \
- V(Slot, slot, 1, IntPtrT) \
- V(Map, map, 2, Map) \
- V(Handler, handler, 3, Object) \
- V(FeedbackVector, feedback_vector, 4, FeedbackVector)
- INPUTS(DEFINE_INPUT_ACCESSORS)
-#undef INPUTS
-
- FrameState frame_state() {
- return FrameState{NodeProperties::GetValueInput(node(), 5)};
- }
-};
-
#undef DEFINE_INPUT_ACCESSORS
} // namespace compiler
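The deleted builder method handed out one of two statically cached operators, so equal parameterizations were pointer-identical and needed no allocation. A simplified sketch of the pattern, with the operator shape reduced to a name:
struct Operator { const char* name; };

const Operator* DynamicCheckMapsWithDeoptUnlessOp(bool inlined_frame_state) {
  static const Operator kInlined{"DynamicCheckMapsInlined"};
  static const Operator kPlain{"DynamicCheckMaps"};
  return inlined_frame_state ? &kInlined : &kPlain;
}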
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 21696969ec..6bf38dd2bb 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -80,7 +80,6 @@ class EffectControlLinearizer {
Node* LowerChangeTaggedToTaggedSigned(Node* node);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
void LowerCheckMaps(Node* node, Node* frame_state);
- void LowerDynamicCheckMaps(Node* node, Node* frame_state);
Node* LowerCompareMaps(Node* node);
Node* LowerCheckNumber(Node* node, Node* frame_state);
Node* LowerCheckClosure(Node* node, Node* frame_state);
@@ -980,9 +979,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckMaps:
LowerCheckMaps(node, frame_state);
break;
- case IrOpcode::kDynamicCheckMaps:
- LowerDynamicCheckMaps(node, frame_state);
- break;
case IrOpcode::kCompareMaps:
result = LowerCompareMaps(node);
break;
@@ -1933,56 +1929,6 @@ void EffectControlLinearizer::TryMigrateInstance(Node* value, Node* value_map) {
__ Bind(&done);
}
-void EffectControlLinearizer::LowerDynamicCheckMaps(Node* node,
- Node* frame_state_node) {
- DynamicCheckMapsParameters const& p =
- DynamicCheckMapsParametersOf(node->op());
- FrameState frame_state(frame_state_node);
- Node* value = node->InputAt(0);
-
- FeedbackSource const& feedback = p.feedback();
- Node* feedback_vector = __ HeapConstant(feedback.vector);
- Node* slot_index = __ IntPtrConstant(feedback.index());
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* actual_handler =
- p.handler()->IsSmi()
- ? __ SmiConstant(Smi::ToInt(*p.handler()))
- : __ HeapConstant(Handle<HeapObject>::cast(p.handler()));
-
- auto done = __ MakeLabel();
-
- ZoneHandleSet<Map> maps = p.maps();
- size_t const map_count = maps.size();
- for (size_t i = 0; i < map_count; ++i) {
- Node* map = __ HeapConstant(maps[i]);
- Node* check = __ TaggedEqual(value_map, map);
- if (i == map_count - 1) {
- if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
- auto migrate = __ MakeDeferredLabel();
- __ BranchWithCriticalSafetyCheck(check, &done, &migrate);
-
- __ Bind(&migrate);
- TryMigrateInstance(value, value_map);
-
- // Reload the current map of the {value} before performing the dynamic
- // map check.
- value_map = __ LoadField(AccessBuilder::ForMap(), value);
- }
-
- __ DynamicCheckMapsWithDeoptUnless(check, slot_index, value_map,
- actual_handler, feedback_vector,
- frame_state);
- __ Goto(&done);
- } else {
- auto next_map = __ MakeLabel();
- __ BranchWithCriticalSafetyCheck(check, &done, &next_map);
- __ Bind(&next_map);
- }
- }
-
- __ Bind(&done);
-}
-
Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
size_t const map_count = maps.size();
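The deleted lowering emitted a chain of map comparisons in which every hit branched to done and only the final miss reached the deopt-or-resume builtin. The control shape, reduced to plain C++ with the builtin modelled as a failure return:
#include <cstddef>

bool CheckMapsSketch(const void* value_map, const void* const* maps,
                     size_t map_count) {
  for (size_t i = 0; i < map_count; ++i) {
    if (value_map == maps[i]) return true;  // hit: resume in optimized code
  }
  return false;  // final miss: previously the dynamic-check builtin
}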
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index b2ece7e3b6..24da29a232 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -535,18 +535,6 @@ Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
frame_state);
}
-Node* GraphAssembler::DynamicCheckMapsWithDeoptUnless(Node* condition,
- Node* slot_index,
- Node* value, Node* map,
- Node* feedback_vector,
- FrameState frame_state) {
- return AddNode(graph()->NewNode(
- common()->DynamicCheckMapsWithDeoptUnless(
- frame_state.outer_frame_state()->opcode() == IrOpcode::kFrameState),
- condition, slot_index, value, map, feedback_vector, frame_state, effect(),
- control()));
-}
-
TNode<Object> GraphAssembler::Call(const CallDescriptor* call_descriptor,
int inputs_size, Node** inputs) {
return Call(common()->Call(call_descriptor), inputs_size, inputs);
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index cabae8699d..3715226bd0 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -333,10 +333,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* frame_state);
Node* DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback,
Node* condition, Node* frame_state);
- Node* DynamicCheckMapsWithDeoptUnless(Node* condition, Node* slot_index,
- Node* map, Node* handler,
- Node* feedback_vector,
- FrameState frame_state);
TNode<Object> Call(const CallDescriptor* call_descriptor, int inputs_size,
Node** inputs);
TNode<Object> Call(const Operator* op, int inputs_size, Node** inputs);
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 6ae447cad7..653b812a35 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -250,11 +250,6 @@ bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
return feedback.AsNamedAccess().maps().empty();
} else if (feedback.kind() == ProcessedFeedback::kInsufficient) {
return false;
- } else if (feedback.kind() == ProcessedFeedback::kMinimorphicPropertyAccess) {
- // MinimorphicPropertyAccess is used for dynamic map checks and the IC state
- // is either monomorphic or polymorphic. So it will still benefit from
- // collecting feedback, so don't use megamorphic builtin.
- return false;
}
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 84029e1e77..00bb53d2d8 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -49,6 +49,11 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
cage_base_(isolate),
#endif // V8_COMPRESS_POINTERS
zone_(broker_zone),
+ // Note that this initialization of {refs_} with the minimal initial
+ // capacity is redundant in the normal use case (concurrent compilation
+ // enabled, standard objects to be serialized), as the map is going to be
+ // replaced immediately with a larger-capacity one. It doesn't seem to
+ // affect the performance in a noticeable way though.
refs_(zone()->New<RefsMap>(kMinimalRefsBucketCount, AddressMatcher(),
zone())),
root_index_map_(isolate),
@@ -56,13 +61,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
tracing_enabled_(tracing_enabled),
code_kind_(code_kind),
feedback_(zone()),
- property_access_infos_(zone()),
- minimorphic_property_access_infos_(zone()) {
- // Note that this initialization of {refs_} with the minimal initial capacity
- // is redundant in the normal use case (concurrent compilation enabled,
- // standard objects to be serialized), as the map is going to be replaced
- // immediately with a larger-capacity one. It doesn't seem to affect the
- // performance in a noticeable way though.
+ property_access_infos_(zone()) {
TRACE(this, "Constructing heap broker");
}
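The relocated comment describes an initialize-small-then-replace pattern. A sketch with standard-library stand-ins for the zone-allocated RefsMap:
#include <cstdint>
#include <memory>
#include <unordered_map>

using RefsMap = std::unordered_map<uintptr_t, void*>;

std::unique_ptr<RefsMap> MakeRefs(size_t expected_entries) {
  auto refs = std::make_unique<RefsMap>(8);  // minimal bucket count
  if (expected_entries > 8) {
    // Replaced immediately in the common case; the tiny first allocation
    // is cheap enough not to matter.
    refs = std::make_unique<RefsMap>(expected_entries);
  }
  return refs;
}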
@@ -426,18 +425,6 @@ bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
return true;
}
-// TODO(v8:12552): Remove.
-MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
- NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler,
- ZoneVector<MapRef> const& maps, bool has_migration_target_maps)
- : ProcessedFeedback(kMinimorphicPropertyAccess, slot_kind),
- name_(name),
- handler_(handler),
- maps_(maps),
- has_migration_target_maps_(has_migration_target_maps) {
- DCHECK(IsLoadICKind(slot_kind));
-}
-
NamedAccessFeedback::NamedAccessFeedback(NameRef const& name,
ZoneVector<MapRef> const& maps,
FeedbackSlotKind slot_kind)
@@ -909,29 +896,6 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
return access_info;
}
-// TODO(v8:12552): Remove.
-MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
- MinimorphicLoadPropertyAccessFeedback const& feedback,
- FeedbackSource const& source) {
- auto it = minimorphic_property_access_infos_.find(source);
- if (it != minimorphic_property_access_infos_.end()) return it->second;
-
- AccessInfoFactory factory(this, nullptr, zone());
- MinimorphicLoadPropertyAccessInfo access_info =
- factory.ComputePropertyAccessInfo(feedback);
-
- // We can assume a memory fence on {source.vector} because in production,
- // the vector has already passed the gc predicate. Unit tests create
- // FeedbackSource objects directly from handles, but they run on
- // the main thread.
- TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
- << source.index() << " "
- << MakeRefAssumeMemoryFence<Object>(this, source.vector));
- minimorphic_property_access_infos_.insert({source, access_info});
-
- return access_info;
-}
-
BinaryOperationFeedback const& ProcessedFeedback::AsBinaryOperation() const {
CHECK_EQ(kBinaryOperation, kind());
return *static_cast<BinaryOperationFeedback const*>(this);
@@ -972,13 +936,6 @@ NamedAccessFeedback const& ProcessedFeedback::AsNamedAccess() const {
return *static_cast<NamedAccessFeedback const*>(this);
}
-// TODO(v8:12552): Remove.
-MinimorphicLoadPropertyAccessFeedback const&
-ProcessedFeedback::AsMinimorphicPropertyAccess() const {
- CHECK_EQ(kMinimorphicPropertyAccess, kind());
- return *static_cast<MinimorphicLoadPropertyAccessFeedback const*>(this);
-}
-
LiteralFeedback const& ProcessedFeedback::AsLiteral() const {
CHECK_EQ(kLiteral, kind());
return *static_cast<LiteralFeedback const*>(this);
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 0f22411f47..753bdb73d6 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -234,10 +234,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
MapRef map, NameRef name, AccessMode access_mode,
CompilationDependencies* dependencies);
- MinimorphicLoadPropertyAccessInfo GetPropertyAccessInfo(
- MinimorphicLoadPropertyAccessFeedback const& feedback,
- FeedbackSource const& source);
-
StringRef GetTypedArrayStringTag(ElementsKind kind);
bool IsMainThread() const {
@@ -456,9 +452,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ZoneUnorderedMap<PropertyAccessTarget, PropertyAccessInfo,
PropertyAccessTarget::Hash, PropertyAccessTarget::Equal>
property_access_infos_;
- ZoneUnorderedMap<FeedbackSource, MinimorphicLoadPropertyAccessInfo,
- FeedbackSource::Hash, FeedbackSource::Equal>
- minimorphic_property_access_infos_;
CompilationDependencies* dependencies_ = nullptr;
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 478647df7b..5c116edd68 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -1037,55 +1037,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
}
}
-Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
- Node* node, Node* value,
- MinimorphicLoadPropertyAccessFeedback const& feedback,
- FeedbackSource const& source) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
- node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
- STATIC_ASSERT(JSLoadNamedNode::ObjectIndex() == 0 &&
- JSLoadPropertyNode::ObjectIndex() == 0);
-
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- Node* lookup_start_object;
- if (node->opcode() == IrOpcode::kJSLoadNamedFromSuper) {
- DCHECK(FLAG_super_ic);
- JSLoadNamedFromSuperNode n(node);
- // Lookup start object is the __proto__ of the home object.
- lookup_start_object = effect =
- BuildLoadPrototypeFromObject(n.home_object(), effect, control);
- } else {
- lookup_start_object = NodeProperties::GetValueInput(node, 0);
- }
-
- MinimorphicLoadPropertyAccessInfo access_info =
- broker()->GetPropertyAccessInfo(feedback, source);
- if (access_info.IsInvalid()) return NoChange();
-
- PropertyAccessBuilder access_builder(jsgraph(), broker(), nullptr);
- CheckMapsFlags flags = CheckMapsFlag::kNone;
- if (feedback.has_migration_target_maps()) {
- flags |= CheckMapsFlag::kTryMigrateInstance;
- }
-
- ZoneHandleSet<Map> maps;
- for (const MapRef& map : feedback.maps()) {
- maps.insert(map.object(), graph()->zone());
- }
-
- effect = graph()->NewNode(
- simplified()->DynamicCheckMaps(flags, feedback.handler(), maps, source),
- lookup_start_object, effect, control);
- value = access_builder.BuildMinimorphicLoadDataField(
- feedback.name(), access_info, lookup_start_object, &effect, &control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, NamedAccessFeedback const& feedback,
AccessMode access_mode, Node* key) {
@@ -2013,11 +1964,6 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
case ProcessedFeedback::kNamedAccess:
return ReduceNamedAccess(node, value, feedback.AsNamedAccess(),
access_mode, key);
- case ProcessedFeedback::kMinimorphicPropertyAccess:
- DCHECK_EQ(access_mode, AccessMode::kLoad);
- DCHECK_NULL(key);
- return ReduceMinimorphicPropertyAccess(
- node, value, feedback.AsMinimorphicPropertyAccess(), source);
case ProcessedFeedback::kElementAccess:
DCHECK_EQ(feedback.AsElementAccess().keyed_mode().access_mode(),
access_mode);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 9f788812e1..9b9096c3c3 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -107,10 +107,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceNamedAccess(Node* node, Node* value,
NamedAccessFeedback const& feedback,
AccessMode access_mode, Node* key = nullptr);
- Reduction ReduceMinimorphicPropertyAccess(
- Node* node, Node* value,
- MinimorphicLoadPropertyAccessFeedback const& feedback,
- FeedbackSource const& source);
Reduction ReduceGlobalAccess(Node* node, Node* lookup_start_object,
Node* receiver, Node* value, NameRef const& name,
AccessMode access_mode, Node* key,
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 4736987744..49e23a568c 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -29,7 +29,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
- case IrOpcode::kDynamicCheckMapsWithDeoptUnless:
case IrOpcode::kEffectPhi:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 8baac472d4..27fb3b247f 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -10,28 +10,27 @@
#include "src/common/globals.h"
// Opcodes for control operators.
-#define CONTROL_OP_LIST(V) \
- V(Start) \
- V(Loop) \
- V(Branch) \
- V(Switch) \
- V(IfTrue) \
- V(IfFalse) \
- V(IfSuccess) \
- V(IfException) \
- V(IfValue) \
- V(IfDefault) \
- V(Merge) \
- V(Deoptimize) \
- V(DeoptimizeIf) \
- V(DeoptimizeUnless) \
- V(DynamicCheckMapsWithDeoptUnless) \
- V(TrapIf) \
- V(TrapUnless) \
- V(Return) \
- V(TailCall) \
- V(Terminate) \
- V(Throw) \
+#define CONTROL_OP_LIST(V) \
+ V(Start) \
+ V(Loop) \
+ V(Branch) \
+ V(Switch) \
+ V(IfTrue) \
+ V(IfFalse) \
+ V(IfSuccess) \
+ V(IfException) \
+ V(IfValue) \
+ V(IfDefault) \
+ V(Merge) \
+ V(Deoptimize) \
+ V(DeoptimizeIf) \
+ V(DeoptimizeUnless) \
+ V(TrapIf) \
+ V(TrapUnless) \
+ V(Return) \
+ V(TailCall) \
+ V(Terminate) \
+ V(Throw) \
V(End)
// Opcodes for constant operators.
@@ -421,7 +420,6 @@
V(ConvertTaggedHoleToUndefined) \
V(DateNow) \
V(DelayedStringConstant) \
- V(DynamicCheckMaps) \
V(EnsureWritableFastElements) \
V(FastApiCall) \
V(FindOrderedHashMapEntry) \
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index 832fc441da..1ca506ebb1 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -20,7 +20,6 @@ class ForInFeedback;
class GlobalAccessFeedback;
class InstanceOfFeedback;
class LiteralFeedback;
-class MinimorphicLoadPropertyAccessFeedback;
class NamedAccessFeedback;
class RegExpLiteralFeedback;
class TemplateObjectFeedback;
@@ -37,7 +36,6 @@ class ProcessedFeedback : public ZoneObject {
kGlobalAccess,
kInstanceOf,
kLiteral,
- kMinimorphicPropertyAccess,
kNamedAccess,
kRegExpLiteral,
kTemplateObject,
@@ -55,8 +53,6 @@ class ProcessedFeedback : public ZoneObject {
GlobalAccessFeedback const& AsGlobalAccess() const;
InstanceOfFeedback const& AsInstanceOf() const;
NamedAccessFeedback const& AsNamedAccess() const;
- MinimorphicLoadPropertyAccessFeedback const& AsMinimorphicPropertyAccess()
- const;
LiteralFeedback const& AsLiteral() const;
RegExpLiteralFeedback const& AsRegExpLiteral() const;
TemplateObjectFeedback const& AsTemplateObject() const;
@@ -173,27 +169,6 @@ class NamedAccessFeedback : public ProcessedFeedback {
ZoneVector<MapRef> const maps_;
};
-class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
- public:
- MinimorphicLoadPropertyAccessFeedback(NameRef const& name,
- FeedbackSlotKind slot_kind,
- Handle<Object> handler,
- ZoneVector<MapRef> const& maps,
- bool has_migration_target_maps);
-
- NameRef const& name() const { return name_; }
- bool is_monomorphic() const { return maps_.size() == 1; }
- Handle<Object> handler() const { return handler_; }
- ZoneVector<MapRef> const& maps() const { return maps_; }
- bool has_migration_target_maps() const { return has_migration_target_maps_; }
-
- private:
- NameRef const name_;
- Handle<Object> const handler_;
- ZoneVector<MapRef> const maps_;
- bool const has_migration_target_maps_;
-};
-
class CallFeedback : public ProcessedFeedback {
public:
CallFeedback(base::Optional<HeapObjectRef> target, float frequency,
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 456512a867..b77a9423b8 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -276,26 +276,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
return value;
}
-Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
- NameRef const& name, MinimorphicLoadPropertyAccessInfo const& access_info,
- Node* lookup_start_object, Node** effect, Node** control) {
- DCHECK_NULL(dependencies());
- MachineRepresentation const field_representation =
- ConvertRepresentation(access_info.field_representation());
-
- FieldAccess field_access = {
- kTaggedBase,
- access_info.offset(),
- name.object(),
- MaybeHandle<Map>(),
- access_info.field_type(),
- MachineType::TypeForRepresentation(field_representation),
- kFullWriteBarrier,
- ConstFieldInfo::None()};
- return BuildLoadDataField(name, lookup_start_object, field_access,
- access_info.is_inobject(), effect, control);
-}
-
Node* PropertyAccessBuilder::BuildLoadDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
Node* lookup_start_object, Node** effect, Node** control) {
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 77ef1bab35..bf02ad13b5 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -59,12 +59,6 @@ class PropertyAccessBuilder {
base::Optional<Node*> FoldLoadDictPrototypeConstant(
PropertyAccessInfo const& access_info);
- // Builds the load for data-field access for minimorphic loads that use
- // dynamic map checks. These cannot depend on any information from the maps.
- Node* BuildMinimorphicLoadDataField(
- NameRef const& name, MinimorphicLoadPropertyAccessInfo const& access_info,
- Node* lookup_start_object, Node** effect, Node** control);
-
static MachineRepresentation ConvertRepresentation(
Representation representation);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 15d682bd29..8298bd0d2e 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -3881,11 +3881,6 @@ class RepresentationSelector {
node, UseInfo::CheckedHeapObjectAsTaggedPointer(p.feedback()),
MachineRepresentation::kNone);
}
- case IrOpcode::kDynamicCheckMaps: {
- return VisitUnop<T>(
- node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
- MachineRepresentation::kNone);
- }
case IrOpcode::kTransitionElementsKind: {
return VisitUnop<T>(
node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index e387ea75c3..6283426468 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -270,36 +270,6 @@ CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
return OpParameter<CheckMapsParameters>(op);
}
-bool operator==(DynamicCheckMapsParameters const& lhs,
- DynamicCheckMapsParameters const& rhs) {
- // FeedbackSource is sufficient as an equality check. FeedbackSource uniquely
- // determines all other properties (handler, flags and the monomorphic map
- DCHECK_IMPLIES(lhs.feedback() == rhs.feedback(),
- lhs.flags() == rhs.flags() && lhs.state() == rhs.state() &&
- lhs.handler().address() == rhs.handler().address() &&
- lhs.maps() == rhs.maps());
- return lhs.feedback() == rhs.feedback();
-}
-
-size_t hash_value(DynamicCheckMapsParameters const& p) {
- FeedbackSource::Hash feedback_hash;
- // FeedbackSource is sufficient for hashing. FeedbackSource uniquely
- // determines all other properties (handler, flags and the monomorphic map).
- return base::hash_combine(feedback_hash(p.feedback()));
-}
-
-std::ostream& operator<<(std::ostream& os,
- DynamicCheckMapsParameters const& p) {
- return os << p.handler() << ", " << p.feedback() << "," << p.state() << ","
- << p.flags() << "," << p.maps();
-}
-
-DynamicCheckMapsParameters const& DynamicCheckMapsParametersOf(
- Operator const* op) {
- DCHECK_EQ(IrOpcode::kDynamicCheckMaps, op->opcode());
- return OpParameter<DynamicCheckMapsParameters>(op);
-}
-
ZoneHandleSet<Map> const& CompareMapsParametersOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kCompareMaps, op->opcode());
return OpParameter<ZoneHandleSet<Map>>(op);
@@ -1484,18 +1454,6 @@ const Operator* SimplifiedOperatorBuilder::CheckMaps(
parameters); // parameter
}
-const Operator* SimplifiedOperatorBuilder::DynamicCheckMaps(
- CheckMapsFlags flags, Handle<Object> handler,
- ZoneHandleSet<Map> const& maps, const FeedbackSource& feedback) {
- DynamicCheckMapsParameters const parameters(flags, handler, maps, feedback);
- return zone()->New<Operator1<DynamicCheckMapsParameters>>( // --
- IrOpcode::kDynamicCheckMaps, // opcode
- Operator::kNoThrow | Operator::kNoWrite, // flags
- "DynamicCheckMaps", // name
- 1, 1, 1, 0, 1, 0, // counts
- parameters); // parameter
-}
-
const Operator* SimplifiedOperatorBuilder::MapGuard(ZoneHandleSet<Map> maps) {
DCHECK_LT(0, maps.size());
return zone()->New<Operator1<ZoneHandleSet<Map>>>( // --
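The deleted equality and hash operators keyed everything on FeedbackSource and documented, via DCHECK_IMPLIES, that the key determines the remaining fields. A sketch of that idiom:
#include <cassert>
#include <cstddef>
#include <functional>

struct ParamsSketch {
  int feedback;  // stand-in for FeedbackSource, the unique key
  int handler;   // assumed to be fully determined by feedback
};

inline bool operator==(const ParamsSketch& a, const ParamsSketch& b) {
  // The assert records the invariant; the comparison stays cheap.
  assert(a.feedback != b.feedback || a.handler == b.handler);
  return a.feedback == b.feedback;
}

inline size_t hash_value(const ParamsSketch& p) {
  return std::hash<int>{}(p.feedback);  // hashing the key alone is sound
}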
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index a9ea33531c..1016c69949 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -439,41 +439,6 @@ std::ostream& operator<<(std::ostream&, CheckMapsParameters const&);
CheckMapsParameters const& CheckMapsParametersOf(Operator const*)
V8_WARN_UNUSED_RESULT;
-// A descriptor for dynamic map checks.
-class DynamicCheckMapsParameters final {
- public:
- enum ICState { kMonomorphic, kPolymorphic };
-
- DynamicCheckMapsParameters(CheckMapsFlags flags, Handle<Object> handler,
- ZoneHandleSet<Map> const& maps,
- const FeedbackSource& feedback)
- : flags_(flags), handler_(handler), maps_(maps), feedback_(feedback) {}
-
- CheckMapsFlags flags() const { return flags_; }
- Handle<Object> handler() const { return handler_; }
- ZoneHandleSet<Map> const& maps() const { return maps_; }
- FeedbackSource const& feedback() const { return feedback_; }
- ICState state() const {
- return maps_.size() == 1 ? ICState::kMonomorphic : ICState::kPolymorphic;
- }
-
- private:
- CheckMapsFlags const flags_;
- Handle<Object> const handler_;
- ZoneHandleSet<Map> const maps_;
- FeedbackSource const feedback_;
-};
-
-bool operator==(DynamicCheckMapsParameters const&,
- DynamicCheckMapsParameters const&);
-
-size_t hash_value(DynamicCheckMapsParameters const&);
-
-std::ostream& operator<<(std::ostream&, DynamicCheckMapsParameters const&);
-
-DynamicCheckMapsParameters const& DynamicCheckMapsParametersOf(Operator const*)
- V8_WARN_UNUSED_RESULT;
-
ZoneHandleSet<Map> const& MapGuardMapsOf(Operator const*) V8_WARN_UNUSED_RESULT;
// Parameters for CompareMaps operator.
@@ -927,9 +892,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckInternalizedString();
const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
const FeedbackSource& = FeedbackSource());
- const Operator* DynamicCheckMaps(CheckMapsFlags flags, Handle<Object> handler,
- ZoneHandleSet<Map> const& maps,
- const FeedbackSource& feedback);
const Operator* CheckNotTaggedHole();
const Operator* CheckNumber(const FeedbackSource& feedback);
const Operator* CheckReceiver();
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 123518685d..2daf637ebd 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -115,7 +115,6 @@ class Typer::Visitor : public Reducer {
DECLARE_IMPOSSIBLE_CASE(Deoptimize)
DECLARE_IMPOSSIBLE_CASE(DeoptimizeIf)
DECLARE_IMPOSSIBLE_CASE(DeoptimizeUnless)
- DECLARE_IMPOSSIBLE_CASE(DynamicCheckMapsWithDeoptUnless)
DECLARE_IMPOSSIBLE_CASE(TrapIf)
DECLARE_IMPOSSIBLE_CASE(TrapUnless)
DECLARE_IMPOSSIBLE_CASE(Return)
@@ -2102,7 +2101,6 @@ Type Typer::Visitor::TypeCheckInternalizedString(Node* node) {
}
Type Typer::Visitor::TypeCheckMaps(Node* node) { UNREACHABLE(); }
-Type Typer::Visitor::TypeDynamicCheckMaps(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeCompareMaps(Node* node) { return Type::Boolean(); }
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 91d160a055..2ee300d88e 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -370,7 +370,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
- case IrOpcode::kDynamicCheckMapsWithDeoptUnless:
case IrOpcode::kPlug:
case IrOpcode::kTrapIf:
case IrOpcode::kTrapUnless:
@@ -1448,10 +1447,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckNotTyped(node);
break;
- case IrOpcode::kDynamicCheckMaps:
- CheckValueInputIs(node, 0, Type::Any());
- CheckNotTyped(node);
- break;
case IrOpcode::kCompareMaps:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index d0843f43e9..0337d2d291 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -17,18 +17,11 @@ namespace internal {
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
-ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
#undef ASSERT_OFFSET
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
- kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
const int kShift = n % 2 == 0 ? 0 : 32;
diff --git a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index d7b36e942d..c695347a0b 100644
--- a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -16,12 +16,6 @@ const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
#else
const int Deoptimizer::kLazyDeoptExitSize = 1 * kInstrSize;
#endif
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 2 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
- kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
diff --git a/deps/v8/src/deoptimizer/deoptimize-reason.h b/deps/v8/src/deoptimizer/deoptimize-reason.h
index abf4d263ea..30c0991079 100644
--- a/deps/v8/src/deoptimizer/deoptimize-reason.h
+++ b/deps/v8/src/deoptimizer/deoptimize-reason.h
@@ -17,8 +17,6 @@ namespace internal {
V(CouldNotGrowElements, "failed to grow elements store") \
V(DeoptimizeNow, "%_DeoptimizeNow") \
V(DivisionByZero, "division by zero") \
- V(DynamicCheckMaps, "dynamic check maps failed") \
- V(DynamicCheckMapsInlined, "dynamic check maps failed") \
V(Hole, "hole") \
V(InstanceMigrationFailed, "instance migration failed") \
V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call") \
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index b8b84a5872..e30df04a64 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -468,10 +468,6 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
return "deopt-soft";
case DeoptimizeKind::kLazy:
return "deopt-lazy";
- case DeoptimizeKind::kBailout:
- return "bailout";
- case DeoptimizeKind::kEagerWithResume:
- return "eager-with-resume";
}
}
@@ -541,21 +537,14 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
DeoptimizationData::cast(compiled_code_.deoptimization_data());
Address deopt_start = compiled_code_.raw_instruction_start() +
deopt_data.DeoptExitStart().value();
- int eager_soft_and_bailout_deopt_count =
- deopt_data.EagerSoftAndBailoutDeoptCount().value();
+ int non_lazy_deopt_count = deopt_data.NonLazyDeoptCount().value();
Address lazy_deopt_start =
- deopt_start +
- eager_soft_and_bailout_deopt_count * kNonLazyDeoptExitSize;
- int lazy_deopt_count = deopt_data.LazyDeoptCount().value();
- Address eager_with_resume_deopt_start =
- lazy_deopt_start + lazy_deopt_count * kLazyDeoptExitSize;
+ deopt_start + non_lazy_deopt_count * kNonLazyDeoptExitSize;
// The deoptimization exits are sorted so that lazy deopt exits appear after
- // eager deopts, and eager with resume deopts appear last.
- static_assert(DeoptimizeKind::kEagerWithResume == kLastDeoptimizeKind,
- "eager with resume deopts are expected to be emitted last");
+ // eager deopts.
static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
- static_cast<int>(kLastDeoptimizeKind) - 1,
- "lazy deopts are expected to be emitted second from last");
+ static_cast<int>(kLastDeoptimizeKind),
+ "lazy deopts are expected to be emitted last");
// from_ is the value of the link register after the call to the
// deoptimizer, so for the last lazy deopt, from_ points to the first
// non-lazy deopt, so we use <=, similarly for the last non-lazy deopt and
@@ -565,19 +554,11 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
static_cast<int>(from_ - kNonLazyDeoptExitSize - deopt_start);
DCHECK_EQ(0, offset % kNonLazyDeoptExitSize);
deopt_exit_index_ = offset / kNonLazyDeoptExitSize;
- } else if (from_ <= eager_with_resume_deopt_start) {
+ } else {
int offset =
static_cast<int>(from_ - kLazyDeoptExitSize - lazy_deopt_start);
DCHECK_EQ(0, offset % kLazyDeoptExitSize);
- deopt_exit_index_ =
- eager_soft_and_bailout_deopt_count + (offset / kLazyDeoptExitSize);
- } else {
- int offset = static_cast<int>(from_ - kNonLazyDeoptExitSize -
- eager_with_resume_deopt_start);
- DCHECK_EQ(0, offset % kEagerWithResumeDeoptExitSize);
- deopt_exit_index_ = eager_soft_and_bailout_deopt_count +
- lazy_deopt_count +
- (offset / kEagerWithResumeDeoptExitSize);
+ deopt_exit_index_ = non_lazy_deopt_count + (offset / kLazyDeoptExitSize);
}
}
}
@@ -617,32 +598,14 @@ void Deoptimizer::DeleteFrameDescriptions() {
#endif // DEBUG
}
-Builtin Deoptimizer::GetDeoptWithResumeBuiltin(DeoptimizeReason reason) {
- switch (reason) {
- case DeoptimizeReason::kDynamicCheckMaps:
- return Builtin::kDynamicCheckMapsTrampoline;
- case DeoptimizeReason::kDynamicCheckMapsInlined:
- return Builtin::kDynamicCheckMapsWithFeedbackVectorTrampoline;
- default:
- UNREACHABLE();
- }
-}
-
Builtin Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
return Builtin::kDeoptimizationEntry_Eager;
case DeoptimizeKind::kSoft:
return Builtin::kDeoptimizationEntry_Soft;
- case DeoptimizeKind::kBailout:
- return Builtin::kDeoptimizationEntry_Bailout;
case DeoptimizeKind::kLazy:
return Builtin::kDeoptimizationEntry_Lazy;
- case DeoptimizeKind::kEagerWithResume:
- // EagerWithResume deopts will call a special builtin (specified by
- // GetDeoptWithResumeBuiltin) which will itself select the deoptimization
- // entry builtin if it decides to deopt instead of resuming execution.
- UNREACHABLE();
}
}
@@ -658,9 +621,6 @@ bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
case Builtin::kDeoptimizationEntry_Soft:
*type_out = DeoptimizeKind::kSoft;
return true;
- case Builtin::kDeoptimizationEntry_Bailout:
- *type_out = DeoptimizeKind::kBailout;
- return true;
case Builtin::kDeoptimizationEntry_Lazy:
*type_out = DeoptimizeKind::kLazy;
return true;
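With kBailout and kEagerWithResume gone from DeoptimizeKind, the exit area degenerates to two fixed-size regions, non-lazy exits followed by lazy exits, and index recovery simplifies accordingly. A condensed sketch of the arithmetic the constructor now performs, with names mirroring the hunks above (`from` is the return address into the exit area):

    Address lazy_deopt_start =
        deopt_start + non_lazy_deopt_count * kNonLazyDeoptExitSize;
    int deopt_exit_index;
    if (from <= lazy_deopt_start) {
      // from points just past the call, so step back one exit size first.
      int offset = static_cast<int>(from - kNonLazyDeoptExitSize - deopt_start);
      deopt_exit_index = offset / kNonLazyDeoptExitSize;
    } else {
      int offset = static_cast<int>(from - kLazyDeoptExitSize - lazy_deopt_start);
      deopt_exit_index = non_lazy_deopt_count + offset / kLazyDeoptExitSize;
    }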
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index abfd668dd9..36e85480be 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -93,11 +93,6 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
- // Returns the builtin that will perform a check and either eagerly deopt with
- // |reason| or resume execution in the optimized code.
- V8_EXPORT_PRIVATE static Builtin GetDeoptWithResumeBuiltin(
- DeoptimizeReason reason);
-
V8_EXPORT_PRIVATE static Builtin GetDeoptimizationEntry(DeoptimizeKind kind);
// Returns true if {addr} is a deoptimization entry and stores its type in
@@ -139,10 +134,6 @@ class Deoptimizer : public Malloced {
// kSupportsFixedDeoptExitSizes is true.
V8_EXPORT_PRIVATE static const int kNonLazyDeoptExitSize;
V8_EXPORT_PRIVATE static const int kLazyDeoptExitSize;
- V8_EXPORT_PRIVATE static const int kEagerWithResumeBeforeArgsSize;
- V8_EXPORT_PRIVATE static const int kEagerWithResumeDeoptExitSize;
- V8_EXPORT_PRIVATE static const int kEagerWithResumeImmedArgs1PcOffset;
- V8_EXPORT_PRIVATE static const int kEagerWithResumeImmedArgs2PcOffset;
// Tracing.
static void TraceMarkForDeoptimization(Code code, const char* reason);
diff --git a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
index 9fba75d4ab..4fcb22c209 100644
--- a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
@@ -12,12 +12,6 @@ namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 5;
const int Deoptimizer::kLazyDeoptExitSize = 5;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 10;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 5;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
- 5 + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
diff --git a/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
index fb82466af1..73d71036ed 100644
--- a/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
+++ b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
@@ -10,13 +10,6 @@ namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
-// TODO(LOONG_dev): LOONG64 Is the PcOffset right?
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
- kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index f917c59f16..c20b5c5ecf 100644
--- a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -10,12 +10,6 @@ namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 5 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 2 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
- 2 * kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index f917c59f16..c20b5c5ecf 100644
--- a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -10,12 +10,6 @@ namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 5 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 2 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
- 2 * kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index d7cd04bdf7..c315743111 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -17,18 +17,11 @@ namespace internal {
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
-ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
#undef ASSERT_OFFSET
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 4 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
- kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
float float_val = static_cast<float>(double_registers_[n].get_scalar());
diff --git a/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc b/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
index 12573ed29b..73d71036ed 100644
--- a/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
+++ b/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
@@ -10,12 +10,6 @@ namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 4 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
- kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index c776bdb48b..9db7bf722f 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -17,18 +17,11 @@ namespace internal {
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
-ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
#undef ASSERT_OFFSET
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 6 + 2;
const int Deoptimizer::kLazyDeoptExitSize = 6 + 2;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 6 + 2 + 6;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 6;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
- 6 + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index 484ede213a..1fba0c6e2d 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -19,17 +19,11 @@ namespace internal {
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
-ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
#undef ASSERT_OFFSET
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 4;
const int Deoptimizer::kLazyDeoptExitSize = 4;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 9;
-const int Deoptimizer::kEagerWithResumeDeoptExitSize =
- kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
-const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 5;
-const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset = 13;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index fb4a2b4bea..41e640c9f7 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -33,10 +33,6 @@ namespace internal {
namespace {
-// Specifies whether V8 expects the holder memory of a global handle to be live
-// or dead.
-enum class HandleHolder { kLive, kDead };
-
constexpr size_t kBlockSize = 256;
} // namespace
@@ -574,8 +570,7 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
set_state(NEAR_DEATH);
}
- void ResetPhantomHandle(HandleHolder handle_holder) {
- DCHECK_EQ(HandleHolder::kLive, handle_holder);
+ void ResetPhantomHandle() {
DCHECK_EQ(PHANTOM_WEAK_RESET_HANDLE, weakness_type());
DCHECK_EQ(PENDING, state());
DCHECK_NULL(weak_callback_);
@@ -653,7 +648,6 @@ class GlobalHandles::TracedNode final
void MarkAsUsed() { set_state(NORMAL); }
bool IsInUse() const { return state() != FREE; }
bool IsRetainer() const { return state() == NORMAL; }
- bool IsPhantomResetHandle() const { return callback_ == nullptr; }
bool is_in_young_list() const { return IsInYoungList::decode(flags_); }
void set_in_young_list(bool v) { flags_ = IsInYoungList::update(flags_, v); }
@@ -661,9 +655,6 @@ class GlobalHandles::TracedNode final
bool is_root() const { return IsRoot::decode(flags_); }
void set_root(bool v) { flags_ = IsRoot::update(flags_, v); }
- bool has_destructor() const { return HasDestructor::decode(flags_); }
- void set_has_destructor(bool v) { flags_ = HasDestructor::update(flags_, v); }
-
bool markbit() const { return Markbit::decode(flags_); }
void clear_markbit() { flags_ = Markbit::update(flags_, false); }
void set_markbit() { flags_ = Markbit::update(flags_, true); }
@@ -673,44 +664,10 @@ class GlobalHandles::TracedNode final
void clear_object() { object_ = kNullAddress; }
- void SetFinalizationCallback(void* parameter,
- WeakCallbackInfo<void>::Callback callback) {
- set_parameter(parameter);
- callback_ = callback;
- }
- bool HasFinalizationCallback() const { return callback_ != nullptr; }
-
void CopyObjectReference(const TracedNode& other) { object_ = other.object_; }
- void CollectPhantomCallbackData(
- std::vector<std::pair<TracedNode*, PendingPhantomCallback>>*
- pending_phantom_callbacks) {
+ void ResetPhantomHandle() {
DCHECK(IsInUse());
- DCHECK_NOT_NULL(callback_);
-
- void* embedder_fields[v8::kEmbedderFieldsInWeakCallback] = {nullptr,
- nullptr};
- ExtractInternalFields(JSObject::cast(object()), embedder_fields,
- v8::kEmbedderFieldsInWeakCallback);
-
- // Zap with something dangerous.
- location().store(Object(0xCA11));
-
- pending_phantom_callbacks->push_back(std::make_pair(
- this, PendingPhantomCallback(callback_, parameter(), embedder_fields)));
- set_state(NEAR_DEATH);
- }
-
- void ResetPhantomHandle(HandleHolder handle_holder) {
- DCHECK(IsInUse());
- // Even if the handle holder should be alive, the back reference may have
- // been cleared which prevents the handle from being reclaimed at this
- // point. This can happen for explicitly reset handles during incremental
- // marking that then cannot be reclaimed during Scavenge.
- if (handle_holder == HandleHolder::kLive && data_.parameter) {
- Address** handle = reinterpret_cast<Address**>(data_.parameter);
- *handle = nullptr;
- }
NodeSpace<TracedNode>::Release(this);
DCHECK(!IsInUse());
}
@@ -721,27 +678,21 @@ class GlobalHandles::TracedNode final
using NodeState = base::BitField8<State, 0, 2>;
using IsInYoungList = NodeState::Next<bool, 1>;
using IsRoot = IsInYoungList::Next<bool, 1>;
- using HasDestructor = IsRoot::Next<bool, 1>;
- using Markbit = HasDestructor::Next<bool, 1>;
+ using Markbit = IsRoot::Next<bool, 1>;
using IsOnStack = Markbit::Next<bool, 1>;
void ClearImplFields() {
set_root(true);
// Nodes are black allocated for simplicity.
set_markbit();
- callback_ = nullptr;
set_is_on_stack(false);
- set_has_destructor(false);
}
void CheckImplFieldsAreCleared() const {
DCHECK(is_root());
DCHECK(markbit());
- DCHECK_NULL(callback_);
}
- WeakCallbackInfo<void>::Callback callback_;
-
friend class NodeBase<GlobalHandles::TracedNode>;
};
@@ -902,9 +853,6 @@ void GlobalHandles::TracedNode::Verify(GlobalHandles* global_handles,
#ifdef DEBUG
const TracedNode* node = FromLocation(*slot);
DCHECK(node->IsInUse());
- DCHECK_IMPLIES(!node->has_destructor(), nullptr == node->parameter());
- DCHECK_IMPLIES(node->has_destructor() && !node->HasFinalizationCallback(),
- node->parameter());
bool slot_on_stack = global_handles->on_stack_nodes_->IsOnStack(
reinterpret_cast<uintptr_t>(slot));
DCHECK_EQ(slot_on_stack, node->is_on_stack());
@@ -971,17 +919,16 @@ Handle<Object> GlobalHandles::Create(Address value) {
return Create(Object(value));
}
-Handle<Object> GlobalHandles::CreateTraced(
- Object value, Address* slot, GlobalHandleDestructionMode destruction_mode,
- GlobalHandleStoreMode store_mode) {
+Handle<Object> GlobalHandles::CreateTraced(Object value, Address* slot,
+ GlobalHandleStoreMode store_mode) {
return CreateTraced(
- value, slot, destruction_mode, store_mode,
+ value, slot, store_mode,
on_stack_nodes_->IsOnStack(reinterpret_cast<uintptr_t>(slot)));
}
-Handle<Object> GlobalHandles::CreateTraced(
- Object value, Address* slot, GlobalHandleDestructionMode destruction_mode,
- GlobalHandleStoreMode store_mode, bool is_on_stack) {
+Handle<Object> GlobalHandles::CreateTraced(Object value, Address* slot,
+ GlobalHandleStoreMode store_mode,
+ bool is_on_stack) {
GlobalHandles::TracedNode* result;
if (is_on_stack) {
result = on_stack_nodes_->Acquire(value, reinterpret_cast<uintptr_t>(slot));
@@ -995,17 +942,13 @@ Handle<Object> GlobalHandles::CreateTraced(
WriteBarrier::MarkingFromGlobalHandle(value);
}
}
- const bool has_destructor =
- destruction_mode == GlobalHandleDestructionMode::kWithDestructor;
- result->set_has_destructor(has_destructor);
- result->set_parameter(has_destructor ? slot : nullptr);
+ result->set_parameter(nullptr);
return result->handle();
}
-Handle<Object> GlobalHandles::CreateTraced(
- Address value, Address* slot, GlobalHandleDestructionMode destruction_mode,
- GlobalHandleStoreMode store_mode) {
- return CreateTraced(Object(value), slot, destruction_mode, store_mode);
+Handle<Object> GlobalHandles::CreateTraced(Address value, Address* slot,
+ GlobalHandleStoreMode store_mode) {
+ return CreateTraced(Object(value), slot, store_mode);
}
Handle<Object> GlobalHandles::CopyGlobal(Address* location) {
@@ -1028,23 +971,15 @@ void SetSlotThreadSafe(Address** slot, Address* val) {
} // namespace
// static
-void GlobalHandles::CopyTracedGlobal(const Address* const* from, Address** to) {
+void GlobalHandles::CopyTracedReference(const Address* const* from,
+ Address** to) {
DCHECK_NOT_NULL(*from);
DCHECK_NULL(*to);
const TracedNode* node = TracedNode::FromLocation(*from);
- // Copying a traced handle with finalization callback is prohibited because
- // the callback may require knowing about multiple copies of the traced
- // handle.
- CHECK_WITH_MSG(!node->HasFinalizationCallback(),
- "Copying of references is not supported when "
- "SetFinalizationCallback is set.");
-
GlobalHandles* global_handles =
GlobalHandles::From(const_cast<TracedNode*>(node));
Handle<Object> o = global_handles->CreateTraced(
node->object(), reinterpret_cast<Address*>(to),
- node->has_destructor() ? GlobalHandleDestructionMode::kWithDestructor
- : GlobalHandleDestructionMode::kWithoutDestructor,
GlobalHandleStoreMode::kAssigningStore);
SetSlotThreadSafe(to, o.location());
TracedNode::Verify(global_handles, from);
@@ -1070,10 +1005,10 @@ void GlobalHandles::MoveGlobal(Address** from, Address** to) {
// those the callers need to ensure consistency.
}
-void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) {
+void GlobalHandles::MoveTracedReference(Address** from, Address** to) {
// Fast path for moving from an empty reference.
if (!*from) {
- DestroyTraced(*to);
+ DestroyTracedReference(*to);
SetSlotThreadSafe(to, nullptr);
return;
}
@@ -1097,17 +1032,6 @@ void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) {
to_on_stack = to_node->is_on_stack();
}
- // Moving a traced handle with finalization callback is prohibited because
- // the callback may require knowing about multiple copies of the traced
- // handle.
- CHECK_WITH_MSG(!from_node->HasFinalizationCallback(),
- "Moving of references is not supported when "
- "SetFinalizationCallback is set.");
- // Types in v8.h ensure that we only copy/move handles that have the same
- // destructor behavior.
- DCHECK_IMPLIES(to_node,
- to_node->has_destructor() == from_node->has_destructor());
-
// Moving.
if (from_on_stack || to_on_stack) {
// Move involving a stack slot.
@@ -1115,9 +1039,6 @@ void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) {
DCHECK(global_handles);
Handle<Object> o = global_handles->CreateTraced(
from_node->object(), reinterpret_cast<Address*>(to),
- from_node->has_destructor()
- ? GlobalHandleDestructionMode::kWithDestructor
- : GlobalHandleDestructionMode::kWithoutDestructor,
GlobalHandleStoreMode::kAssigningStore, to_on_stack);
SetSlotThreadSafe(to, o.location());
to_node = TracedNode::FromLocation(*to);
@@ -1135,20 +1056,16 @@ void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) {
WriteBarrier::MarkingFromGlobalHandle(to_node->object());
}
}
- DestroyTraced(*from);
+ DestroyTracedReference(*from);
SetSlotThreadSafe(from, nullptr);
} else {
// Pure heap move.
- DestroyTraced(*to);
+ DestroyTracedReference(*to);
SetSlotThreadSafe(to, *from);
to_node = from_node;
DCHECK_NOT_NULL(*from);
DCHECK_NOT_NULL(*to);
DCHECK_EQ(*from, *to);
- // Fixup back reference for destructor.
- if (to_node->has_destructor()) {
- to_node->set_parameter(to);
- }
WriteBarrier::MarkingFromGlobalHandle(to_node->object());
SetSlotThreadSafe(from, nullptr);
}
@@ -1175,7 +1092,7 @@ void GlobalHandles::Destroy(Address* location) {
}
// static
-void GlobalHandles::DestroyTraced(Address* location) {
+void GlobalHandles::DestroyTracedReference(Address* location) {
if (location != nullptr) {
TracedNode* node = TracedNode::FromLocation(location);
if (node->is_on_stack()) {
@@ -1209,20 +1126,9 @@ void GlobalHandles::DestroyTraced(Address* location) {
// next cycle.
node->clear_object();
node->set_parameter(nullptr);
- node->SetFinalizationCallback(nullptr, nullptr);
- // The destructor setting is left untouched to avoid casting a
- // v8::TracedGlobal to a v8::TracedReference for the EmbedderRootsHandler
- // which would be UB.
}
}
-void GlobalHandles::SetFinalizationCallbackForTraced(
- Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback callback) {
- TracedNode::FromLocation(location)->SetFinalizationCallback(parameter,
- callback);
-}
-
using GenericCallback = v8::WeakCallbackInfo<void>::Callback;
void GlobalHandles::MakeWeak(Address* location, void* parameter,
@@ -1269,7 +1175,7 @@ void GlobalHandles::IterateWeakRootsForPhantomHandles(
should_reset_handle(isolate()->heap(), node->location())) {
if (node->IsPhantomResetHandle()) {
node->MarkPending();
- node->ResetPhantomHandle(HandleHolder::kLive);
+ node->ResetPhantomHandle();
++number_of_phantom_handle_resets_;
} else if (node->IsPhantomCallback()) {
node->MarkPending();
@@ -1280,31 +1186,20 @@ void GlobalHandles::IterateWeakRootsForPhantomHandles(
for (TracedNode* node : *traced_nodes_) {
if (!node->IsInUse()) continue;
// Detect unreachable nodes first.
- if (!node->markbit() && node->IsPhantomResetHandle() &&
- !node->has_destructor()) {
- // The handle is unreachable and does not have a callback and a
- // destructor associated with it. We can clear it even if the target V8
- // object is alive. Note that the destructor and the callback may
- // access the handle, that is why we avoid clearing it.
- node->ResetPhantomHandle(HandleHolder::kDead);
+ if (!node->markbit()) {
+ // The handle itself is unreachable. We can clear it even if the target V8
+ // object is alive.
+ node->ResetPhantomHandle();
++number_of_phantom_handle_resets_;
continue;
- } else if (node->markbit()) {
- // Clear the markbit for the next GC.
- node->clear_markbit();
}
+ // Clear the markbit for the next GC.
+ node->clear_markbit();
DCHECK(node->IsInUse());
// Detect nodes with unreachable target objects.
if (should_reset_handle(isolate()->heap(), node->location())) {
- // If the node allows eager resetting, then reset it here. Otherwise,
- // collect its callback that will reset it.
- if (node->IsPhantomResetHandle()) {
- node->ResetPhantomHandle(node->has_destructor() ? HandleHolder::kLive
- : HandleHolder::kDead);
- ++number_of_phantom_handle_resets_;
- } else {
- node->CollectPhantomCallbackData(&traced_pending_phantom_callbacks_);
- }
+ node->ResetPhantomHandle();
+ ++number_of_phantom_handle_resets_;
}
}
}
@@ -1335,15 +1230,8 @@ void GlobalHandles::IdentifyWeakUnmodifiedObjects(
DCHECK(node->is_root());
if (is_unmodified(node->location())) {
v8::Value* value = ToApi<v8::Value>(node->handle());
- if (node->has_destructor()) {
- START_ALLOW_USE_DEPRECATED()
- node->set_root(handler->IsRoot(
- *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value)));
- END_ALLOW_USE_DEPRECATED()
- } else {
- node->set_root(handler->IsRoot(
- *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value)));
- }
+ node->set_root(handler->IsRoot(
+ *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value)));
}
}
}
@@ -1397,7 +1285,7 @@ void GlobalHandles::IterateYoungWeakObjectsForPhantomHandles(
DCHECK(node->IsPhantomResetHandle() || node->IsPhantomCallback());
if (node->IsPhantomResetHandle()) {
node->MarkPending();
- node->ResetPhantomHandle(HandleHolder::kLive);
+ node->ResetPhantomHandle();
++number_of_phantom_handle_resets_;
} else if (node->IsPhantomCallback()) {
node->MarkPending();
@@ -1422,25 +1310,13 @@ void GlobalHandles::IterateYoungWeakObjectsForPhantomHandles(
DCHECK_IMPLIES(node->is_root(),
!should_reset_handle(isolate_->heap(), node->location()));
if (should_reset_handle(isolate_->heap(), node->location())) {
- if (node->IsPhantomResetHandle()) {
- if (node->has_destructor()) {
- // For handles with destructor it is guaranteed that the embedder
- // memory is still alive as the destructor would have otherwise
- // removed the memory.
- node->ResetPhantomHandle(HandleHolder::kLive);
- } else {
- v8::Value* value = ToApi<v8::Value>(node->handle());
- handler->ResetRoot(
- *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
- // We cannot check whether a node is in use here as the reset behavior
- // depends on whether incremental marking is running when reclaiming
- // young objects.
- }
-
- ++number_of_phantom_handle_resets_;
- } else {
- node->CollectPhantomCallbackData(&traced_pending_phantom_callbacks_);
- }
+ v8::Value* value = ToApi<v8::Value>(node->handle());
+ handler->ResetRoot(
+ *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
+ // We cannot check whether a node is in use here as the reset behavior
+ // depends on whether incremental marking is running when reclaiming
+ // young objects.
+ ++number_of_phantom_handle_resets_;
} else {
if (!node->is_root()) {
node->set_root(true);
@@ -1724,15 +1600,8 @@ void GlobalHandles::IterateTracedNodes(
for (TracedNode* node : *traced_nodes_) {
if (node->IsInUse()) {
v8::Value* value = ToApi<v8::Value>(node->handle());
- if (node->has_destructor()) {
- START_ALLOW_USE_DEPRECATED()
- visitor->VisitTracedGlobalHandle(
- *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value));
- END_ALLOW_USE_DEPRECATED()
- } else {
- visitor->VisitTracedReference(
- *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
- }
+ visitor->VisitTracedReference(
+ *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
}
}
}
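On the embedder side, all of the removed machinery (finalization callbacks, destructor tracking, HandleHolder) belonged to the TracedGlobal<> API; what remains is the plain TracedReference path. A minimal usage sketch, assuming an `isolate` in scope (per the renames above, Reset() bottoms out in CreateTraced() and DestroyTracedReference()):

    v8::TracedReference<v8::Object> ref;
    {
      v8::HandleScope scope(isolate);
      ref.Reset(isolate, v8::Object::New(isolate));  // creates a TracedNode
    }
    ref.Reset();  // releases the node; no finalization callback is involved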
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index 058af91069..155a0f89e4 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -90,12 +90,9 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
// API for traced handles.
//
- static void MoveTracedGlobal(Address** from, Address** to);
- static void CopyTracedGlobal(const Address* const* from, Address** to);
- static void DestroyTraced(Address* location);
- static void SetFinalizationCallbackForTraced(
- Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback callback);
+ static void MoveTracedReference(Address** from, Address** to);
+ static void CopyTracedReference(const Address* const* from, Address** to);
+ static void DestroyTracedReference(Address* location);
static void MarkTraced(Address* location);
explicit GlobalHandles(Isolate* isolate);
@@ -109,14 +106,11 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
inline Handle<T> Create(T value);
Handle<Object> CreateTraced(Object value, Address* slot,
- GlobalHandleDestructionMode destruction_mode,
GlobalHandleStoreMode store_mode,
bool is_on_stack);
Handle<Object> CreateTraced(Object value, Address* slot,
- GlobalHandleDestructionMode destruction_mode,
GlobalHandleStoreMode store_mode);
Handle<Object> CreateTraced(Address value, Address* slot,
- GlobalHandleDestructionMode destruction_mode,
GlobalHandleStoreMode store_mode);
void RecordStats(HeapStats* stats);
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 7c6d7fdda6..2a8feffc59 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -64,10 +64,6 @@ class V8ToCppGCReferencesVisitor final
isolate_(isolate),
wrapper_descriptor_(wrapper_descriptor) {}
- void VisitTracedGlobalHandle(const v8::TracedGlobal<v8::Value>&) final {
- UNREACHABLE();
- }
-
void VisitTracedReference(const v8::TracedReference<v8::Value>& value) final {
VisitHandle(value, value.WrapperClassId());
}
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index a61b89c5dc..ac0b9df4ea 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -6,7 +6,6 @@
#include "include/v8-cppgc.h"
#include "src/base/logging.h"
-#include "src/common/allow-deprecated.h"
#include "src/handles/global-handles.h"
#include "src/heap/embedder-tracing-inl.h"
#include "src/heap/gc-tracer.h"
@@ -211,13 +210,6 @@ bool DefaultEmbedderRootsHandler::IsRoot(
return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
}
-START_ALLOW_USE_DEPRECATED()
-bool DefaultEmbedderRootsHandler::IsRoot(
- const v8::TracedGlobal<v8::Value>& handle) {
- return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
-}
-END_ALLOW_USE_DEPRECATED()
-
void DefaultEmbedderRootsHandler::ResetRoot(
const v8::TracedReference<v8::Value>& handle) {
// Resetting is only called when IsRoot() returns false which
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 72b1fd90e3..55813b3168 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -27,10 +27,6 @@ class V8_EXPORT_PRIVATE DefaultEmbedderRootsHandler final
public:
bool IsRoot(const v8::TracedReference<v8::Value>& handle) final;
- START_ALLOW_USE_DEPRECATED()
- bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) final;
- END_ALLOW_USE_DEPRECATED()
-
void ResetRoot(const v8::TracedReference<v8::Value>& handle) final;
void SetTracer(EmbedderHeapTracer* tracer) { tracer_ = tracer; }
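After this change, an embedder that customizes root handling only implements the TracedReference overloads. A hypothetical handler against the slimmed-down interface (the class name and bodies are illustrative; such a handler is installed via Isolate::SetEmbedderRootsHandler):

    class MyRootsHandler final : public v8::EmbedderRootsHandler {
     public:
      bool IsRoot(const v8::TracedReference<v8::Value>& handle) final {
        return false;  // let non-tracing GCs reclaim the target
      }
      void ResetRoot(const v8::TracedReference<v8::Value>& handle) final {
        // Clear the embedder-side copy of |handle| here.
      }
    };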
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 36f04a424e..0286bde239 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -1239,7 +1239,7 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
DEFINE_DEOPT_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(EagerSoftAndBailoutDeoptCount, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(NonLazyDeoptCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(LazyDeoptCount, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 2ae72478e1..690c68de3f 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -1078,7 +1078,7 @@ class DeoptimizationData : public FixedArray {
static const int kSharedFunctionInfoIndex = 6;
static const int kInliningPositionsIndex = 7;
static const int kDeoptExitStartIndex = 8;
- static const int kEagerSoftAndBailoutDeoptCountIndex = 9;
+ static const int kNonLazyDeoptCountIndex = 9;
static const int kLazyDeoptCountIndex = 10;
static const int kFirstDeoptEntryIndex = 11;
@@ -1107,7 +1107,7 @@ class DeoptimizationData : public FixedArray {
DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
DECL_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
- DECL_ELEMENT_ACCESSORS(EagerSoftAndBailoutDeoptCount, Smi)
+ DECL_ELEMENT_ACCESSORS(NonLazyDeoptCount, Smi)
DECL_ELEMENT_ACCESSORS(LazyDeoptCount, Smi)
#undef DECL_ELEMENT_ACCESSORS
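The renamed element keeps slot 9, so only the name changes; readers of the deopt metadata now see two counters instead of three exit kinds. A sketch, assuming a `DeoptimizationData data` in hand:

    int non_lazy = data.NonLazyDeoptCount().value();  // eager and soft exits
    int lazy = data.LazyDeoptCount().value();
    int total_exits = non_lazy + lazy;  // no eager-with-resume region anymore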
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index bbe85ba823..1e46f22c04 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -599,25 +599,31 @@ size_t CpuProfiler::GetEstimatedMemoryUsage() const {
return code_observer_->GetEstimatedMemoryUsage();
}
-CpuProfilingStatus CpuProfiler::StartProfiling(
+CpuProfilingResult CpuProfiler::StartProfiling(
+ CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate) {
+ return StartProfiling(nullptr, options, std::move(delegate));
+}
+
+CpuProfilingResult CpuProfiler::StartProfiling(
const char* title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
- StartProfilingStatus status =
+ CpuProfilingResult result =
profiles_->StartProfiling(title, options, std::move(delegate));
// TODO(nicodubus): Revisit logic for whether we want to do anything different for
// kAlreadyStarted
- if (status == CpuProfilingStatus::kStarted ||
- status == CpuProfilingStatus::kAlreadyStarted) {
+ if (result.status == CpuProfilingStatus::kStarted ||
+ result.status == CpuProfilingStatus::kAlreadyStarted) {
TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
AdjustSamplingInterval();
StartProcessorIfNotStarted();
}
- return status;
+ return result;
}
-CpuProfilingStatus CpuProfiler::StartProfiling(
+CpuProfilingResult CpuProfiler::StartProfiling(
String title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
return StartProfiling(profiles_->GetName(title), options,
@@ -651,10 +657,19 @@ void CpuProfiler::StartProcessorIfNotStarted() {
}
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
+ CpuProfile* profile = profiles_->Lookup(title);
+ if (profile) {
+ return StopProfiling(profile->id());
+ }
+ return nullptr;
+}
+
+CpuProfile* CpuProfiler::StopProfiling(ProfilerId id) {
if (!is_profiling_) return nullptr;
- const bool last_profile = profiles_->IsLastProfile(title);
+ const bool last_profile = profiles_->IsLastProfileLeft(id);
if (last_profile) StopProcessor();
- CpuProfile* result = profiles_->StopProfiling(title);
+
+ CpuProfile* profile = profiles_->StopProfiling(id);
AdjustSamplingInterval();
@@ -663,7 +678,7 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
DisableLogging();
}
- return result;
+ return profile;
}
CpuProfile* CpuProfiler::StopProfiling(String title) {
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 791c8cc1a1..ae5e2d9edc 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -336,6 +336,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
static size_t GetAllProfilersMemorySize(Isolate* isolate);
using ProfilingMode = v8::CpuProfilingMode;
+ using CpuProfilingResult = v8::CpuProfilingResult;
using NamingMode = v8::CpuProfilingNamingMode;
using LoggingMode = v8::CpuProfilingLoggingMode;
using StartProfilingStatus = CpuProfilingStatus;
@@ -345,15 +346,20 @@ class V8_EXPORT_PRIVATE CpuProfiler {
void set_use_precise_sampling(bool);
void CollectSample();
size_t GetEstimatedMemoryUsage() const;
- StartProfilingStatus StartProfiling(
+ CpuProfilingResult StartProfiling(
+ CpuProfilingOptions options = {},
+ std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
+ CpuProfilingResult StartProfiling(
const char* title, CpuProfilingOptions options = {},
std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
- StartProfilingStatus StartProfiling(
+ CpuProfilingResult StartProfiling(
String title, CpuProfilingOptions options = {},
std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
CpuProfile* StopProfiling(const char* title);
CpuProfile* StopProfiling(String title);
+ CpuProfile* StopProfiling(ProfilerId id);
+
int GetProfilesCount();
CpuProfile* GetProfile(int index);
void DeleteAllProfiles();
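The profiler's surface now identifies profiles by ProfilerId rather than by title, with the title-based overloads kept as a compatibility layer. A usage sketch of the new flow, assuming a `CpuProfiler* profiler`:

    CpuProfilingResult result = profiler->StartProfiling();  // anonymous profile
    if (result.status == CpuProfilingStatus::kStarted ||
        result.status == CpuProfilingStatus::kAlreadyStarted) {
      // ... run the code to be sampled ...
      CpuProfile* profile = profiler->StopProfiling(result.id);
      // inspect |profile|; profile lifetime handling is unchanged by this patch
    }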
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index fc7e080f37..4ef7d9f010 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -570,9 +570,7 @@ void ContextFilter::OnMoveEvent(Address from_address, Address to_address) {
using v8::tracing::TracedValue;
-std::atomic<uint32_t> CpuProfile::last_id_;
-
-CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
+CpuProfile::CpuProfile(CpuProfiler* profiler, ProfilerId id, const char* title,
CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate)
: title_(title),
@@ -582,7 +580,7 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
top_down_(profiler->isolate(), profiler->code_entries()),
profiler_(profiler),
streaming_next_sample_(0),
- id_(++last_id_) {
+ id_(id) {
// The startTime timestamp is not converted to Perfetto's clock domain and
// will get out of sync with other timestamps Perfetto knows about, including
// the automatic trace event "ts" timestamp. startTime is included for
@@ -594,6 +592,9 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
"Profile", id_, "data", std::move(value));
DisallowHeapAllocation no_gc;
+ if (delegate_) {
+ delegate_->SetId(id_);
+ }
if (options_.has_filter_context()) {
i::Address raw_filter_context =
reinterpret_cast<i::Address>(options_.raw_filter_context());
@@ -891,42 +892,58 @@ size_t CodeMap::GetEstimatedMemoryUsage() const {
}
CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
- : profiler_(nullptr), current_profiles_semaphore_(1) {}
+ : profiler_(nullptr),
+ current_profiles_semaphore_(1),
+ last_id_(0),
+ isolate_(isolate) {
+ USE(isolate_);
+}
-CpuProfilingStatus CpuProfilesCollection::StartProfiling(
+CpuProfilingResult CpuProfilesCollection::StartProfiling(
const char* title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
current_profiles_semaphore_.Wait();
if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
current_profiles_semaphore_.Signal();
-
- return CpuProfilingStatus::kErrorTooManyProfilers;
+ return {
+ 0,
+ CpuProfilingStatus::kErrorTooManyProfilers,
+ };
}
- for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
- if (strcmp(profile->title(), title) == 0) {
- // Ignore attempts to start profile with the same title...
- current_profiles_semaphore_.Signal();
- // ... though return kAlreadyStarted to force it collect a sample.
- return CpuProfilingStatus::kAlreadyStarted;
+
+ if (title != nullptr) {
+ for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
+ if (profile->title() != nullptr && strcmp(profile->title(), title) == 0) {
+ // Ignore attempts to start profile with the same title...
+ current_profiles_semaphore_.Signal();
+ // ... though return kAlreadyStarted to force it to collect a sample.
+ return {
+ profile->id(),
+ CpuProfilingStatus::kAlreadyStarted,
+ };
+ }
}
}
- current_profiles_.emplace_back(
- new CpuProfile(profiler_, title, options, std::move(delegate)));
+ CpuProfile* profile = new CpuProfile(profiler_, ++last_id_, title, options,
+ std::move(delegate));
+ current_profiles_.emplace_back(profile);
current_profiles_semaphore_.Signal();
- return CpuProfilingStatus::kStarted;
+
+ return {
+ profile->id(),
+ CpuProfilingStatus::kStarted,
+ };
}
-CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
- const bool empty_title = (title[0] == '\0');
- CpuProfile* profile = nullptr;
+CpuProfile* CpuProfilesCollection::StopProfiling(ProfilerId id) {
current_profiles_semaphore_.Wait();
+ CpuProfile* profile = nullptr;
- auto it = std::find_if(current_profiles_.rbegin(), current_profiles_.rend(),
- [&](const std::unique_ptr<CpuProfile>& p) {
- return empty_title || strcmp(p->title(), title) == 0;
- });
+ auto it = std::find_if(
+ current_profiles_.rbegin(), current_profiles_.rend(),
+ [=](const std::unique_ptr<CpuProfile>& p) { return id == p->id(); });
if (it != current_profiles_.rend()) {
(*it)->FinishProfile();
@@ -935,21 +952,44 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
// Convert reverse iterator to matching forward iterator.
current_profiles_.erase(--(it.base()));
}
-
current_profiles_semaphore_.Signal();
return profile;
}
-bool CpuProfilesCollection::IsLastProfile(const char* title) {
+CpuProfile* CpuProfilesCollection::Lookup(const char* title) {
// Called from VM thread, and only it can mutate the list,
// so no locking is needed here.
- if (current_profiles_.size() != 1) return false;
- return title[0] == '\0' || strcmp(current_profiles_[0]->title(), title) == 0;
+ DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
+ if (title == nullptr) {
+ return nullptr;
+ }
+ // http://crbug/51594, edge case console.profile may provide an empty title
+ // and must not crash
+ const bool empty_title = title[0] == '\0';
+ auto it = std::find_if(
+ current_profiles_.rbegin(), current_profiles_.rend(),
+ [&](const std::unique_ptr<CpuProfile>& p) {
+ return (empty_title ||
+ (p->title() != nullptr && strcmp(p->title(), title) == 0));
+ });
+ if (it != current_profiles_.rend()) {
+ return it->get();
+ }
+
+ return nullptr;
}
+bool CpuProfilesCollection::IsLastProfileLeft(ProfilerId id) {
+ // Called from VM thread, and only it can mutate the list,
+ // so no locking is needed here.
+ DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
+ if (current_profiles_.size() != 1) return false;
+ return id == current_profiles_[0]->id();
+}
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
// Called from VM thread for a completed profile.
+ DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
auto pos =
std::find_if(finished_profiles_.begin(), finished_profiles_.end(),
[&](const std::unique_ptr<CpuProfile>& finished_profile) {
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 85402564ff..d8d38ce034 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -411,7 +411,8 @@ class CpuProfile {
};
V8_EXPORT_PRIVATE CpuProfile(
- CpuProfiler* profiler, const char* title, CpuProfilingOptions options,
+ CpuProfiler* profiler, ProfilerId id, const char* title,
+ CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
CpuProfile(const CpuProfile&) = delete;
CpuProfile& operator=(const CpuProfile&) = delete;
@@ -440,6 +441,7 @@ class CpuProfile {
base::TimeTicks end_time() const { return end_time_; }
CpuProfiler* cpu_profiler() const { return profiler_; }
ContextFilter& context_filter() { return context_filter_; }
+ ProfilerId id() const { return id_; }
void UpdateTicksScale();
@@ -458,17 +460,15 @@ class CpuProfile {
ProfileTree top_down_;
CpuProfiler* const profiler_;
size_t streaming_next_sample_;
- uint32_t id_;
+ const ProfilerId id_;
// Number of microseconds worth of profiler ticks that should elapse before
// the next sample is recorded.
base::TimeDelta next_sample_delta_;
-
- static std::atomic<uint32_t> last_id_;
};
class CpuProfileMaxSamplesCallbackTask : public v8::Task {
public:
- CpuProfileMaxSamplesCallbackTask(
+ explicit CpuProfileMaxSamplesCallbackTask(
std::unique_ptr<DiscardedSamplesDelegate> delegate)
: delegate_(std::move(delegate)) {}
@@ -540,16 +540,18 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
CpuProfilesCollection& operator=(const CpuProfilesCollection&) = delete;
void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
- CpuProfilingStatus StartProfiling(
- const char* title, CpuProfilingOptions options = {},
+ CpuProfilingResult StartProfiling(
+ const char* title = nullptr, CpuProfilingOptions options = {},
std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
- CpuProfile* StopProfiling(const char* title);
+ CpuProfile* StopProfiling(ProfilerId id);
+ bool IsLastProfileLeft(ProfilerId id);
+ CpuProfile* Lookup(const char* title);
+
std::vector<std::unique_ptr<CpuProfile>>* profiles() {
return &finished_profiles_;
}
const char* GetName(Name name) { return resource_names_.GetName(name); }
- bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
// Finds a common sampling interval dividing each CpuProfile's interval,
@@ -579,6 +581,8 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
// Accessed by VM thread and profile generator thread.
std::vector<std::unique_ptr<CpuProfile>> current_profiles_;
base::Semaphore current_profiles_semaphore_;
+ ProfilerId last_id_;
+ Isolate* isolate_;
};
} // namespace internal
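Taken together, Lookup() plus the id-based StopProfiling() reproduce the old title-based behavior, including the empty-title edge case that matches the most recent profile. A hypothetical helper showing the equivalence (not part of the patch; it mirrors what CpuProfiler::StopProfiling(const char*) does above):

    CpuProfile* StopByTitle(CpuProfilesCollection* profiles, const char* title) {
      CpuProfile* profile = profiles->Lookup(title);
      return profile ? profiles->StopProfiling(profile->id()) : nullptr;
    }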
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index b0ff48ced7..b46a64e2cc 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -1032,22 +1032,15 @@
'test-debug/TerminateOnResumeFromOtherThread': [SKIP],
'test-debug/TerminateOnResumeRunJavaScriptAtBreakpoint': [SKIP],
'test-debug/TerminateOnResumeRunMicrotaskAtBreakpoint': [SKIP],
+ 'test-embedder-tracing/BasicTracedReference': [SKIP],
'test-embedder-tracing/GarbageCollectionForTesting': [SKIP],
'test-embedder-tracing/NotifyEmptyStack': [SKIP],
- 'test-embedder-tracing/TracedGlobalCopy': [SKIP],
- 'test-embedder-tracing/TracedGlobalCopyNoDestructor': [SKIP],
- 'test-embedder-tracing/TracedGlobalCopyWithDestructor': [SKIP],
- 'test-embedder-tracing/TracedGlobalDestructor': [SKIP],
- 'test-embedder-tracing/TracedGlobalInStdUnorderedMap': [SKIP],
- 'test-embedder-tracing/TracedGlobalInStdVector': [SKIP],
- 'test-embedder-tracing/TracedGlobalMove': [SKIP],
- 'test-embedder-tracing/TracedGlobalNoDestructor': [SKIP],
- 'test-embedder-tracing/TracedGlobalSetFinalizationCallbackMarkSweep': [SKIP],
- 'test-embedder-tracing/TracedGlobalToUnmodifiedJSObjectDiesOnMarkSweep': [SKIP],
+ 'test-embedder-tracing/TracedReferenceCopyReferences': [SKIP],
'test-embedder-tracing/TracedReferenceCopy': [SKIP],
'test-embedder-tracing/TracedReferenceHandlesDoNotLeak': [SKIP],
'test-embedder-tracing/TracedReferenceHandlesMarking': [SKIP],
'test-embedder-tracing/TracedReferenceMove': [SKIP],
+ 'test-embedder-tracing/TracedReferenceToUnmodifiedJSObjectDiesOnMarkSweep': [SKIP],
'test-embedder-tracing/TracingInEphemerons': [SKIP],
'test-embedder-tracing/TracingInRevivedSubgraph': [SKIP],
'test-embedder-tracing/V8RegisteringEmbedderReference': [SKIP],
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 7acdc6b0aa..45e025996f 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -70,19 +70,11 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
embedder_fields.begin(), embedder_fields.end());
}
- void AddReferenceForTracing(v8::TracedGlobal<v8::Value>* global) {
- to_register_with_v8_.push_back(global);
- }
-
void AddReferenceForTracing(v8::TracedReference<v8::Value>* ref) {
to_register_with_v8_references_.push_back(ref);
}
bool AdvanceTracing(double deadline_in_ms) final {
- for (auto global : to_register_with_v8_) {
- RegisterEmbedderReference(global->As<v8::Data>());
- }
- to_register_with_v8_.clear();
for (auto ref : to_register_with_v8_references_) {
RegisterEmbedderReference(ref->As<v8::Data>());
}
@@ -90,7 +82,7 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
return true;
}
- bool IsTracingDone() final { return to_register_with_v8_.empty(); }
+ bool IsTracingDone() final { return to_register_with_v8_references_.empty(); }
void TracePrologue(EmbedderHeapTracer::TraceFlags) final {
if (prologue_behavior_ == TracePrologueBehavior::kCallV8WriteBarrier) {
@@ -112,21 +104,31 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
return false;
}
- void ConsiderTracedGlobalAsRoot(bool value) {
- consider_traced_global_as_root_ = value;
+ void DoNotConsiderAsRootForScavenge(v8::TracedReference<v8::Value>* handle) {
+ handle->SetWrapperClassId(17);
+ non_root_handles_.push_back(handle);
+ }
+
+ bool IsRootForNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) final {
+ return handle.WrapperClassId() != 17;
}
- bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) final {
- return consider_traced_global_as_root_;
+ void ResetHandleInNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) final {
+ for (auto* non_root_handle : non_root_handles_) {
+ if (*non_root_handle == handle) {
+ non_root_handle->Reset();
+ }
+ }
}
private:
std::vector<std::pair<void*, void*>> registered_from_v8_;
- std::vector<v8::TracedGlobal<v8::Value>*> to_register_with_v8_;
std::vector<v8::TracedReference<v8::Value>*> to_register_with_v8_references_;
- bool consider_traced_global_as_root_ = true;
TracePrologueBehavior prologue_behavior_ = TracePrologueBehavior::kNoop;
v8::Global<v8::Array> array_;
+ std::vector<v8::TracedReference<v8::Value>*> non_root_handles_;
};
} // namespace
@@ -163,16 +165,16 @@ TEST(EmbedderRegisteringV8Reference) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- v8::TracedGlobal<v8::Value> g;
+ auto handle = std::make_unique<v8::TracedReference<v8::Value>>();
{
v8::HandleScope inner_scope(isolate);
v8::Local<v8::Value> o =
v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
- g.Reset(isolate, o);
+ handle->Reset(isolate, o);
}
- tracer.AddReferenceForTracing(&g);
+ tracer.AddReferenceForTracing(handle.get());
CcTest::CollectGarbage(i::OLD_SPACE);
- CHECK(!g.IsEmpty());
+ CHECK(!handle->IsEmpty());
}
namespace {
@@ -299,12 +301,12 @@ TEST(FinalizeTracingWhenMarking) {
namespace {
void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- v8::TracedGlobal<v8::Object>* global) {
+ v8::TracedReference<v8::Object>* handle) {
v8::HandleScope scope(isolate);
v8::Local<v8::Object> object(v8::Object::New(isolate));
CHECK(!object.IsEmpty());
- *global = v8::TracedGlobal<v8::Object>(isolate, object);
- CHECK(!global->IsEmpty());
+ *handle = v8::TracedReference<v8::Object>(isolate, object);
+ CHECK(!handle->IsEmpty());
}
template <typename T>
@@ -320,92 +322,47 @@ void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
enum class SurvivalMode { kSurvives, kDies };
-template <typename ModifierFunction, typename ConstructTracedGlobalFunction>
-void TracedGlobalTest(v8::Isolate* isolate,
- ConstructTracedGlobalFunction construct_function,
- ModifierFunction modifier_function, void (*gc_function)(),
- SurvivalMode survives) {
+template <typename ModifierFunction, typename ConstructTracedReferenceFunction>
+void TracedReferenceTest(v8::Isolate* isolate,
+ ConstructTracedReferenceFunction construct_function,
+ ModifierFunction modifier_function,
+ void (*gc_function)(), SurvivalMode survives) {
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
+ auto* global_handles =
+ reinterpret_cast<i::Isolate*>(isolate)->global_handles();
- auto global = std::make_unique<v8::TracedGlobal<v8::Object>>();
- construct_function(isolate, context, global.get());
- CHECK(InCorrectGeneration(isolate, *global));
- modifier_function(*global);
+ const size_t initial_count = global_handles->handles_count();
+ auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
+ construct_function(isolate, context, handle.get());
+ CHECK(InCorrectGeneration(isolate, *handle));
+ modifier_function(*handle);
+ const size_t after_modification_count = global_handles->handles_count();
gc_function();
- CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !global->IsEmpty());
- CHECK_IMPLIES(survives == SurvivalMode::kDies, global->IsEmpty());
+  // Cannot check the handle directly, as it is not explicitly cleared by the
+  // GC. Instead, check the handles count.
+ CHECK_IMPLIES(survives == SurvivalMode::kSurvives,
+ after_modification_count == global_handles->handles_count());
+ CHECK_IMPLIES(survives == SurvivalMode::kDies,
+ initial_count == global_handles->handles_count());
}
} // namespace
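Because a TracedReference node is not guaranteed to read as empty after a full GC, the helper above infers survival from the global-handle count. A condensed sketch of that bookkeeping, reusing only names visible in this diff (illustrative, not part of the patch):

    // Detect reclamation by comparing global-handle counts instead of
    // calling IsEmpty() on the traced handle.
    i::GlobalHandles* global_handles =
        reinterpret_cast<i::Isolate*>(isolate)->global_handles();
    const size_t before = global_handles->handles_count();
    auto ref = std::make_unique<v8::TracedReference<v8::Object>>();
    ConstructJSObject(isolate, context, ref.get());
    InvokeMarkSweep();
    // Counts match again only if the handle's node was reclaimed; a
    // surviving object keeps the count at before + 1.
    CHECK_EQ(before, global_handles->handles_count());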
-TEST(TracedGlobalReset) {
+TEST(TracedReferenceReset) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::TracedGlobal<v8::Object> traced;
- ConstructJSObject(isolate, isolate->GetCurrentContext(), &traced);
- CHECK(!traced.IsEmpty());
- traced.Reset();
- CHECK(traced.IsEmpty());
-}
-
-TEST(TracedGlobalInStdVector) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
-
- std::vector<v8::TracedGlobal<v8::Object>> vec;
- {
- v8::HandleScope new_scope(isolate);
- vec.emplace_back(isolate, v8::Object::New(isolate));
- }
- CHECK(!vec[0].IsEmpty());
- InvokeMarkSweep();
- CHECK(vec[0].IsEmpty());
-}
-
-TEST(TracedGlobalCopyWithDestructor) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope outer_scope(isolate);
- i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
-
- const size_t initial_count = global_handles->handles_count();
- auto global1 = std::make_unique<v8::TracedGlobal<v8::Object>>();
- {
- v8::HandleScope scope(isolate);
- global1->Reset(isolate, v8::Object::New(isolate));
- }
- auto global2 = std::make_unique<v8::TracedGlobal<v8::Object>>(*global1);
- auto global3 = std::make_unique<v8::TracedGlobal<v8::Object>>();
- *global3 = *global2;
- CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!global1->IsEmpty());
- CHECK_EQ(*global1, *global2);
- CHECK_EQ(*global2, *global3);
- {
- v8::HandleScope scope(isolate);
- auto tmp = v8::Local<v8::Object>::New(isolate, *global3);
- CHECK(!tmp.IsEmpty());
- InvokeMarkSweep();
- }
- CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!global1->IsEmpty());
- CHECK_EQ(*global1, *global2);
- CHECK_EQ(*global2, *global3);
- InvokeMarkSweep();
- CHECK_EQ(initial_count, global_handles->handles_count());
- CHECK(global1->IsEmpty());
- CHECK_EQ(*global1, *global2);
- CHECK_EQ(*global2, *global3);
+ v8::TracedReference<v8::Object> handle;
+ ConstructJSObject(isolate, isolate->GetCurrentContext(), &handle);
+ CHECK(!handle.IsEmpty());
+ handle.Reset();
+ CHECK(handle.IsEmpty());
}
-TEST(TracedGlobalCopyNoDestructor) {
+TEST(TracedReferenceCopyReferences) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
@@ -413,127 +370,112 @@ TEST(TracedGlobalCopyNoDestructor) {
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
const size_t initial_count = global_handles->handles_count();
- auto global1 = std::make_unique<v8::TracedReference<v8::Value>>();
+ auto handle1 = std::make_unique<v8::TracedReference<v8::Value>>();
{
v8::HandleScope scope(isolate);
- global1->Reset(isolate, v8::Object::New(isolate));
+ handle1->Reset(isolate, v8::Object::New(isolate));
}
- auto global2 = std::make_unique<v8::TracedReference<v8::Value>>(*global1);
- auto global3 = std::make_unique<v8::TracedReference<v8::Value>>();
- *global3 = *global2;
+ auto handle2 = std::make_unique<v8::TracedReference<v8::Value>>(*handle1);
+ auto handle3 = std::make_unique<v8::TracedReference<v8::Value>>();
+ *handle3 = *handle2;
CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!global1->IsEmpty());
- CHECK_EQ(*global1, *global2);
- CHECK_EQ(*global2, *global3);
+ CHECK(!handle1->IsEmpty());
+ CHECK_EQ(*handle1, *handle2);
+ CHECK_EQ(*handle2, *handle3);
{
v8::HandleScope scope(isolate);
- auto tmp = v8::Local<v8::Value>::New(isolate, *global3);
+ auto tmp = v8::Local<v8::Value>::New(isolate, *handle3);
CHECK(!tmp.IsEmpty());
InvokeMarkSweep();
}
CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!global1->IsEmpty());
- CHECK_EQ(*global1, *global2);
- CHECK_EQ(*global2, *global3);
+ CHECK(!handle1->IsEmpty());
+ CHECK_EQ(*handle1, *handle2);
+ CHECK_EQ(*handle2, *handle3);
InvokeMarkSweep();
CHECK_EQ(initial_count, global_handles->handles_count());
}
-TEST(TracedGlobalInStdUnorderedMap) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
-
- std::unordered_map<int, v8::TracedGlobal<v8::Object>> map;
- {
- v8::HandleScope new_scope(isolate);
- map.emplace(std::piecewise_construct, std::forward_as_tuple(1),
- std::forward_as_tuple(isolate, v8::Object::New(isolate)));
- }
- CHECK(!map[1].IsEmpty());
- InvokeMarkSweep();
- CHECK(map[1].IsEmpty());
-}
-
-TEST(TracedGlobalToUnmodifiedJSObjectDiesOnMarkSweep) {
+TEST(TracedReferenceToUnmodifiedJSObjectDiesOnMarkSweep) {
// When stressing incremental marking, a write barrier may keep the object
// alive.
if (FLAG_stress_incremental_marking) return;
CcTest::InitializeVM();
- TracedGlobalTest(
+ TracedReferenceTest(
CcTest::isolate(), ConstructJSObject,
- [](const TracedGlobal<v8::Object>& global) {}, [] { InvokeMarkSweep(); },
+ [](const TracedReference<v8::Object>&) {}, [] { InvokeMarkSweep(); },
SurvivalMode::kDies);
}
-TEST(TracedGlobalToUnmodifiedJSObjectSurvivesMarkSweepWhenHeldAliveOtherwise) {
+TEST(TracedReferenceToUnmodifiedJSObjectSurvivesMarkSweepWhenHeldAlive) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
v8::Global<v8::Object> strong_global;
- TracedGlobalTest(
+ TracedReferenceTest(
CcTest::isolate(), ConstructJSObject,
- [isolate, &strong_global](const TracedGlobal<v8::Object>& global) {
+ [isolate, &strong_global](const TracedReference<v8::Object>& handle) {
v8::HandleScope scope(isolate);
- strong_global = v8::Global<v8::Object>(isolate, global.Get(isolate));
+ strong_global = v8::Global<v8::Object>(isolate, handle.Get(isolate));
},
[]() { InvokeMarkSweep(); }, SurvivalMode::kSurvives);
}
-TEST(TracedGlobalToUnmodifiedJSObjectSurvivesScavenge) {
+TEST(TracedReferenceToUnmodifiedJSObjectSurvivesScavenge) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc;
CcTest::InitializeVM();
- TracedGlobalTest(
+ TracedReferenceTest(
CcTest::isolate(), ConstructJSObject,
- [](const TracedGlobal<v8::Object>& global) {}, []() { InvokeScavenge(); },
+ [](const TracedReference<v8::Object>&) {}, []() { InvokeScavenge(); },
SurvivalMode::kSurvives);
}
-TEST(TracedGlobalToUnmodifiedJSObjectSurvivesScavengeWhenExcludedFromRoots) {
+TEST(TracedReferenceToUnmodifiedJSObjectSurvivesScavengeWhenExcludedFromRoots) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- tracer.ConsiderTracedGlobalAsRoot(false);
- TracedGlobalTest(
+ TracedReferenceTest(
CcTest::isolate(), ConstructJSObject,
- [](const TracedGlobal<v8::Object>& global) {}, []() { InvokeScavenge(); },
- SurvivalMode::kSurvives);
+ [&tracer](const TracedReference<v8::Object>& handle) {
+ tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
+ },
+ []() { InvokeScavenge(); }, SurvivalMode::kSurvives);
}
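The lambda above tags the handle with wrapper class id 17, which the TestEmbedderHeapTracer at the top of this diff uses both to drop the handle from the scavenger's root set and to find it again in ResetHandleInNonTracingGC. A condensed sketch of that embedder contract, assuming the EmptyEmbedderHeapTracer stub that appears later in this file (names other than the v8::EmbedderHeapTracer overrides are illustrative):

    class NonRootingTracer final : public EmptyEmbedderHeapTracer {
     public:
      // Tag a handle so IsRootForNonTracingGC() excludes it from the roots.
      void MakeNonRoot(v8::TracedReference<v8::Value>* handle) {
        handle->SetWrapperClassId(17);
        non_roots_.push_back(handle);
      }
      bool IsRootForNonTracingGC(
          const v8::TracedReference<v8::Value>& handle) final {
        return handle.WrapperClassId() != 17;
      }
      // Called for dropped non-root handles; the embedder clears its own
      // copy, since V8 cannot reset a handle the embedder may have moved.
      void ResetHandleInNonTracingGC(
          const v8::TracedReference<v8::Value>& handle) final {
        for (auto* non_root : non_roots_) {
          if (*non_root == handle) non_root->Reset();
        }
      }

     private:
      std::vector<v8::TracedReference<v8::Value>*> non_roots_;
    };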
-TEST(TracedGlobalToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
+TEST(TracedReferenceToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- tracer.ConsiderTracedGlobalAsRoot(true);
- TracedGlobalTest(
- CcTest::isolate(), ConstructJSApiObject<TracedGlobal<v8::Object>>,
- [](const TracedGlobal<v8::Object>& global) {}, []() { InvokeScavenge(); },
+ TracedReferenceTest(
+ CcTest::isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
+ [](const TracedReference<v8::Object>&) {}, []() { InvokeScavenge(); },
SurvivalMode::kSurvives);
}
-TEST(TracedGlobalToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
+TEST(
+ TracedReferenceToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- tracer.ConsiderTracedGlobalAsRoot(false);
- TracedGlobalTest(
- CcTest::isolate(), ConstructJSApiObject<TracedGlobal<v8::Object>>,
- [](const TracedGlobal<v8::Object>& global) {}, []() { InvokeScavenge(); },
- SurvivalMode::kDies);
+ TracedReferenceTest(
+ CcTest::isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
+ [&tracer](const TracedReference<v8::Object>& handle) {
+ tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
+ },
+ []() { InvokeScavenge(); }, SurvivalMode::kDies);
}
-TEST(TracedGlobalWrapperClassId) {
+TEST(TracedReferenceWrapperClassId) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
@@ -541,7 +483,7 @@ TEST(TracedGlobalWrapperClassId) {
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- v8::TracedGlobal<v8::Object> traced;
+ v8::TracedReference<v8::Object> traced;
ConstructJSObject(isolate, isolate->GetCurrentContext(), &traced);
CHECK_EQ(0, traced.WrapperClassId());
traced.SetWrapperClassId(17);
@@ -599,30 +541,14 @@ TEST(TracedReferenceHandlesDoNotLeak) {
CHECK_EQ(initial_count, final_count + 1);
}
-TEST(TracedGlobalHandlesAreRetained) {
- // TracedGlobal handles are cleared by the destructor of the embedder object.
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::TracedGlobal<v8::Value> global;
- global.Reset(isolate, v8::Undefined(isolate));
- i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
- const size_t initial_count = global_handles->handles_count();
- // We need two GCs because handles are black allocated.
- InvokeMarkSweep();
- InvokeMarkSweep();
- const size_t final_count = global_handles->handles_count();
- CHECK_EQ(initial_count, final_count);
-}
-
namespace {
-class TracedGlobalVisitor final
+class TracedReferenceVisitor final
: public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
public:
- ~TracedGlobalVisitor() override = default;
- void VisitTracedGlobalHandle(const TracedGlobal<Value>& value) final {
+ ~TracedReferenceVisitor() override = default;
+
+ void VisitTracedReference(const TracedReference<Value>& value) final {
if (value.WrapperClassId() == 57) {
count_++;
}
@@ -636,7 +562,7 @@ class TracedGlobalVisitor final
} // namespace
-TEST(TracedGlobalIteration) {
+TEST(TracedReferenceIteration) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
@@ -644,11 +570,11 @@ TEST(TracedGlobalIteration) {
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- auto traced = std::make_unique<v8::TracedGlobal<v8::Object>>();
- ConstructJSObject(isolate, isolate->GetCurrentContext(), traced.get());
- CHECK(!traced->IsEmpty());
- traced->SetWrapperClassId(57);
- TracedGlobalVisitor visitor;
+ auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
+ ConstructJSObject(isolate, isolate->GetCurrentContext(), handle.get());
+ CHECK(!handle->IsEmpty());
+ handle->SetWrapperClassId(57);
+ TracedReferenceVisitor visitor;
{
v8::HandleScope new_scope(isolate);
tracer.IterateTracedGlobalHandles(&visitor);
@@ -656,64 +582,6 @@ TEST(TracedGlobalIteration) {
CHECK_EQ(1, visitor.count());
}
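IterateTracedGlobalHandles() above now dispatches to VisitTracedReference instead of the removed VisitTracedGlobalHandle hook. A minimal debugging-oriented visitor under the same API (a sketch, not part of the patch):

    // Dump the wrapper class id of every live traced handle.
    class DumpVisitor final
        : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
      void VisitTracedReference(
          const v8::TracedReference<v8::Value>& value) final {
        printf("traced handle with class id %u\n",
               static_cast<unsigned>(value.WrapperClassId()));
      }
    };
    // Usage, mirroring the test above:
    //   DumpVisitor visitor;
    //   tracer.IterateTracedGlobalHandles(&visitor);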
-namespace {
-
-void FinalizationCallback(const WeakCallbackInfo<void>& data) {
- v8::TracedGlobal<v8::Object>* traced =
- reinterpret_cast<v8::TracedGlobal<v8::Object>*>(data.GetParameter());
- CHECK_EQ(reinterpret_cast<void*>(0x4), data.GetInternalField(0));
- CHECK_EQ(reinterpret_cast<void*>(0x8), data.GetInternalField(1));
- traced->Reset();
-}
-
-} // namespace
-
-TEST(TracedGlobalSetFinalizationCallbackScavenge) {
- if (FLAG_single_generation) return;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- TestEmbedderHeapTracer tracer;
- tracer.ConsiderTracedGlobalAsRoot(false);
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
-
- auto traced = std::make_unique<v8::TracedGlobal<v8::Object>>();
- ConstructJSApiObject(isolate, isolate->GetCurrentContext(), traced.get());
- CHECK(!traced->IsEmpty());
- {
- v8::HandleScope new_scope(isolate);
- auto local = traced->Get(isolate);
- local->SetAlignedPointerInInternalField(0, reinterpret_cast<void*>(0x4));
- local->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0x8));
- }
- traced->SetFinalizationCallback(traced.get(), FinalizationCallback);
- heap::InvokeScavenge();
- CHECK(traced->IsEmpty());
-}
-
-TEST(TracedGlobalSetFinalizationCallbackMarkSweep) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
-
- auto traced = std::make_unique<v8::TracedGlobal<v8::Object>>();
- ConstructJSApiObject(isolate, isolate->GetCurrentContext(), traced.get());
- CHECK(!traced->IsEmpty());
- {
- v8::HandleScope new_scope(isolate);
- auto local = traced->Get(isolate);
- local->SetAlignedPointerInInternalField(0, reinterpret_cast<void*>(0x4));
- local->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0x8));
- }
- traced->SetFinalizationCallback(traced.get(), FinalizationCallback);
- heap::InvokeMarkSweep();
- CHECK(traced->IsEmpty());
-}
-
TEST(TracePrologueCallingIntoV8WriteBarrier) {
// Regression test: https://crbug.com/940003
if (!FLAG_incremental_marking) return;
@@ -736,34 +604,7 @@ TEST(TracePrologueCallingIntoV8WriteBarrier) {
heap::InvokeMarkSweep();
}
-TEST(TracedGlobalWithDestructor) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
-
- const size_t initial_count = global_handles->handles_count();
- auto* traced = new v8::TracedGlobal<v8::Object>();
- {
- v8::HandleScope new_scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- CHECK(traced->IsEmpty());
- *traced = v8::TracedGlobal<v8::Object>(isolate, object);
- CHECK(!traced->IsEmpty());
- CHECK_EQ(initial_count + 1, global_handles->handles_count());
- }
- delete traced;
- CHECK_EQ(initial_count, global_handles->handles_count());
- // GC should not need to clear the handle.
- heap::InvokeMarkSweep();
- CHECK_EQ(initial_count, global_handles->handles_count());
-}
-
-TEST(TracedGlobalNoDestructor) {
+TEST(BasicTracedReference) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
@@ -807,24 +648,7 @@ class EmptyEmbedderHeapTracer : public v8::EmbedderHeapTracer {
};
// EmbedderHeapTracer that can optimize Scavenger handling when used with
-// TraceGlobal handles that have destructors.
-class EmbedderHeapTracerDestructorNonTracingClearing final
- : public EmptyEmbedderHeapTracer {
- public:
- explicit EmbedderHeapTracerDestructorNonTracingClearing(
- uint16_t class_id_to_optimize)
- : class_id_to_optimize_(class_id_to_optimize) {}
-
- bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) final {
- return handle.WrapperClassId() != class_id_to_optimize_;
- }
-
- private:
- uint16_t class_id_to_optimize_;
-};
-
-// EmbedderHeapTracer that can optimize Scavenger handling when used with
-// TraceGlobal handles without destructors.
+// TracedReference.
class EmbedderHeapTracerNoDestructorNonTracingClearing final
: public EmptyEmbedderHeapTracer {
public:
@@ -877,33 +701,7 @@ void SetupOptimizedAndNonOptimizedHandle(v8::Isolate* isolate,
} // namespace
-TEST(TracedGlobalDestructorReclaimedOnScavenge) {
- if (FLAG_single_generation) return;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- constexpr uint16_t kClassIdToOptimize = 17;
- EmbedderHeapTracerDestructorNonTracingClearing tracer(kClassIdToOptimize);
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
-
- const size_t initial_count = global_handles->handles_count();
- auto* optimized_handle = new v8::TracedGlobal<v8::Object>();
- auto* non_optimized_handle = new v8::TracedGlobal<v8::Object>();
- SetupOptimizedAndNonOptimizedHandle(isolate, kClassIdToOptimize,
- optimized_handle, non_optimized_handle);
- CHECK_EQ(initial_count + 2, global_handles->handles_count());
- heap::InvokeScavenge();
- CHECK_EQ(initial_count + 1, global_handles->handles_count());
- CHECK(optimized_handle->IsEmpty());
- delete optimized_handle;
- CHECK(!non_optimized_handle->IsEmpty());
- delete non_optimized_handle;
- CHECK_EQ(initial_count, global_handles->handles_count());
-}
-
-TEST(TracedGlobalNoDestructorReclaimedOnScavenge) {
+TEST(TracedReferenceNoDestructorReclaimedOnScavenge) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc;
CcTest::InitializeVM();
@@ -999,13 +797,13 @@ enum class TargetHandling {
kInitializedOldGen
};
-template <typename T>
V8_NOINLINE void StackToHeapTest(TestEmbedderHeapTracer* tracer, Operation op,
TargetHandling target_handling) {
v8::Isolate* isolate = CcTest::isolate();
v8::Global<v8::Object> observer;
- T stack_handle;
- T* heap_handle = new T();
+ v8::TracedReference<v8::Value> stack_handle;
+ v8::TracedReference<v8::Value>* heap_handle =
+ new v8::TracedReference<v8::Value>();
if (target_handling != TargetHandling::kNonInitialized) {
v8::HandleScope scope(isolate);
v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
@@ -1040,13 +838,13 @@ V8_NOINLINE void StackToHeapTest(TestEmbedderHeapTracer* tracer, Operation op,
delete heap_handle;
}
-template <typename T>
V8_NOINLINE void HeapToStackTest(TestEmbedderHeapTracer* tracer, Operation op,
TargetHandling target_handling) {
v8::Isolate* isolate = CcTest::isolate();
v8::Global<v8::Object> observer;
- T stack_handle;
- T* heap_handle = new T();
+ v8::TracedReference<v8::Value> stack_handle;
+ v8::TracedReference<v8::Value>* heap_handle =
+ new v8::TracedReference<v8::Value>();
if (target_handling != TargetHandling::kNonInitialized) {
v8::HandleScope scope(isolate);
v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
@@ -1081,13 +879,12 @@ V8_NOINLINE void HeapToStackTest(TestEmbedderHeapTracer* tracer, Operation op,
delete heap_handle;
}
-template <typename T>
V8_NOINLINE void StackToStackTest(TestEmbedderHeapTracer* tracer, Operation op,
TargetHandling target_handling) {
v8::Isolate* isolate = CcTest::isolate();
v8::Global<v8::Object> observer;
- T stack_handle1;
- T stack_handle2;
+ v8::TracedReference<v8::Value> stack_handle1;
+ v8::TracedReference<v8::Value> stack_handle2;
if (target_handling != TargetHandling::kNonInitialized) {
v8::HandleScope scope(isolate);
v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
@@ -1120,7 +917,6 @@ V8_NOINLINE void StackToStackTest(TestEmbedderHeapTracer* tracer, Operation op,
CHECK(observer.IsEmpty());
}
-template <typename T>
V8_NOINLINE void TracedReferenceCleanedTest(TestEmbedderHeapTracer* tracer) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -1129,7 +925,7 @@ V8_NOINLINE void TracedReferenceCleanedTest(TestEmbedderHeapTracer* tracer) {
const size_t before =
CcTest::i_isolate()->global_handles()->NumberOfOnStackHandlesForTesting();
for (int i = 0; i < 100; i++) {
- T stack_handle;
+ v8::TracedReference<v8::Value> stack_handle;
stack_handle.Reset(isolate, object);
}
CHECK_EQ(before + 1, CcTest::i_isolate()
@@ -1137,27 +933,6 @@ V8_NOINLINE void TracedReferenceCleanedTest(TestEmbedderHeapTracer* tracer) {
->NumberOfOnStackHandlesForTesting());
}
-V8_NOINLINE void TracedGlobalDestructorTest(TestEmbedderHeapTracer* tracer) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Global<v8::Object> observer;
- {
- v8::TracedGlobal<v8::Value> stack_handle;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- stack_handle.Reset(isolate, object);
- observer.Reset(isolate, object);
- observer.SetWeak();
- }
- CHECK(!observer.IsEmpty());
- heap::InvokeMarkSweep();
- CHECK(!observer.IsEmpty());
- }
- heap::InvokeMarkSweep();
- CHECK(observer.IsEmpty());
-}
-
} // namespace
TEST(TracedReferenceOnStack) {
@@ -1170,16 +945,6 @@ TEST(TracedReferenceOnStack) {
OnStackTest<v8::TracedReference<v8::Value>>(&tracer);
}
-TEST(TracedGlobalOnStack) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- OnStackTest<v8::TracedGlobal<v8::Value>>(&tracer);
-}
-
TEST(TracedReferenceCleaned) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
@@ -1187,139 +952,55 @@ TEST(TracedReferenceCleaned) {
heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
&tracer);
tracer.SetStackStart(&manual_gc);
- TracedReferenceCleanedTest<v8::TracedReference<v8::Value>>(&tracer);
-}
-
-TEST(TracedGlobalCleaned) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- TracedReferenceCleanedTest<v8::TracedGlobal<v8::Value>>(&tracer);
+ TracedReferenceCleanedTest(&tracer);
}
TEST(TracedReferenceMove) {
- using ReferenceType = v8::TracedReference<v8::Value>;
ManualGCScope manual_gc;
CcTest::InitializeVM();
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
&tracer);
tracer.SetStackStart(&manual_gc);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- StackToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- StackToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
+ StackToHeapTest(&tracer, Operation::kMove, TargetHandling::kNonInitialized);
+ StackToHeapTest(&tracer, Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ StackToHeapTest(&tracer, Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+ HeapToStackTest(&tracer, Operation::kMove, TargetHandling::kNonInitialized);
+ HeapToStackTest(&tracer, Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ HeapToStackTest(&tracer, Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+ StackToStackTest(&tracer, Operation::kMove, TargetHandling::kNonInitialized);
+ StackToStackTest(&tracer, Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ StackToStackTest(&tracer, Operation::kMove,
+ TargetHandling::kInitializedOldGen);
}
TEST(TracedReferenceCopy) {
- using ReferenceType = v8::TracedReference<v8::Value>;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- StackToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- StackToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
-}
-
-TEST(TracedGlobalMove) {
- using ReferenceType = v8::TracedGlobal<v8::Value>;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- StackToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- StackToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest<ReferenceType>(&tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
-}
-
-TEST(TracedGlobalCopy) {
- using ReferenceType = v8::TracedGlobal<v8::Value>;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- StackToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- StackToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest<ReferenceType>(&tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
-}
-
-TEST(TracedGlobalDestructor) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
&tracer);
tracer.SetStackStart(&manual_gc);
- TracedGlobalDestructorTest(&tracer);
+ StackToHeapTest(&tracer, Operation::kCopy, TargetHandling::kNonInitialized);
+ StackToHeapTest(&tracer, Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ StackToHeapTest(&tracer, Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+ HeapToStackTest(&tracer, Operation::kCopy, TargetHandling::kNonInitialized);
+ HeapToStackTest(&tracer, Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ HeapToStackTest(&tracer, Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+ StackToStackTest(&tracer, Operation::kCopy, TargetHandling::kNonInitialized);
+ StackToStackTest(&tracer, Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ StackToStackTest(&tracer, Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
}
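With the template parameter dropped from these helpers, the move/copy matrix runs against v8::TracedReference only. The operations it drives boil down to plain assignment between stack and heap slots; a minimal sketch, assuming an initialized isolate as in the tests:

    // kMove and kCopy between a stack slot and a heap slot.
    v8::TracedReference<v8::Value> stack_handle;
    auto* heap_handle = new v8::TracedReference<v8::Value>();
    {
      v8::HandleScope scope(isolate);
      stack_handle.Reset(isolate, v8::Object::New(isolate));
    }
    *heap_handle = std::move(stack_handle);  // StackToHeap, Operation::kMove
    // Operation::kCopy leaves both handles pointing at the object.
    v8::TracedReference<v8::Value> copied(*heap_handle);
    delete heap_handle;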
TEST(NotifyEmptyStack) {
diff --git a/deps/v8/test/cctest/test-access-checks.cc b/deps/v8/test/cctest/test-access-checks.cc
index d89039dcb1..28a06cdba9 100644
--- a/deps/v8/test/cctest/test-access-checks.cc
+++ b/deps/v8/test/cctest/test-access-checks.cc
@@ -294,9 +294,9 @@ TEST(AccessCheckWithInterceptor) {
IndexedEnumerator));
global_template->SetNativeDataProperty(
v8_str("cross_context_int"), GetCrossContextInt, SetCrossContextInt);
- global_template->SetNativeDataProperty(
- v8_str("all_can_read"), Return42, nullptr, v8::Local<v8::Value>(),
- v8::None, v8::Local<v8::AccessorSignature>(), v8::ALL_CAN_READ);
+ global_template->SetNativeDataProperty(v8_str("all_can_read"), Return42,
+ nullptr, v8::Local<v8::Value>(),
+ v8::None, v8::ALL_CAN_READ);
v8::Local<v8::Context> context0 =
v8::Context::New(isolate, nullptr, global_template);
@@ -386,9 +386,9 @@ TEST(NewRemoteContext) {
IndexedEnumerator));
global_template->SetNativeDataProperty(
v8_str("cross_context_int"), GetCrossContextInt, SetCrossContextInt);
- global_template->SetNativeDataProperty(
- v8_str("all_can_read"), Return42, nullptr, v8::Local<v8::Value>(),
- v8::None, v8::Local<v8::AccessorSignature>(), v8::ALL_CAN_READ);
+ global_template->SetNativeDataProperty(v8_str("all_can_read"), Return42,
+ nullptr, v8::Local<v8::Value>(),
+ v8::None, v8::ALL_CAN_READ);
v8::Local<v8::Object> global0 =
v8::Context::NewRemoteContext(isolate, global_template).ToLocalChecked();
@@ -451,9 +451,9 @@ TEST(NewRemoteInstance) {
v8::IndexedPropertyHandlerConfiguration(IndexedGetter, IndexedSetter,
IndexedQuery, IndexedDeleter,
IndexedEnumerator));
- tmpl->SetNativeDataProperty(
- v8_str("all_can_read"), Return42, nullptr, v8::Local<v8::Value>(),
- v8::None, v8::Local<v8::AccessorSignature>(), v8::ALL_CAN_READ);
+ tmpl->SetNativeDataProperty(v8_str("all_can_read"), Return42, nullptr,
+ v8::Local<v8::Value>(), v8::None,
+ v8::ALL_CAN_READ);
v8::Local<v8::Object> obj = tmpl->NewRemoteInstance().ToLocalChecked();
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index 719bb519b0..7c1799da6a 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -411,11 +411,9 @@ TEST(NativeTemplateAccessorWithSideEffects) {
v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("get"), Getter, nullptr, v8::Local<v8::Value>(),
v8::AccessControl::DEFAULT, v8::PropertyAttribute::None,
- v8::Local<v8::AccessorSignature>(),
v8::SideEffectType::kHasSideEffect);
templ->SetAccessor(v8_str("set"), Getter, Setter, v8::Local<v8::Value>(),
v8::AccessControl::DEFAULT, v8::PropertyAttribute::None,
- v8::Local<v8::AccessorSignature>(),
v8::SideEffectType::kHasNoSideEffect,
v8::SideEffectType::kHasSideEffect);
@@ -551,7 +549,6 @@ TEST(SetAccessorSetSideEffectReceiverCheck2) {
templ->InstanceTemplate()->SetAccessor(
v8_str("bar"), Getter, Setter, v8::Local<v8::Value>(),
v8::AccessControl::DEFAULT, v8::PropertyAttribute::None,
- v8::Local<v8::AccessorSignature>(),
v8::SideEffectType::kHasSideEffectToReceiver,
v8::SideEffectType::kHasSideEffectToReceiver);
CHECK(env->Global()
@@ -668,10 +665,10 @@ TEST(ObjectTemplateSetAccessorHasNoSideEffect) {
v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("foo"), StringGetter);
- templ->SetAccessor(
- v8_str("foo2"), StringGetter, nullptr, v8::Local<v8::Value>(),
- v8::AccessControl::DEFAULT, v8::PropertyAttribute::None,
- v8::Local<v8::AccessorSignature>(), v8::SideEffectType::kHasNoSideEffect);
+ templ->SetAccessor(v8_str("foo2"), StringGetter, nullptr,
+ v8::Local<v8::Value>(), v8::AccessControl::DEFAULT,
+ v8::PropertyAttribute::None,
+ v8::SideEffectType::kHasNoSideEffect);
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
@@ -711,8 +708,8 @@ TEST(ObjectTemplateSetNativePropertyHasNoSideEffect) {
templ->SetNativeDataProperty(v8_str("foo"), Getter);
templ->SetNativeDataProperty(
v8_str("foo2"), Getter, nullptr, v8::Local<v8::Value>(),
- v8::PropertyAttribute::None, v8::Local<v8::AccessorSignature>(),
- v8::AccessControl::DEFAULT, v8::SideEffectType::kHasNoSideEffect);
+ v8::PropertyAttribute::None, v8::AccessControl::DEFAULT,
+ v8::SideEffectType::kHasNoSideEffect);
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
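Both hunks in this file drop the v8::Local<v8::AccessorSignature>() argument; note that the remaining argument order differs between the two entry points. The post-patch call shapes, as exercised above (getter and setter names as in this file):

    // ObjectTemplate::SetAccessor: ..., AccessControl, PropertyAttribute,
    // then the side-effect type(s).
    templ->SetAccessor(v8_str("foo2"), StringGetter, nullptr,
                       v8::Local<v8::Value>(), v8::AccessControl::DEFAULT,
                       v8::PropertyAttribute::None,
                       v8::SideEffectType::kHasNoSideEffect);
    // Template::SetNativeDataProperty: ..., PropertyAttribute,
    // AccessControl, then the side-effect type.
    templ->SetNativeDataProperty(
        v8_str("foo2"), Getter, nullptr, v8::Local<v8::Value>(),
        v8::PropertyAttribute::None, v8::AccessControl::DEFAULT,
        v8::SideEffectType::kHasNoSideEffect);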
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index dd4b46fa48..3c57428651 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -20712,201 +20712,6 @@ TEST(StringEmpty) {
CHECK(*v8::Utils::OpenHandle(*v8::String::Empty(isolate)) == *empty_string);
}
-
-static int instance_checked_getter_count = 0;
-static void InstanceCheckedGetter(
- Local<String> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK(name->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("foo"))
- .FromJust());
- instance_checked_getter_count++;
- info.GetReturnValue().Set(v8_num(11));
-}
-
-
-static int instance_checked_setter_count = 0;
-static void InstanceCheckedSetter(Local<String> name,
- Local<Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- CHECK(name->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("foo"))
- .FromJust());
- CHECK(value->Equals(info.GetIsolate()->GetCurrentContext(), v8_num(23))
- .FromJust());
- instance_checked_setter_count++;
-}
-
-
-static void CheckInstanceCheckedResult(int getters, int setters,
- bool expects_callbacks,
- TryCatch* try_catch) {
- if (expects_callbacks) {
- CHECK(!try_catch->HasCaught());
- CHECK_EQ(getters, instance_checked_getter_count);
- CHECK_EQ(setters, instance_checked_setter_count);
- } else {
- CHECK(try_catch->HasCaught());
- CHECK_EQ(0, instance_checked_getter_count);
- CHECK_EQ(0, instance_checked_setter_count);
- }
- try_catch->Reset();
-}
-
-
-static void CheckInstanceCheckedAccessors(bool expects_callbacks) {
- instance_checked_getter_count = 0;
- instance_checked_setter_count = 0;
- TryCatch try_catch(CcTest::isolate());
-
- // Test path through generic runtime code.
- CompileRun("obj.foo");
- CheckInstanceCheckedResult(1, 0, expects_callbacks, &try_catch);
- CompileRun("obj.foo = 23");
- CheckInstanceCheckedResult(1, 1, expects_callbacks, &try_catch);
-
- // Test path through generated LoadIC and StoredIC.
- CompileRun(
- "function test_get(o) { o.foo; };"
- "%PrepareFunctionForOptimization(test_get);"
- "test_get(obj);");
- CheckInstanceCheckedResult(2, 1, expects_callbacks, &try_catch);
- CompileRun("test_get(obj);");
- CheckInstanceCheckedResult(3, 1, expects_callbacks, &try_catch);
- CompileRun("test_get(obj);");
- CheckInstanceCheckedResult(4, 1, expects_callbacks, &try_catch);
- CompileRun(
- "function test_set(o) { o.foo = 23; }"
- "%PrepareFunctionForOptimization(test_set);"
- "test_set(obj);");
- CheckInstanceCheckedResult(4, 2, expects_callbacks, &try_catch);
- CompileRun("test_set(obj);");
- CheckInstanceCheckedResult(4, 3, expects_callbacks, &try_catch);
- CompileRun("test_set(obj);");
- CheckInstanceCheckedResult(4, 4, expects_callbacks, &try_catch);
-
- // Test path through optimized code.
- CompileRun("%OptimizeFunctionOnNextCall(test_get);"
- "test_get(obj);");
- CheckInstanceCheckedResult(5, 4, expects_callbacks, &try_catch);
- CompileRun("%OptimizeFunctionOnNextCall(test_set);"
- "test_set(obj);");
- CheckInstanceCheckedResult(5, 5, expects_callbacks, &try_catch);
-
- // Cleanup so that closures start out fresh in next check.
- CompileRun(
- "%DeoptimizeFunction(test_get);"
- "%ClearFunctionFeedback(test_get);"
- "%DeoptimizeFunction(test_set);"
- "%ClearFunctionFeedback(test_set);");
-}
-
-
-THREADED_TEST(InstanceCheckOnInstanceAccessor) {
- v8::internal::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
- Local<ObjectTemplate> inst = templ->InstanceTemplate();
- inst->SetAccessor(v8_str("foo"), InstanceCheckedGetter, InstanceCheckedSetter,
- Local<Value>(), v8::DEFAULT, v8::None,
- v8::AccessorSignature::New(context->GetIsolate(), templ));
- CHECK(context->Global()
- ->Set(context.local(), v8_str("f"),
- templ->GetFunction(context.local()).ToLocalChecked())
- .FromJust());
-
- printf("Testing positive ...\n");
- CompileRun("var obj = new f();");
- CHECK(templ->HasInstance(
- context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
- CheckInstanceCheckedAccessors(true);
-
- printf("Testing negative ...\n");
- CompileRun("var obj = {};"
- "obj.__proto__ = new f();");
- CHECK(!templ->HasInstance(
- context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
- CheckInstanceCheckedAccessors(false);
-}
-
-static void EmptyInterceptorGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {}
-
-static void EmptyInterceptorSetter(
- Local<Name> name, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {}
-
-THREADED_TEST(InstanceCheckOnInstanceAccessorWithInterceptor) {
- v8::internal::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
- Local<ObjectTemplate> inst = templ->InstanceTemplate();
- templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- EmptyInterceptorGetter, EmptyInterceptorSetter));
- inst->SetAccessor(v8_str("foo"), InstanceCheckedGetter, InstanceCheckedSetter,
- Local<Value>(), v8::DEFAULT, v8::None,
- v8::AccessorSignature::New(context->GetIsolate(), templ));
- CHECK(context->Global()
- ->Set(context.local(), v8_str("f"),
- templ->GetFunction(context.local()).ToLocalChecked())
- .FromJust());
-
- printf("Testing positive ...\n");
- CompileRun("var obj = new f();");
- CHECK(templ->HasInstance(
- context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
- CheckInstanceCheckedAccessors(true);
-
- printf("Testing negative ...\n");
- CompileRun("var obj = {};"
- "obj.__proto__ = new f();");
- CHECK(!templ->HasInstance(
- context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
- CheckInstanceCheckedAccessors(false);
-}
-
-
-THREADED_TEST(InstanceCheckOnPrototypeAccessor) {
- v8::internal::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
- Local<ObjectTemplate> proto = templ->PrototypeTemplate();
- proto->SetAccessor(v8_str("foo"), InstanceCheckedGetter,
- InstanceCheckedSetter, Local<Value>(), v8::DEFAULT,
- v8::None,
- v8::AccessorSignature::New(context->GetIsolate(), templ));
- CHECK(context->Global()
- ->Set(context.local(), v8_str("f"),
- templ->GetFunction(context.local()).ToLocalChecked())
- .FromJust());
-
- printf("Testing positive ...\n");
- CompileRun("var obj = new f();");
- CHECK(templ->HasInstance(
- context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
- CheckInstanceCheckedAccessors(true);
-
- printf("Testing negative ...\n");
- CompileRun("var obj = {};"
- "obj.__proto__ = new f();");
- CHECK(!templ->HasInstance(
- context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
- CheckInstanceCheckedAccessors(false);
-
- printf("Testing positive with modified prototype chain ...\n");
- CompileRun("var obj = new f();"
- "var pro = {};"
- "pro.__proto__ = obj.__proto__;"
- "obj.__proto__ = pro;");
- CHECK(templ->HasInstance(
- context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
- CheckInstanceCheckedAccessors(true);
-}
-
THREADED_TEST(CheckIsLeafTemplateForApiObject) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index f27344ff49..f4ca7e83cf 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -1535,22 +1535,13 @@ TEST(DeoptExitSizeIsFixed) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
masm.bind(&before_exit);
- if (kind == DeoptimizeKind::kEagerWithResume) {
- Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
- DeoptimizeReason::kDynamicCheckMaps);
- masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
- nullptr);
- CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- Deoptimizer::kEagerWithResumeBeforeArgsSize);
- } else {
- Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
- masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
- nullptr);
- CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
- }
+ Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
+ masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+ nullptr);
+ CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+ kind == DeoptimizeKind::kLazy
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 8fdc86c2c1..080ee068d0 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -250,7 +250,7 @@ TEST(TickEvents) {
v8::base::TimeDelta::FromMicroseconds(100), true);
CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
symbolizer, processor, code_observer);
- profiles->StartProfiling("");
+ ProfilerId id = profiles->StartProfiling().id;
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
*code_observer->code_entries(),
@@ -273,7 +273,7 @@ TEST(TickEvents) {
isolate->logger()->RemoveCodeEventListener(&profiler_listener);
processor->StopSynchronously();
- CpuProfile* profile = profiles->StopProfiling("");
+ CpuProfile* profile = profiles->StopProfiling(id);
CHECK(profile);
// Check call trees.
@@ -409,7 +409,7 @@ TEST(Issue1398) {
v8::base::TimeDelta::FromMicroseconds(100), true);
CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
symbolizer, processor, code_observer);
- profiles->StartProfiling("");
+ ProfilerId id = profiles->StartProfiling("").id;
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
*code_observer->code_entries(),
@@ -428,7 +428,7 @@ TEST(Issue1398) {
processor->AddSample(sample);
processor->StopSynchronously();
- CpuProfile* profile = profiles->StopProfiling("");
+ CpuProfile* profile = profiles->StopProfiling(id);
CHECK(profile);
unsigned actual_depth = 0;
@@ -1288,7 +1288,7 @@ static void TickLines(bool optimize) {
v8::base::TimeDelta::FromMicroseconds(100), true);
CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
symbolizer, processor, code_observer);
- profiles->StartProfiling("");
+ ProfilerId id = profiles->StartProfiling().id;
// TODO(delphick): Stop using the CpuProfiler internals here: This forces
// LogCompiledFunctions so that source positions are collected everywhere.
// This would normally happen automatically with CpuProfiler::StartProfiling
@@ -1312,7 +1312,7 @@ static void TickLines(bool optimize) {
processor->StopSynchronously();
- CpuProfile* profile = profiles->StopProfiling("");
+ CpuProfile* profile = profiles->StopProfiling(id);
CHECK(profile);
// Check the state of the symbolizer.
@@ -3652,7 +3652,7 @@ TEST(ProflilerSubsampling) {
symbolizer, processor, code_observer);
// Create a new CpuProfile that wants samples at 8us.
- CpuProfile profile(&profiler, "",
+ CpuProfile profile(&profiler, 1, "",
{v8::CpuProfilingMode::kLeafNodeLineNumbers,
v8::CpuProfilingOptions::kNoSampleLimit, 8});
// Verify that the first sample is always included.
@@ -3705,38 +3705,47 @@ TEST(DynamicResampling) {
// Add a 10us profiler, verify that the base sampling interval is as high as
// possible (10us).
- profiles->StartProfiling("10us",
+ ProfilerId id_10us =
+ profiles
+ ->StartProfiling("10us",
{v8::CpuProfilingMode::kLeafNodeLineNumbers,
- v8::CpuProfilingOptions::kNoSampleLimit, 10});
+ v8::CpuProfilingOptions::kNoSampleLimit, 10})
+ .id;
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(10));
// Add a 5us profiler, verify that the base sampling interval is as high as
// possible given a 10us and 5us profiler (5us).
- profiles->StartProfiling("5us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
- v8::CpuProfilingOptions::kNoSampleLimit, 5});
+ ProfilerId id_5us =
+ profiles
+ ->StartProfiling("5us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 5})
+ .id;
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(5));
// Add a 3us profiler, verify that the base sampling interval is 1us (due to
// coprime intervals).
- profiles->StartProfiling("3us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
- v8::CpuProfilingOptions::kNoSampleLimit, 3});
+ ProfilerId id_3us =
+ profiles
+ ->StartProfiling("3us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 3})
+ .id;
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(1));
// Remove the 5us profiler, verify that the sample interval stays at 1us.
- profiles->StopProfiling("5us");
+ profiles->StopProfiling(id_5us);
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(1));
// Remove the 10us profiler, verify that the sample interval becomes 3us.
- profiles->StopProfiling("10us");
+ profiles->StopProfiling(id_10us);
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(3));
// Remove the 3us profiler, verify that the sample interval becomes unset.
- profiles->StopProfiling("3us");
+ profiles->StopProfiling(id_3us);
CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
}
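The asserted intervals follow a greatest-common-divisor rule over the active profilers: 10 and 5 give 5, adding the coprime 3 forces 1, and dropping back to just the 3us profiler gives 3. A standalone sketch of that rule (illustrative; the production logic sits behind the GetCommonSamplingInterval() call used above):

    #include <numeric>
    #include <vector>

    // With no base interval, the common sampling interval is the gcd of all
    // active profiler intervals, in microseconds; 0 means "unset".
    int CommonIntervalUs(const std::vector<int>& intervals_us) {
      int g = 0;
      for (int us : intervals_us) g = std::gcd(g, us);
      return g;
    }
    // CommonIntervalUs({10, 5})    == 5
    // CommonIntervalUs({10, 5, 3}) == 1
    // CommonIntervalUs({10, 3})    == 1  (5us profiler stopped)
    // CommonIntervalUs({3})        == 3  (10us profiler stopped)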
@@ -3767,43 +3776,55 @@ TEST(DynamicResamplingWithBaseInterval) {
// Add a profiler with an unset sampling interval, verify that the common
// sampling interval is equal to the base.
- profiles->StartProfiling("unset", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
- v8::CpuProfilingOptions::kNoSampleLimit});
+ ProfilerId unset_id =
+ profiles
+ ->StartProfiling("unset", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit})
+ .id;
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(7));
- profiles->StopProfiling("unset");
+ profiles->StopProfiling(unset_id);
// Adding an 8us sampling interval rounds to a 14us base interval.
- profiles->StartProfiling("8us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
- v8::CpuProfilingOptions::kNoSampleLimit, 8});
+ ProfilerId id_8us =
+ profiles
+ ->StartProfiling("8us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 8})
+ .id;
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(14));
// Adding a 4us sampling interval should cause a lowering to a 7us interval.
- profiles->StartProfiling("4us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
- v8::CpuProfilingOptions::kNoSampleLimit, 4});
+ ProfilerId id_4us =
+ profiles
+ ->StartProfiling("4us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 4})
+ .id;
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(7));
// Removing the 4us sampling interval should restore the 14us sampling
// interval.
- profiles->StopProfiling("4us");
+ profiles->StopProfiling(id_4us);
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(14));
// Removing the 8us sampling interval should unset the common sampling
// interval.
- profiles->StopProfiling("8us");
+ profiles->StopProfiling(id_8us);
CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
// A sampling interval of 0us should force all profiles to have a sampling
// interval of 0us (the only multiple of 0).
profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(0));
- profiles->StartProfiling("5us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
- v8::CpuProfilingOptions::kNoSampleLimit, 5});
+ ProfilerId id_5us =
+ profiles
+ ->StartProfiling("5us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 5})
+ .id;
CHECK_EQ(profiles->GetCommonSamplingInterval(),
base::TimeDelta::FromMicroseconds(0));
- profiles->StopProfiling("5us");
+ profiles->StopProfiling(id_5us);
}
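With a non-zero base interval the same rule composes with rounding: each profiler interval is first rounded up to a multiple of the base, then the gcd is taken, reproducing the 7us/14us/7us sequence asserted above. Extending the previous sketch under the same assumptions:

    // Base interval 7us: 8 rounds up to 14, 4 rounds up to 7, gcd(14, 7) == 7.
    int RoundUpToMultiple(int base_us, int us) {
      return ((us + base_us - 1) / base_us) * base_us;
    }
    int CommonIntervalUs(int base_us, const std::vector<int>& intervals_us) {
      if (base_us == 0) return 0;  // a 0us base forces 0us for all profiles
      int g = 0;
      for (int us : intervals_us)
        g = std::gcd(g, RoundUpToMultiple(base_us, us));
      return g;
    }
    // CommonIntervalUs(7, {8})    == 14
    // CommonIntervalUs(7, {8, 4}) == 7
    // CommonIntervalUs(7, {8})    == 14  (after the 4us profiler stops)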
// Tests that functions compiled after a started profiler is stopped are still
@@ -4388,6 +4409,34 @@ v8::Local<v8::Function> CreateApiCode(LocalContext* env) {
return GetFunction(env->local(), foo_name);
}
+TEST(CanStartStopProfilerWithTitlesAndIds) {
+ TestSetup test_setup;
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging);
+ ProfilerId anonymous_id_1 = profiler.StartProfiling().id;
+ ProfilerId title_id = profiler.StartProfiling("title").id;
+ ProfilerId anonymous_id_2 = profiler.StartProfiling().id;
+
+ CHECK_NE(anonymous_id_1, title_id);
+ CHECK_NE(anonymous_id_1, anonymous_id_2);
+ CHECK_NE(anonymous_id_2, title_id);
+
+ CpuProfile* profile_with_title = profiler.StopProfiling("title");
+ CHECK(profile_with_title);
+ CHECK_EQ(title_id, profile_with_title->id());
+
+ CpuProfile* profile_with_id = profiler.StopProfiling(anonymous_id_1);
+ CHECK(profile_with_id);
+ CHECK_EQ(anonymous_id_1, profile_with_id->id());
+
+ CpuProfile* profile_with_id_2 = profiler.StopProfiling(anonymous_id_2);
+ CHECK(profile_with_id_2);
+ CHECK_EQ(anonymous_id_2, profile_with_id_2->id());
+}
+
TEST(FastApiCPUProfiler) {
#if !defined(V8_LITE_MODE) && !defined(USE_SIMULATOR)
// None of the following configurations include JSCallReducer.
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 3c9f290eda..5a208d3517 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -44,6 +44,10 @@ namespace internal {
namespace {
+struct TracedReferenceWrapper {
+ v8::TracedReference<v8::Object> handle;
+};
+
// Empty v8::EmbedderHeapTracer that never keeps objects alive on Scavenge. See
// |IsRootForNonTracingGC|.
class NonRootingEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
@@ -58,9 +62,26 @@ class NonRootingEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
void TraceEpilogue(TraceSummary*) final {}
void EnterFinalPause(EmbedderStackState) final {}
- bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) final {
+ bool IsRootForNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) final {
return false;
}
+
+ void ResetHandleInNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) final {
+ for (auto* wrapper : wrappers_) {
+ if (wrapper->handle == handle) {
+ wrapper->handle.Reset();
+ }
+ }
+ }
+
+ void Register(TracedReferenceWrapper* wrapper) {
+ wrappers_.push_back(wrapper);
+ }
+
+ private:
+ std::vector<TracedReferenceWrapper*> wrappers_;
};
void InvokeScavenge() { CcTest::CollectGarbage(i::NEW_SPACE); }
@@ -76,10 +97,6 @@ struct FlagAndGlobal {
v8::Global<v8::Object> handle;
};
-struct TracedGlobalWrapper {
- v8::TracedGlobal<v8::Object> handle;
-};
-
void ResetHandleAndSetFlag(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
data.GetParameter()->handle.Reset();
data.GetParameter()->flag = true;
@@ -104,12 +121,12 @@ void ConstructJSObject(v8::Isolate* isolate, v8::Global<v8::Object>* global) {
}
void ConstructJSObject(v8::Isolate* isolate,
- v8::TracedGlobal<v8::Object>* traced) {
+ v8::TracedReference<v8::Object>* handle) {
v8::HandleScope scope(isolate);
v8::Local<v8::Object> object(v8::Object::New(isolate));
CHECK(!object.IsEmpty());
- *traced = v8::TracedGlobal<v8::Object>(isolate, object);
- CHECK(!traced->IsEmpty());
+ *handle = v8::TracedReference<v8::Object>(isolate, object);
+ CHECK(!handle->IsEmpty());
}
template <typename HandleContainer>
@@ -150,12 +167,11 @@ void WeakHandleTest(v8::Isolate* isolate, ConstructFunction construct_function,
CHECK_IMPLIES(survives == SurvivalMode::kDies, fp.flag);
}
-template <typename ConstructFunction, typename ModifierFunction,
- typename GCFunction>
-void TracedGlobalTest(v8::Isolate* isolate,
- ConstructFunction construct_function,
- ModifierFunction modifier_function,
- GCFunction gc_function, SurvivalMode survives) {
+template <typename ConstructFunction, typename ModifierFunction>
+void TracedReferenceTestWithScavenge(v8::Isolate* isolate,
+ ConstructFunction construct_function,
+ ModifierFunction modifier_function,
+ SurvivalMode survives) {
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -163,11 +179,14 @@ void TracedGlobalTest(v8::Isolate* isolate,
NonRootingEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- auto fp = std::make_unique<TracedGlobalWrapper>();
+ auto fp = std::make_unique<TracedReferenceWrapper>();
+ tracer.Register(fp.get());
construct_function(isolate, context, fp.get());
CHECK(heap::InCorrectGeneration(isolate, fp->handle));
modifier_function(fp.get());
- gc_function();
+ InvokeScavenge();
+  // Scavenge clearing properly resets the original handle, so we can check
+  // the handle directly here.
CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !fp->handle.IsEmpty());
CHECK_IMPLIES(survives == SurvivalMode::kDies, fp->handle.IsEmpty());
}
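Note the contrast with the mark-sweep helper in test-embedder-tracing.cc earlier in this diff: because the scavenger invokes ResetHandleInNonTracingGC on every registered wrapper, this helper can assert on the handle itself rather than comparing handle counts:

    // Scavenge path: the tracer resets the wrapper's handle, so this holds.
    CHECK_IMPLIES(survives == SurvivalMode::kDies, fp->handle.IsEmpty());
    // Full-GC path (earlier helper): compare handles_count() before/after.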
@@ -343,14 +362,13 @@ TEST(WeakHandleToUnmodifiedJSObjectDiesOnScavenge) {
SurvivalMode::kDies);
}
-TEST(TracedGlobalToUnmodifiedJSObjectSurvivesScavenge) {
+TEST(TracedReferenceToUnmodifiedJSObjectSurvivesScavenge) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc;
CcTest::InitializeVM();
- TracedGlobalTest(
- CcTest::isolate(), &ConstructJSObject<TracedGlobalWrapper>,
- [](TracedGlobalWrapper* fp) {}, []() { InvokeScavenge(); },
- SurvivalMode::kSurvives);
+ TracedReferenceTestWithScavenge(
+ CcTest::isolate(), &ConstructJSObject<TracedReferenceWrapper>,
+ [](TracedReferenceWrapper* fp) {}, SurvivalMode::kSurvives);
}
TEST(WeakHandleToUnmodifiedJSObjectDiesOnMarkCompact) {
@@ -382,17 +400,16 @@ TEST(WeakHandleToUnmodifiedJSApiObjectDiesOnScavenge) {
SurvivalMode::kDies);
}
-TEST(TracedGlobalToUnmodifiedJSApiObjectDiesOnScavenge) {
+TEST(TracedReferenceToUnmodifiedJSApiObjectDiesOnScavenge) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc;
CcTest::InitializeVM();
- TracedGlobalTest(
- CcTest::isolate(), &ConstructJSApiObject<TracedGlobalWrapper>,
- [](TracedGlobalWrapper* fp) {}, []() { InvokeScavenge(); },
- SurvivalMode::kDies);
+ TracedReferenceTestWithScavenge(
+ CcTest::isolate(), &ConstructJSApiObject<TracedReferenceWrapper>,
+ [](TracedReferenceWrapper* fp) {}, SurvivalMode::kDies);
}
-TEST(TracedGlobalToJSApiObjectWithIdentityHashSurvivesScavenge) {
+TEST(TracedReferenceToJSApiObjectWithIdentityHashSurvivesScavenge) {
if (FLAG_single_generation) return;
ManualGCScope manual_gc;
@@ -401,9 +418,9 @@ TEST(TracedGlobalToJSApiObjectWithIdentityHashSurvivesScavenge) {
HandleScope scope(i_isolate);
Handle<JSWeakMap> weakmap = i_isolate->factory()->NewJSWeakMap();
- TracedGlobalTest(
- CcTest::isolate(), &ConstructJSApiObject<TracedGlobalWrapper>,
- [&weakmap, i_isolate](TracedGlobalWrapper* fp) {
+ TracedReferenceTestWithScavenge(
+ CcTest::isolate(), &ConstructJSApiObject<TracedReferenceWrapper>,
+ [&weakmap, i_isolate](TracedReferenceWrapper* fp) {
v8::HandleScope scope(CcTest::isolate());
Handle<JSReceiver> key =
Utils::OpenHandle(*fp->handle.Get(CcTest::isolate()));
@@ -411,7 +428,7 @@ TEST(TracedGlobalToJSApiObjectWithIdentityHashSurvivesScavenge) {
        int32_t hash = key->GetOrCreateHash(i_isolate).value();
        JSWeakCollection::Set(weakmap, key, smi, hash);
      },
-      []() { InvokeScavenge(); }, SurvivalMode::kSurvives);
+      SurvivalMode::kSurvives);
}
TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesScavengeWhenInHandle) {
@@ -447,13 +464,13 @@ TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
[]() { InvokeMarkSweep(); }, SurvivalMode::kSurvives);
}
-TEST(TracedGlobalToJSApiObjectWithModifiedMapSurvivesScavenge) {
+TEST(TracedReferenceToJSApiObjectWithModifiedMapSurvivesScavenge) {
if (FLAG_single_generation) return;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
LocalContext context;
- TracedGlobal<v8::Object> handle;
+ TracedReference<v8::Object> handle;
{
v8::HandleScope scope(isolate);
// Create an API object which does not have the same map as constructor.
@@ -469,13 +486,13 @@ TEST(TracedGlobalToJSApiObjectWithModifiedMapSurvivesScavenge) {
CHECK(!handle.IsEmpty());
}
-TEST(TracedGlobalTOJsApiObjectWithElementsSurvivesScavenge) {
+TEST(TracedReferenceToJSApiObjectWithElementsSurvivesScavenge) {
if (FLAG_single_generation) return;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
LocalContext context;
- TracedGlobal<v8::Object> handle;
+ TracedReference<v8::Object> handle;
{
v8::HandleScope scope(isolate);
@@ -717,13 +734,14 @@ TEST(TotalSizeTracedNode) {
Isolate* i_isolate = CcTest::i_isolate();
v8::HandleScope scope(isolate);
- v8::TracedGlobal<v8::Object>* global = new TracedGlobal<v8::Object>();
+ v8::TracedReference<v8::Object>* handle = new TracedReference<v8::Object>();
CHECK_EQ(i_isolate->global_handles()->TotalSize(), 0);
CHECK_EQ(i_isolate->global_handles()->UsedSize(), 0);
- ConstructJSObject(isolate, global);
+ ConstructJSObject(isolate, handle);
CHECK_GT(i_isolate->global_handles()->TotalSize(), 0);
CHECK_GT(i_isolate->global_handles()->UsedSize(), 0);
- delete global;
+ delete handle;
+ InvokeMarkSweep();
CHECK_GT(i_isolate->global_handles()->TotalSize(), 0);
CHECK_EQ(i_isolate->global_handles()->UsedSize(), 0);
}
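
The hunks above migrate these tests from the removed TracedGlobal<> to TracedReference<>. As a reading aid, here is a minimal sketch (not part of the diff) of the embedder-side pattern under test; HoldObject and StillAlive are hypothetical names, and only the Reset() and IsEmpty() members of v8::TracedReference are assumed.

// Hypothetical helpers illustrating the TracedReference pattern the tests
// exercise; assumes <v8.h> and an entered isolate/context.
void HoldObject(v8::Isolate* isolate, v8::Local<v8::Object> obj,
                v8::TracedReference<v8::Object>* out) {
  out->Reset(isolate, obj);  // Create the traced handle, as the wrappers do.
}

bool StillAlive(const v8::TracedReference<v8::Object>& ref) {
  // A scavenge clears handles to unreachable young objects; the tests
  // observe this via fp->handle.IsEmpty().
  return !ref.IsEmpty();
}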
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 55a0441c52..251cd5f705 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -324,22 +324,13 @@ TEST(DeoptExitSizeIsFixed) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
masm.bind(&before_exit);
-    if (kind == DeoptimizeKind::kEagerWithResume) {
-      Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
-          DeoptimizeReason::kDynamicCheckMaps);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               Deoptimizer::kEagerWithResumeBeforeArgsSize);
-    } else {
-      Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               kind == DeoptimizeKind::kLazy
-                   ? Deoptimizer::kLazyDeoptExitSize
-                   : Deoptimizer::kNonLazyDeoptExitSize);
-    }
+    Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
+    masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+                               nullptr);
+    CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+             kind == DeoptimizeKind::kLazy
+                 ? Deoptimizer::kLazyDeoptExitSize
+                 : Deoptimizer::kNonLazyDeoptExitSize);
}
}
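
The same simplification repeats, with per-architecture details, in the arm64, loong64, mips, mips64, riscv64, and x64 hunks below. As a hedged condensation (not part of the diff), the invariant every port now asserts is:

for (int i = 0; i < kDeoptimizeKindCount; i++) {
  DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
  // With kEagerWithResume gone, only the lazy/non-lazy split remains.
  int expected = kind == DeoptimizeKind::kLazy
                     ? Deoptimizer::kLazyDeoptExitSize
                     : Deoptimizer::kNonLazyDeoptExitSize;
  // ... bind a label, emit the exit via CallForDeoptimization, then:
  // CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit), expected);
}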
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm64.cc b/deps/v8/test/cctest/test-macro-assembler-arm64.cc
index 4a4347afa5..d96fc3551f 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm64.cc
@@ -106,30 +106,20 @@ TEST(DeoptExitSizeIsFixed) {
for (int i = 0; i < kDeoptimizeKindCount; i++) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
-    if (kind == DeoptimizeKind::kEagerWithResume) {
-      masm.bind(&before_exit);
-      Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
-          DeoptimizeReason::kDynamicCheckMaps);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 &before_exit);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               Deoptimizer::kEagerWithResumeBeforeArgsSize);
+    Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
+    // Mirroring logic in code-generator.cc.
+    if (kind == DeoptimizeKind::kLazy) {
+      // CFI emits an extra instruction here.
+      masm.BindExceptionHandler(&before_exit);
    } else {
-      Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
-      // Mirroring logic in code-generator.cc.
-      if (kind == DeoptimizeKind::kLazy) {
-        // CFI emits an extra instruction here.
-        masm.BindExceptionHandler(&before_exit);
-      } else {
-        masm.bind(&before_exit);
-      }
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 &before_exit);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               kind == DeoptimizeKind::kLazy
-                   ? Deoptimizer::kLazyDeoptExitSize
-                   : Deoptimizer::kNonLazyDeoptExitSize);
+      masm.bind(&before_exit);
    }
+    masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+                               &before_exit);
+    CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+             kind == DeoptimizeKind::kLazy
+                 ? Deoptimizer::kLazyDeoptExitSize
+                 : Deoptimizer::kNonLazyDeoptExitSize);
}
}
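
One detail of the arm64 variant above (the riscv64 hunk below mirrors it): lazy deoptimization exits double as exception-handler entry points, so the test binds the label exactly as the code generator does. In CFI-enabled builds that binding emits one extra instruction (a BTI landing pad on arm64), which a plain bind() would omit, making the measured exit size wrong:

if (kind == DeoptimizeKind::kLazy) {
  masm.BindExceptionHandler(&before_exit);  // May emit a CFI landing pad.
} else {
  masm.bind(&before_exit);
}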
diff --git a/deps/v8/test/cctest/test-macro-assembler-loong64.cc b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
index ee1e58e0f4..63730abbc2 100644
--- a/deps/v8/test/cctest/test-macro-assembler-loong64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
@@ -2891,22 +2891,13 @@ TEST(DeoptExitSizeIsFixed) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
masm.bind(&before_exit);
-    if (kind == DeoptimizeKind::kEagerWithResume) {
-      Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
-          DeoptimizeReason::kDynamicCheckMaps);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               Deoptimizer::kEagerWithResumeBeforeArgsSize);
-    } else {
-      Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               kind == DeoptimizeKind::kLazy
-                   ? Deoptimizer::kLazyDeoptExitSize
-                   : Deoptimizer::kNonLazyDeoptExitSize);
-    }
+    Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
+    masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+                               nullptr);
+    CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+             kind == DeoptimizeKind::kLazy
+                 ? Deoptimizer::kLazyDeoptExitSize
+                 : Deoptimizer::kNonLazyDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 1e347d1a54..9e5fdabd15 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -1349,22 +1349,13 @@ TEST(DeoptExitSizeIsFixed) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
masm.bind(&before_exit);
-    if (kind == DeoptimizeKind::kEagerWithResume) {
-      Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
-          DeoptimizeReason::kDynamicCheckMaps);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               Deoptimizer::kEagerWithResumeBeforeArgsSize);
-    } else {
-      Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               kind == DeoptimizeKind::kLazy
-                   ? Deoptimizer::kLazyDeoptExitSize
-                   : Deoptimizer::kNonLazyDeoptExitSize);
-    }
+    Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
+    masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+                               nullptr);
+    CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+             kind == DeoptimizeKind::kLazy
+                 ? Deoptimizer::kLazyDeoptExitSize
+                 : Deoptimizer::kNonLazyDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index f63c66e462..09664f0170 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -1702,22 +1702,13 @@ TEST(DeoptExitSizeIsFixed) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
masm.bind(&before_exit);
-    if (kind == DeoptimizeKind::kEagerWithResume) {
-      Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
-          DeoptimizeReason::kDynamicCheckMaps);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               Deoptimizer::kEagerWithResumeBeforeArgsSize);
-    } else {
-      Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               kind == DeoptimizeKind::kLazy
-                   ? Deoptimizer::kLazyDeoptExitSize
-                   : Deoptimizer::kNonLazyDeoptExitSize);
-    }
+    Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
+    masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+                               nullptr);
+    CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+             kind == DeoptimizeKind::kLazy
+                 ? Deoptimizer::kLazyDeoptExitSize
+                 : Deoptimizer::kNonLazyDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-riscv64.cc b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
index 5be85480e2..a98c10933e 100644
--- a/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
@@ -1530,23 +1530,20 @@ TEST(DeoptExitSizeIsFixed) {
for (int i = 0; i < kDeoptimizeKindCount; i++) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
-    masm.bind(&before_exit);
-    if (kind == DeoptimizeKind::kEagerWithResume) {
-      Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
-          DeoptimizeReason::kDynamicCheckMaps);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               Deoptimizer::kEagerWithResumeBeforeArgsSize);
+    Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
+    // Mirroring logic in code-generator.cc.
+    if (kind == DeoptimizeKind::kLazy) {
+      // CFI emits an extra instruction here.
+      masm.BindExceptionHandler(&before_exit);
    } else {
-      Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               kind == DeoptimizeKind::kLazy
-                   ? Deoptimizer::kLazyDeoptExitSize
-                   : Deoptimizer::kNonLazyDeoptExitSize);
+      masm.bind(&before_exit);
    }
+    masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+                               &before_exit);
+    CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+             kind == DeoptimizeKind::kLazy
+                 ? Deoptimizer::kLazyDeoptExitSize
+                 : Deoptimizer::kNonLazyDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 430c4a31d9..7e1388bd52 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -1063,22 +1063,13 @@ TEST(DeoptExitSizeIsFixed) {
DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
Label before_exit;
masm.bind(&before_exit);
-    if (kind == DeoptimizeKind::kEagerWithResume) {
-      Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
-          DeoptimizeReason::kDynamicCheckMaps);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               Deoptimizer::kEagerWithResumeBeforeArgsSize);
-    } else {
-      Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
-      masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                                 nullptr);
-      CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-               kind == DeoptimizeKind::kLazy
-                   ? Deoptimizer::kLazyDeoptExitSize
-                   : Deoptimizer::kNonLazyDeoptExitSize);
-    }
+    Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
+    masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+                               nullptr);
+    CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+             kind == DeoptimizeKind::kLazy
+                 ? Deoptimizer::kLazyDeoptExitSize
+                 : Deoptimizer::kNonLazyDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 4564afd480..de1c42cb16 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -461,7 +461,8 @@ TEST(SampleIds) {
CpuProfiler profiler(isolate);
CpuProfilesCollection profiles(isolate);
profiles.set_cpu_profiler(&profiler);
-  profiles.StartProfiling("", {CpuProfilingMode::kLeafNodeLineNumbers});
+  ProfilerId id =
+      profiles.StartProfiling("", {CpuProfilingMode::kLeafNodeLineNumbers}).id;
CodeEntryStorage storage;
CodeMap code_map(storage);
Symbolizer symbolizer(&code_map);
@@ -509,7 +510,7 @@ TEST(SampleIds) {
sample3.timestamp, symbolized.stack_trace, symbolized.src_line, true,
base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
- CpuProfile* profile = profiles.StopProfiling("");
+ CpuProfile* profile = profiles.StopProfiling(id);
unsigned nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
CHECK_EQ(7u, nodeId - 1);
@@ -521,11 +522,25 @@ TEST(SampleIds) {
}
}
+TEST(SampleIds_StopProfilingByProfilerId) {
+  TestSetup test_setup;
+  i::Isolate* isolate = CcTest::i_isolate();
+  CpuProfiler profiler(isolate);
+  CpuProfilesCollection profiles(isolate);
+  profiles.set_cpu_profiler(&profiler);
+  CpuProfilingResult result =
+      profiles.StartProfiling("", {CpuProfilingMode::kLeafNodeLineNumbers});
+  CHECK_EQ(result.status, CpuProfilingStatus::kStarted);
+
+  CpuProfile* profile = profiles.StopProfiling(result.id);
+  CHECK_NE(profile, nullptr);
+}
+
namespace {
class DiscardedSamplesDelegateImpl : public v8::DiscardedSamplesDelegate {
public:
DiscardedSamplesDelegateImpl() : DiscardedSamplesDelegate() {}
- void Notify() override {}
+ void Notify() override { CHECK_GT(GetId(), 0); }
};
class MockPlatform : public TestPlatform {
@@ -589,10 +604,13 @@ TEST(MaxSamplesCallback) {
  std::unique_ptr<DiscardedSamplesDelegateImpl> impl =
      std::make_unique<DiscardedSamplesDelegateImpl>(
          DiscardedSamplesDelegateImpl());
-  profiles.StartProfiling("",
+  ProfilerId id =
+      profiles
+          .StartProfiling("",
                          {v8::CpuProfilingMode::kLeafNodeLineNumbers, 1, 1,
                           MaybeLocal<v8::Context>()},
-                          std::move(impl));
+                          std::move(impl))
+          .id;
CodeEntryStorage storage;
CodeMap code_map(storage);
@@ -628,7 +646,7 @@ TEST(MaxSamplesCallback) {
CHECK_EQ(1, mock_platform->posted_count());
// Teardown
- profiles.StopProfiling("");
+ profiles.StopProfiling(id);
delete mock_platform;
}
@@ -638,7 +656,7 @@ TEST(NoSamples) {
CpuProfiler profiler(isolate);
CpuProfilesCollection profiles(isolate);
profiles.set_cpu_profiler(&profiler);
- profiles.StartProfiling("");
+ ProfilerId id = profiles.StartProfiling().id;
CodeEntryStorage storage;
CodeMap code_map(storage);
Symbolizer symbolizer(&code_map);
@@ -656,7 +674,7 @@ TEST(NoSamples) {
v8::base::TimeTicks::Now(), symbolized.stack_trace, symbolized.src_line,
true, base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
- CpuProfile* profile = profiles.StopProfiling("");
+ CpuProfile* profile = profiles.StopProfiling(id);
unsigned nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
CHECK_EQ(3u, nodeId - 1);
@@ -731,11 +749,11 @@ TEST(Issue51919) {
    base::Vector<char> title = v8::base::Vector<char>::New(16);
    base::SNPrintF(title, "%d", i);
    CHECK_EQ(CpuProfilingStatus::kStarted,
-             collection.StartProfiling(title.begin()));
+             collection.StartProfiling(title.begin()).status);
    titles[i] = title.begin();
  }
  CHECK_EQ(CpuProfilingStatus::kErrorTooManyProfilers,
-           collection.StartProfiling("maximum"));
+           collection.StartProfiling("maximum").status);
  for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i)
    i::DeleteArray(titles[i]);
}
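
Taken together, the test-profile-generator.cc hunks track an API change in CpuProfilesCollection: StartProfiling() now returns a CpuProfilingResult carrying a status and a ProfilerId, and StopProfiling() takes that id rather than the profile title. A minimal sketch (not part of the diff), using only calls that appear in the hunks above:

// Start by title, but keep the returned id; stopping is now id-based.
CpuProfilingResult result =
    profiles.StartProfiling("", {CpuProfilingMode::kLeafNodeLineNumbers});
CHECK_EQ(result.status, CpuProfilingStatus::kStarted);
// ... symbolize and record samples ...
CpuProfile* profile = profiles.StopProfiling(result.id);

// A DiscardedSamplesDelegate can now ask which profile it belongs to; the
// updated test asserts that GetId() returns a valid (non-zero) id.
class LoggingDelegate : public v8::DiscardedSamplesDelegate {
 public:
  void Notify() override { last_id_ = GetId(); }

 private:
  ProfilerId last_id_ = 0;
};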