author     Myles Borins <mylesborins@google.com>    2019-11-27 03:25:05 -0500
committer  Anna Henningsen <anna@addaleax.net>      2019-11-30 01:26:32 +0100
commit     ea2668d2db76beda812631d73fbcd164aee5fe02 (patch)
tree       5e755edc6a93fedfb4638955d13ee02cb15816f0 /deps
parent     08a40e20087ad0ba9ad5ee03a2c07a336b3cc5de (diff)
download   node-new-ea2668d2db76beda812631d73fbcd164aee5fe02.tar.gz
deps: patch V8 to 7.9.317.25
Refs: https://github.com/v8/v8/compare/7.9.317.23...7.9.317.25
PR-URL: https://github.com/nodejs/node/pull/30679
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/include/v8-version.h                           |  2
-rw-r--r--  deps/v8/src/builtins/builtins-wasm-gen.cc              | 27
-rw-r--r--  deps/v8/src/compiler/heap-refs.h                       |  1
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc                | 11
-rw-r--r--  deps/v8/src/compiler/map-inference.cc                  |  7
-rw-r--r--  deps/v8/src/compiler/map-inference.h                   |  1
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-1024758.js  | 37
7 files changed, 69 insertions, 17 deletions
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 8970c573ef..e11961895d 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 7
 #define V8_MINOR_VERSION 9
 #define V8_BUILD_NUMBER 317
-#define V8_PATCH_LEVEL 23
+#define V8_PATCH_LEVEL 25

 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 12270495c1..d6346fb9aa 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -121,18 +121,19 @@ TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) {
   TNode<Code> centry = LoadCEntryFromInstance(instance);

   TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+  TNode<Object> context = LoadContextFromInstance(instance);

   // TODO(aseemgarg): Use SMIs if possible for address and count
   TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>(
-      CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+      CallStub(AllocateHeapNumberDescriptor(), target, context));
   StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address));

   TNode<HeapNumber> count_heap = UncheckedCast<HeapNumber>(
-      CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+      CallStub(AllocateHeapNumberDescriptor(), target, context));
   StoreHeapNumberValue(count_heap, ChangeUint32ToFloat64(count));

   TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
-      Runtime::kWasmAtomicNotify, centry, NoContextConstant(), instance,
+      Runtime::kWasmAtomicNotify, centry, context, instance,
       address_heap, count_heap));
   ReturnRaw(SmiToInt32(result_smi));
 }
@@ -149,23 +150,24 @@ TF_BUILTIN(WasmI32AtomicWait, WasmBuiltinsAssembler) {
   TNode<Code> centry = LoadCEntryFromInstance(instance);

   TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+  TNode<Object> context = LoadContextFromInstance(instance);

   // TODO(aseemgarg): Use SMIs if possible for address and expected_value
   TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>(
-      CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+      CallStub(AllocateHeapNumberDescriptor(), target, context));
   StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address));

   TNode<HeapNumber> expected_value_heap = UncheckedCast<HeapNumber>(
-      CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+      CallStub(AllocateHeapNumberDescriptor(), target, context));
   StoreHeapNumberValue(expected_value_heap,
                        ChangeInt32ToFloat64(expected_value));

   TNode<HeapNumber> timeout_heap = UncheckedCast<HeapNumber>(
-      CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+      CallStub(AllocateHeapNumberDescriptor(), target, context));
   StoreHeapNumberValue(timeout_heap, timeout);

   TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
-      Runtime::kWasmI32AtomicWait, centry, NoContextConstant(), instance,
+      Runtime::kWasmI32AtomicWait, centry, context, instance,
       address_heap, expected_value_heap, timeout_heap));
   ReturnRaw(SmiToInt32(result_smi));
 }
@@ -184,28 +186,29 @@ TF_BUILTIN(WasmI64AtomicWait, WasmBuiltinsAssembler) {
   TNode<Code> centry = LoadCEntryFromInstance(instance);

   TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+  TNode<Object> context = LoadContextFromInstance(instance);

   // TODO(aseemgarg): Use SMIs if possible for address and expected_value
   TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>(
-      CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+      CallStub(AllocateHeapNumberDescriptor(), target, context));
   StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address));

   TNode<HeapNumber> expected_value_high_heap = UncheckedCast<HeapNumber>(
-      CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+      CallStub(AllocateHeapNumberDescriptor(), target, context));
   StoreHeapNumberValue(expected_value_high_heap,
                        ChangeUint32ToFloat64(expected_value_high));

   TNode<HeapNumber> expected_value_low_heap = UncheckedCast<HeapNumber>(
-      CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+      CallStub(AllocateHeapNumberDescriptor(), target, context));
   StoreHeapNumberValue(expected_value_low_heap,
                        ChangeUint32ToFloat64(expected_value_low));

   TNode<HeapNumber> timeout_heap = UncheckedCast<HeapNumber>(
-      CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+      CallStub(AllocateHeapNumberDescriptor(), target, context));
   StoreHeapNumberValue(timeout_heap, timeout);

   TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
-      Runtime::kWasmI64AtomicWait, centry, NoContextConstant(), instance,
+      Runtime::kWasmI64AtomicWait, centry, context, instance,
       address_heap, expected_value_high_heap, expected_value_low_heap,
       timeout_heap));
   ReturnRaw(SmiToInt32(result_smi));
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index c6322ebe69..f08e49832e 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -389,6 +389,7 @@ class ContextRef : public HeapObjectRef {
   V(JSFunction, object_function)    \
   V(JSFunction, promise_function)   \
   V(JSFunction, promise_then)       \
+  V(JSFunction, regexp_function)    \
   V(JSFunction, string_function)    \
   V(JSFunction, symbol_function)    \
   V(JSGlobalObject, global_object)  \
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index d400fa2673..b86b1e6baf 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -7098,11 +7098,14 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
   Node* control = NodeProperties::GetControlInput(node);
   Node* regexp = NodeProperties::GetValueInput(node, 1);

+  // Only the initial JSRegExp map is valid here, since the following lastIndex
+  // check as well as the lowered builtin call rely on a known location of the
+  // lastIndex field.
+  Handle<Map> regexp_initial_map =
+      native_context().regexp_function().initial_map().object();
+
   MapInference inference(broker(), regexp, effect);
-  if (!inference.HaveMaps() ||
-      !inference.AllOfInstanceTypes(InstanceTypeChecker::IsJSRegExp)) {
-    return inference.NoChange();
-  }
+  if (!inference.Is(regexp_initial_map)) return inference.NoChange();
   MapHandles const& regexp_maps = inference.GetMaps();

   ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index 1e2434f4ae..6ce036aa0b 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -91,6 +91,13 @@ MapHandles const& MapInference::GetMaps() {
   return maps_;
 }

+bool MapInference::Is(Handle<Map> expected_map) {
+  if (!HaveMaps()) return false;
+  const MapHandles& maps = GetMaps();
+  if (maps.size() != 1) return false;
+  return maps[0].equals(expected_map);
+}
+
 void MapInference::InsertMapChecks(JSGraph* jsgraph, Node** effect,
                                    Node* control,
                                    const FeedbackSource& feedback) {
diff --git a/deps/v8/src/compiler/map-inference.h b/deps/v8/src/compiler/map-inference.h
index acba2eb0f2..498b6bc15e 100644
--- a/deps/v8/src/compiler/map-inference.h
+++ b/deps/v8/src/compiler/map-inference.h
@@ -55,6 +55,7 @@ class MapInference {
   V8_WARN_UNUSED_RESULT MapHandles const& GetMaps();
   V8_WARN_UNUSED_RESULT bool AllOfInstanceTypes(
       std::function<bool(InstanceType)> f);
+  V8_WARN_UNUSED_RESULT bool Is(Handle<Map> expected_map);

   // These methods provide a guard.
   //
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1024758.js b/deps/v8/test/mjsunit/regress/regress-crbug-1024758.js
new file mode 100644
index 0000000000..d6f77ee0f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1024758.js
@@ -0,0 +1,37 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function f() {
+  return r.test("abc");
+}
+
+function to_dict(o) {
+  r.a = 42;
+  r.b = 42;
+  delete r.a;
+}
+
+function to_fast(o) {
+  const obj = {};
+  const obj2 = {};
+  delete o.a;
+  obj.__proto__ = o;
+  obj[0] = 1;
+  obj.__proto__ = obj2;
+  delete obj[0];
+  return o;
+}
+
+// Shrink the instance size by first transitioning to dictionary properties,
+// then back to fast properties.
+const r = /./;
+to_dict(r);
+to_fast(r);
+
+%PrepareFunctionForOptimization(f);
+assertTrue(f());
+%OptimizeFunctionOnNextCall(f);
+assertTrue(f());
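
The compiler-side core of this patch is the new MapInference::Is() helper: the RegExp.prototype.test reduction now fires only when feedback is monomorphic on exactly one map and that map is the initial JSRegExp map. Below is a minimal standalone sketch of that shape of check, using stand-in types rather than V8 internals (Map and IsExactly here are hypothetical placeholders, not V8 APIs):

// Illustrative only -- stand-in types, not V8 code.
#include <cassert>
#include <vector>

struct Map { int id; };  // placeholder for a hidden class (v8::internal::Map)

// Mirrors the shape of MapInference::Is(): there must be map feedback, it
// must be exactly one map, and that map must be the expected one.
bool IsExactly(const std::vector<Map>& inferred, const Map& expected) {
  if (inferred.empty()) return false;      // no map feedback at all
  if (inferred.size() != 1) return false;  // polymorphic: bail out
  return inferred[0].id == expected.id;    // must be the initial RegExp map
}

int main() {
  Map initial_regexp_map{1};
  Map shrunk_regexp_map{2};  // same instance type, different object layout

  // Monomorphic on the initial map: the reduction is safe.
  assert(IsExactly({initial_regexp_map}, initial_regexp_map));

  // A regexp that went to dictionary properties and back keeps its JSRegExp
  // instance type but not its initial map, so the reducer must not fire.
  assert(!IsExactly({shrunk_regexp_map}, initial_regexp_map));
  return 0;
}

The second assertion corresponds to the regression test above: the old check only required the JSRegExp instance type, which still holds after the instance has been shrunk, so the lowered code could read lastIndex from a stale offset.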