Diffstat (limited to 'deps/v8/src/compiler/js-native-context-specialization.cc')
 -rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 1466
 1 file changed, 760 insertions(+), 706 deletions(-)
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index e975867645..b6336828e5 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -13,6 +13,7 @@
#include "src/compiler/access-info.h"
#include "src/compiler/allocation-builder-inl.h"
#include "src/compiler/allocation-builder.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/frame-states.h"
#include "src/compiler/graph-assembler.h"
@@ -59,15 +60,15 @@ bool HasOnlyJSArrayMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) {
JSNativeContextSpecialization::JSNativeContextSpecialization(
Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker, Flags flags,
- CompilationDependencies* dependencies, Zone* zone, Zone* shared_zone)
+ Zone* zone, Zone* shared_zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
broker_(broker),
flags_(flags),
- global_object_(broker->target_native_context().global_object().object()),
+ global_object_(
+ broker->target_native_context().global_object(broker).object()),
global_proxy_(
- broker->target_native_context().global_proxy_object().object()),
- dependencies_(dependencies),
+ broker->target_native_context().global_proxy_object(broker).object()),
zone_(zone),
shared_zone_(shared_zone),
type_cache_(TypeCache::Get()),
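The hunks below repeat one mechanical pattern: accessors on heap-object Refs now take the JSHeapBroker as an explicit argument, and the reducer no longer stores `dependencies_`. A minimal standalone sketch of that calling convention, with hypothetical simplified types rather than V8's real classes:

    #include <cstdio>

    struct JSHeapBroker {};  // stand-in; the real broker canonicalizes heap handles

    struct MapRef {
      bool stable;
      bool is_stable() const { return stable; }
    };

    struct JSFunctionRef {
      MapRef map_;
      // After the patch, the broker is threaded through every accessor
      // instead of being cached per-Ref.
      MapRef map(JSHeapBroker*) const { return map_; }
    };

    int main() {
      JSHeapBroker broker;
      JSFunctionRef fn{MapRef{true}};
      // Call sites change from fn.map().is_stable() to:
      std::printf("stable: %d\n", fn.map(&broker).is_stable());
    }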
@@ -262,9 +263,9 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
DCHECK(shared.is_compiled());
int register_count =
shared.internal_formal_parameter_count_without_receiver() +
- shared.GetBytecodeArray().register_count();
- MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
- AllocationBuilder ab(jsgraph(), effect, control);
+ shared.GetBytecodeArray(broker()).register_count();
+ MapRef fixed_array_map = broker()->fixed_array_map();
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
return NoChange();
}
@@ -531,15 +532,15 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
return NoChange();
}
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
- MapRef function_map = function.map();
- HeapObjectRef function_prototype = function_map.prototype();
+ MapRef function_map = function.map(broker());
+ HeapObjectRef function_prototype = function_map.prototype(broker());
// We can constant-fold the super constructor access if the
// {function}s map is stable, i.e. we can use a code dependency
// to guard against [[Prototype]] changes of {function}.
if (function_map.is_stable()) {
dependencies()->DependOnStableMap(function_map);
- Node* value = jsgraph()->Constant(function_prototype);
+ Node* value = jsgraph()->Constant(function_prototype, broker());
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -579,11 +580,11 @@ JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct(
}
JSFunctionRef this_function_ref = m.Ref(broker()).AsJSFunction();
- MapRef function_map = this_function_ref.map();
- HeapObjectRef current = function_map.prototype();
+ MapRef function_map = this_function_ref.map(broker());
+ HeapObjectRef current = function_map.prototype(broker());
// The uppermost JSFunction on the class hierarchy (above it, there can be
// other JSObjects, e.g., Proxies).
- base::Optional<JSObjectRef> last_function;
+ OptionalJSObjectRef last_function;
Node* return_value;
Node* ctor_or_instance;
@@ -597,16 +598,19 @@ JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct(
JSFunctionRef current_function = current.AsJSFunction();
// If there are class fields, bail out. TODO(v8:13091): Handle them here.
- if (current_function.shared().requires_instance_members_initializer()) {
+ if (current_function.shared(broker())
+ .requires_instance_members_initializer()) {
return NoChange();
}
// If there are private methods, bail out. TODO(v8:13091): Handle them here.
- if (current_function.context().scope_info().ClassScopeHasPrivateBrand()) {
+ if (current_function.context(broker())
+ .scope_info(broker())
+ .ClassScopeHasPrivateBrand()) {
return NoChange();
}
- FunctionKind kind = current_function.shared().kind();
+ FunctionKind kind = current_function.shared(broker()).kind();
if (kind != FunctionKind::kDefaultDerivedConstructor) {
// The hierarchy walk will end here; this is the last chance to bail out
@@ -620,7 +624,7 @@ JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct(
return_value = jsgraph()->BooleanConstant(true);
// Generate a builtin call for creating the instance.
- Node* constructor = jsgraph()->Constant(current_function);
+ Node* constructor = jsgraph()->Constant(current_function, broker());
// In the current FrameState setup, the two outputs of this bytecode are
// poked at indices slot(index(reg_2)) (boolean_output) and
@@ -650,13 +654,13 @@ JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct(
n.context(), new_frame_state, effect, control);
} else {
return_value = jsgraph()->BooleanConstant(false);
- ctor_or_instance = jsgraph()->Constant(current_function);
+ ctor_or_instance = jsgraph()->Constant(current_function, broker());
}
break;
}
// Keep walking up the class tree.
- current = current_function.map().prototype();
+ current = current_function.map(broker()).prototype(broker());
}
dependencies()->DependOnStablePrototypeChain(
@@ -699,7 +703,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Check if the right hand side is a known {receiver}, or
// we have feedback from the InstanceOfIC.
- base::Optional<JSObjectRef> receiver;
+ OptionalJSObjectRef receiver;
HeapObjectMatcher m(constructor);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSObject()) {
receiver = m.Ref(broker()).AsJSObject();
@@ -714,10 +718,10 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
if (!receiver.has_value()) return NoChange();
- MapRef receiver_map = receiver->map();
- NameRef name = MakeRef(broker(), isolate()->factory()->has_instance_symbol());
- PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- receiver_map, name, AccessMode::kLoad, dependencies());
+ MapRef receiver_map = receiver->map(broker());
+ NameRef name = broker()->has_instance_symbol();
+ PropertyAccessInfo access_info =
+ broker()->GetPropertyAccessInfo(receiver_map, name, AccessMode::kLoad);
// TODO(v8:11457) Support dictionary mode holders here.
if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) {
@@ -725,7 +729,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
access_info.RecordDependencies(dependencies());
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
if (access_info.IsNotFound()) {
// If there's no @@hasInstance handler, the OrdinaryHasInstance operation
@@ -750,14 +754,14 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
if (access_info.IsFastDataConstant()) {
- base::Optional<JSObjectRef> holder = access_info.holder();
+ OptionalJSObjectRef holder = access_info.holder();
bool found_on_proto = holder.has_value();
JSObjectRef holder_ref = found_on_proto ? holder.value() : receiver.value();
- base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
- access_info.field_representation(), access_info.field_index(),
+ OptionalObjectRef constant = holder_ref.GetOwnFastDataProperty(
+ broker(), access_info.field_representation(), access_info.field_index(),
dependencies());
if (!constant.has_value() || !constant->IsHeapObject() ||
- !constant->AsHeapObject().map().is_callable()) {
+ !constant->AsHeapObject().map(broker()).is_callable()) {
return NoChange();
}
@@ -786,7 +790,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
0, frame_state, ContinuationFrameStateMode::LAZY);
// Call the @@hasInstance handler.
- Node* target = jsgraph()->Constant(*constant);
+ Node* target = jsgraph()->Constant(*constant, broker());
Node* feedback = jsgraph()->UndefinedConstant();
// Value inputs plus context, frame state, effect, control.
static_assert(JSCallNode::ArityForArgc(1) + 4 == 8);
@@ -846,17 +850,17 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
all = false;
break;
}
- HeapObjectRef map_prototype = map.prototype();
+ HeapObjectRef map_prototype = map.prototype(broker());
if (map_prototype.equals(prototype)) {
none = false;
break;
}
- map = map_prototype.map();
+ map = map_prototype.map(broker());
// TODO(v8:11457) Support dictionary mode prototypes here.
if (!map.is_stable() || map.is_dictionary_map()) {
return kMayBeInPrototypeChain;
}
- if (map.oddball_type() == OddballType::kNull) {
+ if (map.oddball_type(broker()) == OddballType::kNull) {
all = false;
break;
}
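The walk above follows map.prototype links, answering kIsInPrototypeChain only when every inferred map reaches {prototype}, kIsNotInPrototypeChain only when every walk bottoms out at null first, and giving up on unstable or dictionary-mode maps. A standalone single-chain sketch under those simplifications (hypothetical types, not V8's):

    #include <cstdio>

    enum Result { kIsInPrototypeChain, kIsNotInPrototypeChain, kMayBeInPrototypeChain };

    struct Object {
      const Object* prototype;  // nullptr models the null prototype
      bool stable_map;          // unstable maps make the walk unreliable
    };

    Result Infer(const Object* receiver, const Object* prototype) {
      for (const Object* cur = receiver->prototype;; cur = cur->prototype) {
        if (cur == prototype) return kIsInPrototypeChain;
        if (cur == nullptr) return kIsNotInPrototypeChain;
        if (!cur->stable_map) return kMayBeInPrototypeChain;
      }
    }

    int main() {
      Object proto{nullptr, true};
      Object obj{&proto, true};
      std::printf("%d\n", Infer(&obj, &proto));  // 0 == kIsInPrototypeChain
    }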
@@ -866,7 +870,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
if (!all && !none) return kMayBeInPrototypeChain;
{
- base::Optional<JSObjectRef> last_prototype;
+ OptionalJSObjectRef last_prototype;
if (all) {
// We don't need to protect the full chain if we found the prototype, we
// can stop at {prototype}. In fact we could stop at the one before
@@ -874,7 +878,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
// might be a different object each time, so it's much simpler to include
// {prototype}. That does, however, mean that we must check {prototype}'s
// map stability.
- if (!prototype.map().is_stable()) return kMayBeInPrototypeChain;
+ if (!prototype.map(broker()).is_stable()) return kMayBeInPrototypeChain;
last_prototype = prototype.AsJSObject();
}
WhereToStart start = result == NodeProperties::kUnreliableMaps
@@ -930,7 +934,8 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
NodeProperties::ReplaceValueInput(node, object,
JSInstanceOfNode::LeftIndex());
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(function.bound_target_function()),
+ node,
+ jsgraph()->Constant(function.bound_target_function(broker()), broker()),
JSInstanceOfNode::RightIndex());
node->InsertInput(zone(), JSInstanceOfNode::FeedbackVectorIndex(),
feedback);
@@ -945,14 +950,14 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// TODO(neis): Remove the has_prototype_slot condition once the broker is
// always enabled.
- if (!function.map().has_prototype_slot() ||
- !function.has_instance_prototype(dependencies()) ||
- function.PrototypeRequiresRuntimeLookup(dependencies())) {
+ if (!function.map(broker()).has_prototype_slot() ||
+ !function.has_instance_prototype(broker()) ||
+ function.PrototypeRequiresRuntimeLookup(broker())) {
return NoChange();
}
ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function);
- Node* prototype_constant = jsgraph()->Constant(prototype);
+ Node* prototype_constant = jsgraph()->Constant(prototype, broker());
// Lower the {node} to JSHasInPrototypeChain.
NodeProperties::ReplaceValueInput(node, object, 0);
@@ -977,7 +982,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
if (!m.HasResolvedValue() ||
- !m.Ref(broker()).equals(native_context().promise_function())) {
+ !m.Ref(broker()).equals(native_context().promise_function(broker()))) {
return NoChange();
}
@@ -1015,13 +1020,11 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
// Compute property access info for "then" on {resolution}.
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), graph()->zone());
for (const MapRef& map : resolution_maps) {
access_infos.push_back(broker()->GetPropertyAccessInfo(
- map, MakeRef(broker(), isolate()->factory()->then_string()),
- AccessMode::kLoad, dependencies()));
+ map, broker()->then_string(), AccessMode::kLoad));
}
PropertyAccessInfo access_info =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
@@ -1080,14 +1083,14 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* lookup_start_object, Node* receiver, Node* value,
NameRef const& name, AccessMode access_mode, Node* key,
PropertyCellRef const& property_cell, Node* effect) {
- if (!property_cell.Cache()) {
+ if (!property_cell.Cache(broker())) {
TRACE_BROKER_MISSING(broker(), "usable data for " << property_cell);
return NoChange();
}
- ObjectRef property_cell_value = property_cell.value();
+ ObjectRef property_cell_value = property_cell.value(broker());
if (property_cell_value.IsHeapObject() &&
- property_cell_value.AsHeapObject().map().oddball_type() ==
+ property_cell_value.AsHeapObject().map(broker()).oddball_type(broker()) ==
OddballType::kHole) {
// The property cell is no longer valid.
return NoChange();
@@ -1115,7 +1118,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
} else if (property_cell_type == PropertyCellType::kConstantType) {
// We rely on stability further below.
if (property_cell_value.IsHeapObject() &&
- !property_cell_value.AsHeapObject().map().is_stable()) {
+ !property_cell_value.AsHeapObject().map(broker()).is_stable()) {
return NoChange();
}
}
@@ -1143,8 +1146,10 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
effect = graph()->NewNode(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
- ZoneHandleSet<Map>(
- native_context().global_proxy_object().map().object())),
+ ZoneHandleSet<Map>(native_context()
+ .global_proxy_object(broker())
+ .map(broker())
+ .object())),
lookup_start_object, effect, control);
}
@@ -1154,7 +1159,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
value = access_mode == AccessMode::kHas
? jsgraph()->TrueConstant()
- : jsgraph()->Constant(property_cell_value);
+ : jsgraph()->Constant(property_cell_value, broker());
} else {
// Record a code dependency on the cell if we can benefit from the
// additional feedback, or the global property is configurable (i.e.
@@ -1169,10 +1174,10 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
property_details.cell_type() == PropertyCellType::kUndefined) {
value = access_mode == AccessMode::kHas
? jsgraph()->TrueConstant()
- : jsgraph()->Constant(property_cell_value);
+ : jsgraph()->Constant(property_cell_value, broker());
DCHECK(!property_cell_value.IsHeapObject() ||
- property_cell_value.AsHeapObject().map().oddball_type() !=
- OddballType::kHole);
+ property_cell_value.AsHeapObject().map(broker()).oddball_type(
+ broker()) != OddballType::kHole);
} else {
DCHECK_NE(AccessMode::kHas, access_mode);
@@ -1190,8 +1195,9 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
representation = MachineRepresentation::kTaggedPointer;
} else {
MapRef property_cell_value_map =
- property_cell_value.AsHeapObject().map();
- property_cell_value_type = Type::For(property_cell_value_map);
+ property_cell_value.AsHeapObject().map(broker());
+ property_cell_value_type =
+ Type::For(property_cell_value_map, broker());
representation = MachineRepresentation::kTaggedPointer;
// We can only use the property cell value map for map check
@@ -1206,7 +1212,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
value = effect = graph()->NewNode(
simplified()->LoadField(ForPropertyCellValue(
representation, property_cell_value_type, map, name)),
- jsgraph()->Constant(property_cell), effect, control);
+ jsgraph()->Constant(property_cell, broker()), effect, control);
}
}
} else if (access_mode == AccessMode::kStore) {
@@ -1217,9 +1223,9 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// Record a code dependency on the cell, and just deoptimize if the new
// value doesn't match the previous value stored inside the cell.
dependencies()->DependOnGlobalProperty(property_cell);
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(), value,
- jsgraph()->Constant(property_cell_value));
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(), value,
+ jsgraph()->Constant(property_cell_value, broker()));
effect = graph()->NewNode(
simplified()->CheckIf(DeoptimizeReason::kValueMismatch), check,
effect, control);
@@ -1234,7 +1240,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
MachineRepresentation representation = MachineRepresentation::kTagged;
if (property_cell_value.IsHeapObject()) {
MapRef property_cell_value_map =
- property_cell_value.AsHeapObject().map();
+ property_cell_value.AsHeapObject().map(broker());
dependencies()->DependOnStableMap(property_cell_value_map);
// Check that the {value} is a HeapObject.
@@ -1258,8 +1264,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
effect = graph()->NewNode(simplified()->StoreField(ForPropertyCellValue(
representation, property_cell_value_type,
MaybeHandle<Map>(), name)),
- jsgraph()->Constant(property_cell), value,
- effect, control);
+ jsgraph()->Constant(property_cell, broker()),
+ value, effect, control);
break;
}
case PropertyCellType::kMutable: {
@@ -1270,7 +1276,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
simplified()->StoreField(ForPropertyCellValue(
MachineRepresentation::kTagged, Type::NonInternal(),
MaybeHandle<Map>(), name)),
- jsgraph()->Constant(property_cell), value, effect, control);
+ jsgraph()->Constant(property_cell, broker()), value, effect,
+ control);
break;
}
case PropertyCellType::kUndefined:
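The store cases above differ only in how much the compiler trusts the cell: kConstant deoptimizes unless the stored value matches the cached one, kConstantType keeps the value's representation (and map) stable, and kMutable stores unconditionally. A hypothetical standalone model of that dispatch:

    #include <cstdio>

    enum class PropertyCellType { kConstant, kConstantType, kMutable };

    // Returns false where the generated code would deoptimize.
    bool Store(PropertyCellType type, int* cell, int value) {
      switch (type) {
        case PropertyCellType::kConstant:
          // Models CheckIf(DeoptimizeReason::kValueMismatch).
          return *cell == value;
        case PropertyCellType::kConstantType:
        case PropertyCellType::kMutable:
          *cell = value;  // representation checks omitted for kConstantType
          return true;
      }
      return false;
    }

    int main() {
      int cell = 42;
      std::printf("%d\n", Store(PropertyCellType::kConstant, &cell, 42));  // 1
      std::printf("%d\n", Store(PropertyCellType::kConstant, &cell, 7));   // 0
    }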
@@ -1297,7 +1304,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
GlobalAccessFeedback const& feedback = processed.AsGlobalAccess();
if (feedback.IsScriptContextSlot()) {
Effect effect = n.effect();
- Node* script_context = jsgraph()->Constant(feedback.script_context());
+ Node* script_context =
+ jsgraph()->Constant(feedback.script_context(), broker());
Node* value = effect =
graph()->NewNode(javascript()->LoadContext(0, feedback.slot_index(),
feedback.immutable()),
@@ -1305,7 +1313,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
ReplaceWithValue(node, value, effect);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, nullptr, nullptr, p.name(broker()),
+ return ReduceGlobalAccess(node, nullptr, nullptr, nullptr, p.name(),
AccessMode::kLoad, nullptr,
feedback.property_cell());
} else {
@@ -1329,14 +1337,15 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
if (feedback.immutable()) return NoChange();
Effect effect = n.effect();
Control control = n.control();
- Node* script_context = jsgraph()->Constant(feedback.script_context());
+ Node* script_context =
+ jsgraph()->Constant(feedback.script_context(), broker());
effect =
graph()->NewNode(javascript()->StoreContext(0, feedback.slot_index()),
value, script_context, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, nullptr, value, p.name(broker()),
+ return ReduceGlobalAccess(node, nullptr, nullptr, value, p.name(),
AccessMode::kStore, nullptr,
feedback.property_cell());
} else {
@@ -1397,7 +1406,7 @@ Reduction JSNativeContextSpecialization::ReduceMegaDOMPropertyAccess(
effect = graph()->NewNode(
simplified()->CheckIf(DeoptimizeReason::kWrongInstanceType), check,
effect, control);
- } else if (function_template_info.is_signature_undefined()) {
+ } else if (function_template_info.is_signature_undefined(broker())) {
// Signature is undefined, enough to check if the receiver is a JSApiObject.
Node* check =
graph()->NewNode(simplified()->NumberEqual(), receiver_instance_type,
@@ -1417,10 +1426,10 @@ Reduction JSNativeContextSpecialization::ReduceMegaDOMPropertyAccess(
CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
Node* inputs[8] = {jsgraph()->HeapConstant(callable.code()),
- jsgraph()->Constant(function_template_info),
+ jsgraph()->Constant(function_template_info, broker()),
jsgraph()->Constant(stack_arg_count),
lookup_start_object,
- jsgraph()->Constant(native_context()),
+ jsgraph()->Constant(native_context(), broker()),
frame_state,
effect,
control};
@@ -1494,10 +1503,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if (inferred_maps.size() == 1) {
MapRef lookup_start_object_map = inferred_maps[0];
if (lookup_start_object_map.equals(
- native_context().global_proxy_object().map())) {
- if (!native_context().GlobalIsDetached()) {
- base::Optional<PropertyCellRef> cell =
- native_context().global_object().GetPropertyCell(feedback.name());
+ native_context().global_proxy_object(broker()).map(broker()))) {
+ if (!native_context().GlobalIsDetached(broker())) {
+ OptionalPropertyCellRef cell =
+ native_context().global_object(broker()).GetPropertyCell(
+ broker(), feedback.name());
if (!cell.has_value()) return NoChange();
// Note: The map check generated by ReduceGlobalAccesses ensures that we
// will deopt when/if GlobalIsDetached becomes true.
@@ -1514,20 +1524,20 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
for (const MapRef& map : inferred_maps) {
if (map.is_deprecated()) continue;
- // TODO(v8:12547): Support writing to shared structs, which needs a write
- // barrier that calls Object::Share to ensure the RHS is shared.
- if (InstanceTypeChecker::IsJSSharedStruct(map.instance_type()) &&
+ // TODO(v8:12547): Support writing to objects in shared space, which need
+ // a write barrier that calls Object::Share to ensure the RHS is shared.
+ if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(
+ map.instance_type()) &&
access_mode == AccessMode::kStore) {
return NoChange();
}
- PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- map, feedback.name(), access_mode, dependencies());
+ PropertyAccessInfo access_info =
+ broker()->GetPropertyAccessInfo(map, feedback.name(), access_mode);
access_infos_for_feedback.push_back(access_info);
}
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), graph()->zone());
if (!access_info_factory.FinalizePropertyAccessInfos(
access_infos_for_feedback, access_mode, &access_infos)) {
return NoChange();
@@ -1547,7 +1557,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if_exceptions = &if_exception_nodes;
}
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
// Check for the monomorphic cases.
if (access_infos.size() == 1) {
@@ -1744,6 +1754,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// trimming.
return NoChange();
}
+
values.push_back(continuation->value());
effects.push_back(continuation->effect());
controls.push_back(continuation->control());
@@ -1797,29 +1808,27 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
JSLoadNamedNode n(node);
NamedAccess const& p = n.Parameters();
Node* const receiver = n.object();
- NameRef name = p.name(broker());
+ NameRef name = p.name();
// Check if we have a constant receiver.
HeapObjectMatcher m(receiver);
if (m.HasResolvedValue()) {
ObjectRef object = m.Ref(broker());
- if (object.IsJSFunction() &&
- name.equals(MakeRef(broker(), factory()->prototype_string()))) {
+ if (object.IsJSFunction() && name.equals(broker()->prototype_string())) {
// Optimize "prototype" property of functions.
JSFunctionRef function = object.AsJSFunction();
// TODO(neis): Remove the has_prototype_slot condition once the broker is
// always enabled.
- if (!function.map().has_prototype_slot() ||
- !function.has_instance_prototype(dependencies()) ||
- function.PrototypeRequiresRuntimeLookup(dependencies())) {
+ if (!function.map(broker()).has_prototype_slot() ||
+ !function.has_instance_prototype(broker()) ||
+ function.PrototypeRequiresRuntimeLookup(broker())) {
return NoChange();
}
ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function);
- Node* value = jsgraph()->Constant(prototype);
+ Node* value = jsgraph()->Constant(prototype, broker());
ReplaceWithValue(node, value);
return Replace(value);
- } else if (object.IsString() &&
- name.equals(MakeRef(broker(), factory()->length_string()))) {
+ } else if (object.IsString() && name.equals(broker()->length_string())) {
// Constant-fold "length" property on constant strings.
Node* value = jsgraph()->Constant(object.AsString().length());
ReplaceWithValue(node, value);
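Both branches above fold a named load on a compile-time-constant receiver into a constant node. A hypothetical standalone sketch of the "length" case, with std::optional standing in for HeapObjectMatcher resolving a constant:

    #include <cstdio>
    #include <optional>
    #include <string>

    // Returns the folded constant, or nullopt for NoChange().
    std::optional<int> FoldNamedLoad(
        const std::optional<std::string>& constant_receiver,
        const std::string& name) {
      if (constant_receiver.has_value() && name == "length") {
        return static_cast<int>(constant_receiver->size());
      }
      return std::nullopt;  // leave the generic LoadNamed in place
    }

    int main() {
      if (auto folded = FoldNamedLoad(std::string("hello"), "length")) {
        std::printf("folded to %d\n", *folded);  // folded to 5
      }
    }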
@@ -1836,7 +1845,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamedFromSuper(
Node* node) {
JSLoadNamedFromSuperNode n(node);
NamedAccess const& p = n.Parameters();
- NameRef name = p.name(broker());
+ NameRef name = p.name();
if (!p.feedback().IsValid()) return NoChange();
return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
@@ -1892,7 +1901,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
}
// Load iterator property operator
- NameRef iterator_symbol = MakeRef(broker(), factory()->iterator_symbol());
+ NameRef iterator_symbol = broker()->iterator_symbol();
const Operator* load_op =
javascript()->LoadNamed(iterator_symbol, p.loadFeedback());
@@ -2014,7 +2023,7 @@ Reduction JSNativeContextSpecialization::ReduceJSSetNamedProperty(Node* node) {
JSSetNamedPropertyNode n(node);
NamedAccess const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
- return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(),
+ return ReducePropertyAccess(node, nullptr, p.name(), n.value(),
FeedbackSource(p.feedback()), AccessMode::kStore);
}
@@ -2023,7 +2032,7 @@ Reduction JSNativeContextSpecialization::ReduceJSDefineNamedOwnProperty(
JSDefineNamedOwnPropertyNode n(node);
DefineNamedOwnPropertyParameters const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
- return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(),
+ return ReducePropertyAccess(node, nullptr, p.name(), n.value(),
FeedbackSource(p.feedback()),
AccessMode::kStoreInLiteral);
}
@@ -2058,8 +2067,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
namespace {
-base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
- Node* receiver) {
+OptionalJSTypedArrayRef GetTypedArrayConstant(JSHeapBroker* broker,
+ Node* receiver) {
HeapObjectMatcher m(receiver);
if (!m.HasResolvedValue()) return base::nullopt;
ObjectRef object = m.Ref(broker);
@@ -2073,14 +2082,15 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
void JSNativeContextSpecialization::RemoveImpossibleMaps(
Node* object, ZoneVector<MapRef>* maps) const {
- base::Optional<MapRef> root_map = InferRootMap(object);
+ OptionalMapRef root_map = InferRootMap(object);
if (root_map.has_value() && !root_map->is_abandoned_prototype_map()) {
- maps->erase(std::remove_if(maps->begin(), maps->end(),
- [root_map](const MapRef& map) {
- return map.is_abandoned_prototype_map() ||
- !map.FindRootMap().equals(*root_map);
- }),
- maps->end());
+ maps->erase(
+ std::remove_if(maps->begin(), maps->end(),
+ [root_map, this](const MapRef& map) {
+ return map.is_abandoned_prototype_map() ||
+ !map.FindRootMap(broker()).equals(*root_map);
+ }),
+ maps->end());
}
}
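RemoveImpossibleMaps above is the standard erase/remove_if idiom: drop every inferred map whose root map cannot match the object's. A standalone sketch with ints standing in for MapRefs and a hypothetical compatibility predicate:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> maps{1, 2, 3, 4, 5};
      const int root = 0;  // hypothetical: "compatible" means same parity as root
      maps.erase(std::remove_if(maps.begin(), maps.end(),
                                [root](int m) { return m % 2 != root; }),
                 maps.end());
      for (int m : maps) std::printf("%d ", m);  // 2 4
      std::printf("\n");
    }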
@@ -2146,8 +2156,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
refined_feedback.keyed_mode());
}
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), graph()->zone());
ZoneVector<ElementAccessInfo> access_infos(zone());
if (!access_info_factory.ComputeElementAccessInfos(refined_feedback,
&access_infos) ||
@@ -2155,6 +2164,16 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
return NoChange();
}
+ // Do not optimize AccessMode::kDefine for typed arrays.
+ if (access_mode == AccessMode::kDefine) {
+ for (const ElementAccessInfo& access_info : access_infos) {
+ if (IsTypedArrayOrRabGsabTypedArrayElementsKind(
+ access_info.elements_kind())) {
+ return NoChange();
+ }
+ }
+ }
+
// For holey stores or growing stores, we need to check that the prototype
// chain contains no setters for elements, and we need to guard those checks
// via code dependencies on the relevant prototype maps.
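The kDefine bailout added above is a plain scan over the computed element access infos, as sketched below with hypothetical enums in place of V8's ElementsKind:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    enum class ElementsKind { kPackedSmi, kHoley, kUint8Array, kFloat64Array };

    bool IsTypedArrayLikeKind(ElementsKind k) {
      return k == ElementsKind::kUint8Array || k == ElementsKind::kFloat64Array;
    }

    // Mirrors the new check: kDefine is not optimized if any access info
    // targets a typed-array (or RAB/GSAB typed-array) elements kind.
    bool CanOptimizeDefine(const std::vector<ElementsKind>& kinds) {
      return std::none_of(kinds.begin(), kinds.end(), IsTypedArrayLikeKind);
    }

    int main() {
      std::printf("%d\n", CanOptimizeDefine({ElementsKind::kPackedSmi}));   // 1
      std::printf("%d\n", CanOptimizeDefine({ElementsKind::kUint8Array}));  // 0
    }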
@@ -2172,13 +2191,14 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) ||
IsGrowStoreMode(feedback.keyed_mode().store_mode())) &&
!receiver_map.HasOnlyStablePrototypesWithFastElements(
- &prototype_maps)) {
+ broker(), &prototype_maps)) {
return NoChange();
}
- // TODO(v8:12547): Support writing to shared structs, which needs a
- // write barrier that calls Object::Share to ensure the RHS is shared.
- if (InstanceTypeChecker::IsJSSharedStruct(
+ // TODO(v8:12547): Support writing to objects in shared space, which
+ // need a write barrier that calls Object::Share to ensure the RHS is
+ // shared.
+ if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(
receiver_map.instance_type())) {
return NoChange();
}
@@ -2199,7 +2219,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Check for the monomorphic case.
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
if (access_infos.size() == 1) {
ElementAccessInfo access_info = access_infos.front();
@@ -2344,9 +2364,10 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
HeapObjectMatcher mreceiver(receiver);
HeapObjectRef receiver_ref = mreceiver.Ref(broker());
- if (receiver_ref.map().oddball_type() == OddballType::kHole ||
- receiver_ref.map().oddball_type() == OddballType::kNull ||
- receiver_ref.map().oddball_type() == OddballType::kUndefined ||
+ if (receiver_ref.map(broker()).oddball_type(broker()) == OddballType::kHole ||
+ receiver_ref.map(broker()).oddball_type(broker()) == OddballType::kNull ||
+ receiver_ref.map(broker()).oddball_type(broker()) ==
+ OddballType::kUndefined ||
// The 'in' operator throws a TypeError on primitive values.
(receiver_ref.IsString() && access_mode == AccessMode::kHas)) {
return NoChange();
@@ -2359,27 +2380,28 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
mkey.IsInRange(0.0, static_cast<double>(JSObject::kMaxElementIndex))) {
static_assert(JSObject::kMaxElementIndex <= kMaxUInt32);
const uint32_t index = static_cast<uint32_t>(mkey.ResolvedValue());
- base::Optional<ObjectRef> element;
+ OptionalObjectRef element;
if (receiver_ref.IsJSObject()) {
JSObjectRef jsobject_ref = receiver_ref.AsJSObject();
- base::Optional<FixedArrayBaseRef> elements =
- jsobject_ref.elements(kRelaxedLoad);
+ OptionalFixedArrayBaseRef elements =
+ jsobject_ref.elements(broker(), kRelaxedLoad);
if (elements.has_value()) {
- element = jsobject_ref.GetOwnConstantElement(*elements, index,
+ element = jsobject_ref.GetOwnConstantElement(broker(), *elements, index,
dependencies());
if (!element.has_value() && receiver_ref.IsJSArray()) {
// We didn't find a constant element, but if the receiver is a
// cow-array we can exploit the fact that any future write to the
// element will replace the whole elements storage.
- element = receiver_ref.AsJSArray().GetOwnCowElement(*elements, index);
+ element = receiver_ref.AsJSArray().GetOwnCowElement(broker(),
+ *elements, index);
if (element.has_value()) {
Node* actual_elements = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
receiver, effect, control);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
- actual_elements,
- jsgraph()->Constant(*elements));
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(), actual_elements,
+ jsgraph()->Constant(*elements, broker()));
effect = graph()->NewNode(
simplified()->CheckIf(
DeoptimizeReason::kCowArrayElementsChanged),
@@ -2388,13 +2410,14 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
}
}
} else if (receiver_ref.IsString()) {
- element = receiver_ref.AsString().GetCharAsStringOrUndefined(index);
+ element =
+ receiver_ref.AsString().GetCharAsStringOrUndefined(broker(), index);
}
if (element.has_value()) {
Node* value = access_mode == AccessMode::kHas
? jsgraph()->TrueConstant()
- : jsgraph()->Constant(*element);
+ : jsgraph()->Constant(*element, broker());
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -2420,7 +2443,7 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
}
Reduction JSNativeContextSpecialization::ReducePropertyAccess(
- Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
+ Node* node, Node* key, OptionalNameRef static_name, Node* value,
FeedbackSource const& source, AccessMode access_mode) {
DCHECK_EQ(key == nullptr, static_name.has_value());
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -2642,7 +2665,7 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
}
}
- Node* target = jsgraph()->Constant(constant);
+ Node* target = jsgraph()->Constant(constant, broker());
// Introduce the call to the getter function.
Node* value;
if (constant.IsJSFunction()) {
@@ -2660,7 +2683,7 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
}
Node* api_holder =
access_info.api_holder().has_value()
- ? jsgraph()->Constant(access_info.api_holder().value())
+ ? jsgraph()->Constant(access_info.api_holder().value(), broker())
: receiver;
value = InlineApiCall(receiver, api_holder, frame_state, nullptr, effect,
control, constant.AsFunctionTemplateInfo());
@@ -2682,7 +2705,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
ObjectRef constant = access_info.constant().value();
- Node* target = jsgraph()->Constant(constant);
+ Node* target = jsgraph()->Constant(constant, broker());
// Introduce the call to the setter function.
if (constant.IsJSFunction()) {
Node* feedback = jsgraph()->UndefinedConstant();
@@ -2695,7 +2718,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
} else {
Node* api_holder =
access_info.api_holder().has_value()
- ? jsgraph()->Constant(access_info.api_holder().value())
+ ? jsgraph()->Constant(access_info.api_holder().value(), broker())
: receiver;
InlineApiCall(receiver, api_holder, frame_state, value, effect, control,
constant.AsFunctionTemplateInfo());
@@ -2715,12 +2738,13 @@ Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* api_holder, Node* frame_state, Node* value,
Node** effect, Node** control,
FunctionTemplateInfoRef const& function_template_info) {
- if (!function_template_info.call_code().has_value()) {
+ if (!function_template_info.call_code(broker()).has_value()) {
TRACE_BROKER_MISSING(broker(), "call code for function template info "
<< function_template_info);
return nullptr;
}
- CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
+ CallHandlerInfoRef call_handler_info =
+ *function_template_info.call_code(broker());
// Only setters have a value.
int const argc = value == nullptr ? 0 : 1;
@@ -2734,7 +2758,7 @@ Node* JSNativeContextSpecialization::InlineApiCall(
1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState);
- Node* data = jsgraph()->Constant(call_handler_info.data());
+ Node* data = jsgraph()->Constant(call_handler_info.data(broker()), broker());
ApiFunction function(call_handler_info.callback());
Node* function_reference =
graph()->NewNode(common()->ExternalConstant(ExternalReference::Create(
@@ -2742,7 +2766,7 @@ Node* JSNativeContextSpecialization::InlineApiCall(
Node* code = jsgraph()->HeapConstant(call_api_callback.code());
// Add CallApiCallbackStub's register argument as well.
- Node* context = jsgraph()->Constant(native_context());
+ Node* context = jsgraph()->Constant(native_context(), broker());
Node* inputs[11] = {code, function_reference, jsgraph()->Constant(argc),
data, api_holder, receiver};
int index = 6 + argc;
@@ -2766,7 +2790,7 @@ JSNativeContextSpecialization::BuildPropertyLoad(
Node* effect, Node* control, NameRef const& name,
ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) {
// Determine actual holder and perform prototype chain checks.
- base::Optional<JSObjectRef> holder = access_info.holder();
+ OptionalJSObjectRef holder = access_info.holder();
if (holder.has_value() && !access_info.HasDictionaryHolder()) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
@@ -2787,7 +2811,8 @@ JSNativeContextSpecialization::BuildPropertyLoad(
receiver, receiver_mode, lookup_start_object, context, frame_state,
&effect, &control, if_exceptions, access_info);
} else if (access_info.IsModuleExport()) {
- Node* cell = jsgraph()->Constant(access_info.constant().value().AsCell());
+ Node* cell =
+ jsgraph()->Constant(access_info.constant().value().AsCell(), broker());
value = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
@@ -2797,7 +2822,7 @@ JSNativeContextSpecialization::BuildPropertyLoad(
} else {
DCHECK(access_info.IsDataField() || access_info.IsFastDataConstant() ||
access_info.IsDictionaryProtoDataConstant());
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
if (access_info.IsDictionaryProtoDataConstant()) {
auto maybe_value =
access_builder.FoldLoadDictPrototypeConstant(access_info);
@@ -2821,7 +2846,7 @@ JSNativeContextSpecialization::BuildPropertyTest(
DCHECK(!access_info.HasDictionaryHolder());
// Determine actual holder and perform prototype chain checks.
- base::Optional<JSObjectRef> holder = access_info.holder();
+ OptionalJSObjectRef holder = access_info.holder();
if (holder.has_value()) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
@@ -2864,8 +2889,8 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* control, NameRef const& name, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info, AccessMode access_mode) {
// Determine actual holder and perform prototype chain checks.
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
- base::Optional<JSObjectRef> holder = access_info.holder();
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
+ OptionalJSObjectRef holder = access_info.holder();
if (holder.has_value()) {
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
DCHECK_NE(AccessMode::kDefine, access_mode);
@@ -2897,9 +2922,14 @@ JSNativeContextSpecialization::BuildPropertyStore(
AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer()),
storage, effect, control);
}
- bool store_to_existing_constant_field = access_info.IsFastDataConstant() &&
- access_mode == AccessMode::kStore &&
- !access_info.HasTransitionMap();
+ if (access_info.IsFastDataConstant() && access_mode == AccessMode::kStore &&
+ !access_info.HasTransitionMap()) {
+ Node* deoptimize = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kStoreToConstant),
+ jsgraph()->FalseConstant(), effect, control);
+ return ValueEffectControl(jsgraph()->UndefinedConstant(), deoptimize,
+ control);
+ }
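This hunk changes behavior: a non-transitioning AccessMode::kStore to a constant field previously emitted a SameValue check against the current value (the code removed further down in this diff) and only deoptimized on mismatch; it now deoptimizes unconditionally via CheckIf on a false constant. A hypothetical before/after model:

    #include <cstdio>

    enum class Outcome { kStored, kDeopt };

    Outcome StoreToConstantField(double old_value, double new_value,
                                 bool after_patch) {
      // After: CheckIf(DeoptimizeReason::kStoreToConstant, FalseConstant).
      if (after_patch) return Outcome::kDeopt;
      // Before: deopt only on mismatch ('==' stands in for SameValue,
      // which additionally distinguishes NaN and -0).
      return old_value == new_value ? Outcome::kStored : Outcome::kDeopt;
    }

    int main() {
      std::printf("%d\n", (int)StoreToConstantField(1.0, 1.0, false));  // 0
      std::printf("%d\n", (int)StoreToConstantField(1.0, 1.0, true));   // 1
    }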
FieldAccess field_access = {
kTaggedBase,
field_index.offset(),
@@ -2919,11 +2949,10 @@ JSNativeContextSpecialization::BuildPropertyStore(
effect, control);
if (access_info.HasTransitionMap()) {
// Allocate a HeapNumber for the new property.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(HeapNumber::kSize, AllocationType::kYoung,
Type::OtherInternal());
- a.Store(AccessBuilder::ForMap(),
- MakeRef(broker(), factory()->heap_number_map()));
+ a.Store(AccessBuilder::ForMap(), broker()->heap_number_map());
FieldAccess value_field_access = AccessBuilder::ForHeapNumberValue();
value_field_access.const_field_info = field_access.const_field_info;
a.Store(value_field_access, value);
@@ -2952,40 +2981,11 @@ JSNativeContextSpecialization::BuildPropertyStore(
field_access.name = MaybeHandle<Name>();
field_access.machine_type = MachineType::Float64();
}
- if (store_to_existing_constant_field) {
- DCHECK(!access_info.HasTransitionMap());
- // If the field is constant check that the value we are going
- // to store matches current value.
- Node* current_value = effect = graph()->NewNode(
- simplified()->LoadField(field_access), storage, effect, control);
-
- Node* check =
- graph()->NewNode(simplified()->SameValue(), current_value, value);
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
- effect, control);
- return ValueEffectControl(value, effect, control);
- }
break;
}
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- if (store_to_existing_constant_field) {
- DCHECK(!access_info.HasTransitionMap());
- // If the field is constant check that the value we are going
- // to store matches current value.
- Node* current_value = effect = graph()->NewNode(
- simplified()->LoadField(field_access), storage, effect, control);
-
- Node* check = graph()->NewNode(simplified()->SameValueNumbersOnly(),
- current_value, value);
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
- effect, control);
- return ValueEffectControl(value, effect, control);
- }
-
if (field_representation == MachineRepresentation::kTaggedSigned) {
value = effect = graph()->NewNode(
simplified()->CheckSmi(FeedbackSource()), value, effect, control);
@@ -2993,7 +2993,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
} else if (field_representation ==
MachineRepresentation::kTaggedPointer) {
- base::Optional<MapRef> field_map = access_info.field_map();
+ OptionalMapRef field_map = access_info.field_map();
if (field_map.has_value()) {
// Emit a map check for the value.
effect =
@@ -3028,12 +3028,12 @@ JSNativeContextSpecialization::BuildPropertyStore(
UNREACHABLE();
}
// Check if we need to perform a transitioning store.
- base::Optional<MapRef> transition_map = access_info.transition_map();
+ OptionalMapRef transition_map = access_info.transition_map();
if (transition_map.has_value()) {
// Check if we need to grow the properties backing store
// with this transitioning store.
MapRef transition_map_ref = transition_map.value();
- MapRef original_map = transition_map_ref.GetBackPointer().AsMap();
+ MapRef original_map = transition_map_ref.GetBackPointer(broker()).AsMap();
if (original_map.UnusedPropertyFields() == 0) {
DCHECK(!field_index.is_inobject());
@@ -3054,7 +3054,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
common()->BeginRegion(RegionObservability::kObservable), effect);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForMap()), receiver,
- jsgraph()->Constant(transition_map_ref), effect, control);
+ jsgraph()->Constant(transition_map_ref, broker()), effect, control);
effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
value, effect, control);
effect = graph()->NewNode(common()->FinishRegion(),
@@ -3079,7 +3079,6 @@ JSNativeContextSpecialization::ReduceJSDefineKeyedOwnPropertyInLiteral(
NumberMatcher mflags(n.flags());
CHECK(mflags.HasResolvedValue());
DefineKeyedOwnPropertyInLiteralFlags cflags(mflags.ResolvedValue());
- DCHECK(!(cflags & DefineKeyedOwnPropertyInLiteralFlag::kDontEnum));
if (cflags & DefineKeyedOwnPropertyInLiteralFlag::kSetFunctionName)
return NoChange();
@@ -3145,347 +3144,524 @@ JSNativeContextSpecialization::BuildElementAccess(
if (IsTypedArrayElementsKind(elements_kind) ||
IsRabGsabTypedArrayElementsKind(elements_kind)) {
- Node* buffer_or_receiver = receiver;
- Node* length;
- Node* base_pointer;
- Node* external_pointer;
-
- // Check if we can constant-fold information about the {receiver} (e.g.
- // for asm.js-like code patterns).
- base::Optional<JSTypedArrayRef> typed_array =
- GetTypedArrayConstant(broker(), receiver);
- if (typed_array.has_value() &&
- !IsRabGsabTypedArrayElementsKind(elements_kind)) {
- // TODO(v8:11111): Add support for rab/gsab here.
- length = jsgraph()->Constant(static_cast<double>(typed_array->length()));
+ return BuildElementAccessForTypedArrayOrRabGsabTypedArray(
+ receiver, index, value, effect, control, context, elements_kind,
+ keyed_mode);
+ }
- DCHECK(!typed_array->is_on_heap());
- // Load the (known) data pointer for the {receiver} and set {base_pointer}
- // and {external_pointer} to the values that will allow to generate typed
- // element accesses using the known data pointer.
- // The data pointer might be invalid if the {buffer} was detached,
- // so we need to make sure that any access is properly guarded.
- base_pointer = jsgraph()->ZeroConstant();
- external_pointer = jsgraph()->PointerConstant(typed_array->data_ptr());
- } else {
- // Load the {receiver}s length.
- JSGraphAssembler assembler(jsgraph_, zone(), BranchSemantics::kJS,
- [this](Node* n) { this->Revisit(n); });
- assembler.InitializeEffectControl(effect, control);
- length = assembler.TypedArrayLength(
- TNode<JSTypedArray>::UncheckedCast(receiver), {elements_kind},
- TNode<Context>::UncheckedCast(context));
- std::tie(effect, control) =
- ReleaseEffectAndControlFromAssembler(&assembler);
-
- // Load the base pointer for the {receiver}. This will always be Smi
- // zero unless we allow on-heap TypedArrays, which is only the case
- // for Chrome. Node and Electron both set this limit to 0. Setting
- // the base to Smi zero here allows the EffectControlLinearizer to
- // optimize away the tricky part of the access later.
- if (JSTypedArray::kMaxSizeInHeap == 0) {
- base_pointer = jsgraph()->ZeroConstant();
- } else {
- base_pointer = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSTypedArrayBasePointer()),
- receiver, effect, control);
- }
+ // Load the elements for the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
- // Load the external pointer for the {receiver}.
- external_pointer = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSTypedArrayExternalPointer()),
- receiver, effect, control);
- }
-
- // See if we can skip the detaching check.
- if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
- // Load the buffer for the {receiver}.
- Node* buffer =
- typed_array.has_value()
- ? jsgraph()->Constant(typed_array->buffer())
- : (effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control));
-
- // Deopt if the {buffer} was detached.
- // Note: A detached buffer leads to megamorphic feedback.
- Node* buffer_bit_field = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
- buffer, effect, control);
- Node* check = graph()->NewNode(
- simplified()->NumberEqual(),
- graph()->NewNode(
- simplified()->NumberBitwiseAnd(), buffer_bit_field,
- jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
- jsgraph()->ZeroConstant());
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached),
- check, effect, control);
-
- // Retain the {buffer} instead of {receiver} to reduce live ranges.
- buffer_or_receiver = buffer;
- }
-
- enum Situation { kBoundsCheckDone, kHandleOOB_SmiCheckDone };
- Situation situation;
- if ((keyed_mode.IsLoad() &&
- keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) ||
- (keyed_mode.IsStore() &&
- keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS)) {
- // Only check that the {index} is in SignedSmall range. We do the actual
- // bounds check below and just skip the property access if it's out of
- // bounds for the {receiver}.
- index = effect = graph()->NewNode(
- simplified()->CheckSmi(FeedbackSource()), index, effect, control);
+ // Don't try to store to a copy-on-write backing store (unless supported by
+ // the store mode).
+ if (IsAnyStore(keyed_mode.access_mode()) &&
+ IsSmiOrObjectElementsKind(elements_kind) &&
+ !IsCOWHandlingStoreMode(keyed_mode.store_mode())) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(factory()->fixed_array_map())),
+ elements, effect, control);
+ }
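The CheckMaps above guards stores against copy-on-write backing stores: if the elements still carry the COW array map rather than fixed_array_map, the store deoptimizes instead of mutating shared storage. A minimal hypothetical model:

    #include <cstdio>

    struct Elements {
      bool copy_on_write;  // models elements still having the COW map
    };

    // Models CheckMaps(fixed_array_map): true means the store may proceed.
    bool CheckNotCow(const Elements& e) { return !e.copy_on_write; }

    int main() {
      Elements plain{false}, cow{true};
      std::printf("%d %d\n", CheckNotCow(plain), CheckNotCow(cow));  // 1 0
    }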
- // Cast the {index} to Unsigned32 range, so that the bounds checks
- // below are performed on unsigned values, which means that all the
- // Negative32 values are treated as out-of-bounds.
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- situation = kHandleOOB_SmiCheckDone;
- } else {
- // Check that the {index} is in the valid range for the {receiver}.
- index = effect = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, length, effect, control);
- situation = kBoundsCheckDone;
+ // Check if the {receiver} is a JSArray.
+ bool receiver_is_jsarray = HasOnlyJSArrayMaps(broker(), receiver_maps);
+
+ // Load the length of the {receiver}.
+ Node* length = effect =
+ receiver_is_jsarray
+ ? graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayLength(elements_kind)),
+ receiver, effect, control)
+ : graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+
+ // Check if we might need to grow the {elements} backing store.
+ if (keyed_mode.IsStore() && IsGrowStoreMode(keyed_mode.store_mode())) {
+ // For growing stores we validate the {index} below.
+ } else if (keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ CanTreatHoleAsUndefined(receiver_maps)) {
+ // Check that the {index} is a valid array index; we do the actual
+ // bounds check below and just skip the store if it's out of
+ // bounds for the {receiver}.
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, jsgraph()->Constant(Smi::kMaxValue), effect, control);
+ } else {
+ // Check that the {index} is in the valid range for the {receiver}.
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, effect, control);
+ }
+
+ // Compute the element access.
+ Type element_type = Type::NonInternal();
+ MachineType element_machine_type = MachineType::AnyTagged();
+ if (IsDoubleElementsKind(elements_kind)) {
+ element_type = Type::Number();
+ element_machine_type = MachineType::Float64();
+ } else if (IsSmiElementsKind(elements_kind)) {
+ element_type = Type::SignedSmall();
+ element_machine_type = MachineType::TaggedSigned();
+ }
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type,
+ kFullWriteBarrier};
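The element type and machine representation above follow directly from the elements kind. A compact hypothetical table of that mapping:

    #include <cstdio>

    enum class Kind { kSmi, kDouble, kObject };
    struct Access { const char* type; const char* machine; };

    Access ForKind(Kind k) {
      switch (k) {
        case Kind::kDouble: return {"Number", "Float64"};
        case Kind::kSmi:    return {"SignedSmall", "TaggedSigned"};
        default:            return {"NonInternal", "AnyTagged"};
      }
    }

    int main() {
      Access a = ForKind(Kind::kDouble);
      std::printf("%s / %s\n", a.type, a.machine);  // Number / Float64
    }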
+
+ // Access the actual element.
+ if (keyed_mode.access_mode() == AccessMode::kLoad) {
+ // Compute the real element access type, which includes the hole in case
+ // of holey backing stores.
+ if (IsHoleyElementsKind(elements_kind)) {
+ element_access.type =
+ Type::Union(element_type, Type::Hole(), graph()->zone());
+ }
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ element_access.machine_type = MachineType::AnyTagged();
}
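The load and has paths below then decide what a hole becomes: mapped to undefined when CanTreatHoleAsUndefined holds, otherwise a deopt (CheckNotTaggedHole, or CheckFloat64Hole for double arrays). A hypothetical standalone model of that decision:

    #include <cstdio>

    enum class Value { kNumber, kHole, kUndefined, kDeopt };

    Value LoadHoley(Value raw, bool can_treat_hole_as_undefined) {
      if (raw != Value::kHole) return raw;
      return can_treat_hole_as_undefined
                 ? Value::kUndefined  // ConvertTaggedHoleToUndefined
                 : Value::kDeopt;     // CheckNotTaggedHole deopt
    }

    int main() {
      std::printf("%d\n", (int)LoadHoley(Value::kHole, true));   // 2
      std::printf("%d\n", (int)LoadHoley(Value::kHole, false));  // 3
    }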
- // Access the actual element.
- ExternalArrayType external_array_type =
- GetArrayTypeFromElementsKind(elements_kind);
- switch (keyed_mode.access_mode()) {
- case AccessMode::kLoad: {
- // Check if we can return undefined for out-of-bounds loads.
- if (situation == kHandleOOB_SmiCheckDone) {
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue;
- {
- // Do a real bounds check against {length}. This is in order to
- // protect against a potential typer bug leading to the elimination
- // of the NumberLessThan above.
- index = etrue = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(),
- CheckBoundsFlag::kConvertStringAndMinusZero |
- CheckBoundsFlag::kAbortOnOutOfBounds),
- index, length, etrue, if_true);
-
- // Perform the actual load
- vtrue = etrue = graph()->NewNode(
- simplified()->LoadTypedElement(external_array_type),
- buffer_or_receiver, base_pointer, external_pointer, index,
- etrue, if_true);
- }
+ // Check if we can return undefined for out-of-bounds loads.
+ if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ CanTreatHoleAsUndefined(receiver_maps)) {
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ // Do a real bounds check against {length}. This is in order to
+ // protect against a potential typer bug leading to the elimination of
+ // the NumberLessThan above.
+ index = etrue = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero |
+ CheckBoundsFlag::kAbortOnOutOfBounds),
+ index, length, etrue, if_true);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // Materialize undefined for out-of-bounds loads.
- vfalse = jsgraph()->UndefinedConstant();
- }
+ // Perform the actual load
+ vtrue = etrue =
+ graph()->NewNode(simplified()->LoadElement(element_access),
+ elements, index, etrue, if_true);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect =
- graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ // Handle loading from holey backing stores correctly, by either
+ // mapping the hole to undefined if possible, or deoptimizing
+ // otherwise.
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ // Turn the hole into undefined.
+ vtrue = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
+ vtrue);
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
+ // Return the signaling NaN hole directly if all uses are
+ // truncating.
+ vtrue = etrue = graph()->NewNode(
+ simplified()->CheckFloat64Hole(
+ CheckFloat64HoleMode::kAllowReturnHole, FeedbackSource()),
+ vtrue, etrue, if_true);
+ }
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // Materialize undefined for out-of-bounds loads.
+ vfalse = jsgraph()->UndefinedConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
vtrue, vfalse, control);
+ } else {
+ // Perform the actual load.
+ value = effect =
+ graph()->NewNode(simplified()->LoadElement(element_access), elements,
+ index, effect, control);
+
+ // Handle loading from holey backing stores correctly, by either mapping
+ // the hole to undefined if possible, or deoptimizing otherwise.
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ // Check if we are allowed to turn the hole into undefined.
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
+ // Turn the hole into undefined.
+ value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
+ value);
} else {
- // Perform the actual load.
- DCHECK_EQ(kBoundsCheckDone, situation);
- value = effect = graph()->NewNode(
- simplified()->LoadTypedElement(external_array_type),
- buffer_or_receiver, base_pointer, external_pointer, index, effect,
- control);
+ // Bail out if we see the hole.
+ value = effect = graph()->NewNode(simplified()->CheckNotTaggedHole(),
+ value, effect, control);
+ }
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
+ // Perform the hole check on the result.
+ CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
+ // Check if we are allowed to return the hole directly.
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
+ // Return the signaling NaN hole directly if all uses are
+ // truncating.
+ mode = CheckFloat64HoleMode::kAllowReturnHole;
}
- break;
- }
- case AccessMode::kStoreInLiteral:
- case AccessMode::kDefine:
- UNREACHABLE();
- case AccessMode::kStore: {
- // Ensure that the {value} is actually a Number or an Oddball,
- // and truncate it to a Number appropriately.
value = effect = graph()->NewNode(
- simplified()->SpeculativeToNumber(
- NumberOperationHint::kNumberOrOddball, FeedbackSource()),
- value, effect, control);
+ simplified()->CheckFloat64Hole(mode, FeedbackSource()), value,
+ effect, control);
+ }
+ }
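
(Aside: a minimal standalone C++ sketch of the hole policy shared by both load paths above. The hole is modeled as a sentinel and undefined as nullopt; names are illustrative, not V8 APIs. When CanTreatHoleAsUndefined fails, the real lowering deoptimizes via CheckNotTaggedHole instead of asserting.)

    #include <cassert>
    #include <cstddef>
    #include <optional>
    #include <vector>

    struct Tagged {
      bool is_hole;
      int value;  // stand-in for an arbitrary tagged JS value
    };

    // Maps the hole to undefined (nullopt) when allowed; otherwise the
    // real compiler emits a deoptimizing CheckNotTaggedHole.
    std::optional<int> LoadHoleyElement(const std::vector<Tagged>& elements,
                                        std::size_t index,
                                        bool can_treat_hole_as_undefined) {
      const Tagged& e = elements.at(index);
      if (e.is_hole) {
        assert(can_treat_hole_as_undefined && "deopt: unexpected hole");
        return std::nullopt;  // ConvertTaggedHoleToUndefined
      }
      return e.value;
    }
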
+ } else if (keyed_mode.access_mode() == AccessMode::kHas) {
+    // For packed arrays with the NoElementsProtector valid, a bounds check
+    // is equivalent to HasProperty.
+ value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan(
+ NumberOperationHint::kSignedSmall),
+ index, length, effect, control);
+ if (IsHoleyElementsKind(elements_kind)) {
+ // If the index is in bounds, do a load and hole check.
- // Introduce the appropriate truncation for {value}. Currently we
- // only need to do this for ClamedUint8Array {receiver}s, as the
- // other truncations are implicit in the StoreTypedElement, but we
- // might want to change that at some point.
- if (external_array_type == kExternalUint8ClampedArray) {
- value = graph()->NewNode(simplified()->NumberToUint8Clamped(), value);
- }
+ Node* branch = graph()->NewNode(common()->Branch(), value, control);
- if (situation == kHandleOOB_SmiCheckDone) {
- // We have to detect OOB stores and handle them without deopt (by
- // simply not performing them).
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- {
- // Do a real bounds check against {length}. This is in order to
- // protect against a potential typer bug leading to the elimination
- // of the NumberLessThan above.
- index = etrue = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(),
- CheckBoundsFlag::kConvertStringAndMinusZero |
- CheckBoundsFlag::kAbortOnOutOfBounds),
- index, length, etrue, if_true);
-
- // Perform the actual store.
- etrue = graph()->NewNode(
- simplified()->StoreTypedElement(external_array_type),
- buffer_or_receiver, base_pointer, external_pointer, index,
- value, etrue, if_true);
- }
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = jsgraph()->FalseConstant();
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- {
- // Just ignore the out-of-bounds write.
- }
+ element_access.type =
+ Type::Union(element_type, Type::Hole(), graph()->zone());
+
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ element_access.machine_type = MachineType::AnyTagged();
+ }
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect =
- graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* checked = etrue = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, etrue, if_true);
+
+ Node* element = etrue =
+ graph()->NewNode(simplified()->LoadElement(element_access), elements,
+ checked, etrue, if_true);
+
+ Node* vtrue;
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+          // Check whether {element} is the hole.
+ vtrue = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
} else {
- // Perform the actual store
- DCHECK_EQ(kBoundsCheckDone, situation);
- effect = graph()->NewNode(
- simplified()->StoreTypedElement(external_array_type),
- buffer_or_receiver, base_pointer, external_pointer, index, value,
- effect, control);
+ vtrue =
+ graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
}
- break;
- }
- case AccessMode::kHas:
- if (situation == kHandleOOB_SmiCheckDone) {
- value = effect =
- graph()->NewNode(simplified()->SpeculativeNumberLessThan(
- NumberOperationHint::kSignedSmall),
- index, length, effect, control);
+
+ // has == !IsHole
+ vtrue = graph()->NewNode(simplified()->BooleanNot(), vtrue);
+ } else {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ // Bailout if we see the hole.
+ etrue = graph()->NewNode(simplified()->CheckNotTaggedHole(), element,
+ etrue, if_true);
} else {
- DCHECK_EQ(kBoundsCheckDone, situation);
- // For has-property on a typed array, all we need is a bounds check.
- value = jsgraph()->TrueConstant();
+ etrue = graph()->NewNode(
+ simplified()->CheckFloat64Hole(
+ CheckFloat64HoleMode::kNeverReturnHole, FeedbackSource()),
+ element, etrue, if_true);
}
- break;
+
+ vtrue = jsgraph()->TrueConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
}
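
(The kHas lowering above amounts to: out of bounds is false without deoptimizing, in bounds is "not the hole". A hedged sketch with the hole modeled as nullopt:)

    #include <cstddef>
    #include <optional>
    #include <vector>

    bool HasElement(const std::vector<std::optional<int>>& elements,
                    std::size_t index) {
      if (index >= elements.size()) return false;  // the vfalse arm above
      return elements[index].has_value();          // has == !IsHole
    }
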
} else {
- // Load the elements for the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
+ DCHECK(keyed_mode.access_mode() == AccessMode::kStore ||
+ keyed_mode.access_mode() == AccessMode::kStoreInLiteral ||
+ keyed_mode.access_mode() == AccessMode::kDefine);
+
+ if (IsSmiElementsKind(elements_kind)) {
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(FeedbackSource()), value, effect, control);
+ } else if (IsDoubleElementsKind(elements_kind)) {
+ value = effect = graph()->NewNode(
+ simplified()->CheckNumber(FeedbackSource()), value, effect, control);
+      // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
- // Don't try to store to a copy-on-write backing store (unless supported by
- // the store mode).
- if (IsAnyStore(keyed_mode.access_mode()) &&
- IsSmiOrObjectElementsKind(elements_kind) &&
- !IsCOWHandlingStoreMode(keyed_mode.store_mode())) {
- effect = graph()->NewNode(
- simplified()->CheckMaps(
- CheckMapsFlag::kNone,
- ZoneHandleSet<Map>(factory()->fixed_array_map())),
+ // Ensure that copy-on-write backing store is writable.
+ if (IsSmiOrObjectElementsKind(elements_kind) &&
+ keyed_mode.store_mode() == STORE_HANDLE_COW) {
+ elements = effect =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
+ elements, effect, control);
+ } else if (IsGrowStoreMode(keyed_mode.store_mode())) {
+ // Determine the length of the {elements} backing store.
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
elements, effect, control);
- }
- // Check if the {receiver} is a JSArray.
- bool receiver_is_jsarray = HasOnlyJSArrayMaps(broker(), receiver_maps);
-
- // Load the length of the {receiver}.
- Node* length = effect =
- receiver_is_jsarray
- ? graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(elements_kind)),
- receiver, effect, control)
- : graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
-
- // Check if we might need to grow the {elements} backing store.
- if (keyed_mode.IsStore() && IsGrowStoreMode(keyed_mode.store_mode())) {
- // For growing stores we validate the {index} below.
- } else if (keyed_mode.IsLoad() &&
- keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
- CanTreatHoleAsUndefined(receiver_maps)) {
- // Check that the {index} is a valid array index, we do the actual
- // bounds check below and just skip the store below if it's out of
- // bounds for the {receiver}.
- index = effect = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, jsgraph()->Constant(Smi::kMaxValue), effect, control);
- } else {
- // Check that the {index} is in the valid range for the {receiver}.
+ // Validate the {index} depending on holeyness:
+ //
+ // For HOLEY_*_ELEMENTS the {index} must not exceed the {elements}
+ // backing store capacity plus the maximum allowed gap, as otherwise
+ // the (potential) backing store growth would normalize and thus
+ // the elements kind of the {receiver} would change to slow mode.
+ //
+ // For PACKED_*_ELEMENTS the {index} must be within the range
+ // [0,length+1[ to be valid. In case {index} equals {length},
+ // the {receiver} will be extended, but kept packed.
+ Node* limit =
+ IsHoleyElementsKind(elements_kind)
+ ? graph()->NewNode(simplified()->NumberAdd(), elements_length,
+ jsgraph()->Constant(JSObject::kMaxGap))
+ : graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->OneConstant());
index = effect = graph()->NewNode(
simplified()->CheckBounds(
FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, length, effect, control);
+ index, limit, effect, control);
+
+ // Grow {elements} backing store if necessary.
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(elements_kind)
+ ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, FeedbackSource()), receiver,
+ elements, index, elements_length, effect, control);
+
+ // If we didn't grow {elements}, it might still be COW, in which case we
+ // copy it now.
+ if (IsSmiOrObjectElementsKind(elements_kind) &&
+ keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) {
+ elements = effect =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, effect, control);
+ }
+
+ // Also update the "length" property if {receiver} is a JSArray.
+ if (receiver_is_jsarray) {
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ {
+        // We don't need to do anything; the {index} is within
+        // the valid bounds for the JSArray {receiver}.
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
+ Node* new_length = graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant());
+ efalse = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSArrayLength(elements_kind)),
+ receiver, new_length, efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ }
}
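
(A sketch of the grow-mode rules above as straight-line code. kMaxGap mirrors JSObject::kMaxGap, 1024 in current V8; treat the exact value as an assumption, and note that an index beyond the limit deopts rather than transitioning to dictionary mode.)

    #include <cstddef>

    constexpr std::size_t kMaxGap = 1024;  // assumed JSObject::kMaxGap

    // Holey kinds may address up to capacity + kMaxGap before growth
    // would normalize the backing store; packed kinds may only append.
    bool IndexValidForGrowingStore(std::size_t index, std::size_t length,
                                   std::size_t capacity, bool is_holey) {
      std::size_t limit = is_holey ? capacity + kMaxGap : length + 1;
      return index < limit;
    }

    // The JSArray length update merged in above: bump only on append.
    std::size_t NewJSArrayLength(std::size_t index, std::size_t old_length) {
      return index < old_length ? old_length : index + 1;
    }
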
- // Compute the element access.
- Type element_type = Type::NonInternal();
- MachineType element_machine_type = MachineType::AnyTagged();
- if (IsDoubleElementsKind(elements_kind)) {
- element_type = Type::Number();
- element_machine_type = MachineType::Float64();
- } else if (IsSmiElementsKind(elements_kind)) {
- element_type = Type::SignedSmall();
- element_machine_type = MachineType::TaggedSigned();
+ // Perform the actual element access.
+ effect = graph()->NewNode(simplified()->StoreElement(element_access),
+ elements, index, value, effect, control);
+ }
+
+ return ValueEffectControl(value, effect, control);
+}
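
(For the double-array paths above, CheckFloat64Hole distinguishes the hole from ordinary NaNs by an exact bit pattern. A standalone sketch; the constant mirrors V8's kHoleNanInt64 and should be treated as an assumption here.)

    #include <cstdint>
    #include <cstring>

    constexpr uint64_t kHoleNanInt64 = 0xFFF7FFFFFFF7FFFFull;  // assumed

    // Arithmetic NaNs have other payloads, so only the sentinel that the
    // runtime writes for holes matches bit-for-bit.
    bool IsFloat64Hole(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);  // well-defined bit cast
      return bits == kHoleNanInt64;
    }
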
+
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::
+ BuildElementAccessForTypedArrayOrRabGsabTypedArray(
+ Node* receiver, Node* index, Node* value, Node* effect, Node* control,
+ Node* context, ElementsKind elements_kind,
+ KeyedAccessMode const& keyed_mode) {
+ DCHECK(IsTypedArrayElementsKind(elements_kind) ||
+ IsRabGsabTypedArrayElementsKind(elements_kind));
+ DCHECK_IMPLIES(IsRabGsabTypedArrayElementsKind(elements_kind),
+ v8_flags.turbo_rab_gsab);
+  // AccessMode::kDefine is not handled here. The caller is expected to skip
+  // this optimization.
+ DCHECK(keyed_mode.access_mode() != AccessMode::kDefine);
+
+ Node* buffer_or_receiver = receiver;
+ Node* length;
+ Node* base_pointer;
+ Node* external_pointer;
+
+ // Check if we can constant-fold information about the {receiver} (e.g.
+ // for asm.js-like code patterns).
+ OptionalJSTypedArrayRef typed_array =
+ GetTypedArrayConstant(broker(), receiver);
+ if (typed_array.has_value() &&
+ // TODO(v8:11111): Add support for rab/gsab here.
+ !IsRabGsabTypedArrayElementsKind(elements_kind)) {
+ if (typed_array->map(broker()).elements_kind() != elements_kind) {
+ // This case should never be reachable at runtime.
+ JSGraphAssembler assembler(broker(), jsgraph_, zone(),
+ BranchSemantics::kJS,
+ [this](Node* n) { this->Revisit(n); });
+ assembler.InitializeEffectControl(effect, control);
+ assembler.Unreachable();
+ ReleaseEffectAndControlFromAssembler(&assembler);
+ Node* dead = jsgraph_->Dead();
+ return ValueEffectControl{dead, dead, dead};
+ } else {
+ length = jsgraph()->Constant(static_cast<double>(typed_array->length()));
+
+ DCHECK(!typed_array->is_on_heap());
+ // Load the (known) data pointer for the {receiver} and set
+ // {base_pointer} and {external_pointer} to the values that will allow
+ // to generate typed element accesses using the known data pointer. The
+ // data pointer might be invalid if the {buffer} was detached, so we
+ // need to make sure that any access is properly guarded.
+ base_pointer = jsgraph()->ZeroConstant();
+ external_pointer = jsgraph()->PointerConstant(typed_array->data_ptr());
+ }
+ } else {
+    // Load the {receiver}'s length.
+ JSGraphAssembler assembler(broker(), jsgraph_, zone(), BranchSemantics::kJS,
+ [this](Node* n) { this->Revisit(n); });
+ assembler.InitializeEffectControl(effect, control);
+ length = assembler.TypedArrayLength(
+ TNode<JSTypedArray>::UncheckedCast(receiver), {elements_kind},
+ TNode<Context>::UncheckedCast(context));
+ std::tie(effect, control) =
+ ReleaseEffectAndControlFromAssembler(&assembler);
+
+ // Load the base pointer for the {receiver}. This will always be Smi
+ // zero unless we allow on-heap TypedArrays, which is only the case
+ // for Chrome. Node and Electron both set this limit to 0. Setting
+ // the base to Smi zero here allows the EffectControlLinearizer to
+ // optimize away the tricky part of the access later.
+ if (JSTypedArray::kMaxSizeInHeap == 0) {
+ base_pointer = jsgraph()->ZeroConstant();
+ } else {
+ base_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSTypedArrayBasePointer()),
+ receiver, effect, control);
}
- ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
- element_type, element_machine_type,
- kFullWriteBarrier};
- // Access the actual element.
- if (keyed_mode.access_mode() == AccessMode::kLoad) {
- // Compute the real element access type, which includes the hole in case
- // of holey backing stores.
- if (IsHoleyElementsKind(elements_kind)) {
- element_access.type =
- Type::Union(element_type, Type::Hole(), graph()->zone());
- }
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- element_access.machine_type = MachineType::AnyTagged();
- }
+ // Load the external pointer for the {receiver}.
+ external_pointer = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSTypedArrayExternalPointer()),
+ receiver, effect, control);
+ }
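
(A sketch of the addressing scheme the base/external pointer pair above feeds into, assuming off-heap arrays use a Smi-zero base as described in the comment:)

    #include <cstddef>
    #include <cstdint>

    // base is the on-heap object address, or zero for off-heap backing
    // stores; external carries the remaining pointer/offset, so their sum
    // plus the scaled index is the element address.
    uintptr_t ElementAddress(uintptr_t base, uintptr_t external,
                             std::size_t index, std::size_t element_size) {
      return base + external + index * element_size;
    }
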
+ // See if we can skip the detaching check.
+ if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
+ // Load the buffer for the {receiver}.
+ Node* buffer =
+ typed_array.has_value()
+ ? jsgraph()->Constant(typed_array->buffer(broker()), broker())
+ : (effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control));
+
+ // Deopt if the {buffer} was detached.
+ // Note: A detached buffer leads to megamorphic feedback.
+ Node* buffer_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bit_field,
+ jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
+ jsgraph()->ZeroConstant());
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached), check,
+ effect, control);
+
+ // Retain the {buffer} instead of {receiver} to reduce live ranges.
+ buffer_or_receiver = buffer;
+ }
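
(The detach guard above reduces to one masked bit test followed by a deoptimizing CheckIf. A minimal sketch of the predicate; the bit position is illustrative rather than the real JSArrayBuffer layout:)

    #include <cstdint>

    constexpr uint32_t kWasDetachedBitMask = 1u << 0;  // assumed position

    bool BufferWasDetached(uint32_t buffer_bit_field) {
      return (buffer_bit_field & kWasDetachedBitMask) != 0;  // deopt if set
    }
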
+
+ enum Situation { kBoundsCheckDone, kHandleOOB_SmiAndRangeCheckComputed };
+ Situation situation;
+ TNode<BoolT> check;
+ if ((keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) ||
+ (keyed_mode.IsStore() &&
+ keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS)) {
+ // Only check that the {index} is in SignedSmall range. We do the actual
+ // bounds check below and just skip the property access if it's out of
+ // bounds for the {receiver}.
+ index = effect = graph()->NewNode(simplified()->CheckSmi(FeedbackSource()),
+ index, effect, control);
+ TNode<Boolean> compare_length = TNode<Boolean>::UncheckedCast(
+ graph()->NewNode(simplified()->NumberLessThan(), index, length));
+
+ JSGraphAssembler assembler(broker(), jsgraph_, zone(), BranchSemantics::kJS,
+ [this](Node* n) { this->Revisit(n); });
+ assembler.InitializeEffectControl(effect, control);
+ TNode<BoolT> check_less_than_length =
+ assembler.EnterMachineGraph<BoolT>(compare_length, UseInfo::Bool());
+ TNode<Int32T> index_int32 = assembler.EnterMachineGraph<Int32T>(
+ TNode<Smi>::UncheckedCast(index), UseInfo::TruncatingWord32());
+ TNode<BoolT> check_non_negative =
+ assembler.Int32LessThanOrEqual(assembler.Int32Constant(0), index_int32);
+ check = TNode<BoolT>::UncheckedCast(
+ assembler.Word32And(check_less_than_length, check_non_negative));
+ std::tie(effect, control) =
+ ReleaseEffectAndControlFromAssembler(&assembler);
+
+ situation = kHandleOOB_SmiAndRangeCheckComputed;
+ } else {
+ // Check that the {index} is in the valid range for the {receiver}.
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, effect, control);
+ situation = kBoundsCheckDone;
+ }
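
(The two Situation values differ in who owns the bounds check: kBoundsCheckDone deoptimizes inside CheckBounds, while kHandleOOB_SmiAndRangeCheckComputed materializes the condition as a boolean and lets each access site branch on it. The computed condition is just this predicate:)

    #include <cstdint>

    // 0 <= index && index < length, matching the Word32And of the two
    // checks built in the machine graph above. A single unsigned compare,
    // uint32_t(index) < uint32_t(length), is the usual equivalent form.
    bool InBounds(int32_t index, int32_t length) {
      return 0 <= index && index < length;
    }
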
+
+ // Access the actual element.
+ ExternalArrayType external_array_type =
+ GetArrayTypeFromElementsKind(elements_kind);
+ switch (keyed_mode.access_mode()) {
+ case AccessMode::kLoad: {
// Check if we can return undefined for out-of-bounds loads.
- if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
- CanTreatHoleAsUndefined(receiver_maps)) {
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
+ if (situation == kHandleOOB_SmiAndRangeCheckComputed) {
+ DCHECK_NE(check, nullptr);
+ Node* branch = graph()->NewNode(
+ common()->Branch(BranchHint::kTrue, BranchSemantics::kMachine),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
Node* vtrue;
{
// Do a real bounds check against {length}. This is in order to
- // protect against a potential typer bug leading to the elimination of
- // the NumberLessThan above.
+ // protect against a potential typer bug leading to the elimination
+ // of the NumberLessThan above.
index = etrue =
graph()->NewNode(simplified()->CheckBounds(
FeedbackSource(),
@@ -3494,26 +3670,10 @@ JSNativeContextSpecialization::BuildElementAccess(
index, length, etrue, if_true);
// Perform the actual load
- vtrue = etrue =
- graph()->NewNode(simplified()->LoadElement(element_access),
- elements, index, etrue, if_true);
-
- // Handle loading from holey backing stores correctly, by either
- // mapping the hole to undefined if possible, or deoptimizing
- // otherwise.
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- // Turn the hole into undefined.
- vtrue = graph()->NewNode(
- simplified()->ConvertTaggedHoleToUndefined(), vtrue);
- } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
- // Return the signaling NaN hole directly if all uses are
- // truncating.
- vtrue = etrue = graph()->NewNode(
- simplified()->CheckFloat64Hole(
- CheckFloat64HoleMode::kAllowReturnHole, FeedbackSource()),
- vtrue, etrue, if_true);
- }
+ vtrue = etrue = graph()->NewNode(
+ simplified()->LoadTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, etrue,
+ if_true);
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -3532,214 +3692,109 @@ JSNativeContextSpecialization::BuildElementAccess(
vtrue, vfalse, control);
} else {
// Perform the actual load.
- value = effect =
- graph()->NewNode(simplified()->LoadElement(element_access),
- elements, index, effect, control);
-
- // Handle loading from holey backing stores correctly, by either mapping
- // the hole to undefined if possible, or deoptimizing otherwise.
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- // Check if we are allowed to turn the hole into undefined.
- if (CanTreatHoleAsUndefined(receiver_maps)) {
- // Turn the hole into undefined.
- value = graph()->NewNode(
- simplified()->ConvertTaggedHoleToUndefined(), value);
- } else {
- // Bailout if we see the hole.
- value = effect = graph()->NewNode(
- simplified()->CheckNotTaggedHole(), value, effect, control);
- }
- } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
- // Perform the hole check on the result.
- CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
- // Check if we are allowed to return the hole directly.
- if (CanTreatHoleAsUndefined(receiver_maps)) {
- // Return the signaling NaN hole directly if all uses are
- // truncating.
- mode = CheckFloat64HoleMode::kAllowReturnHole;
- }
- value = effect = graph()->NewNode(
- simplified()->CheckFloat64Hole(mode, FeedbackSource()), value,
- effect, control);
- }
+ DCHECK_EQ(kBoundsCheckDone, situation);
+ value = effect = graph()->NewNode(
+ simplified()->LoadTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, effect,
+ control);
+ }
+ break;
+ }
+ case AccessMode::kStoreInLiteral:
+ case AccessMode::kDefine:
+ UNREACHABLE();
+ case AccessMode::kStore: {
+ if (external_array_type == kExternalBigInt64Array ||
+ external_array_type == kExternalBigUint64Array) {
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeToBigInt(BigIntOperationHint::kBigInt,
+ FeedbackSource()),
+ value, effect, control);
+ } else {
+ // Ensure that the {value} is actually a Number or an Oddball,
+ // and truncate it to a Number appropriately.
+ // TODO(panq): Eliminate the deopt loop introduced by the speculation.
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, FeedbackSource()),
+ value, effect, control);
}
- } else if (keyed_mode.access_mode() == AccessMode::kHas) {
- // For packed arrays with NoElementsProctector valid, a bound check
- // is equivalent to HasProperty.
- value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan(
- NumberOperationHint::kSignedSmall),
- index, length, effect, control);
- if (IsHoleyElementsKind(elements_kind)) {
- // If the index is in bounds, do a load and hole check.
-
- Node* branch = graph()->NewNode(common()->Branch(), value, control);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse = jsgraph()->FalseConstant();
- element_access.type =
- Type::Union(element_type, Type::Hole(), graph()->zone());
+ // Introduce the appropriate truncation for {value}. Currently we
+      // only need to do this for Uint8ClampedArray {receiver}s, as the
+ // other truncations are implicit in the StoreTypedElement, but we
+ // might want to change that at some point.
+ if (external_array_type == kExternalUint8ClampedArray) {
+ value = graph()->NewNode(simplified()->NumberToUint8Clamped(), value);
+ }
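
(NumberToUint8Clamped implements the ECMAScript ToUint8Clamp conversion: clamp to [0, 255], round to nearest with ties to even, NaN to 0. A hedged sketch assuming the default FE_TONEAREST rounding mode:)

    #include <cmath>
    #include <cstdint>

    uint8_t ToUint8Clamped(double value) {
      if (std::isnan(value) || value <= 0.0) return 0;  // NaN and negatives
      if (value >= 255.0) return 255;
      // nearbyint rounds half to even under the default rounding mode,
      // matching ToUint8Clamp's tie-breaking rule.
      return static_cast<uint8_t>(std::nearbyint(value));
    }
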
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- element_access.machine_type = MachineType::AnyTagged();
- }
+ if (situation == kHandleOOB_SmiAndRangeCheckComputed) {
+ // We have to detect OOB stores and handle them without deopt (by
+ // simply not performing them).
+ DCHECK_NE(check, nullptr);
+ Node* branch = graph()->NewNode(
+ common()->Branch(BranchHint::kTrue, BranchSemantics::kMachine),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
+ {
+ // Do a real bounds check against {length}. This is in order to
+ // protect against a potential typer bug leading to the elimination
+ // of the NumberLessThan above.
+ index = etrue =
+ graph()->NewNode(simplified()->CheckBounds(
+ FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero |
+ CheckBoundsFlag::kAbortOnOutOfBounds),
+ index, length, etrue, if_true);
- Node* checked = etrue = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, length, etrue, if_true);
-
- Node* element = etrue =
- graph()->NewNode(simplified()->LoadElement(element_access),
- elements, checked, etrue, if_true);
-
- Node* vtrue;
- if (CanTreatHoleAsUndefined(receiver_maps)) {
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- // Check if we are allowed to turn the hole into undefined.
- // Turn the hole into undefined.
- vtrue = graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant());
- } else {
- vtrue =
- graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
- }
-
- // has == !IsHole
- vtrue = graph()->NewNode(simplified()->BooleanNot(), vtrue);
- } else {
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- // Bailout if we see the hole.
- etrue = graph()->NewNode(simplified()->CheckNotTaggedHole(),
- element, etrue, if_true);
- } else {
- etrue = graph()->NewNode(
- simplified()->CheckFloat64Hole(
- CheckFloat64HoleMode::kNeverReturnHole, FeedbackSource()),
- element, etrue, if_true);
- }
+ // Perform the actual store.
+ etrue = graph()->NewNode(
+ simplified()->StoreTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, value,
+ etrue, if_true);
+ }
- vtrue = jsgraph()->TrueConstant();
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Just ignore the out-of-bounds write.
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect =
graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
- }
- } else {
- DCHECK(keyed_mode.access_mode() == AccessMode::kStore ||
- keyed_mode.access_mode() == AccessMode::kStoreInLiteral ||
- keyed_mode.access_mode() == AccessMode::kDefine);
-
- if (IsSmiElementsKind(elements_kind)) {
- value = effect = graph()->NewNode(
- simplified()->CheckSmi(FeedbackSource()), value, effect, control);
- } else if (IsDoubleElementsKind(elements_kind)) {
- value = effect =
- graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value,
- effect, control);
- // Make sure we do not store signalling NaNs into double arrays.
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
- }
-
- // Ensure that copy-on-write backing store is writable.
- if (IsSmiOrObjectElementsKind(elements_kind) &&
- keyed_mode.store_mode() == STORE_HANDLE_COW) {
- elements = effect =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, effect, control);
- } else if (IsGrowStoreMode(keyed_mode.store_mode())) {
- // Determine the length of the {elements} backing store.
- Node* elements_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
-
- // Validate the {index} depending on holeyness:
- //
- // For HOLEY_*_ELEMENTS the {index} must not exceed the {elements}
- // backing store capacity plus the maximum allowed gap, as otherwise
- // the (potential) backing store growth would normalize and thus
- // the elements kind of the {receiver} would change to slow mode.
- //
- // For PACKED_*_ELEMENTS the {index} must be within the range
- // [0,length+1[ to be valid. In case {index} equals {length},
- // the {receiver} will be extended, but kept packed.
- Node* limit =
- IsHoleyElementsKind(elements_kind)
- ? graph()->NewNode(simplified()->NumberAdd(), elements_length,
- jsgraph()->Constant(JSObject::kMaxGap))
- : graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->OneConstant());
- index = effect = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, limit, effect, control);
-
- // Grow {elements} backing store if necessary.
- GrowFastElementsMode mode =
- IsDoubleElementsKind(elements_kind)
- ? GrowFastElementsMode::kDoubleElements
- : GrowFastElementsMode::kSmiOrObjectElements;
- elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode, FeedbackSource()),
- receiver, elements, index, elements_length, effect, control);
-
- // If we didn't grow {elements}, it might still be COW, in which case we
- // copy it now.
- if (IsSmiOrObjectElementsKind(elements_kind) &&
- keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) {
- elements = effect =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, effect, control);
- }
-
- // Also update the "length" property if {receiver} is a JSArray.
- if (receiver_is_jsarray) {
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- {
- // We don't need to do anything, the {index} is within
- // the valid bounds for the JSArray {receiver}.
- }
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- {
- // Update the JSArray::length field. Since this is observable,
- // there must be no other check after this.
- Node* new_length = graph()->NewNode(
- simplified()->NumberAdd(), index, jsgraph()->OneConstant());
- efalse = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(elements_kind)),
- receiver, new_length, efalse, if_false);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect =
- graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- }
+ } else {
+ // Perform the actual store
+ DCHECK_EQ(kBoundsCheckDone, situation);
+ effect = graph()->NewNode(
+ simplified()->StoreTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, value,
+ effect, control);
}
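
(Both store arms above end with the same observable behavior; under the OOB-handling store mode an out-of-bounds write is simply dropped, with no deopt and no exception. Sketch:)

    #include <cstddef>
    #include <vector>

    // Mirrors the if_true/if_false arms of the OOB store branch above.
    void StoreIgnoreOOB(std::vector<double>& backing, std::size_t index,
                        double value) {
      if (index >= backing.size()) return;  // just ignore the OOB write
      backing[index] = value;
    }
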
-
- // Perform the actual element access.
- effect = graph()->NewNode(simplified()->StoreElement(element_access),
- elements, index, value, effect, control);
+ break;
}
+ case AccessMode::kHas:
+ if (situation == kHandleOOB_SmiAndRangeCheckComputed) {
+ DCHECK_NE(check, nullptr);
+ JSGraphAssembler assembler(broker(), jsgraph_, zone(),
+ BranchSemantics::kJS,
+ [this](Node* n) { this->Revisit(n); });
+ assembler.InitializeEffectControl(effect, control);
+ value = assembler.MachineSelectIf<Boolean>(check)
+ .Then([&]() { return assembler.TrueConstant(); })
+ .Else([&]() { return assembler.FalseConstant(); })
+ .ExpectTrue()
+ .Value();
+ std::tie(effect, control) =
+ ReleaseEffectAndControlFromAssembler(&assembler);
+ } else {
+ DCHECK_EQ(kBoundsCheckDone, situation);
+ // For has-property on a typed array, all we need is a bounds check.
+ value = jsgraph()->TrueConstant();
+ }
+ break;
}
return ValueEffectControl(value, effect, control);
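
(The kHas case needs no element load at all: with the range check already computed, HasProperty on a typed array is that boolean, or constant true once kBoundsCheckDone guarantees in-bounds. As a plain predicate:)

    #include <cstddef>

    bool TypedArrayHasElement(std::size_t index, std::size_t length) {
      return index < length;  // no load, no hole to consider
    }
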
@@ -3856,7 +3911,7 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
new_length_and_hash, effect, control);
// Allocate and initialize the new properties.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(PropertyArray::SizeFor(new_length), AllocationType::kYoung,
Type::OtherInternal());
a.Store(AccessBuilder::ForMap(), jsgraph()->PropertyArrayMapConstant());
@@ -3875,8 +3930,8 @@ Node* JSNativeContextSpecialization::BuildCheckEqualsName(NameRef const& name,
Operator const* const op =
name.IsSymbol() ? simplified()->CheckEqualsSymbol()
: simplified()->CheckEqualsInternalizedString();
- return graph()->NewNode(op, jsgraph()->Constant(name), value, effect,
- control);
+ return graph()->NewNode(op, jsgraph()->Constant(name, broker()), value,
+ effect, control);
}
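
(BuildCheckEqualsName leans on canonicalization: symbols and internalized strings are unique per content, so the emitted check is a cheap identity comparison that deopts on mismatch. A sketch of the interning idea with a std::unordered_set standing in for V8's string table:)

    #include <string>
    #include <unordered_set>

    using InternTable = std::unordered_set<std::string>;

    // Returns the canonical copy for the given content; element addresses
    // in an unordered_set stay stable across inserts.
    const std::string* Intern(InternTable& table, const std::string& s) {
      return &*table.insert(s).first;
    }

    // After interning, content equality is pointer equality.
    bool EqualsInternalized(const std::string* a, const std::string* b) {
      return a == b;
    }
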
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
@@ -3885,7 +3940,7 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
// or Object.prototype objects as their prototype (in any of the current
// native contexts, as the global Array protector works isolate-wide).
for (MapRef receiver_map : receiver_maps) {
- ObjectRef receiver_prototype = receiver_map.prototype();
+ ObjectRef receiver_prototype = receiver_map.prototype(broker());
if (!receiver_prototype.IsJSObject() ||
!broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
return false;
@@ -3920,17 +3975,16 @@ bool JSNativeContextSpecialization::InferMaps(Node* object, Effect effect,
return false;
}
-base::Optional<MapRef> JSNativeContextSpecialization::InferRootMap(
- Node* object) const {
+OptionalMapRef JSNativeContextSpecialization::InferRootMap(Node* object) const {
HeapObjectMatcher m(object);
if (m.HasResolvedValue()) {
- MapRef map = m.Ref(broker()).map();
- return map.FindRootMap();
+ MapRef map = m.Ref(broker()).map(broker());
+ return map.FindRootMap(broker());
} else if (m.IsJSCreate()) {
- base::Optional<MapRef> initial_map =
+ OptionalMapRef initial_map =
NodeProperties::GetJSCreateMap(broker(), object);
if (initial_map.has_value()) {
- DCHECK(initial_map->equals(initial_map->FindRootMap()));
+ DCHECK(initial_map->equals(initial_map->FindRootMap(broker())));
return *initial_map;
}
}
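
(InferRootMap normalizes any inferred map to the root of its transition tree; the DCHECK above asserts a JSCreate initial map is already such a root. A hedged model of what FindRootMap does conceptually, not V8's actual Map layout:)

    // Each transitioned map keeps a back pointer to the map it came from;
    // the root is the one without a back pointer.
    struct Map {
      const Map* back_pointer = nullptr;
    };

    const Map* FindRootMap(const Map* map) {
      while (map->back_pointer != nullptr) map = map->back_pointer;
      return map;
    }
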