summaryrefslogtreecommitdiff
path: root/deps/v8/src/objects/elements.cc
diff options
context:
space:
mode:
author: Michaël Zasso <targos@protonmail.com> 2021-07-14 11:30:07 +0200
committer: Michaël Zasso <targos@protonmail.com> 2021-07-20 15:24:51 +0200
commit: 6cdd310275bb0f8056aa0ae6d95614e9ca5b70c7 (patch)
tree: 9ed37b19cd668894854b7f469010f7621e63ef81 /deps/v8/src/objects/elements.cc
parent: c0f10006c82d2d9896a552de98ed146f9542720d (diff)
download: node-new-6cdd310275bb0f8056aa0ae6d95614e9ca5b70c7.tar.gz
deps: update V8 to 9.2.230.21
PR-URL: https://github.com/nodejs/node/pull/38990 Reviewed-By: Jiawen Geng <technicalcute@gmail.com> Reviewed-By: Matteo Collina <matteo.collina@gmail.com> Reviewed-By: Robert Nagy <ronagy@icloud.com> Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/objects/elements.cc')
-rw-r--r-- deps/v8/src/objects/elements.cc | 870
1 file changed, 631 insertions(+), 239 deletions(-)
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 9b1c7936bb..e70ae48900 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -4,6 +4,7 @@
#include "src/objects/elements.h"
+#include "src/base/atomicops.h"
#include "src/common/message-template.h"
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
@@ -59,6 +60,17 @@
// - Uint8ClampedElementsAccessor
// - BigUint64ElementsAccessor
// - BigInt64ElementsAccessor
+// - RabGsabUint8ElementsAccessor
+// - RabGsabInt8ElementsAccessor
+// - RabGsabUint16ElementsAccessor
+// - RabGsabInt16ElementsAccessor
+// - RabGsabUint32ElementsAccessor
+// - RabGsabInt32ElementsAccessor
+// - RabGsabFloat32ElementsAccessor
+// - RabGsabFloat64ElementsAccessor
+// - RabGsabUint8ClampedElementsAccessor
+// - RabGsabBigUint64ElementsAccessor
+// - RabGsabBigInt64ElementsAccessor
// - DictionaryElementsAccessor
// - SloppyArgumentsElementsAccessor
// - FastSloppyArgumentsElementsAccessor
@@ -129,7 +141,19 @@ enum Where { AT_START, AT_END };
V(Float64ElementsAccessor, FLOAT64_ELEMENTS, ByteArray) \
V(Uint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, ByteArray) \
V(BigUint64ElementsAccessor, BIGUINT64_ELEMENTS, ByteArray) \
- V(BigInt64ElementsAccessor, BIGINT64_ELEMENTS, ByteArray)
+ V(BigInt64ElementsAccessor, BIGINT64_ELEMENTS, ByteArray) \
+ V(RabGsabUint8ElementsAccessor, RAB_GSAB_UINT8_ELEMENTS, ByteArray) \
+ V(RabGsabInt8ElementsAccessor, RAB_GSAB_INT8_ELEMENTS, ByteArray) \
+ V(RabGsabUint16ElementsAccessor, RAB_GSAB_UINT16_ELEMENTS, ByteArray) \
+ V(RabGsabInt16ElementsAccessor, RAB_GSAB_INT16_ELEMENTS, ByteArray) \
+ V(RabGsabUint32ElementsAccessor, RAB_GSAB_UINT32_ELEMENTS, ByteArray) \
+ V(RabGsabInt32ElementsAccessor, RAB_GSAB_INT32_ELEMENTS, ByteArray) \
+ V(RabGsabFloat32ElementsAccessor, RAB_GSAB_FLOAT32_ELEMENTS, ByteArray) \
+ V(RabGsabFloat64ElementsAccessor, RAB_GSAB_FLOAT64_ELEMENTS, ByteArray) \
+ V(RabGsabUint8ClampedElementsAccessor, RAB_GSAB_UINT8_CLAMPED_ELEMENTS, \
+ ByteArray) \
+ V(RabGsabBigUint64ElementsAccessor, RAB_GSAB_BIGUINT64_ELEMENTS, ByteArray) \
+ V(RabGsabBigInt64ElementsAccessor, RAB_GSAB_BIGINT64_ELEMENTS, ByteArray)
template <ElementsKind Kind>
class ElementsKindTraits {
@@ -639,57 +663,64 @@ class ElementsAccessorBase : public InternalElementsAccessor {
UNREACHABLE();
}
- void Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, uint32_t new_capacity) final {
- Subclass::AddImpl(object, index, value, attributes, new_capacity);
+ Maybe<bool> Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, uint32_t new_capacity) final {
+ return Subclass::AddImpl(object, index, value, attributes, new_capacity);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
UNREACHABLE();
}
- uint32_t Push(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) final {
+ Maybe<uint32_t> Push(Handle<JSArray> receiver, BuiltinArguments* args,
+ uint32_t push_size) final {
return Subclass::PushImpl(receiver, args, push_size);
}
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_sized) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_sized) {
UNREACHABLE();
}
- uint32_t Unshift(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t unshift_size) final {
+ Maybe<uint32_t> Unshift(Handle<JSArray> receiver, BuiltinArguments* args,
+ uint32_t unshift_size) final {
return Subclass::UnshiftImpl(receiver, args, unshift_size);
}
- static uint32_t UnshiftImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t unshift_size) {
+ static Maybe<uint32_t> UnshiftImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args,
+ uint32_t unshift_size) {
UNREACHABLE();
}
- Handle<Object> Pop(Handle<JSArray> receiver) final {
+ MaybeHandle<Object> Pop(Handle<JSArray> receiver) final {
return Subclass::PopImpl(receiver);
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+ static MaybeHandle<Object> PopImpl(Handle<JSArray> receiver) {
+ UNREACHABLE();
+ }
- Handle<Object> Shift(Handle<JSArray> receiver) final {
+ MaybeHandle<Object> Shift(Handle<JSArray> receiver) final {
return Subclass::ShiftImpl(receiver);
}
- static Handle<Object> ShiftImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+ static MaybeHandle<Object> ShiftImpl(Handle<JSArray> receiver) {
+ UNREACHABLE();
+ }
- void SetLength(Handle<JSArray> array, uint32_t length) final {
- Subclass::SetLengthImpl(array->GetIsolate(), array, length,
- handle(array->elements(), array->GetIsolate()));
+ Maybe<bool> SetLength(Handle<JSArray> array, uint32_t length) final {
+ return Subclass::SetLengthImpl(
+ array->GetIsolate(), array, length,
+ handle(array->elements(), array->GetIsolate()));
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
DCHECK(!array->SetLengthWouldNormalize(length));
DCHECK(IsFastElementsKind(array->GetElementsKind()));
uint32_t old_length = 0;
@@ -735,11 +766,13 @@ class ElementsAccessorBase : public InternalElementsAccessor {
} else {
// Check whether the backing store should be expanded.
capacity = std::max(length, JSObject::NewElementsCapacity(capacity));
- Subclass::GrowCapacityAndConvertImpl(array, capacity);
+ MAYBE_RETURN(Subclass::GrowCapacityAndConvertImpl(array, capacity),
+ Nothing<bool>());
}
array->set_length(Smi::FromInt(length));
JSObject::ValidateElements(*array);
+ return Just(true);
}
size_t NumberOfElements(JSObject receiver) final {
@@ -765,23 +798,35 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::GetMaxIndex(receiver, elements);
}
- static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ static MaybeHandle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity) {
return ConvertElementsWithCapacity(object, old_elements, from_kind,
capacity, 0, 0);
}
- static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ static MaybeHandle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity, uint32_t src_index,
uint32_t dst_index) {
Isolate* isolate = object->GetIsolate();
Handle<FixedArrayBase> new_elements;
+ // TODO(victorgomes): Retrieve native context in optimized code
+ // and remove the check isolate->context().is_null().
if (IsDoubleElementsKind(kind())) {
+ if (!isolate->context().is_null() &&
+ !base::IsInRange(capacity, 0, FixedDoubleArray::kMaxLength)) {
+ return isolate->Throw<FixedArrayBase>(isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidArrayLength));
+ }
new_elements = isolate->factory()->NewFixedDoubleArray(capacity);
} else {
- new_elements = isolate->factory()->NewUninitializedFixedArray(capacity);
+ if (!isolate->context().is_null() &&
+ !base::IsInRange(capacity, 0, FixedArray::kMaxLength)) {
+ return isolate->Throw<FixedArrayBase>(isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidArrayLength));
+ }
+ new_elements = isolate->factory()->NewFixedArray(capacity);
}
int packed_size = kPackedSizeNotKnown;
@@ -793,11 +838,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
from_kind, dst_index, packed_size,
kCopyToEndAndInitializeToHole);
- return new_elements;
+ return MaybeHandle<FixedArrayBase>(new_elements);
}
- static void TransitionElementsKindImpl(Handle<JSObject> object,
- Handle<Map> to_map) {
+ static Maybe<bool> TransitionElementsKindImpl(Handle<JSObject> object,
+ Handle<Map> to_map) {
Isolate* isolate = object->GetIsolate();
Handle<Map> from_map = handle(object->map(), isolate);
ElementsKind from_kind = from_map->elements_kind();
@@ -822,8 +867,12 @@ class ElementsAccessorBase : public InternalElementsAccessor {
(IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
(IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
uint32_t capacity = static_cast<uint32_t>(object->elements().length());
- Handle<FixedArrayBase> elements = ConvertElementsWithCapacity(
- object, from_elements, from_kind, capacity);
+ Handle<FixedArrayBase> elements;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ object->GetIsolate(), elements,
+ ConvertElementsWithCapacity(object, from_elements, from_kind,
+ capacity),
+ Nothing<bool>());
JSObject::SetMapAndElements(object, to_map, elements);
}
if (FLAG_trace_elements_transitions) {
@@ -832,10 +881,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
handle(object->elements(), isolate));
}
}
+ return Just(true);
}
- static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
- uint32_t capacity) {
+ static Maybe<bool> GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
ElementsKind from_kind = object->GetElementsKind();
if (IsSmiOrObjectElementsKind(from_kind)) {
// Array optimizations rely on the prototype lookups of Array objects
@@ -850,15 +900,18 @@ class ElementsAccessorBase : public InternalElementsAccessor {
DCHECK(IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(kind()) ||
IsDictionaryElementsKind(from_kind) ||
static_cast<uint32_t>(old_elements->length()) < capacity);
- Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
- kind(), capacity);
+ return Subclass::BasicGrowCapacityAndConvertImpl(
+ object, old_elements, from_kind, kind(), capacity);
}
- static void BasicGrowCapacityAndConvertImpl(
+ static Maybe<bool> BasicGrowCapacityAndConvertImpl(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, ElementsKind to_kind, uint32_t capacity) {
- Handle<FixedArrayBase> elements =
- ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
+ Handle<FixedArrayBase> elements;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ object->GetIsolate(), elements,
+ ConvertElementsWithCapacity(object, old_elements, from_kind, capacity),
+ Nothing<bool>());
if (IsHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
@@ -873,40 +926,45 @@ class ElementsAccessorBase : public InternalElementsAccessor {
JSObject::PrintElementsTransition(stdout, object, from_kind, old_elements,
to_kind, elements);
}
+ return Just(true);
}
- void TransitionElementsKind(Handle<JSObject> object, Handle<Map> map) final {
- Subclass::TransitionElementsKindImpl(object, map);
+ Maybe<bool> TransitionElementsKind(Handle<JSObject> object,
+ Handle<Map> map) final {
+ return Subclass::TransitionElementsKindImpl(object, map);
}
- void GrowCapacityAndConvert(Handle<JSObject> object,
- uint32_t capacity) final {
- Subclass::GrowCapacityAndConvertImpl(object, capacity);
+ Maybe<bool> GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) final {
+ return Subclass::GrowCapacityAndConvertImpl(object, capacity);
}
- bool GrowCapacity(Handle<JSObject> object, uint32_t index) final {
+ Maybe<bool> GrowCapacity(Handle<JSObject> object, uint32_t index) final {
// This function is intended to be called from optimized code. We don't
// want to trigger lazy deopts there, so refuse to handle cases that would.
if (object->map().is_prototype_map() ||
object->WouldConvertToSlowElements(index)) {
- return false;
+ return Just(false);
}
Handle<FixedArrayBase> old_elements(object->elements(),
object->GetIsolate());
uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
DCHECK(static_cast<uint32_t>(old_elements->length()) < new_capacity);
- Handle<FixedArrayBase> elements =
- ConvertElementsWithCapacity(object, old_elements, kind(), new_capacity);
+ Handle<FixedArrayBase> elements;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ object->GetIsolate(), elements,
+ ConvertElementsWithCapacity(object, old_elements, kind(), new_capacity),
+ Nothing<bool>());
DCHECK_EQ(object->GetElementsKind(), kind());
// Transition through the allocation site as well if present.
if (JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
object, kind())) {
- return false;
+ return Just(false);
}
object->set_elements(*elements);
- return true;
+ return Just(true);
}
void Delete(Handle<JSObject> obj, InternalIndex entry) final {
@@ -1220,13 +1278,14 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::GetCapacityImpl(holder, backing_store);
}
- static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
- size_t start, size_t end) {
+ static MaybeHandle<Object> FillImpl(Handle<JSObject> receiver,
+ Handle<Object> obj_value, size_t start,
+ size_t end) {
UNREACHABLE();
}
- Object Fill(Handle<JSObject> receiver, Handle<Object> obj_value, size_t start,
- size_t end) override {
+ MaybeHandle<Object> Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
+ size_t start, size_t end) override {
return Subclass::FillImpl(receiver, obj_value, start, end);
}
@@ -1348,9 +1407,9 @@ class DictionaryElementsAccessor
return dict.NumberOfElements();
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
Handle<NumberDictionary> dict =
Handle<NumberDictionary>::cast(backing_store);
uint32_t old_length = 0;
@@ -1401,6 +1460,7 @@ class DictionaryElementsAccessor
Handle<Object> length_obj = isolate->factory()->NewNumberFromUint(length);
array->set_length(*length_obj);
+ return Just(true);
}
static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
@@ -1466,9 +1526,10 @@ class DictionaryElementsAccessor
dictionary.DetailsAtPut(entry, details);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
Handle<NumberDictionary> dictionary =
object->HasFastElements() || object->HasFastStringWrapperElements()
@@ -1479,8 +1540,9 @@ class DictionaryElementsAccessor
object->GetIsolate(), dictionary, index, value, details);
new_dictionary->UpdateMaxNumberKey(index, object);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
- if (dictionary.is_identical_to(new_dictionary)) return;
+ if (dictionary.is_identical_to(new_dictionary)) return Just(true);
object->set_elements(*new_dictionary);
+ return Just(true);
}
static bool HasEntryImpl(Isolate* isolate, FixedArrayBase store,
@@ -1904,12 +1966,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
backing_store->set_the_hole(isolate, entry);
// TODO(verwaest): Move this out of elements.cc.
- // If an old space backing store is larger than a certain size and
+ // If the backing store is larger than a certain size and
// has too few used values, normalize it.
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() < kMinLengthForSparsenessCheck) return;
- // TODO(ulan): Check if it works with young large objects.
- if (ObjectInYoungGeneration(*backing_store)) return;
uint32_t length = 0;
if (obj->IsJSArray()) {
JSArray::cast(*obj).length().ToArrayLength(&length);
@@ -1971,9 +2031,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
value, attributes);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
DCHECK_EQ(NONE, attributes);
ElementsKind from_kind = object->GetElementsKind();
ElementsKind to_kind = Subclass::kind();
@@ -1981,7 +2042,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(to_kind) ||
Subclass::GetCapacityImpl(*object, object->elements()) !=
new_capacity) {
- Subclass::GrowCapacityAndConvertImpl(object, new_capacity);
+ MAYBE_RETURN(Subclass::GrowCapacityAndConvertImpl(object, new_capacity),
+ Nothing<bool>());
} else {
if (IsFastElementsKind(from_kind) && from_kind != to_kind) {
JSObject::TransitionElementsKind(object, to_kind);
@@ -1992,6 +2054,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
}
Subclass::SetImpl(object, InternalIndex(index), *value);
+ return Just(true);
}
static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) {
@@ -2086,24 +2149,25 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
#endif
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver) {
+ static MaybeHandle<Object> PopImpl(Handle<JSArray> receiver) {
return Subclass::RemoveElement(receiver, AT_END);
}
- static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
+ static MaybeHandle<Object> ShiftImpl(Handle<JSArray> receiver) {
return Subclass::RemoveElement(receiver, AT_START);
}
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_size) {
Handle<FixedArrayBase> backing_store(receiver->elements(),
receiver->GetIsolate());
return Subclass::AddArguments(receiver, backing_store, args, push_size,
AT_END);
}
- static uint32_t UnshiftImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t unshift_size) {
+ static Maybe<uint32_t> UnshiftImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args,
+ uint32_t unshift_size) {
Handle<FixedArrayBase> backing_store(receiver->elements(),
receiver->GetIsolate());
return Subclass::AddArguments(receiver, backing_store, args, unshift_size,
@@ -2137,8 +2201,9 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
}
- static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
- size_t start, size_t end) {
+ static MaybeHandle<Object> FillImpl(Handle<JSObject> receiver,
+ Handle<Object> obj_value, size_t start,
+ size_t end) {
// Ensure indexes are within array bounds
DCHECK_LE(0, start);
DCHECK_LE(start, end);
@@ -2151,8 +2216,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// Make sure we have enough space.
DCHECK_LE(end, std::numeric_limits<uint32_t>::max());
if (end > Subclass::GetCapacityImpl(*receiver, receiver->elements())) {
- Subclass::GrowCapacityAndConvertImpl(receiver,
- static_cast<uint32_t>(end));
+ MAYBE_RETURN_NULL(Subclass::GrowCapacityAndConvertImpl(
+ receiver, static_cast<uint32_t>(end)));
CHECK_EQ(Subclass::kind(), receiver->GetElementsKind());
}
DCHECK_LE(end, Subclass::GetCapacityImpl(*receiver, receiver->elements()));
@@ -2160,7 +2225,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
for (size_t index = start; index < end; ++index) {
Subclass::SetImpl(receiver, InternalIndex(index), *obj_value);
}
- return *receiver;
+ return MaybeHandle<Object>(receiver);
}
static Maybe<bool> IncludesValueImpl(Isolate* isolate,
@@ -2322,8 +2387,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return result;
}
- static Handle<Object> RemoveElement(Handle<JSArray> receiver,
- Where remove_position) {
+ static MaybeHandle<Object> RemoveElement(Handle<JSArray> receiver,
+ Where remove_position) {
Isolate* isolate = receiver->GetIsolate();
ElementsKind kind = KindTraits::Kind;
if (IsSmiOrObjectElementsKind(kind)) {
@@ -2341,24 +2406,26 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length,
0, 0);
}
- Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store);
+ MAYBE_RETURN_NULL(
+ Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store));
if (IsHoleyElementsKind(kind) && result->IsTheHole(isolate)) {
return isolate->factory()->undefined_value();
}
- return result;
+ return MaybeHandle<Object>(result);
}
- static uint32_t AddArguments(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
- BuiltinArguments* args, uint32_t add_size,
- Where add_position) {
+ static Maybe<uint32_t> AddArguments(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ BuiltinArguments* args, uint32_t add_size,
+ Where add_position) {
uint32_t length = Smi::ToInt(receiver->length());
DCHECK_LT(0, add_size);
uint32_t elms_len = backing_store->length();
// Check we do not overflow the new_length.
DCHECK(add_size <= static_cast<uint32_t>(Smi::kMaxValue - length));
uint32_t new_length = length + add_size;
+ Isolate* isolate = receiver->GetIsolate();
if (new_length > elms_len) {
// New backing storage is needed.
@@ -2366,14 +2433,16 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// If we add arguments to the start we have to shift the existing objects.
int copy_dst_index = add_position == AT_START ? add_size : 0;
// Copy over all objects to a new backing_store.
- backing_store = Subclass::ConvertElementsWithCapacity(
- receiver, backing_store, KindTraits::Kind, capacity, 0,
- copy_dst_index);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, backing_store,
+ Subclass::ConvertElementsWithCapacity(receiver, backing_store,
+ KindTraits::Kind, capacity, 0,
+ copy_dst_index),
+ Nothing<uint32_t>());
receiver->set_elements(*backing_store);
} else if (add_position == AT_START) {
// If the backing store has enough capacity and we add elements to the
// start we have to shift the existing objects.
- Isolate* isolate = receiver->GetIsolate();
Subclass::MoveElements(isolate, receiver, backing_store, add_size, 0,
length, 0, 0);
}
@@ -2383,7 +2452,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Subclass::CopyArguments(args, backing_store, add_size, 1, insertion_index);
// Set the length.
receiver->set_length(Smi::FromInt(new_length));
- return new_length;
+ return Just(new_length);
}
static void CopyArguments(BuiltinArguments* args,
@@ -2469,6 +2538,7 @@ class FastSmiOrObjectElementsAccessor
case SLOW_STRING_WRAPPER_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
// This function is currently only used for JSArrays with non-zero
// length.
@@ -2569,28 +2639,29 @@ class FastNonextensibleObjectElementsAccessor
public:
using BackingStore = typename KindTraits::BackingStore;
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_size) {
UNREACHABLE();
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
UNREACHABLE();
}
// TODO(duongn): refactor this due to code duplication of sealed version.
// Consider using JSObject::NormalizeElements(). Also consider follow the fast
// element logic instead of changing to dictionary mode.
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
uint32_t old_length = 0;
CHECK(array->length().ToArrayIndex(&old_length));
if (length == old_length) {
// Do nothing.
- return;
+ return Just(true);
}
// Transition to DICTIONARY_ELEMENTS.
@@ -2622,8 +2693,8 @@ class FastNonextensibleObjectElementsAccessor
// Set length.
Handle<FixedArrayBase> new_backing_store(array->elements(), isolate);
- DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
- new_backing_store);
+ return DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
+ new_backing_store);
}
};
@@ -2662,30 +2733,33 @@ class FastSealedObjectElementsAccessor
UNREACHABLE();
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+ static MaybeHandle<Object> PopImpl(Handle<JSArray> receiver) {
+ UNREACHABLE();
+ }
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_size) {
UNREACHABLE();
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
UNREACHABLE();
}
// TODO(duongn): refactor this due to code duplication of nonextensible
// version. Consider using JSObject::NormalizeElements(). Also consider follow
// the fast element logic instead of changing to dictionary mode.
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
uint32_t old_length = 0;
CHECK(array->length().ToArrayIndex(&old_length));
if (length == old_length) {
// Do nothing.
- return;
+ return Just(true);
}
// Transition to DICTIONARY_ELEMENTS.
@@ -2717,8 +2791,8 @@ class FastSealedObjectElementsAccessor
// Set length
Handle<FixedArrayBase> new_backing_store(array->elements(), isolate);
- DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
- new_backing_store);
+ return DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
+ new_backing_store);
}
};
@@ -2772,22 +2846,25 @@ class FastFrozenObjectElementsAccessor
UNREACHABLE();
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+ static MaybeHandle<Object> PopImpl(Handle<JSArray> receiver) {
+ UNREACHABLE();
+ }
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_size) {
UNREACHABLE();
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
UNREACHABLE();
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
UNREACHABLE();
}
@@ -2877,6 +2954,7 @@ class FastDoubleElementsAccessor
case NO_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
// This function is currently only used for JSArrays with non-zero
// length.
@@ -2952,6 +3030,8 @@ class FastHoleyDoubleElementsAccessor
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<HOLEY_DOUBLE_ELEMENTS>> {};
+enum IsSharedBuffer : bool { kShared = true, kUnshared = false };
+
// Super class for all external element arrays.
template <ElementsKind Kind, typename ElementType>
class TypedElementsAccessor
@@ -2998,42 +3078,70 @@ class TypedElementsAccessor
static void SetImpl(Handle<JSObject> holder, InternalIndex entry,
Object value) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
- DCHECK_LE(entry.raw_value(), typed_array->length());
- SetImpl(static_cast<ElementType*>(typed_array->DataPtr()),
- entry.raw_value(), FromObject(value));
- }
+ DCHECK_LE(entry.raw_value(), typed_array->GetLength());
+ auto* entry_ptr =
+ static_cast<ElementType*>(typed_array->DataPtr()) + entry.raw_value();
+ auto is_shared = typed_array->buffer().is_shared() ? kShared : kUnshared;
+ SetImpl(entry_ptr, FromObject(value), is_shared);
+ }
+
+ static void SetImpl(ElementType* data_ptr, ElementType value,
+ IsSharedBuffer is_shared) {
+ // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
+ // fields (external pointers, doubles and BigInt data) are not always 8-byte
+ // aligned. This is relying on undefined behaviour in C++, since {data_ptr}
+ // is not aligned to {alignof(ElementType)}.
+ if (!is_shared) {
+ base::WriteUnalignedValue(reinterpret_cast<Address>(data_ptr), value);
+ return;
+ }
- static void SetImpl(ElementType* data_ptr, size_t entry, ElementType value) {
// The JavaScript memory model allows for racy reads and writes to a
- // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
- // racy accesses and warn about them, so we disable TSAN for these reads
- // and writes using annotations.
- //
- // We don't use relaxed atomics here, as it is not a requirement of the
- // JavaScript memory model to have tear-free reads of overlapping accesses,
- // and using relaxed atomics may introduce overhead.
- TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only
- // kTaggedSize aligned so we have to use unaligned pointer friendly way of
- // accessing them in order to avoid undefined behavior in C++ code.
- base::WriteUnalignedValue<ElementType>(
- reinterpret_cast<Address>(data_ptr + entry), value);
- } else {
- data_ptr[entry] = value;
+ // SharedArrayBuffer's backing store. Using relaxed atomics is not strictly
+ // required for JavaScript, but will avoid undefined behaviour in C++ and is
+ // unlikely to introduce noticable overhead.
+ if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr),
+ alignof(std::atomic<ElementType>))) {
+ // Use a single relaxed atomic store.
+ STATIC_ASSERT(sizeof(std::atomic<ElementType>) == sizeof(ElementType));
+ reinterpret_cast<std::atomic<ElementType>*>(data_ptr)->store(
+ value, std::memory_order_relaxed);
+ return;
+ }
+
+ // Some static CHECKs (are optimized out if succeeding) to ensure that
+ // {data_ptr} is at least four byte aligned, and {std::atomic<uint32_t>}
+ // has size and alignment of four bytes, such that we can cast the
+ // {data_ptr} to it.
+ CHECK_LE(kInt32Size, alignof(ElementType));
+ CHECK_EQ(kInt32Size, alignof(std::atomic<uint32_t>));
+ CHECK_EQ(kInt32Size, sizeof(std::atomic<uint32_t>));
+ // And dynamically check that we indeed have at least four byte alignment.
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr), kInt32Size));
+ // Store as multiple 32-bit words. Make {kNumWords} >= 1 to avoid compiler
+ // warnings for the empty array or memcpy to an empty object.
+ constexpr size_t kNumWords =
+ std::max(size_t{1}, sizeof(ElementType) / kInt32Size);
+ uint32_t words[kNumWords];
+ CHECK_EQ(sizeof(words), sizeof(value));
+ memcpy(words, &value, sizeof(value));
+ for (size_t word = 0; word < kNumWords; ++word) {
+ STATIC_ASSERT(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t));
+ reinterpret_cast<std::atomic<uint32_t>*>(data_ptr)[word].store(
+ words[word], std::memory_order_relaxed);
}
- TSAN_ANNOTATE_IGNORE_WRITES_END;
}
static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
InternalIndex entry) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
Isolate* isolate = typed_array->GetIsolate();
- DCHECK_LE(entry.raw_value(), typed_array->length());
+ DCHECK_LT(entry.raw_value(), typed_array->GetLength());
DCHECK(!typed_array->WasDetached());
- ElementType elem = GetImpl(
- static_cast<ElementType*>(typed_array->DataPtr()), entry.raw_value());
+ auto* element_ptr =
+ static_cast<ElementType*>(typed_array->DataPtr()) + entry.raw_value();
+ auto is_shared = typed_array->buffer().is_shared() ? kShared : kUnshared;
+ ElementType elem = GetImpl(element_ptr, is_shared);
return ToHandle(isolate, elem);
}
@@ -3042,28 +3150,53 @@ class TypedElementsAccessor
UNREACHABLE();
}
- static ElementType GetImpl(ElementType* data_ptr, size_t entry) {
+ static ElementType GetImpl(ElementType* data_ptr, IsSharedBuffer is_shared) {
+ // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
+ // fields (external pointers, doubles and BigInt data) are not always
+ // 8-byte aligned.
+ if (!is_shared) {
+ return base::ReadUnalignedValue<ElementType>(
+ reinterpret_cast<Address>(data_ptr));
+ }
+
// The JavaScript memory model allows for racy reads and writes to a
- // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
- // racy accesses and warn about them, so we disable TSAN for these reads
- // and writes using annotations.
- //
- // We don't use relaxed atomics here, as it is not a requirement of the
- // JavaScript memory model to have tear-free reads of overlapping accesses,
- // and using relaxed atomics may introduce overhead.
- TSAN_ANNOTATE_IGNORE_READS_BEGIN;
- ElementType result;
- if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only
- // kTaggedSize aligned so we have to use unaligned pointer friendly way of
- // accessing them in order to avoid undefined behavior in C++ code.
- result = base::ReadUnalignedValue<ElementType>(
- reinterpret_cast<Address>(data_ptr + entry));
- } else {
- result = data_ptr[entry];
+ // SharedArrayBuffer's backing store. Using relaxed atomics is not strictly
+ // required for JavaScript, but will avoid undefined behaviour in C++ and is
+ // unlikely to introduce noticable overhead.
+ if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr),
+ alignof(std::atomic<ElementType>))) {
+ // Use a single relaxed atomic load.
+ STATIC_ASSERT(sizeof(std::atomic<ElementType>) == sizeof(ElementType));
+ // Note: acquire semantics are not needed here, but clang seems to merge
+ // this atomic load with the non-atomic load above if we use relaxed
+ // semantics. This will result in TSan failures.
+ return reinterpret_cast<std::atomic<ElementType>*>(data_ptr)->load(
+ std::memory_order_acquire);
+ }
+
+ // Some static CHECKs (optimized out if they succeed) to ensure that
+ // {data_ptr} is at least four byte aligned, and {std::atomic<uint32_t>}
+ // has size and alignment of four bytes, such that we can cast the
+ // {data_ptr} to it.
+ CHECK_LE(kInt32Size, alignof(ElementType));
+ CHECK_EQ(kInt32Size, alignof(std::atomic<uint32_t>));
+ CHECK_EQ(kInt32Size, sizeof(std::atomic<uint32_t>));
+ // And dynamically check that we indeed have at least four byte alignment.
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr), kInt32Size));
+ // Load in multiple 32-bit words. Make {kNumWords} >= 1 to avoid compiler
+ // warnings for the empty array or memcpy to an empty object.
+ constexpr size_t kNumWords =
+ std::max(size_t{1}, sizeof(ElementType) / kInt32Size);
+ uint32_t words[kNumWords];
+ for (size_t word = 0; word < kNumWords; ++word) {
+ STATIC_ASSERT(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t));
+ words[word] =
+ reinterpret_cast<std::atomic<uint32_t>*>(data_ptr)[word].load(
+ std::memory_order_relaxed);
}
- TSAN_ANNOTATE_IGNORE_READS_END;
+ ElementType result;
+ CHECK_EQ(sizeof(words), sizeof(result));
+ memcpy(&result, words, sizeof(result));
return result;
}
@@ -3086,9 +3219,9 @@ class TypedElementsAccessor
return false;
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
}
@@ -3111,8 +3244,7 @@ class TypedElementsAccessor
static size_t GetCapacityImpl(JSObject holder, FixedArrayBase backing_store) {
JSTypedArray typed_array = JSTypedArray::cast(holder);
- if (typed_array.WasDetached()) return 0;
- return typed_array.length();
+ return typed_array.GetLength();
}
static size_t NumberOfElementsImpl(JSObject receiver,
@@ -3155,8 +3287,9 @@ class TypedElementsAccessor
return Just(true);
}
- static Object FillImpl(Handle<JSObject> receiver, Handle<Object> value,
- size_t start, size_t end) {
+ static MaybeHandle<Object> FillImpl(Handle<JSObject> receiver,
+ Handle<Object> value, size_t start,
+ size_t end) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
DCHECK(!typed_array->WasDetached());
DCHECK_LE(start, end);
@@ -3171,7 +3304,7 @@ class TypedElementsAccessor
} else {
std::fill(data + start, data + end, scalar);
}
- return *typed_array;
+ return MaybeHandle<Object>(typed_array);
}
static Maybe<bool> IncludesValueImpl(Isolate* isolate,
@@ -3200,6 +3333,7 @@ class TypedElementsAccessor
ElementType typed_search_value;
ElementType* data_ptr =
reinterpret_cast<ElementType*>(typed_array.DataPtr());
+ auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
if (!value->IsBigInt()) return Just(false);
bool lossless;
@@ -3215,8 +3349,8 @@ class TypedElementsAccessor
}
if (std::isnan(search_value)) {
for (size_t k = start_from; k < length; ++k) {
- double elem_k =
- static_cast<double>(AccessorClass::GetImpl(data_ptr, k));
+ double elem_k = static_cast<double>(
+ AccessorClass::GetImpl(data_ptr + k, is_shared));
if (std::isnan(elem_k)) return Just(true);
}
return Just(false);
@@ -3233,7 +3367,7 @@ class TypedElementsAccessor
}
for (size_t k = start_from; k < length; ++k) {
- ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
if (elem_k == typed_search_value) return Just(true);
}
return Just(false);
@@ -3285,8 +3419,9 @@ class TypedElementsAccessor
length = typed_array.length();
}
+ auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
for (size_t k = start_from; k < length; ++k) {
- ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
if (elem_k == typed_search_value) return Just<int64_t>(k);
}
return Just<int64_t>(-1);
@@ -3333,8 +3468,9 @@ class TypedElementsAccessor
DCHECK_LT(start_from, typed_array.length());
size_t k = start_from;
+ auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
do {
- ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
if (elem_k == typed_search_value) return Just<int64_t>(k);
} while (k-- != 0);
return Just<int64_t>(-1);
@@ -3384,12 +3520,16 @@ class TypedElementsAccessor
size_t count = end - start;
DCHECK_LE(count, destination.length());
ElementType* dest_data = static_cast<ElementType*>(destination.DataPtr());
+ auto is_shared =
+ source.buffer().is_shared() || destination.buffer().is_shared()
+ ? kShared
+ : kUnshared;
switch (source.GetElementsKind()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: { \
ctype* source_data = reinterpret_cast<ctype*>(source.DataPtr()) + start; \
CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>(source_data, dest_data, \
- count); \
+ count, is_shared); \
break; \
}
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -3408,16 +3548,16 @@ class TypedElementsAccessor
template <ElementsKind SourceKind, typename SourceElementType>
static void CopyBetweenBackingStores(SourceElementType* source_data_ptr,
ElementType* dest_data_ptr,
- size_t length) {
- DisallowGarbageCollection no_gc;
- for (size_t i = 0; i < length; i++) {
+ size_t length,
+ IsSharedBuffer is_shared) {
+ for (; length > 0; --length, ++source_data_ptr, ++dest_data_ptr) {
// We use scalar accessors to avoid boxing/unboxing, so there are no
// allocations.
SourceElementType source_elem =
TypedElementsAccessor<SourceKind, SourceElementType>::GetImpl(
- source_data_ptr, i);
+ source_data_ptr, is_shared);
ElementType dest_elem = FromScalar(source_elem);
- SetImpl(dest_data_ptr, i, dest_elem);
+ SetImpl(dest_data_ptr, dest_elem, is_shared);
}
}
@@ -3448,14 +3588,24 @@ class TypedElementsAccessor
size_t source_byte_length = source.byte_length();
size_t dest_byte_length = destination.byte_length();
+ bool source_shared = source.buffer().is_shared();
+ bool destination_shared = destination.buffer().is_shared();
+
// We can simply copy the backing store if the types are the same, or if
// we are converting e.g. Uint8 <-> Int8, as the binary representation
// will be the same. This is not the case for floats or clamped Uint8,
// which have special conversion operations.
if (same_type || (same_size && both_are_simple)) {
size_t element_size = source.element_size();
- std::memmove(dest_data + offset * element_size, source_data,
- length * element_size);
+ if (source_shared || destination_shared) {
+ base::Relaxed_Memcpy(
+ reinterpret_cast<base::Atomic8*>(dest_data + offset * element_size),
+ reinterpret_cast<base::Atomic8*>(source_data),
+ length * element_size);
+ } else {
+ std::memmove(dest_data + offset * element_size, source_data,
+ length * element_size);
+ }
} else {
std::unique_ptr<uint8_t[]> cloned_source_elements;
@@ -3463,17 +3613,25 @@ class TypedElementsAccessor
if (dest_data + dest_byte_length > source_data &&
source_data + source_byte_length > dest_data) {
cloned_source_elements.reset(new uint8_t[source_byte_length]);
- std::memcpy(cloned_source_elements.get(), source_data,
- source_byte_length);
+ if (source_shared) {
+ base::Relaxed_Memcpy(
+ reinterpret_cast<base::Atomic8*>(cloned_source_elements.get()),
+ reinterpret_cast<base::Atomic8*>(source_data),
+ source_byte_length);
+ } else {
+ std::memcpy(cloned_source_elements.get(), source_data,
+ source_byte_length);
+ }
source_data = cloned_source_elements.get();
}
switch (source.GetElementsKind()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>( \
- reinterpret_cast<ctype*>(source_data), \
- reinterpret_cast<ElementType*>(dest_data) + offset, length); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>( \
+ reinterpret_cast<ctype*>(source_data), \
+ reinterpret_cast<ElementType*>(dest_data) + offset, length, \
+ source_shared || destination_shared ? kShared : kUnshared); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
@@ -3529,6 +3687,9 @@ class TypedElementsAccessor
ElementsKind kind = source.GetElementsKind();
+ auto destination_shared =
+ destination.buffer().is_shared() ? kShared : kUnshared;
+
// When we find the hole, we normally have to look up the element on the
// prototype chain, which is not handled here and we return false instead.
// When the array has the original array prototype, and that prototype has
@@ -3546,17 +3707,19 @@ class TypedElementsAccessor
for (size_t i = 0; i < length; i++) {
Object elem = source_store.get(static_cast<int>(i));
- SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
+ SetImpl(dest_data + i, FromScalar(Smi::ToInt(elem)),
+ destination_shared);
}
return true;
} else if (kind == HOLEY_SMI_ELEMENTS) {
FixedArray source_store = FixedArray::cast(source.elements());
for (size_t i = 0; i < length; i++) {
if (source_store.is_the_hole(isolate, static_cast<int>(i))) {
- SetImpl(dest_data, i, FromObject(undefined));
+ SetImpl(dest_data + i, FromObject(undefined), destination_shared);
} else {
Object elem = source_store.get(static_cast<int>(i));
- SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
+ SetImpl(dest_data + i, FromScalar(Smi::ToInt(elem)),
+ destination_shared);
}
}
return true;
@@ -3569,17 +3732,17 @@ class TypedElementsAccessor
// Use the from_double conversion for this specific TypedArray type,
// rather than relying on C++ to convert elem.
double elem = source_store.get_scalar(static_cast<int>(i));
- SetImpl(dest_data, i, FromScalar(elem));
+ SetImpl(dest_data + i, FromScalar(elem), destination_shared);
}
return true;
} else if (kind == HOLEY_DOUBLE_ELEMENTS) {
FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements());
for (size_t i = 0; i < length; i++) {
if (source_store.is_the_hole(static_cast<int>(i))) {
- SetImpl(dest_data, i, FromObject(undefined));
+ SetImpl(dest_data + i, FromObject(undefined), destination_shared);
} else {
double elem = source_store.get_scalar(static_cast<int>(i));
- SetImpl(dest_data, i, FromScalar(elem));
+ SetImpl(dest_data + i, FromScalar(elem), destination_shared);
}
}
return true;
@@ -3872,9 +4035,225 @@ Handle<Object> TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::ToHandle(
return BigInt::FromUint64(isolate, value);
}
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_INT8_ELEMENTS, int8_t>::ToHandle(
+ Isolate* isolate, int8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_UINT8_ELEMENTS,
+ uint8_t>::ToHandle(Isolate* isolate,
+ uint8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_INT16_ELEMENTS,
+ int16_t>::ToHandle(Isolate* isolate,
+ int16_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_UINT16_ELEMENTS,
+ uint16_t>::ToHandle(Isolate* isolate,
+ uint16_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_INT32_ELEMENTS,
+ int32_t>::ToHandle(Isolate* isolate,
+ int32_t value) {
+ return isolate->factory()->NewNumberFromInt(value);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_UINT32_ELEMENTS,
+ uint32_t>::ToHandle(Isolate* isolate,
+ uint32_t value) {
+ return isolate->factory()->NewNumberFromUint(value);
+}
+
+// static
+template <>
+float TypedElementsAccessor<RAB_GSAB_FLOAT32_ELEMENTS, float>::FromScalar(
+ double value) {
+ return DoubleToFloat32(value);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_FLOAT32_ELEMENTS,
+ float>::ToHandle(Isolate* isolate,
+ float value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+// static
+template <>
+double TypedElementsAccessor<RAB_GSAB_FLOAT64_ELEMENTS, double>::FromScalar(
+ double value) {
+ return value;
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_FLOAT64_ELEMENTS,
+ double>::ToHandle(Isolate* isolate,
+ double value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ uint8_t>::FromScalar(int value) {
+ if (value < 0x00) return 0x00;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ uint8_t>::FromScalar(uint32_t value) {
+ // We need this special case for Uint32 -> Uint8Clamped, because the highest
+ // Uint32 values will be negative as an int, clamping to 0, rather than 255.
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ uint8_t>::FromScalar(double value) {
+ // Handle NaNs and less than zero values which clamp to zero.
+ if (!(value > 0)) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(lrint(value));
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ uint8_t>::ToHandle(Isolate* isolate,
+ uint8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ int value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ uint32_t value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ double value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ int64_t value) {
+ return value;
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ uint64_t value) {
+ return static_cast<int64_t>(value);
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromObject(
+ Object value, bool* lossless) {
+ return BigInt::cast(value).AsInt64(lossless);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS,
+ int64_t>::ToHandle(Isolate* isolate,
+ int64_t value) {
+ return BigInt::FromInt64(isolate, value);
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(int value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(uint32_t value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(double value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(int64_t value) {
+ return static_cast<uint64_t>(value);
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(uint64_t value) {
+ return value;
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromObject(Object value,
+ bool* lossless) {
+ return BigInt::cast(value).AsUint64(lossless);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::ToHandle(Isolate* isolate,
+ uint64_t value) {
+ return BigInt::FromUint64(isolate, value);
+}
+
#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype) \
using Type##ElementsAccessor = TypedElementsAccessor<TYPE##_ELEMENTS, ctype>;
TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
+RAB_GSAB_TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
#undef FIXED_ELEMENTS_ACCESSOR
template <typename Subclass, typename ArgumentsAccessor, typename KindTraits>
@@ -3894,7 +4273,7 @@ class SloppyArgumentsElementsAccessor
if (entry.as_uint32() < length) {
// Read context mapped entry.
DisallowGarbageCollection no_gc;
- Object probe = elements->mapped_entries(entry.as_uint32());
+ Object probe = elements->mapped_entries(entry.as_uint32(), kRelaxedLoad);
DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
@@ -3908,13 +4287,13 @@ class SloppyArgumentsElementsAccessor
}
}
- static void TransitionElementsKindImpl(Handle<JSObject> object,
- Handle<Map> map) {
+ static Maybe<bool> TransitionElementsKindImpl(Handle<JSObject> object,
+ Handle<Map> map) {
UNREACHABLE();
}
- static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
- uint32_t capacity) {
+ static Maybe<bool> GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
UNREACHABLE();
}
@@ -3930,7 +4309,7 @@ class SloppyArgumentsElementsAccessor
if (entry.as_uint32() < length) {
// Store context mapped entry.
DisallowGarbageCollection no_gc;
- Object probe = elements.mapped_entries(entry.as_uint32());
+ Object probe = elements.mapped_entries(entry.as_uint32(), kRelaxedLoad);
DCHECK(!probe.IsTheHole());
Context context = Context::cast(elements.context());
int context_entry = Smi::ToInt(probe);
@@ -3953,9 +4332,9 @@ class SloppyArgumentsElementsAccessor
}
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> parameter_map) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> parameter_map) {
// Sloppy arguments objects are not arrays.
UNREACHABLE();
}
@@ -4063,7 +4442,7 @@ class SloppyArgumentsElementsAccessor
size_t index) {
uint32_t length = elements.length();
if (index >= length) return false;
- return !elements.mapped_entries(static_cast<uint32_t>(index))
+ return !elements.mapped_entries(static_cast<uint32_t>(index), kRelaxedLoad)
.IsTheHole(isolate);
}
@@ -4118,7 +4497,8 @@ class SloppyArgumentsElementsAccessor
uint32_t length = elements->length();
for (uint32_t i = 0; i < length; ++i) {
- if (elements->mapped_entries(i).IsTheHole(isolate)) continue;
+ if (elements->mapped_entries(i, kRelaxedLoad).IsTheHole(isolate))
+ continue;
if (convert == GetKeysConversion::kConvertToString) {
Handle<String> index_string = isolate->factory()->Uint32ToString(i);
list->set(insertion_index, *index_string);
@@ -4251,9 +4631,10 @@ class SlowSloppyArgumentsElementsAccessor
NumberDictionary::DeleteEntry(isolate, dict, entry.adjust_down(length));
elements->set_arguments(*dict);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
Isolate* isolate = object->GetIsolate();
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(object->elements()), isolate);
@@ -4270,6 +4651,7 @@ class SlowSloppyArgumentsElementsAccessor
if (*dictionary != *new_dictionary) {
elements->set_arguments(*new_dictionary);
}
+ return Just(true);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -4281,7 +4663,7 @@ class SlowSloppyArgumentsElementsAccessor
Handle<SloppyArgumentsElements>::cast(store);
uint32_t length = elements->length();
if (entry.as_uint32() < length) {
- Object probe = elements->mapped_entries(entry.as_uint32());
+ Object probe = elements->mapped_entries(entry.as_uint32(), kRelaxedLoad);
DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
@@ -4365,9 +4747,10 @@ class FastSloppyArgumentsElementsAccessor
SlowSloppyArgumentsElementsAccessor::SloppyDeleteImpl(obj, elements, entry);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
DCHECK_EQ(NONE, attributes);
Isolate* isolate = object->GetIsolate();
Handle<SloppyArgumentsElements> elements(
@@ -4375,7 +4758,8 @@ class FastSloppyArgumentsElementsAccessor
Handle<FixedArray> old_arguments(elements->arguments(), isolate);
if (old_arguments->IsNumberDictionary() ||
static_cast<uint32_t>(old_arguments->length()) < new_capacity) {
- GrowCapacityAndConvertImpl(object, new_capacity);
+ MAYBE_RETURN(GrowCapacityAndConvertImpl(object, new_capacity),
+ Nothing<bool>());
}
FixedArray arguments = elements->arguments();
// For fast holey objects, the entry equals the index. The code above made
@@ -4385,6 +4769,7 @@ class FastSloppyArgumentsElementsAccessor
// kMaxUInt32.
FastHoleyObjectElementsAccessor::SetImpl(arguments, InternalIndex(index),
*value);
+ return Just(true);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -4414,8 +4799,8 @@ class FastSloppyArgumentsElementsAccessor
}
}
- static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
- uint32_t capacity) {
+ static Maybe<bool> GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
Isolate* isolate = object->GetIsolate();
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(object->elements()), isolate);
@@ -4426,13 +4811,17 @@ class FastSloppyArgumentsElementsAccessor
// elements.
DCHECK(from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS ||
static_cast<uint32_t>(old_arguments->length()) < capacity);
- Handle<FixedArrayBase> arguments =
- ConvertElementsWithCapacity(object, old_arguments, from_kind, capacity);
+ Handle<FixedArrayBase> arguments;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, arguments,
+ ConvertElementsWithCapacity(object, old_arguments, from_kind, capacity),
+ Nothing<bool>());
Handle<Map> new_map = JSObject::GetElementsTransitionMap(
object, FAST_SLOPPY_ARGUMENTS_ELEMENTS);
JSObject::MigrateToMap(isolate, object, new_map);
elements->set_arguments(FixedArray::cast(*arguments));
JSObject::ValidateElements(*object);
+ return Just(true);
}
};
@@ -4504,9 +4893,10 @@ class StringWrapperElementsAccessor
value);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
DCHECK(index >= static_cast<uint32_t>(GetString(*object).length()));
// Explicitly grow fast backing stores if needed. Dictionaries know how to
// extend their capacity themselves.
@@ -4514,10 +4904,12 @@ class StringWrapperElementsAccessor
(object->GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS ||
BackingStoreAccessor::GetCapacityImpl(*object, object->elements()) !=
new_capacity)) {
- GrowCapacityAndConvertImpl(object, new_capacity);
+ MAYBE_RETURN(GrowCapacityAndConvertImpl(object, new_capacity),
+ Nothing<bool>());
}
BackingStoreAccessor::AddImpl(object, index, value, attributes,
new_capacity);
+ return Just(true);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -4562,8 +4954,8 @@ class StringWrapperElementsAccessor
backing_store, keys);
}
- static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
- uint32_t capacity) {
+ static Maybe<bool> GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
Handle<FixedArrayBase> old_elements(object->elements(),
object->GetIsolate());
ElementsKind from_kind = object->GetElementsKind();
@@ -4578,9 +4970,9 @@ class StringWrapperElementsAccessor
// elements.
DCHECK(from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
static_cast<uint32_t>(old_elements->length()) < capacity);
- Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
- FAST_STRING_WRAPPER_ELEMENTS,
- capacity);
+ return Subclass::BasicGrowCapacityAndConvertImpl(
+ object, old_elements, from_kind, FAST_STRING_WRAPPER_ELEMENTS,
+ capacity);
}
static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
@@ -4665,7 +5057,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(
} else {
// Take the argument as the length.
JSArray::Initialize(array, 0);
- JSArray::SetLength(array, length);
+ MAYBE_RETURN_NULL(JSArray::SetLength(array, length));
}
return array;
}