diff options
author | Michaël Zasso <targos@protonmail.com> | 2018-12-04 08:20:37 +0100 |
---|---|---|
committer | Michaël Zasso <targos@protonmail.com> | 2018-12-06 15:23:33 +0100 |
commit | 9b4bf7de6c9a7c25f116c7a502384c20b5cfaea3 (patch) | |
tree | 2b0c843168dafb939d8df8a15b2aa72b76dee51d /deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc | |
parent | b8fbe69db1292307adb2c2b2e0d5ef48c4ab2faf (diff) | |
download | node-new-9b4bf7de6c9a7c25f116c7a502384c20b5cfaea3.tar.gz |
deps: update V8 to 7.1.302.28
PR-URL: https://github.com/nodejs/node/pull/23423
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc')
-rw-r--r-- | deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc | 298 |
1 files changed, 233 insertions, 65 deletions
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc index 52673bfd36..4befb13d7c 100644 --- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc +++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc @@ -11,6 +11,8 @@ namespace v8 { namespace internal { using compiler::Node; +template <typename T> +using TNode = compiler::TNode<T>; class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler { public: @@ -21,7 +23,8 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler { protected: typedef Node* (CodeAssembler::*AssemblerFunction)(MachineType type, Node* base, Node* offset, - Node* value); + Node* value, + Node* value_high); void ValidateSharedTypedArray(Node* tagged, Node* context, Node** out_instance_type, Node** out_backing_store); @@ -35,6 +38,11 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler { void AtomicBinopBuiltinCommon(Node* array, Node* index, Node* value, Node* context, AssemblerFunction function, Runtime::FunctionId runtime_function); + + // Create a BigInt from the result of a 64-bit atomic operation, using + // projections on 32-bit platforms. + TNode<BigInt> BigIntFromSigned64(Node* signed64); + TNode<BigInt> BigIntFromUnsigned64(Node* unsigned64); }; void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( @@ -50,10 +58,9 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( &invalid); // Fail if the array's JSArrayBuffer is not shared. 
- Node* array_buffer = LoadObjectField(tagged, JSTypedArray::kBufferOffset); - Node* bitfield = LoadObjectField(array_buffer, JSArrayBuffer::kBitFieldOffset, - MachineType::Uint32()); - GotoIfNot(IsSetWord32<JSArrayBuffer::IsShared>(bitfield), &invalid); + TNode<JSArrayBuffer> array_buffer = LoadJSArrayBufferViewBuffer(CAST(tagged)); + TNode<Uint32T> bitfield = LoadJSArrayBufferBitField(array_buffer); + GotoIfNot(IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield), &invalid); // Fail if the array's element type is float32, float64 or clamped. Node* elements_instance_type = LoadInstanceType(LoadElements(tagged)); @@ -63,8 +70,13 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE); STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE); STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE); - Branch(Int32LessThan(elements_instance_type, + GotoIf(Int32LessThan(elements_instance_type, Int32Constant(FIXED_FLOAT32_ARRAY_TYPE)), + &not_float_or_clamped); + STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE); + STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE); + Branch(Int32GreaterThan(elements_instance_type, + Int32Constant(FIXED_UINT8_CLAMPED_ARRAY_TYPE)), &not_float_or_clamped, &invalid); BIND(&invalid); @@ -76,15 +88,12 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( BIND(&not_float_or_clamped); *out_instance_type = elements_instance_type; - Node* backing_store = - LoadObjectField(array_buffer, JSArrayBuffer::kBackingStoreOffset); - Node* byte_offset = ChangeUint32ToWord(TruncateTaggedToWord32( - context, LoadObjectField(tagged, JSArrayBufferView::kByteOffsetOffset))); - *out_backing_store = - IntPtrAdd(BitcastTaggedToWord(backing_store), byte_offset); + TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(array_buffer); + TNode<UintPtrT> byte_offset = LoadJSArrayBufferViewByteOffset(CAST(tagged)); 
+ *out_backing_store = IntPtrAdd(backing_store, byte_offset); } -// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess +// https://tc39.github.io/ecma262/#sec-validateatomicaccess Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32( Node* tagged, Node* context, Node** number_index) { VARIABLE(var_result, MachineRepresentation::kWord32); @@ -112,7 +121,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array, // Check if the index is in bounds. If not, throw RangeError. Label check_passed(this); Node* array_length_word32 = - TruncateTaggedToWord32(context, LoadTypedArrayLength(CAST(array))); + TruncateTaggedToWord32(context, LoadJSTypedArrayLength(CAST(array))); GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed); ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex); @@ -130,10 +139,32 @@ void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex( CSA_ASSERT(this, Uint32LessThan(index_word, TruncateTaggedToWord32( - context, LoadTypedArrayLength(CAST(array))))); + context, LoadJSTypedArrayLength(CAST(array))))); } #endif +TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromSigned64( + Node* signed64) { + if (Is64()) { + return BigIntFromInt64(UncheckedCast<IntPtrT>(signed64)); + } else { + TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Projection(0, signed64)); + TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Projection(1, signed64)); + return BigIntFromInt32Pair(low, high); + } +} + +TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64( + Node* unsigned64) { + if (Is64()) { + return BigIntFromUint64(UncheckedCast<UintPtrT>(unsigned64)); + } else { + TNode<UintPtrT> low = UncheckedCast<UintPtrT>(Projection(0, unsigned64)); + TNode<UintPtrT> high = UncheckedCast<UintPtrT>(Projection(1, unsigned64)); + return BigIntFromUint32Pair(low, high); + } +} + TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) { Node* array = 
Parameter(Descriptor::kArray); Node* index = Parameter(Descriptor::kIndex); @@ -150,14 +181,14 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) { Node* index_word = ChangeUint32ToWord(index_word32); Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), - other(this); + i64(this), u64(this), other(this); int32_t case_values[] = { - FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, - FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, - }; - Label* case_labels[] = { - &i8, &u8, &i16, &u16, &i32, &u32, + FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, + FIXED_INT16_ARRAY_TYPE, FIXED_UINT16_ARRAY_TYPE, + FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, + FIXED_BIGINT64_ARRAY_TYPE, FIXED_BIGUINT64_ARRAY_TYPE, }; + Label* case_labels[] = {&i8, &u8, &i16, &u16, &i32, &u32, &i64, &u64}; Switch(instance_type, &other, case_values, case_labels, arraysize(case_labels)); @@ -184,7 +215,24 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) { BIND(&u32); Return(ChangeUint32ToTagged(AtomicLoad(MachineType::Uint32(), backing_store, WordShl(index_word, 2)))); +#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6 + BIND(&i64); + Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer)); + BIND(&u64); + Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer)); +#else + BIND(&i64); + // This uses Uint64() intentionally: AtomicLoad is not implemented for + // Int64(), which is fine because the machine instruction only cares + // about words. + Return(BigIntFromSigned64(AtomicLoad(MachineType::Uint64(), backing_store, + WordShl(index_word, 3)))); + + BIND(&u64); + Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store, + WordShl(index_word, 3)))); +#endif // This shouldn't happen, we've already validated the type. 
BIND(&other); Unreachable(); @@ -206,6 +254,13 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { ValidateAtomicIndex(array, index_word32, context); Node* index_word = ChangeUint32ToWord(index_word32); + Label u8(this), u16(this), u32(this), u64(this), other(this); + STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE); + STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE); + GotoIf( + Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)), + &u64); + Node* value_integer = ToInteger_Inline(CAST(context), CAST(value)); Node* value_word32 = TruncateTaggedToWord32(context, value_integer); @@ -213,14 +268,11 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { DebugSanityCheckAtomicIndex(array, index_word32, context); #endif - Label u8(this), u16(this), u32(this), other(this); int32_t case_values[] = { FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, }; - Label* case_labels[] = { - &u8, &u8, &u16, &u16, &u32, &u32, - }; + Label* case_labels[] = {&u8, &u8, &u16, &u16, &u32, &u32}; Switch(instance_type, &other, case_values, case_labels, arraysize(case_labels)); @@ -239,6 +291,24 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { WordShl(index_word, 2), value_word32); Return(value_integer); + BIND(&u64); +#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6 + Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_integer, + value)); +#else + TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value)); +#if DEBUG + DebugSanityCheckAtomicIndex(array, index_word32, context); +#endif + TVARIABLE(UintPtrT, var_low); + TVARIABLE(UintPtrT, var_high); + BigIntToRawBytes(value_bigint, &var_low, &var_high); + Node* high = Is64() ? 
nullptr : static_cast<Node*>(var_high.value()); + AtomicStore(MachineRepresentation::kWord64, backing_store, + WordShl(index_word, 3), var_low.value(), high); + Return(value_bigint); +#endif + // This shouldn't happen, we've already validated the type. BIND(&other); Unreachable(); @@ -259,22 +329,26 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { ConvertTaggedAtomicIndexToWord32(index, context, &index_integer); ValidateAtomicIndex(array, index_word32, context); - Node* value_integer = ToInteger_Inline(CAST(context), CAST(value)); - -#if DEBUG - DebugSanityCheckAtomicIndex(array, index_word32, context); -#endif - #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_integer, - value_integer)); + value)); #else Node* index_word = ChangeUint32ToWord(index_word32); + Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), + i64(this), u64(this), big(this), other(this); + STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE); + STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE); + GotoIf( + Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)), + &big); + + Node* value_integer = ToInteger_Inline(CAST(context), CAST(value)); +#if DEBUG + DebugSanityCheckAtomicIndex(array, index_word32, context); +#endif Node* value_word32 = TruncateTaggedToWord32(context, value_integer); - Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), - other(this); int32_t case_values[] = { FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, @@ -311,6 +385,34 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { AtomicExchange(MachineType::Uint32(), backing_store, WordShl(index_word, 2), value_word32))); + BIND(&big); + TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value)); +#if DEBUG + 
DebugSanityCheckAtomicIndex(array, index_word32, context); +#endif + TVARIABLE(UintPtrT, var_low); + TVARIABLE(UintPtrT, var_high); + BigIntToRawBytes(value_bigint, &var_low, &var_high); + Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value()); + GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)), + &i64); + GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)), + &u64); + Unreachable(); + + BIND(&i64); + // This uses Uint64() intentionally: AtomicExchange is not implemented for + // Int64(), which is fine because the machine instruction only cares + // about words. + Return(BigIntFromSigned64(AtomicExchange(MachineType::Uint64(), backing_store, + WordShl(index_word, 3), + var_low.value(), high))); + + BIND(&u64); + Return(BigIntFromUnsigned64( + AtomicExchange(MachineType::Uint64(), backing_store, + WordShl(index_word, 3), var_low.value(), high))); + // This shouldn't happen, we've already validated the type. BIND(&other); Unreachable(); @@ -333,26 +435,29 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { ConvertTaggedAtomicIndexToWord32(index, context, &index_integer); ValidateAtomicIndex(array, index_word32, context); - Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value)); - Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value)); - -#if DEBUG - DebugSanityCheckAtomicIndex(array, index_word32, context); -#endif - #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array, - index_integer, old_value_integer, new_value_integer)); + index_integer, old_value, new_value)); #else Node* index_word = ChangeUint32ToWord(index_word32); - Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer); + Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), + 
i64(this), u64(this), big(this), other(this); + STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE); + STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE); + GotoIf( + Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)), + &big); + Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value)); + Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value)); +#if DEBUG + DebugSanityCheckAtomicIndex(array, index_word32, context); +#endif + Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer); Node* new_value_word32 = TruncateTaggedToWord32(context, new_value_integer); - Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), - other(this); int32_t case_values[] = { FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, @@ -393,6 +498,39 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { MachineType::Uint32(), backing_store, WordShl(index_word, 2), old_value_word32, new_value_word32))); + BIND(&big); + TNode<BigInt> old_value_bigint = ToBigInt(CAST(context), CAST(old_value)); + TNode<BigInt> new_value_bigint = ToBigInt(CAST(context), CAST(new_value)); +#if DEBUG + DebugSanityCheckAtomicIndex(array, index_word32, context); +#endif + TVARIABLE(UintPtrT, var_old_low); + TVARIABLE(UintPtrT, var_old_high); + TVARIABLE(UintPtrT, var_new_low); + TVARIABLE(UintPtrT, var_new_high); + BigIntToRawBytes(old_value_bigint, &var_old_low, &var_old_high); + BigIntToRawBytes(new_value_bigint, &var_new_low, &var_new_high); + Node* old_high = Is64() ? nullptr : static_cast<Node*>(var_old_high.value()); + Node* new_high = Is64() ? 
nullptr : static_cast<Node*>(var_new_high.value()); + GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)), + &i64); + GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)), + &u64); + Unreachable(); + + BIND(&i64); + // This uses Uint64() intentionally: AtomicCompareExchange is not implemented + // for Int64(), which is fine because the machine instruction only cares + // about words. + Return(BigIntFromSigned64(AtomicCompareExchange( + MachineType::Uint64(), backing_store, WordShl(index_word, 3), + var_old_low.value(), var_new_low.value(), old_high, new_high))); + + BIND(&u64); + Return(BigIntFromUnsigned64(AtomicCompareExchange( + MachineType::Uint64(), backing_store, WordShl(index_word, 3), + var_old_low.value(), var_new_low.value(), old_high, new_high))); + // This shouldn't happen, we've already validated the type. BIND(&other); Unreachable(); @@ -429,27 +567,27 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( ConvertTaggedAtomicIndexToWord32(index, context, &index_integer); ValidateAtomicIndex(array, index_word32, context); - Node* value_integer = ToInteger_Inline(CAST(context), CAST(value)); - -#if DEBUG - // In Debug mode, we re-validate the index as a sanity check because - // ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be - // neutered and the TypedArray length can't change either, so skipping this - // check in Release mode is safe. 
- ValidateAtomicIndex(array, index_word32, context); -#endif - #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X - Return(CallRuntime(runtime_function, context, array, index_integer, - value_integer)); + Return(CallRuntime(runtime_function, context, array, index_integer, value)); #else Node* index_word = ChangeUint32ToWord(index_word32); + Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), + i64(this), u64(this), big(this), other(this); + + STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE); + STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE); + GotoIf( + Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)), + &big); + + Node* value_integer = ToInteger_Inline(CAST(context), CAST(value)); +#if DEBUG + DebugSanityCheckAtomicIndex(array, index_word32, context); +#endif Node* value_word32 = TruncateTaggedToWord32(context, value_integer); - Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), - other(this); int32_t case_values[] = { FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, @@ -462,29 +600,59 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( BIND(&i8); Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store, - index_word, value_word32))); + index_word, value_word32, nullptr))); BIND(&u8); Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store, - index_word, value_word32))); + index_word, value_word32, nullptr))); BIND(&i16); Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store, - WordShl(index_word, 1), value_word32))); + WordShl(index_word, 1), value_word32, + nullptr))); BIND(&u16); Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store, - WordShl(index_word, 1), value_word32))); + 
WordShl(index_word, 1), value_word32, + nullptr))); BIND(&i32); Return(ChangeInt32ToTagged( (this->*function)(MachineType::Int32(), backing_store, - WordShl(index_word, 2), value_word32))); + WordShl(index_word, 2), value_word32, nullptr))); BIND(&u32); Return(ChangeUint32ToTagged( (this->*function)(MachineType::Uint32(), backing_store, - WordShl(index_word, 2), value_word32))); + WordShl(index_word, 2), value_word32, nullptr))); + + BIND(&big); + TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value)); +#if DEBUG + DebugSanityCheckAtomicIndex(array, index_word32, context); +#endif + TVARIABLE(UintPtrT, var_low); + TVARIABLE(UintPtrT, var_high); + BigIntToRawBytes(value_bigint, &var_low, &var_high); + Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value()); + GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)), + &i64); + GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)), + &u64); + Unreachable(); + + BIND(&i64); + // This uses Uint64() intentionally: Atomic* ops are not implemented for + // Int64(), which is fine because the machine instructions only care + // about words. + Return(BigIntFromSigned64( + (this->*function)(MachineType::Uint64(), backing_store, + WordShl(index_word, 3), var_low.value(), high))); + + BIND(&u64); + Return(BigIntFromUnsigned64( + (this->*function)(MachineType::Uint64(), backing_store, + WordShl(index_word, 3), var_low.value(), high))); // This shouldn't happen, we've already validated the type. BIND(&other); |