Author:    Allan Sandfeld Jensen <allan.jensen@qt.io> 2017-03-08 10:28:10 +0100
Committer: Allan Sandfeld Jensen <allan.jensen@qt.io> 2017-03-20 13:40:30 +0000
Commit:    e733310db58160074f574c429d48f8308c0afe17 (patch)
Tree:      f8aef4b7e62a69928dbcf880620eece20f98c6df /chromium/v8/src/code-stub-assembler.cc
Parent:    2f583e4aec1ae3a86fa047829c96b310dc12ecdf (diff)
Download:  qtwebengine-chromium-e733310db58160074f574c429d48f8308c0afe17.tar.gz
BASELINE: Update Chromium to 56.0.2924.122
Change-Id: I4e04de8f47e47e501c46ed934c76a431c6337ced
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/v8/src/code-stub-assembler.cc')
-rw-r--r--  chromium/v8/src/code-stub-assembler.cc | 4625
1 file changed, 4046 insertions(+), 579 deletions(-)
diff --git a/chromium/v8/src/code-stub-assembler.cc b/chromium/v8/src/code-stub-assembler.cc
index de615326306..b1ed2f13c7f 100644
--- a/chromium/v8/src/code-stub-assembler.cc
+++ b/chromium/v8/src/code-stub-assembler.cc
@@ -1,7 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
#include "src/code-stub-assembler.h"
#include "src/code-factory.h"
#include "src/frames-inl.h"
@@ -26,11 +25,32 @@ CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
const char* name)
: compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
-void CodeStubAssembler::Assert(Node* condition) {
+void CodeStubAssembler::Assert(ConditionBody condition_body, const char* message,
+ const char* file, int line) {
#if defined(DEBUG)
Label ok(this);
- Comment("[ Assert");
- GotoIf(condition, &ok);
+ Label not_ok(this, Label::kDeferred);
+ if (message != nullptr && FLAG_code_comments) {
+ Comment("[ Assert: %s", message);
+ } else {
+ Comment("[ Assert");
+ }
+ Node* condition = condition_body();
+ DCHECK_NOT_NULL(condition);
+ Branch(condition, &ok, &not_ok);
+ Bind(&not_ok);
+ if (message != nullptr) {
+ char chars[1024];
+ Vector<char> buffer(chars);
+ if (file != nullptr) {
+ SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
+ } else {
+ SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
+ }
+ CallRuntime(
+ Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
+ HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
+ }
DebugBreak();
Goto(&ok);
Bind(&ok);
@@ -38,9 +58,7 @@ void CodeStubAssembler::Assert(Node* condition) {
#endif
}
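The assertion now takes a deferred condition (ConditionBody, presumably a std::function<Node*()>-style callable) plus a message and source location, and only prints via Runtime::kGlobalPrint when a message is supplied. A plausible driver macro, sketched here as an assumption rather than quoted from the patch:

    // Hypothetical CSA_ASSERT wrapper: stringify the condition, capture the
    // call site, and defer evaluation behind a lambda so that the condition
    // is only computed in DEBUG builds (the Assert body above compiles to
    // nothing otherwise).
    #define CSA_ASSERT(csa, x) \
      (csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__)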
-Node* CodeStubAssembler::NoContextConstant() {
- return SmiConstant(Smi::FromInt(0));
-}
+Node* CodeStubAssembler::NoContextConstant() { return NumberConstant(0); }
#define HEAP_CONSTANT_ACCESSOR(rootName, name) \
Node* CodeStubAssembler::name##Constant() { \
@@ -73,6 +91,62 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
}
}
+Node* CodeStubAssembler::IntPtrAddFoldConstants(Node* left, Node* right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant + right_constant);
+ }
+ if (left_constant == 0) {
+ return right;
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return IntPtrAdd(left, right);
+}
+
+Node* CodeStubAssembler::IntPtrSubFoldConstants(Node* left, Node* right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant - right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return IntPtrSub(left, right);
+}
+
+Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
+ Comment("IntPtrRoundUpToPowerOfTwo32");
+ CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
+ value = IntPtrSub(value, IntPtrConstant(1));
+ for (int i = 1; i <= 16; i *= 2) {
+ value = WordOr(value, WordShr(value, IntPtrConstant(i)));
+ }
+ return IntPtrAdd(value, IntPtrConstant(1));
+}
+
+Node* CodeStubAssembler::WordIsPowerOfTwo(Node* value) {
+ // value && !(value & (value - 1))
+ return WordEqual(
+ Select(WordEqual(value, IntPtrConstant(0)), IntPtrConstant(1),
+ WordAnd(value, IntPtrSub(value, IntPtrConstant(1))),
+ MachineType::PointerRepresentation()),
+ IntPtrConstant(0));
+}
+
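Both new helpers are classic bit tricks; a minimal C++ sketch of the same arithmetic on plain integers (hypothetical standalone helpers, not part of the patch):

    #include <cstdint>

    // Round up to the next power of two: subtract one, smear the highest
    // set bit into every lower position, then add one back.
    uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
      value -= 1;
      for (int shift = 1; shift <= 16; shift *= 2) value |= value >> shift;
      return value + 1;
    }

    // Exactly-one-bit-set test, i.e. value && !(value & (value - 1)).
    bool IsPowerOfTwo(uintptr_t value) {
      return value != 0 && (value & (value - 1)) == 0;
    }

The CSA version expresses the same test branch-free with Select because it must produce a Node*, not a C++ bool.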
Node* CodeStubAssembler::Float64Round(Node* x) {
Node* one = Float64Constant(1.0);
Node* one_half = Float64Constant(0.5);
@@ -324,38 +398,39 @@ Node* CodeStubAssembler::SmiToFloat64(Node* value) {
return ChangeInt32ToFloat64(SmiToWord32(value));
}
-Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
-
-Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
- return IntPtrAddWithOverflow(a, b);
+Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) {
+ return BitcastWordToTaggedSigned(
+ IntPtrAdd(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
}
-Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
-
-Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
- return IntPtrSubWithOverflow(a, b);
+Node* CodeStubAssembler::SmiSub(Node* a, Node* b) {
+ return BitcastWordToTaggedSigned(
+ IntPtrSub(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
}
-Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) {
+ return WordEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
+}
Node* CodeStubAssembler::SmiAbove(Node* a, Node* b) {
- return UintPtrGreaterThan(a, b);
+ return UintPtrGreaterThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
}
Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
- return UintPtrGreaterThanOrEqual(a, b);
+ return UintPtrGreaterThanOrEqual(BitcastTaggedToWord(a),
+ BitcastTaggedToWord(b));
}
Node* CodeStubAssembler::SmiBelow(Node* a, Node* b) {
- return UintPtrLessThan(a, b);
+ return UintPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
}
Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
- return IntPtrLessThan(a, b);
+ return IntPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
}
Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
- return IntPtrLessThanOrEqual(a, b);
+ return IntPtrLessThanOrEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
}
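The Smi helpers now bitcast explicitly between the tagged and word representations; the underlying identities are unchanged because Smi tagging is a left shift that leaves the tag bit zero. A sketch, assuming the 32-bit scheme (kSmiTag == 0, kSmiTagSize == 1; 64-bit V8 shifts by 32 instead, which preserves the same identities):

    #include <cstdint>

    intptr_t SmiTag(intptr_t v) { return v << 1; }    // low (tag) bit stays 0
    intptr_t SmiUntag(intptr_t t) { return t >> 1; }

    // (a << 1) + (b << 1) == (a + b) << 1, so adding the bitcast words
    // yields the already-tagged sum with no untag/retag round trip; the
    // signed and unsigned comparisons are likewise order-preserving.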
Node* CodeStubAssembler::SmiMax(Node* a, Node* b) {
@@ -481,7 +556,7 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
var_lhs_float64.Bind(SmiToFloat64(a));
var_rhs_float64.Bind(SmiToFloat64(b));
Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = ChangeFloat64ToTagged(value);
+ Node* result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
Goto(&return_result);
}
@@ -490,8 +565,9 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
return var_result.value();
}
-Node* CodeStubAssembler::WordIsSmi(Node* a) {
- return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask)), IntPtrConstant(0));
+Node* CodeStubAssembler::TaggedIsSmi(Node* a) {
+ return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
+ IntPtrConstant(0));
}
Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
@@ -499,6 +575,11 @@ Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
IntPtrConstant(0));
}
+Node* CodeStubAssembler::WordIsWordAligned(Node* word) {
+ return WordEqual(IntPtrConstant(0),
+ WordAnd(word, IntPtrConstant((1 << kPointerSizeLog2) - 1)));
+}
+
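WordIsWordAligned simply masks off the low pointer bits; in plain C++ (a sketch, assuming kPointerSizeLog2 == 3 for a 64-bit build):

    #include <cstdint>

    bool IsWordAligned(uintptr_t word) {
      return (word & ((uintptr_t{1} << 3) - 1)) == 0;  // low 3 bits clear
    }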
void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
Node* rhs, Node* rhs_map,
Label* if_equal,
@@ -599,10 +680,28 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
}
}
+void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true,
+ Label* if_false) {
+ GotoIf(TaggedIsSmi(object), if_false);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
+ Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+ if_true, if_false);
+}
+
+void CodeStubAssembler::BranchIfJSObject(Node* object, Label* if_true,
+ Label* if_false) {
+ GotoIf(TaggedIsSmi(object), if_false);
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
+ Int32Constant(FIRST_JS_OBJECT_TYPE)),
+ if_true, if_false);
+}
+
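Because the STATIC_ASSERTs pin LAST_JS_RECEIVER_TYPE (and LAST_JS_OBJECT_TYPE) to LAST_TYPE, the receiver and object ranges sit at the end of the instance-type enum, so each test collapses to a single lower-bound comparison after the Smi bail-out. Sketched with a hypothetical enum value:

    constexpr int FIRST_JS_RECEIVER_TYPE = 0x400;  // assumption, for illustration

    bool IsJSReceiverType(int instance_type) {
      return instance_type >= FIRST_JS_RECEIVER_TYPE;  // no upper bound needed
    }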
void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
Label* if_true, Label* if_false) {
// Bailout if receiver is a Smi.
- GotoIf(WordIsSmi(object), if_false);
+ GotoIf(TaggedIsSmi(object), if_false);
Node* map = LoadMap(object);
@@ -610,20 +709,14 @@ void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
GotoIf(WordNotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
if_false);
- Node* bit_field2 = LoadMapBitField2(map);
- Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+ Node* elements_kind = LoadMapElementsKind(map);
// Bailout if receiver has slow elements.
- GotoIf(
- Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
- if_false);
+ GotoUnless(IsFastElementsKind(elements_kind), if_false);
// Check prototype chain if receiver does not have packed elements.
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
- Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
- GotoIf(Word32Equal(holey_elements, Int32Constant(0)), if_true);
+ GotoUnless(IsHoleyFastElementsKind(elements_kind), if_true);
+
BranchIfPrototypesHaveNoElements(map, if_true, if_false);
}
@@ -644,19 +737,17 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
&no_runtime_call);
Bind(&runtime_call);
- // AllocateInTargetSpace does not use the context.
- Node* context = SmiConstant(Smi::FromInt(0));
-
Node* runtime_result;
if (flags & kPretenured) {
Node* runtime_flags = SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
- runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
- SmiTag(size_in_bytes), runtime_flags);
+ runtime_result =
+ CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
} else {
- runtime_result = CallRuntime(Runtime::kAllocateInNewSpace, context,
- SmiTag(size_in_bytes));
+ runtime_result = CallRuntime(Runtime::kAllocateInNewSpace,
+ NoContextConstant(), SmiTag(size_in_bytes));
}
result.Bind(runtime_result);
Goto(&merge_runtime);
@@ -730,6 +821,7 @@ Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
}
Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
+ Comment("Allocate");
bool const new_space = !(flags & kPretenured);
Node* top_address = ExternalConstant(
new_space
@@ -761,6 +853,11 @@ Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
+Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) {
+ return UintPtrLessThanOrEqual(size,
+ IntPtrConstant(kMaxRegularHeapObjectSize));
+}
+
void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Label* if_false) {
Label if_valueissmi(this), if_valueisnotsmi(this), if_valueisstring(this),
@@ -771,7 +868,7 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
GotoIf(WordEqual(value, BooleanConstant(false)), if_false);
// Check if {value} is a Smi or a HeapObject.
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
Bind(&if_valueissmi);
{
@@ -810,9 +907,8 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
MachineType::Float64());
// Check if the floating point {value} is neither 0.0, -0.0 nor NaN.
- Node* zero = Float64Constant(0.0);
- GotoIf(Float64LessThan(zero, value_value), if_true);
- BranchIfFloat64LessThan(value_value, zero, if_true, if_false);
+ Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
+ if_true, if_false);
}
Bind(&if_valueisother);
@@ -827,8 +923,8 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
// Check if the {value} is undetectable.
- BranchIfWord32Equal(value_map_undetectable, Int32Constant(0), if_true,
- if_false);
+ Branch(Word32Equal(value_map_undetectable, Int32Constant(0)), if_true,
+ if_false);
}
}
}
@@ -926,9 +1022,9 @@ Node* CodeStubAssembler::LoadInstanceType(Node* object) {
return LoadMapInstanceType(LoadMap(object));
}
-void CodeStubAssembler::AssertInstanceType(Node* object,
- InstanceType instance_type) {
- Assert(Word32Equal(LoadInstanceType(object), Int32Constant(instance_type)));
+Node* CodeStubAssembler::HasInstanceType(Node* object,
+ InstanceType instance_type) {
+ return Word32Equal(LoadInstanceType(object), Int32Constant(instance_type));
}
Node* CodeStubAssembler::LoadProperties(Node* object) {
@@ -939,11 +1035,12 @@ Node* CodeStubAssembler::LoadElements(Node* object) {
return LoadObjectField(object, JSObject::kElementsOffset);
}
-Node* CodeStubAssembler::LoadJSArrayLength(compiler::Node* array) {
+Node* CodeStubAssembler::LoadJSArrayLength(Node* array) {
+ CSA_ASSERT(this, IsJSArray(array));
return LoadObjectField(array, JSArray::kLengthOffset);
}
-Node* CodeStubAssembler::LoadFixedArrayBaseLength(compiler::Node* array) {
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
return LoadObjectField(array, FixedArrayBase::kLengthOffset);
}
@@ -952,14 +1049,17 @@ Node* CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(Node* array) {
}
Node* CodeStubAssembler::LoadMapBitField(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8());
}
Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8());
}
Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32());
}
@@ -968,44 +1068,64 @@ Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
}
Node* CodeStubAssembler::LoadMapElementsKind(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field2 = LoadMapBitField2(map);
- return BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+ return DecodeWord32<Map::ElementsKindBits>(bit_field2);
}
Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kDescriptorsOffset);
}
Node* CodeStubAssembler::LoadMapPrototype(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kPrototypeOffset);
}
+Node* CodeStubAssembler::LoadMapPrototypeInfo(Node* map,
+ Label* if_no_proto_info) {
+ CSA_ASSERT(this, IsMap(map));
+ Node* prototype_info =
+ LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
+ GotoIf(TaggedIsSmi(prototype_info), if_no_proto_info);
+ GotoUnless(WordEqual(LoadMap(prototype_info),
+ LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
+ if_no_proto_info);
+ return prototype_info;
+}
+
Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return ChangeUint32ToWord(
LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetInObjectProperties() for details.
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- Assert(Int32GreaterThanOrEqual(LoadMapInstanceType(map),
- Int32Constant(FIRST_JS_OBJECT_TYPE)));
+ CSA_ASSERT(this,
+ Int32GreaterThanOrEqual(LoadMapInstanceType(map),
+ Int32Constant(FIRST_JS_OBJECT_TYPE)));
return ChangeUint32ToWord(LoadObjectField(
map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapConstructorFunctionIndex(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetConstructorFunctionIndex() for details.
STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
- Assert(Int32LessThanOrEqual(LoadMapInstanceType(map),
- Int32Constant(LAST_PRIMITIVE_TYPE)));
+ CSA_ASSERT(this, Int32LessThanOrEqual(LoadMapInstanceType(map),
+ Int32Constant(LAST_PRIMITIVE_TYPE)));
return ChangeUint32ToWord(LoadObjectField(
map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
Variable result(this, MachineRepresentation::kTagged);
result.Bind(LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
@@ -1013,7 +1133,7 @@ Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
Goto(&loop);
Bind(&loop);
{
- GotoIf(WordIsSmi(result.value()), &done);
+ GotoIf(TaggedIsSmi(result.value()), &done);
Node* is_map_type =
Word32Equal(LoadInstanceType(result.value()), Int32Constant(MAP_TYPE));
GotoUnless(is_map_type, &done);
@@ -1026,6 +1146,7 @@ Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
}
Node* CodeStubAssembler::LoadNameHashField(Node* name) {
+ CSA_ASSERT(this, IsName(name));
return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
}
@@ -1041,15 +1162,23 @@ Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) {
}
Node* CodeStubAssembler::LoadStringLength(Node* object) {
+ CSA_ASSERT(this, IsString(object));
return LoadObjectField(object, String::kLengthOffset);
}
Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
+ CSA_ASSERT(this, IsJSValue(object));
return LoadObjectField(object, JSValue::kValueOffset);
}
+Node* CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) {
+ // TODO(ishell): fix callers.
+ return LoadObjectField(weak_cell, WeakCell::kValueOffset);
+}
+
Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
- Node* value = LoadObjectField(weak_cell, WeakCell::kValueOffset);
+ CSA_ASSERT(this, IsWeakCell(weak_cell));
+ Node* value = LoadWeakCellValueUnchecked(weak_cell);
if (if_cleared != nullptr) {
GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
}
@@ -1066,6 +1195,44 @@ Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
return Load(MachineType::AnyTagged(), object, offset);
}
+Node* CodeStubAssembler::LoadFixedTypedArrayElement(
+ Node* data_pointer, Node* index_node, ElementsKind elements_kind,
+ ParameterMode parameter_mode) {
+ Node* offset =
+ ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
+ MachineType type;
+ switch (elements_kind) {
+ case UINT8_ELEMENTS: /* fall through */
+ case UINT8_CLAMPED_ELEMENTS:
+ type = MachineType::Uint8();
+ break;
+ case INT8_ELEMENTS:
+ type = MachineType::Int8();
+ break;
+ case UINT16_ELEMENTS:
+ type = MachineType::Uint16();
+ break;
+ case INT16_ELEMENTS:
+ type = MachineType::Int16();
+ break;
+ case UINT32_ELEMENTS:
+ type = MachineType::Uint32();
+ break;
+ case INT32_ELEMENTS:
+ type = MachineType::Int32();
+ break;
+ case FLOAT32_ELEMENTS:
+ type = MachineType::Float32();
+ break;
+ case FLOAT64_ELEMENTS:
+ type = MachineType::Float64();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return Load(type, data_pointer, offset);
+}
+
Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
@@ -1088,6 +1255,7 @@ Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
Node* object, Node* index_node, MachineType machine_type,
int additional_offset, ParameterMode parameter_mode, Label* if_hole) {
+ CSA_ASSERT(this, IsFixedDoubleArray(object));
int32_t header_size =
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS,
@@ -1125,12 +1293,35 @@ Node* CodeStubAssembler::LoadContextElement(Node* context, int slot_index) {
return Load(MachineType::AnyTagged(), context, IntPtrConstant(offset));
}
+Node* CodeStubAssembler::LoadContextElement(Node* context, Node* slot_index) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
+ return Load(MachineType::AnyTagged(), context, offset);
+}
+
+Node* CodeStubAssembler::StoreContextElement(Node* context, int slot_index,
+ Node* value) {
+ int offset = Context::SlotOffset(slot_index);
+ return Store(MachineRepresentation::kTagged, context, IntPtrConstant(offset),
+ value);
+}
+
+Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index,
+ Node* value) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
+ return Store(MachineRepresentation::kTagged, context, offset, value);
+}
+
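The dynamic-slot load and both stores compute the same address: the slot index scaled to pointer size, plus the Context header, minus the heap-object tag. As plain C++ (a sketch; the constants are assumptions for a 64-bit build):

    #include <cstdint>

    constexpr intptr_t kPointerSizeLog2 = 3;     // assumption: 8-byte slots
    constexpr intptr_t kHeapObjectTag = 1;       // tagged pointers are odd
    constexpr intptr_t kContextHeaderSize = 16;  // hypothetical header size

    intptr_t ContextSlotOffset(intptr_t slot_index) {
      // Untagging and indexing past the header fold into one addend.
      return (slot_index << kPointerSizeLog2) + kContextHeaderSize - kHeapObjectTag;
    }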
Node* CodeStubAssembler::LoadNativeContext(Node* context) {
return LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX);
}
Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
Node* native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
return LoadFixedArrayElement(native_context,
IntPtrConstant(Context::ArrayMapIndex(kind)));
}
@@ -1206,6 +1397,7 @@ Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
+ CSA_ASSERT(this, IsFixedDoubleArray(object));
Node* offset =
ElementOffsetFromIndex(index_node, FAST_DOUBLE_ELEMENTS, parameter_mode,
FixedArray::kHeaderSize - kHeapObjectTag);
@@ -1230,8 +1422,11 @@ Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value,
return result;
}
-Node* CodeStubAssembler::AllocateSeqOneByteString(int length) {
- Node* result = Allocate(SeqOneByteString::SizeFor(length));
+Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
+ AllocationFlags flags) {
+ Comment("AllocateSeqOneByteString");
+ Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
SmiConstant(Smi::FromInt(length)));
@@ -1241,27 +1436,31 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(int length) {
return result;
}
-Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
+Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
+ ParameterMode mode,
+ AllocationFlags flags) {
+ Comment("AllocateSeqOneByteString");
Variable var_result(this, MachineRepresentation::kTagged);
// Compute the SeqOneByteString size and check if it fits into new space.
Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
if_join(this);
- Node* size = WordAnd(
- IntPtrAdd(
- IntPtrAdd(length, IntPtrConstant(SeqOneByteString::kHeaderSize)),
- IntPtrConstant(kObjectAlignmentMask)),
- IntPtrConstant(~kObjectAlignmentMask));
+ Node* raw_size = GetArrayAllocationSize(
+ length, UINT8_ELEMENTS, mode,
+ SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
+ Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
Bind(&if_sizeissmall);
{
// Just allocate the SeqOneByteString in new space.
- Node* result = Allocate(size);
+ Node* result = Allocate(size, flags);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
- StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
- SmiFromWord(length));
+ StoreObjectFieldNoWriteBarrier(
+ result, SeqOneByteString::kLengthOffset,
+ mode == SMI_PARAMETERS ? length : SmiFromWord(length));
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
IntPtrConstant(String::kEmptyHashField),
MachineRepresentation::kWord32);
@@ -1272,8 +1471,9 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
Bind(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
- Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
- SmiFromWord(length));
+ Node* result =
+ CallRuntime(Runtime::kAllocateSeqOneByteString, context,
+ mode == SMI_PARAMETERS ? length : SmiFromWord(length));
var_result.Bind(result);
Goto(&if_join);
}
@@ -1282,8 +1482,11 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
return var_result.value();
}
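The size computation rounds the header-plus-payload size up to the object alignment before the new-space check. The same arithmetic on plain integers (a sketch; the header size and alignment are assumptions):

    #include <cstdint>

    constexpr uintptr_t kObjectAlignmentMask = 8 - 1;      // assumption
    constexpr uintptr_t kSeqOneByteStringHeaderSize = 16;  // hypothetical

    uintptr_t SeqOneByteStringSizeFor(uintptr_t length) {
      // Add the header, then round up to the allocation granularity.
      return (length + kSeqOneByteStringHeaderSize + kObjectAlignmentMask) &
             ~kObjectAlignmentMask;
    }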
-Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
- Node* result = Allocate(SeqTwoByteString::SizeFor(length));
+Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
+ AllocationFlags flags) {
+ Comment("AllocateSeqTwoByteString");
+ Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
SmiConstant(Smi::FromInt(length)));
@@ -1293,27 +1496,31 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
return result;
}
-Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
+Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
+ ParameterMode mode,
+ AllocationFlags flags) {
+ Comment("AllocateSeqTwoByteString");
Variable var_result(this, MachineRepresentation::kTagged);
// Compute the SeqTwoByteString size and check if it fits into new space.
Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
if_join(this);
- Node* size = WordAnd(
- IntPtrAdd(IntPtrAdd(WordShl(length, 1),
- IntPtrConstant(SeqTwoByteString::kHeaderSize)),
- IntPtrConstant(kObjectAlignmentMask)),
- IntPtrConstant(~kObjectAlignmentMask));
+ Node* raw_size = GetArrayAllocationSize(
+ length, UINT16_ELEMENTS, mode,
+ SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
+ Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
Bind(&if_sizeissmall);
{
// Just allocate the SeqTwoByteString in new space.
- Node* result = Allocate(size);
+ Node* result = Allocate(size, flags);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
- StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
- SmiFromWord(length));
+ StoreObjectFieldNoWriteBarrier(
+ result, SeqTwoByteString::kLengthOffset,
+ mode == SMI_PARAMETERS ? length : SmiFromWord(length));
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
IntPtrConstant(String::kEmptyHashField),
MachineRepresentation::kWord32);
@@ -1324,8 +1531,9 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
Bind(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
- Node* result = CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
- SmiFromWord(length));
+ Node* result =
+ CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
+ mode == SMI_PARAMETERS ? length : SmiFromWord(length));
var_result.Bind(result);
Goto(&if_join);
}
@@ -1334,10 +1542,13 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
return var_result.value();
}
-Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
- Node* offset) {
+Node* CodeStubAssembler::AllocateSlicedString(
+ Heap::RootListIndex map_root_index, Node* length, Node* parent,
+ Node* offset) {
+ CSA_ASSERT(this, TaggedIsSmi(length));
Node* result = Allocate(SlicedString::kSize);
- Node* map = LoadRoot(Heap::kSlicedOneByteStringMapRootIndex);
+ Node* map = LoadRoot(map_root_index);
+ DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
MachineRepresentation::kTagged);
@@ -1351,28 +1562,118 @@ Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
return result;
}
+Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
+ Node* offset) {
+ return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length,
+ parent, offset);
+}
+
Node* CodeStubAssembler::AllocateSlicedTwoByteString(Node* length, Node* parent,
Node* offset) {
- Node* result = Allocate(SlicedString::kSize);
- Node* map = LoadRoot(Heap::kSlicedStringMapRootIndex);
+ return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent,
+ offset);
+}
+
+Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
+ Node* length, Node* first,
+ Node* second,
+ AllocationFlags flags) {
+ CSA_ASSERT(this, TaggedIsSmi(length));
+ Node* result = Allocate(ConsString::kSize, flags);
+ Node* map = LoadRoot(map_root_index);
+ DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
Int32Constant(String::kEmptyHashField),
MachineRepresentation::kWord32);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
- MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
- MachineRepresentation::kTagged);
+ bool const new_space = !(flags & kPretenured);
+ if (new_space) {
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first,
+ MachineRepresentation::kTagged);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, second,
+ MachineRepresentation::kTagged);
+ } else {
+ StoreObjectField(result, ConsString::kFirstOffset, first);
+ StoreObjectField(result, ConsString::kSecondOffset, second);
+ }
return result;
}
+Node* CodeStubAssembler::AllocateOneByteConsString(Node* length, Node* first,
+ Node* second,
+ AllocationFlags flags) {
+ return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first,
+ second, flags);
+}
+
+Node* CodeStubAssembler::AllocateTwoByteConsString(Node* length, Node* first,
+ Node* second,
+ AllocationFlags flags) {
+ return AllocateConsString(Heap::kConsStringMapRootIndex, length, first,
+ second, flags);
+}
+
+Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
+ Node* right, AllocationFlags flags) {
+ CSA_ASSERT(this, TaggedIsSmi(length));
+ // The result of adding two strings can be a cons string.
+ Comment("Allocating ConsString");
+ Node* left_instance_type = LoadInstanceType(left);
+ Node* right_instance_type = LoadInstanceType(right);
+
+ // Compute intersection and difference of instance types.
+ Node* anded_instance_types = WordAnd(left_instance_type, right_instance_type);
+ Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
+
+ // We create a one-byte cons string if
+ // 1. both strings are one-byte, or
+ // 2. at least one of the strings is two-byte, but happens to contain only
+ // one-byte characters.
+ // To do this, we check
+ // 1. if both strings are one-byte, or if the one-byte data hint is set in
+ // both strings, or
+ // 2. if one of the strings has the one-byte data hint set and the other
+ // string is one-byte.
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ STATIC_ASSERT(kOneByteDataHintTag != 0);
+ Label one_byte_map(this);
+ Label two_byte_map(this);
+ Variable result(this, MachineRepresentation::kTagged);
+ Label done(this, &result);
+ GotoIf(WordNotEqual(
+ WordAnd(anded_instance_types,
+ IntPtrConstant(kStringEncodingMask | kOneByteDataHintTag)),
+ IntPtrConstant(0)),
+ &one_byte_map);
+ Branch(WordNotEqual(WordAnd(xored_instance_types,
+ IntPtrConstant(kStringEncodingMask |
+ kOneByteDataHintMask)),
+ IntPtrConstant(kOneByteStringTag | kOneByteDataHintTag)),
+ &two_byte_map, &one_byte_map);
+
+ Bind(&one_byte_map);
+ Comment("One-byte ConsString");
+ result.Bind(AllocateOneByteConsString(length, left, right, flags));
+ Goto(&done);
+
+ Bind(&two_byte_map);
+ Comment("Two-byte ConsString");
+ result.Bind(AllocateTwoByteConsString(length, left, right, flags));
+ Goto(&done);
+
+ Bind(&done);
+
+ return result.value();
+}
+
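The encoding decision works on the bitwise AND and XOR of the two instance types, exactly as the comment describes. A sketch of the same predicate on plain words (the flag values here are assumptions for illustration):

    #include <cstdint>

    constexpr uint32_t kOneByteStringTag    = 1u << 3;  // hypothetical bit
    constexpr uint32_t kStringEncodingMask  = 1u << 3;
    constexpr uint32_t kOneByteDataHintTag  = 1u << 4;  // hypothetical bit
    constexpr uint32_t kOneByteDataHintMask = 1u << 4;

    bool ConsStringIsOneByte(uint32_t left_type, uint32_t right_type) {
      uint32_t anded = left_type & right_type;
      uint32_t xored = left_type ^ right_type;
      // 1. Both one-byte, or the one-byte data hint is set in both.
      if ((anded & (kStringEncodingMask | kOneByteDataHintTag)) != 0) return true;
      // 2. One side is one-byte and the other carries the one-byte data hint.
      return (xored & (kStringEncodingMask | kOneByteDataHintMask)) ==
             (kOneByteStringTag | kOneByteDataHintTag);
    }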
Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
Node* index, Node* input) {
Node* const max_length =
SmiConstant(Smi::FromInt(JSArray::kInitialMaxFastElementArray));
- Assert(SmiLessThanOrEqual(length, max_length));
+ CSA_ASSERT(this, SmiLessThanOrEqual(length, max_length));
+ USE(max_length);
// Allocate the JSRegExpResult.
// TODO(jgruber): Fold JSArray and FixedArray allocations, then remove
@@ -1412,6 +1713,120 @@ Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
return result;
}
+Node* CodeStubAssembler::AllocateNameDictionary(int at_least_space_for) {
+ return AllocateNameDictionary(IntPtrConstant(at_least_space_for));
+}
+
+Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
+ CSA_ASSERT(this, UintPtrLessThanOrEqual(
+ at_least_space_for,
+ IntPtrConstant(NameDictionary::kMaxCapacity)));
+
+ Node* capacity = HashTableComputeCapacity(at_least_space_for);
+ CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
+
+ Node* length = EntryToIndex<NameDictionary>(capacity);
+ Node* store_size =
+ IntPtrAddFoldConstants(WordShl(length, IntPtrConstant(kPointerSizeLog2)),
+ IntPtrConstant(NameDictionary::kHeaderSize));
+
+ Node* result = Allocate(store_size);
+ Comment("Initialize NameDictionary");
+ // Initialize FixedArray fields.
+ StoreObjectFieldRoot(result, FixedArray::kMapOffset,
+ Heap::kHashTableMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
+ SmiFromWord(length));
+ // Initialize HashTable fields.
+ Node* zero = SmiConstant(0);
+ StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex,
+ zero, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
+ SmiTag(capacity), SKIP_WRITE_BARRIER);
+ // Initialize Dictionary fields.
+ Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
+ StoreFixedArrayElement(result, NameDictionary::kMaxNumberKeyIndex, filler,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
+ SmiConstant(PropertyDetails::kInitialIndex),
+ SKIP_WRITE_BARRIER);
+
+ // Initialize NameDictionary elements.
+ result = BitcastTaggedToWord(result);
+ Node* start_address = IntPtrAdd(
+ result, IntPtrConstant(NameDictionary::OffsetOfElementAt(
+ NameDictionary::kElementsStartIndex) -
+ kHeapObjectTag));
+ Node* end_address = IntPtrAdd(
+ result,
+ IntPtrSubFoldConstants(store_size, IntPtrConstant(kHeapObjectTag)));
+ StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
+ Node* elements) {
+ CSA_ASSERT(this, IsMap(map));
+ Node* size =
+ IntPtrMul(LoadMapInstanceSize(map), IntPtrConstant(kPointerSize));
+ CSA_ASSERT(this, IsRegularHeapObjectSize(size));
+ Node* object = Allocate(size);
+ StoreMapNoWriteBarrier(object, map);
+ InitializeJSObjectFromMap(object, map, size, properties, elements);
+ return object;
+}
+
+void CodeStubAssembler::InitializeJSObjectFromMap(Node* object, Node* map,
+ Node* size, Node* properties,
+ Node* elements) {
+ // This helper assumes that the object is in new-space, as guarded by the
+ // check in AllocateJSObjectFromMap.
+ if (properties == nullptr) {
+ CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap((map))));
+ StoreObjectFieldRoot(object, JSObject::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ } else {
+ StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOffset,
+ properties);
+ }
+ if (elements == nullptr) {
+ StoreObjectFieldRoot(object, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ } else {
+ StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements);
+ }
+ InitializeJSObjectBody(object, map, size, JSObject::kHeaderSize);
+}
+
+void CodeStubAssembler::InitializeJSObjectBody(Node* object, Node* map,
+ Node* size, int start_offset) {
+ // TODO(cbruni): activate in-object slack tracking machinery.
+ Comment("InitializeJSObjectBody");
+ Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
+ // Calculate the untagged field addresses.
+ Node* start_address =
+ IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
+ Node* end_address =
+ IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
+ StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+}
+
+void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
+ Node* end_address,
+ Node* value) {
+ Comment("StoreFieldsNoWriteBarrier");
+ CSA_ASSERT(this, WordIsWordAligned(start_address));
+ CSA_ASSERT(this, WordIsWordAligned(end_address));
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), start_address, end_address,
+ [value](CodeStubAssembler* a, Node* current) {
+ a->StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
+ },
+ kPointerSize, IndexAdvanceMode::kPost);
+}
+
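BuildFastLoop emits what is conceptually a pointer-granularity fill between two word-aligned addresses (sketch):

    #include <cstdint>

    void StoreFields(uintptr_t* start, uintptr_t* end, uintptr_t value) {
      for (uintptr_t* p = start; p != end; ++p) *p = value;  // post-increment fill
    }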
Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
ElementsKind kind, Node* array_map, Node* length, Node* allocation_site) {
Comment("begin allocation of JSArray without elements");
@@ -1446,7 +1861,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
Node* array = AllocateUninitializedJSArray(kind, array_map, length,
allocation_site, size);
- Node* elements = InnerAllocate(array, elements_offset);
+ // The bitcast here is safe because InnerAllocate doesn't actually allocate.
+ Node* elements = InnerAllocate(BitcastTaggedToWord(array), elements_offset);
StoreObjectField(array, JSObject::kElementsOffset, elements);
return {array, elements};
@@ -1492,8 +1908,10 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
TagParameter(capacity, capacity_mode));
// Fill in the elements with holes.
- FillFixedArrayWithValue(kind, elements, IntPtrConstant(0), capacity,
- Heap::kTheHoleValueRootIndex, capacity_mode);
+ FillFixedArrayWithValue(
+ kind, elements, capacity_mode == SMI_PARAMETERS ? SmiConstant(Smi::kZero)
+ : IntPtrConstant(0),
+ capacity, Heap::kTheHoleValueRootIndex, capacity_mode);
return array;
}
@@ -1502,6 +1920,8 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
Node* capacity_node,
ParameterMode mode,
AllocationFlags flags) {
+ CSA_ASSERT(this,
+ IntPtrGreaterThan(capacity_node, IntPtrOrSmiConstant(0, mode)));
Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
// Allocate both array and elements object, and initialize the JSArray.
@@ -1532,86 +1952,37 @@ void CodeStubAssembler::FillFixedArrayWithValue(
Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
Node* value = LoadRoot(value_root_index);
- const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
- int32_t to;
- bool constant_to = ToInt32Constant(to_node, to);
- int32_t from;
- bool constant_from = ToInt32Constant(from_node, from);
- if (constant_to && constant_from &&
- (to - from) <= kElementLoopUnrollThreshold) {
- for (int i = from; i < to; ++i) {
- Node* index = IntPtrConstant(i);
- if (is_double) {
- Node* offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
- first_element_offset);
- // Don't use doubles to store the hole double, since manipulating the
- // signaling NaN used for the hole in C++, e.g. with bit_cast, will
- // change its value on ia32 (the x87 stack is used to return values
- // and stores to the stack silently clear the signalling bit).
- //
- // TODO(danno): When we have a Float32/Float64 wrapper class that
- // preserves double bits during manipulation, remove this code/change
- // this to an indexed Float64 store.
- if (Is64()) {
- StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
- double_hole);
+ BuildFastFixedArrayForEach(
+ array, kind, from_node, to_node,
+ [value, is_double, double_hole](CodeStubAssembler* assembler, Node* array,
+ Node* offset) {
+ if (is_double) {
+ // Don't use doubles to store the hole double, since manipulating the
+ // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+ // change its value on ia32 (the x87 stack is used to return values
+ // and stores to the stack silently clear the signalling bit).
+ //
+ // TODO(danno): When we have a Float32/Float64 wrapper class that
+ // preserves double bits during manipulation, remove this code/change
+ // this to an indexed Float64 store.
+ if (assembler->Is64()) {
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kWord64,
+ array, offset, double_hole);
+ } else {
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kWord32,
+ array, offset, double_hole);
+ assembler->StoreNoWriteBarrier(
+ MachineRepresentation::kWord32, array,
+ assembler->IntPtrAdd(offset,
+ assembler->IntPtrConstant(kPointerSize)),
+ double_hole);
+ }
} else {
- StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
- double_hole);
- offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
- first_element_offset + kPointerSize);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
- double_hole);
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, array,
+ offset, value);
}
- } else {
- StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER,
- INTPTR_PARAMETERS);
- }
- }
- } else {
- Variable current(this, MachineRepresentation::kTagged);
- Label test(this);
- Label decrement(this, &current);
- Label done(this);
- Node* limit =
- IntPtrAdd(array, ElementOffsetFromIndex(from_node, kind, mode));
- current.Bind(IntPtrAdd(array, ElementOffsetFromIndex(to_node, kind, mode)));
-
- Branch(WordEqual(current.value(), limit), &done, &decrement);
-
- Bind(&decrement);
- current.Bind(IntPtrSub(
- current.value(),
- IntPtrConstant(IsFastDoubleElementsKind(kind) ? kDoubleSize
- : kPointerSize)));
- if (is_double) {
- // Don't use doubles to store the hole double, since manipulating the
- // signaling NaN used for the hole in C++, e.g. with bit_cast, will
- // change its value on ia32 (the x87 stack is used to return values
- // and stores to the stack silently clear the signalling bit).
- //
- // TODO(danno): When we have a Float32/Float64 wrapper class that
- // preserves double bits during manipulation, remove this code/change
- // this to an indexed Float64 store.
- if (Is64()) {
- StoreNoWriteBarrier(MachineRepresentation::kWord64, current.value(),
- Int64Constant(first_element_offset), double_hole);
- } else {
- StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
- Int32Constant(first_element_offset), double_hole);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
- Int32Constant(kPointerSize + first_element_offset),
- double_hole);
- }
- } else {
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), current.value(),
- IntPtrConstant(first_element_offset), value);
- }
- Node* compare = WordNotEqual(current.value(), limit);
- Branch(compare, &decrement, &done);
-
- Bind(&done);
- }
+ },
+ mode);
}
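The retained comment explains why the hole is written as raw integer words: routing the signalling-NaN pattern through an x87 float register on ia32 would silently quiet it. The same idea in host C++ (a sketch; the payload bits are hypothetical):

    #include <cstdint>
    #include <cstring>

    constexpr uint64_t kHoleNanInt64 = 0xFFF7FFFFFFF7FFFFull;  // hypothetical bits

    void StoreHoleDouble(void* slot) {
      uint64_t bits = kHoleNanInt64;
      std::memcpy(slot, &bits, sizeof bits);  // integer store keeps the payload
    }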
void CodeStubAssembler::CopyFixedArrayElements(
@@ -1710,8 +2081,8 @@ void CodeStubAssembler::CopyFixedArrayElements(
StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset,
value);
} else {
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), to_array,
- to_offset, value);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array, to_offset,
+ value);
}
Goto(&next_iter);
@@ -1748,73 +2119,66 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("] CopyFixedArrayElements");
}
-void CodeStubAssembler::CopyStringCharacters(compiler::Node* from_string,
- compiler::Node* to_string,
- compiler::Node* from_index,
- compiler::Node* character_count,
- String::Encoding encoding) {
- Label out(this);
-
- // Nothing to do for zero characters.
-
- GotoIf(SmiLessThanOrEqual(character_count, SmiConstant(Smi::FromInt(0))),
- &out);
-
- // Calculate offsets into the strings.
-
- Node* from_offset;
- Node* limit_offset;
- Node* to_offset;
-
- {
- Node* byte_count = SmiUntag(character_count);
- Node* from_byte_index = SmiUntag(from_index);
- if (encoding == String::ONE_BYTE_ENCODING) {
- const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
- from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
- limit_offset = IntPtrAdd(from_offset, byte_count);
- to_offset = IntPtrConstant(offset);
- } else {
- STATIC_ASSERT(2 == sizeof(uc16));
- byte_count = WordShl(byte_count, 1);
- from_byte_index = WordShl(from_byte_index, 1);
-
- const int offset = SeqTwoByteString::kHeaderSize - kHeapObjectTag;
- from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
- limit_offset = IntPtrAdd(from_offset, byte_count);
- to_offset = IntPtrConstant(offset);
- }
- }
-
- Variable var_from_offset(this, MachineType::PointerRepresentation());
- Variable var_to_offset(this, MachineType::PointerRepresentation());
-
- var_from_offset.Bind(from_offset);
- var_to_offset.Bind(to_offset);
-
- Variable* vars[] = {&var_from_offset, &var_to_offset};
- Label decrement(this, 2, vars);
-
- Label loop(this, 2, vars);
- Goto(&loop);
- Bind(&loop);
- {
- from_offset = var_from_offset.value();
- to_offset = var_to_offset.value();
-
- // TODO(jgruber): We could make this faster through larger copy unit sizes.
- Node* value = Load(MachineType::Uint8(), from_string, from_offset);
- StoreNoWriteBarrier(MachineRepresentation::kWord8, to_string, to_offset,
- value);
-
- Node* new_from_offset = IntPtrAdd(from_offset, IntPtrConstant(1));
- var_from_offset.Bind(new_from_offset);
- var_to_offset.Bind(IntPtrAdd(to_offset, IntPtrConstant(1)));
-
- Branch(WordNotEqual(new_from_offset, limit_offset), &loop, &out);
- }
-
- Bind(&out);
+void CodeStubAssembler::CopyStringCharacters(
+ compiler::Node* from_string, compiler::Node* to_string,
+ compiler::Node* from_index, compiler::Node* to_index,
+ compiler::Node* character_count, String::Encoding from_encoding,
+ String::Encoding to_encoding, ParameterMode mode) {
+ bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
+ bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
+ DCHECK_IMPLIES(to_one_byte, from_one_byte);
+ Comment("CopyStringCharacters %s -> %s",
+ from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING",
+ to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING");
+
+ ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
+ ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+ Node* from_offset =
+ ElementOffsetFromIndex(from_index, from_kind, mode, header_size);
+ Node* to_offset =
+ ElementOffsetFromIndex(to_index, to_kind, mode, header_size);
+ Node* byte_count = ElementOffsetFromIndex(character_count, from_kind, mode);
+ Node* limit_offset = IntPtrAddFoldConstants(from_offset, byte_count);
+
+ // Prepare the fast loop
+ MachineType type =
+ from_one_byte ? MachineType::Uint8() : MachineType::Uint16();
+ MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8
+ : MachineRepresentation::kWord16;
+ int from_increment = 1 << ElementsKindToShiftSize(from_kind);
+ int to_increment = 1 << ElementsKindToShiftSize(to_kind);
+
+ Variable current_to_offset(this, MachineType::PointerRepresentation());
+ VariableList vars({&current_to_offset}, zone());
+ current_to_offset.Bind(to_offset);
+ int to_index_constant = 0, from_index_constant = 0;
+ Smi* to_index_smi = nullptr;
+ Smi* from_index_smi = nullptr;
+ bool index_same = (from_encoding == to_encoding) &&
+ (from_index == to_index ||
+ (ToInt32Constant(from_index, from_index_constant) &&
+ ToInt32Constant(to_index, to_index_constant) &&
+ from_index_constant == to_index_constant) ||
+ (ToSmiConstant(from_index, from_index_smi) &&
+ ToSmiConstant(to_index, to_index_smi) &&
+ to_index_smi == from_index_smi));
+ BuildFastLoop(vars, MachineType::PointerRepresentation(), from_offset,
+ limit_offset,
+ [from_string, to_string, &current_to_offset, to_increment, type,
+ rep, index_same](CodeStubAssembler* assembler, Node* offset) {
+ Node* value = assembler->Load(type, from_string, offset);
+ assembler->StoreNoWriteBarrier(
+ rep, to_string,
+ index_same ? offset : current_to_offset.value(), value);
+ if (!index_same) {
+ current_to_offset.Bind(assembler->IntPtrAdd(
+ current_to_offset.value(),
+ assembler->IntPtrConstant(to_increment)));
+ }
+ },
+ from_increment, IndexAdvanceMode::kPost);
}
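When the encodings match and the source and destination start indices are provably equal (pointer-identical nodes, equal int constants, or equal Smi constants), the loop runs with a single offset cursor instead of maintaining current_to_offset. In miniature (sketch):

    #include <cstddef>
    #include <cstdint>

    void CopyBytes(const uint8_t* from, uint8_t* to, size_t from_index,
                   size_t to_index, size_t count) {
      if (from_index == to_index) {
        for (size_t i = from_index; i < from_index + count; ++i)
          to[i] = from[i];  // index_same: one cursor addresses both strings
      } else {
        for (size_t i = 0; i < count; ++i)
          to[to_index + i] = from[from_index + i];
      }
    }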
Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
@@ -1831,7 +2195,7 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
return value;
} else {
- Node* value = Load(MachineType::Pointer(), array, offset);
+ Node* value = Load(MachineType::AnyTagged(), array, offset);
if (if_hole) {
GotoIf(WordEqual(value, TheHoleConstant()), if_hole);
}
@@ -1907,10 +2271,6 @@ Node* CodeStubAssembler::GrowElementsCapacity(
// Allocate the new backing store.
Node* new_elements = AllocateFixedArray(to_kind, new_capacity, mode);
- // Fill in the added capacity in the new store with holes.
- FillFixedArrayWithValue(to_kind, new_elements, capacity, new_capacity,
- Heap::kTheHoleValueRootIndex, mode);
-
// Copy the elements from the old elements store to the new.
// The size-check above guarantees that the |new_elements| is allocated
// in new space so we can skip the write barrier.
@@ -1935,13 +2295,47 @@ void CodeStubAssembler::InitializeAllocationMemento(
if (FLAG_allocation_site_pretenuring) {
Node* count = LoadObjectField(allocation_site,
AllocationSite::kPretenureCreateCountOffset);
- Node* incremented_count = IntPtrAdd(count, SmiConstant(Smi::FromInt(1)));
+ Node* incremented_count = SmiAdd(count, SmiConstant(Smi::FromInt(1)));
StoreObjectFieldNoWriteBarrier(allocation_site,
AllocationSite::kPretenureCreateCountOffset,
incremented_count);
}
}
+Node* CodeStubAssembler::TryTaggedToFloat64(Node* value,
+ Label* if_valueisnotnumber) {
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kFloat64);
+
+ // Check if the {value} is a Smi or a HeapObject.
+ Label if_valueissmi(this), if_valueisnotsmi(this);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+ Bind(&if_valueissmi);
+ {
+ // Convert the Smi {value}.
+ var_result.Bind(SmiToFloat64(value));
+ Goto(&out);
+ }
+
+ Bind(&if_valueisnotsmi);
+ {
+ // Check if {value} is a HeapNumber.
+ Label if_valueisheapnumber(this);
+ Branch(IsHeapNumberMap(LoadMap(value)), &if_valueisheapnumber,
+ if_valueisnotnumber);
+
+ Bind(&if_valueisheapnumber);
+ {
+ // Load the floating point value.
+ var_result.Bind(LoadHeapNumberValue(value));
+ Goto(&out);
+ }
+ }
+ Bind(&out);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
// We might need to loop once due to ToNumber conversion.
Variable var_value(this, MachineRepresentation::kTagged),
@@ -1951,42 +2345,23 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
Goto(&loop);
Bind(&loop);
{
+ Label if_valueisnotnumber(this, Label::kDeferred);
+
// Load the current {value}.
value = var_value.value();
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ // Convert {value} to Float64 if it is a number and convert it to a number
+ // otherwise.
+ Node* const result = TryTaggedToFloat64(value, &if_valueisnotnumber);
+ var_result.Bind(result);
+ Goto(&done_loop);
- Bind(&if_valueissmi);
+ Bind(&if_valueisnotnumber);
{
- // Convert the Smi {value}.
- var_result.Bind(SmiToFloat64(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotsmi);
- {
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this),
- if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
- &if_valueisheapnumber, &if_valueisnotheapnumber);
-
- Bind(&if_valueisheapnumber);
- {
- // Load the floating point value.
- var_result.Bind(LoadHeapNumberValue(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotheapnumber);
- {
- // Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
- Goto(&loop);
- }
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ Goto(&loop);
}
}
Bind(&done_loop);
@@ -2007,7 +2382,7 @@ Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
// Check if the {value} is a Smi or a HeapObject.
Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
Bind(&if_valueissmi);
{
@@ -2060,8 +2435,8 @@ Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
Bind(&if_valueisequal);
{
GotoUnless(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
- BranchIfInt32LessThan(Float64ExtractHighWord32(value), Int32Constant(0),
- &if_valueisheapnumber, &if_valueisint32);
+ Branch(Int32LessThan(Float64ExtractHighWord32(value), Int32Constant(0)),
+ &if_valueisheapnumber, &if_valueisint32);
}
Bind(&if_valueisnotequal);
Goto(&if_valueisheapnumber);
@@ -2169,7 +2544,7 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
// Check if the {value} is a Smi or a HeapObject.
Label if_valueissmi(this, Label::kDeferred), if_valueisnotsmi(this),
if_valueisstring(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
Bind(&if_valueisnotsmi);
{
// Load the instance type of the {value}.
@@ -2237,9 +2612,9 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
value = var_value.value();
// Check if the {value} is a Smi or a HeapObject.
- GotoIf(WordIsSmi(value), (primitive_type == PrimitiveType::kNumber)
- ? &done_loop
- : &done_throw);
+ GotoIf(TaggedIsSmi(value), (primitive_type == PrimitiveType::kNumber)
+ ? &done_loop
+ : &done_throw);
// Load the map of the {value}.
Node* value_map = LoadMap(value);
@@ -2301,7 +2676,7 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
Label out(this), throw_exception(this, Label::kDeferred);
Variable var_value_map(this, MachineRepresentation::kTagged);
- GotoIf(WordIsSmi(value), &throw_exception);
+ GotoIf(TaggedIsSmi(value), &throw_exception);
// Load the instance type of the {value}.
var_value_map.Bind(LoadMap(value));
@@ -2323,6 +2698,37 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
return var_value_map.value();
}
+Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
+ Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+ uint32_t mask =
+ 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ USE(mask);
+ // Interceptors or access checks imply special receiver.
+ CSA_ASSERT(this, Select(IsSetWord32(LoadMapBitField(map), mask), is_special,
+ Int32Constant(1), MachineRepresentation::kWord32));
+ return is_special;
+}
+
+Node* CodeStubAssembler::IsDictionaryMap(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ Node* bit_field3 = LoadMapBitField3(map);
+ return Word32NotEqual(IsSetWord32<Map::DictionaryMap>(bit_field3),
+ Int32Constant(0));
+}
+
+Node* CodeStubAssembler::IsCallableMap(Node* map) {
+ CSA_ASSERT(this, IsMap(map));
+ return Word32NotEqual(
+ Word32And(LoadMapBitField(map), Int32Constant(1 << Map::kIsCallable)),
+ Int32Constant(0));
+}
+
+Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
+ STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
+ return Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_SPECIAL_RECEIVER_TYPE));
+}
+
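The one-compare check above works because V8 orders its instance-type enum so that every special receiver type (proxies, global objects and similar) sits at or below LAST_SPECIAL_RECEIVER_TYPE. A standalone sketch of the idiom; the numeric values below are invented for illustration and are not V8's real instance types:

    #include <cassert>

    // An enum ordered so that one range check classifies a whole group.
    enum InstanceType {
      JS_PROXY_TYPE = 1040,
      JS_GLOBAL_OBJECT_TYPE = 1041,
      JS_GLOBAL_PROXY_TYPE = 1042,
      LAST_SPECIAL_RECEIVER_TYPE = JS_GLOBAL_PROXY_TYPE,
      JS_OBJECT_TYPE = 1057,
    };

    bool IsSpecialReceiverInstanceType(InstanceType t) {
      return t <= LAST_SPECIAL_RECEIVER_TYPE;  // one compare, no bit tests
    }

    int main() {
      assert(IsSpecialReceiverInstanceType(JS_GLOBAL_OBJECT_TYPE));
      assert(!IsSpecialReceiverInstanceType(JS_OBJECT_TYPE));
    }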
Node* CodeStubAssembler::IsStringInstanceType(Node* instance_type) {
STATIC_ASSERT(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
return Int32LessThan(instance_type, Int32Constant(FIRST_NONSTRING_TYPE));
@@ -2334,7 +2740,71 @@ Node* CodeStubAssembler::IsJSReceiverInstanceType(Node* instance_type) {
Int32Constant(FIRST_JS_RECEIVER_TYPE));
}
+Node* CodeStubAssembler::IsJSReceiver(Node* object) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return IsJSReceiverInstanceType(LoadInstanceType(object));
+}
+
+Node* CodeStubAssembler::IsJSObject(Node* object) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return Int32GreaterThanOrEqual(LoadInstanceType(object),
+ Int32Constant(FIRST_JS_RECEIVER_TYPE));
+}
+
+Node* CodeStubAssembler::IsJSGlobalProxy(Node* object) {
+ return Word32Equal(LoadInstanceType(object),
+ Int32Constant(JS_GLOBAL_PROXY_TYPE));
+}
+
+Node* CodeStubAssembler::IsMap(Node* map) {
+ return HasInstanceType(map, MAP_TYPE);
+}
+
+Node* CodeStubAssembler::IsJSValue(Node* map) {
+ return HasInstanceType(map, JS_VALUE_TYPE);
+}
+
+Node* CodeStubAssembler::IsJSArray(Node* object) {
+ return HasInstanceType(object, JS_ARRAY_TYPE);
+}
+
+Node* CodeStubAssembler::IsWeakCell(Node* object) {
+ return HasInstanceType(object, WEAK_CELL_TYPE);
+}
+
+Node* CodeStubAssembler::IsName(Node* object) {
+ return Int32LessThanOrEqual(LoadInstanceType(object),
+ Int32Constant(LAST_NAME_TYPE));
+}
+
+Node* CodeStubAssembler::IsString(Node* object) {
+ return Int32LessThanOrEqual(LoadInstanceType(object),
+ Int32Constant(FIRST_NONSTRING_TYPE));
+}
+
+Node* CodeStubAssembler::IsNativeContext(Node* object) {
+ return WordEqual(LoadMap(object), LoadRoot(Heap::kNativeContextMapRootIndex));
+}
+
+Node* CodeStubAssembler::IsFixedDoubleArray(Node* object) {
+ return WordEqual(LoadMap(object), FixedDoubleArrayMapConstant());
+}
+
+Node* CodeStubAssembler::IsHashTable(Node* object) {
+ return WordEqual(LoadMap(object), LoadRoot(Heap::kHashTableMapRootIndex));
+}
+
+Node* CodeStubAssembler::IsDictionary(Node* object) {
+ return WordOr(IsHashTable(object), IsUnseededNumberDictionary(object));
+}
+
+Node* CodeStubAssembler::IsUnseededNumberDictionary(Node* object) {
+ return WordEqual(LoadMap(object),
+ LoadRoot(Heap::kUnseededNumberDictionaryMapRootIndex));
+}
+
Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
+ CSA_ASSERT(this, IsString(string));
// Translate the {index} into a Word.
index = SmiToWord(index);
@@ -2580,6 +3050,8 @@ Node* AllocAndCopyStringCharacters(CodeStubAssembler* a, Node* context,
Label end(a), two_byte_sequential(a);
Variable var_result(a, MachineRepresentation::kTagged);
+ Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
a->GotoIf(a->Word32Equal(a->Word32And(from_instance_type,
a->Int32Constant(kStringEncodingMask)),
@@ -2590,8 +3062,10 @@ Node* AllocAndCopyStringCharacters(CodeStubAssembler* a, Node* context,
{
Node* result =
a->AllocateSeqOneByteString(context, a->SmiToWord(character_count));
- a->CopyStringCharacters(from, result, from_index, character_count,
- String::ONE_BYTE_ENCODING);
+ a->CopyStringCharacters(from, result, from_index, smi_zero, character_count,
+ String::ONE_BYTE_ENCODING,
+ String::ONE_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
var_result.Bind(result);
a->Goto(&end);
@@ -2602,8 +3076,10 @@ Node* AllocAndCopyStringCharacters(CodeStubAssembler* a, Node* context,
{
Node* result =
a->AllocateSeqTwoByteString(context, a->SmiToWord(character_count));
- a->CopyStringCharacters(from, result, from_index, character_count,
- String::TWO_BYTE_ENCODING);
+ a->CopyStringCharacters(from, result, from_index, smi_zero, character_count,
+ String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
var_result.Bind(result);
a->Goto(&end);
@@ -2632,7 +3108,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Make sure first argument is a string.
// Bailout if receiver is a Smi.
- GotoIf(WordIsSmi(string), &runtime);
+ GotoIf(TaggedIsSmi(string), &runtime);
// Load the instance type of the {string}.
Node* const instance_type = LoadInstanceType(string);
@@ -2814,7 +3290,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
GotoIf(SmiAbove(substr_length, string_length), &runtime);
// Equal length - check if {from, to} == {0, str.length}.
- GotoIf(SmiAbove(from, SmiConstant(Smi::FromInt(0))), &runtime);
+ GotoIf(SmiAbove(from, SmiConstant(Smi::kZero)), &runtime);
// Return the original string (substr_length == string_length).
@@ -2837,6 +3313,178 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
return var_result.value();
}
+Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
+ AllocationFlags flags) {
+ Label check_right(this);
+ Label runtime(this, Label::kDeferred);
+ Label cons(this);
+ Label non_cons(this);
+ Variable result(this, MachineRepresentation::kTagged);
+ Label done(this, &result);
+ Label done_native(this, &result);
+ Counters* counters = isolate()->counters();
+
+ Node* left_length = LoadStringLength(left);
+ GotoIf(WordNotEqual(IntPtrConstant(0), left_length), &check_right);
+ result.Bind(right);
+ Goto(&done_native);
+
+ Bind(&check_right);
+ Node* right_length = LoadStringLength(right);
+ GotoIf(WordNotEqual(IntPtrConstant(0), right_length), &cons);
+ result.Bind(left);
+ Goto(&done_native);
+
+ Bind(&cons);
+ CSA_ASSERT(this, TaggedIsSmi(left_length));
+ CSA_ASSERT(this, TaggedIsSmi(right_length));
+ Node* new_length = SmiAdd(left_length, right_length);
+ GotoIf(UintPtrGreaterThanOrEqual(
+ new_length, SmiConstant(Smi::FromInt(String::kMaxLength))),
+ &runtime);
+
+ GotoIf(IntPtrLessThan(new_length,
+ SmiConstant(Smi::FromInt(ConsString::kMinLength))),
+ &non_cons);
+
+ result.Bind(NewConsString(context, new_length, left, right, flags));
+ Goto(&done_native);
+
+ Bind(&non_cons);
+
+ Comment("Full string concatenate");
+ Node* left_instance_type = LoadInstanceType(left);
+ Node* right_instance_type = LoadInstanceType(right);
+ // Compute intersection and difference of instance types.
+
+ Node* ored_instance_types = WordOr(left_instance_type, right_instance_type);
+ Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
+
+ // Check if both strings have the same encoding and both are sequential.
+ GotoIf(WordNotEqual(
+ WordAnd(xored_instance_types, IntPtrConstant(kStringEncodingMask)),
+ IntPtrConstant(0)),
+ &runtime);
+ GotoIf(WordNotEqual(WordAnd(ored_instance_types,
+ IntPtrConstant(kStringRepresentationMask)),
+ IntPtrConstant(0)),
+ &runtime);
+
+ Label two_byte(this);
+ GotoIf(WordEqual(
+ WordAnd(ored_instance_types, IntPtrConstant(kStringEncodingMask)),
+ IntPtrConstant(kTwoByteStringTag)),
+ &two_byte);
+ // One-byte sequential string case
+ Node* new_string =
+ AllocateSeqOneByteString(context, new_length, SMI_PARAMETERS);
+ CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
+ SmiConstant(Smi::kZero), left_length,
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
+ SMI_PARAMETERS);
+ CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero), left_length,
+ right_length, String::ONE_BYTE_ENCODING,
+ String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
+ result.Bind(new_string);
+ Goto(&done_native);
+
+ Bind(&two_byte);
+ {
+ // Two-byte sequential string case
+ new_string = AllocateSeqTwoByteString(context, new_length, SMI_PARAMETERS);
+ CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
+ SmiConstant(Smi::kZero), left_length,
+ String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+ SMI_PARAMETERS);
+ CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero),
+ left_length, right_length, String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
+ result.Bind(new_string);
+ Goto(&done_native);
+ }
+
+ Bind(&runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kStringAdd, context, left, right));
+ Goto(&done);
+ }
+
+ Bind(&done_native);
+ {
+ IncrementCounter(counters->string_add_native(), 1);
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
+
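The branches above encode a size policy: reuse an operand when the other is empty, bail to the runtime past String::kMaxLength, build a lazy ConsString at or above ConsString::kMinLength, and otherwise copy both halves eagerly into one sequential string. A minimal scalar sketch of that policy (not V8 code; the thresholds are placeholders standing in for the real constants):

    #include <cstddef>
    #include <memory>
    #include <stdexcept>
    #include <string>

    constexpr size_t kMaxLength = size_t{1} << 28;  // placeholder threshold
    constexpr size_t kConsMinLength = 13;           // placeholder threshold

    // A string is either a flat leaf or a lazy cons pair.
    struct Str {
      std::string flat;
      std::shared_ptr<Str> first, second;
      size_t length() const {
        return first ? first->length() + second->length() : flat.size();
      }
    };

    std::shared_ptr<Str> Add(std::shared_ptr<Str> l, std::shared_ptr<Str> r) {
      if (l->length() == 0) return r;  // left empty: reuse right unchanged
      if (r->length() == 0) return l;  // right empty: reuse left unchanged
      size_t n = l->length() + r->length();
      if (n >= kMaxLength) throw std::length_error("StringAdd");  // runtime
      if (n >= kConsMinLength)  // long enough: defer the copy via a cons node
        return std::make_shared<Str>(Str{"", l, r});
      // Short result: both operands are leaves here (a cons node is never
      // shorter than kConsMinLength), so copy the characters eagerly.
      return std::make_shared<Str>(Str{l->flat + r->flat});
    }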
+Node* CodeStubAssembler::StringIndexOfChar(Node* context, Node* string,
+ Node* needle_char, Node* from) {
+ CSA_ASSERT(this, IsString(string));
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ Label out(this), runtime(this, Label::kDeferred);
+
+ // Let runtime handle non-one-byte {needle_char}.
+
+ Node* const one_byte_char_mask = IntPtrConstant(0xFF);
+ GotoUnless(WordEqual(WordAnd(needle_char, one_byte_char_mask), needle_char),
+ &runtime);
+
+ // TODO(jgruber): Handle external and two-byte strings.
+
+ Node* const one_byte_seq_mask = Int32Constant(
+ kIsIndirectStringMask | kExternalStringTag | kStringEncodingMask);
+ Node* const expected_masked = Int32Constant(kOneByteStringTag);
+
+ Node* const string_instance_type = LoadInstanceType(string);
+ GotoUnless(Word32Equal(Word32And(string_instance_type, one_byte_seq_mask),
+ expected_masked),
+ &runtime);
+
+ // If we reach this, {string} is a non-indirect, non-external one-byte string.
+
+ Node* const length = LoadStringLength(string);
+ Node* const search_range_length = SmiUntag(SmiSub(length, from));
+
+ const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+ Node* const begin = IntPtrConstant(offset);
+ Node* const cursor = IntPtrAdd(begin, SmiUntag(from));
+ Node* const end = IntPtrAdd(cursor, search_range_length);
+
+ var_result.Bind(SmiConstant(Smi::FromInt(-1)));
+
+ BuildFastLoop(MachineType::PointerRepresentation(), cursor, end,
+ [string, needle_char, begin, &var_result, &out](
+ CodeStubAssembler* csa, Node* cursor) {
+ Label next(csa);
+ Node* value = csa->Load(MachineType::Uint8(), string, cursor);
+ csa->GotoUnless(csa->WordEqual(value, needle_char), &next);
+
+ // Found a match.
+ Node* index = csa->SmiTag(csa->IntPtrSub(cursor, begin));
+ var_result.Bind(index);
+ csa->Goto(&out);
+
+ csa->Bind(&next);
+ },
+ 1, IndexAdvanceMode::kPost);
+ Goto(&out);
+
+ Bind(&runtime);
+ {
+ Node* const pattern = StringFromCharCode(needle_char);
+ Node* const result =
+ CallRuntime(Runtime::kStringIndexOf, context, string, pattern, from);
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
+}
+
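The BuildFastLoop above is a byte-wise scan over the in-object character data. As a plain scalar routine it amounts to the following (assuming, as the guards above ensure, a flat one-byte string and a one-byte needle):

    #include <cstdint>

    int IndexOfChar(const uint8_t* chars, int length, uint8_t needle,
                    int from) {
      for (int i = from; i < length; ++i) {
        if (chars[i] == needle) return i;  // first match wins
      }
      return -1;  // mirrors the SmiConstant(-1) default above
    }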
Node* CodeStubAssembler::StringFromCodePoint(compiler::Node* codepoint,
UnicodeEncoding encoding) {
Variable var_result(this, MachineRepresentation::kTagged);
@@ -2901,7 +3549,8 @@ Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
Word32And(hash, Int32Constant(String::kContainsCachedArrayIndexMask));
GotoIf(Word32NotEqual(bit, Int32Constant(0)), &runtime);
- var_result.Bind(SmiTag(BitFieldDecode<String::ArrayIndexValueBits>(hash)));
+ var_result.Bind(
+ SmiTag(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash)));
Goto(&end);
Bind(&runtime);
@@ -2914,6 +3563,85 @@ Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
return var_result.value();
}
+Node* CodeStubAssembler::NumberToString(compiler::Node* context,
+ compiler::Node* argument) {
+ Variable result(this, MachineRepresentation::kTagged);
+ Label runtime(this, Label::kDeferred);
+ Label smi(this);
+ Label done(this, &result);
+
+ // Load the number string cache.
+ Node* number_string_cache = LoadRoot(Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ Node* mask = LoadFixedArrayBaseLength(number_string_cache);
+ Node* one = IntPtrConstant(1);
+ mask = IntPtrSub(mask, one);
+
+ GotoIf(TaggedIsSmi(argument), &smi);
+
+  // Argument isn't a smi; check whether it's a heap number.
+ Node* map = LoadMap(argument);
+ GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+
+ // Make a hash from the two 32-bit values of the double.
+ Node* low =
+ LoadObjectField(argument, HeapNumber::kValueOffset, MachineType::Int32());
+ Node* high = LoadObjectField(argument, HeapNumber::kValueOffset + kIntSize,
+ MachineType::Int32());
+ Node* hash = Word32Xor(low, high);
+ if (Is64()) hash = ChangeInt32ToInt64(hash);
+ hash = WordShl(hash, one);
+ Node* index = WordAnd(hash, SmiToWord(mask));
+
+  // Cache entry's key must be a heap number.
+ Node* number_key =
+ LoadFixedArrayElement(number_string_cache, index, 0, INTPTR_PARAMETERS);
+ GotoIf(TaggedIsSmi(number_key), &runtime);
+ map = LoadMap(number_key);
+ GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+
+ // Cache entry's key must match the heap number value we're looking for.
+ Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
+ MachineType::Int32());
+ Node* high_compare = LoadObjectField(
+ number_key, HeapNumber::kValueOffset + kIntSize, MachineType::Int32());
+ GotoUnless(WordEqual(low, low_compare), &runtime);
+ GotoUnless(WordEqual(high, high_compare), &runtime);
+
+  // Heap number match, return value from cache entry.
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+ result.Bind(LoadFixedArrayElement(number_string_cache, index, kPointerSize,
+ INTPTR_PARAMETERS));
+ Goto(&done);
+
+ Bind(&runtime);
+ {
+ // No cache entry, go to the runtime.
+ result.Bind(CallRuntime(Runtime::kNumberToString, context, argument));
+ }
+ Goto(&done);
+
+ Bind(&smi);
+ {
+ // Load the smi key, make sure it matches the smi we're looking for.
+ Node* smi_index = WordAnd(WordShl(argument, one), mask);
+ Node* smi_key = LoadFixedArrayElement(number_string_cache, smi_index, 0,
+ SMI_PARAMETERS);
+ GotoIf(WordNotEqual(smi_key, argument), &runtime);
+
+ // Smi match, return value from cache entry.
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+ result.Bind(LoadFixedArrayElement(number_string_cache, smi_index,
+ kPointerSize, SMI_PARAMETERS));
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
+
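Conceptually, the heap-number probe above computes a cache slot as in the sketch below. This is simplified: the CSA code performs the same steps on smi-tagged words, so its literal shifts and masks differ.

    #include <cstdint>
    #include <cstring>

    // The cache is a power-of-two array of {number, string} slot pairs.
    int CacheIndex(double value, int cache_length_in_slots) {
      uint32_t halves[2];
      std::memcpy(halves, &value, sizeof value);  // low word, high word
      uint32_t hash = halves[0] ^ halves[1];      // mix both 32-bit halves
      int entries = cache_length_in_slots / 2;    // two slots per entry
      return static_cast<int>(hash & (entries - 1)) * 2;  // key slot index
    }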
Node* CodeStubAssembler::ToName(Node* context, Node* value) {
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
@@ -2922,7 +3650,7 @@ Node* CodeStubAssembler::ToName(Node* context, Node* value) {
Variable var_result(this, MachineRepresentation::kTagged);
Label is_number(this);
- GotoIf(WordIsSmi(value), &is_number);
+ GotoIf(TaggedIsSmi(value), &is_number);
Label not_name(this);
Node* value_instance_type = LoadInstanceType(value);
@@ -2965,8 +3693,8 @@ Node* CodeStubAssembler::ToName(Node* context, Node* value) {
Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
// Assert input is a HeapObject (not smi or heap number)
- Assert(Word32BinaryNot(WordIsSmi(input)));
- Assert(Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
+ CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(input)));
+ CSA_ASSERT(this, Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
// We might need to loop once here due to ToPrimitive conversions.
Variable var_input(this, MachineRepresentation::kTagged);
@@ -3015,7 +3743,7 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
// Check if the {result} is already a Number.
Label if_resultisnumber(this), if_resultisnotnumber(this);
- GotoIf(WordIsSmi(result), &if_resultisnumber);
+ GotoIf(TaggedIsSmi(result), &if_resultisnumber);
Node* result_map = LoadMap(result);
Branch(WordEqual(result_map, HeapNumberMapConstant()), &if_resultisnumber,
&if_resultisnotnumber);
@@ -3057,7 +3785,7 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
Label end(this);
Label not_smi(this, Label::kDeferred);
- GotoUnless(WordIsSmi(input), &not_smi);
+ GotoUnless(TaggedIsSmi(input), &not_smi);
var_result.Bind(input);
Goto(&end);
@@ -3082,6 +3810,110 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
return var_result.value();
}
+Node* CodeStubAssembler::ToString(Node* context, Node* input) {
+ Label is_number(this);
+ Label runtime(this, Label::kDeferred);
+ Variable result(this, MachineRepresentation::kTagged);
+ Label done(this, &result);
+
+ GotoIf(TaggedIsSmi(input), &is_number);
+
+ Node* input_map = LoadMap(input);
+ Node* input_instance_type = LoadMapInstanceType(input_map);
+
+ result.Bind(input);
+ GotoIf(IsStringInstanceType(input_instance_type), &done);
+
+ Label not_heap_number(this);
+ Branch(WordNotEqual(input_map, HeapNumberMapConstant()), &not_heap_number,
+ &is_number);
+
+ Bind(&is_number);
+ result.Bind(NumberToString(context, input));
+ Goto(&done);
+
+ Bind(&not_heap_number);
+ {
+ GotoIf(Word32NotEqual(input_instance_type, Int32Constant(ODDBALL_TYPE)),
+ &runtime);
+ result.Bind(LoadObjectField(input, Oddball::kToStringOffset));
+ Goto(&done);
+ }
+
+ Bind(&runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kToString, context, input));
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
+
+Node* CodeStubAssembler::FlattenString(Node* string) {
+ CSA_ASSERT(this, IsString(string));
+ Variable var_result(this, MachineRepresentation::kTagged);
+ var_result.Bind(string);
+
+ Node* instance_type = LoadInstanceType(string);
+
+ // Check if the {string} is not a ConsString (i.e. already flat).
+ Label is_cons(this, Label::kDeferred), is_flat_in_cons(this), end(this);
+ {
+ GotoUnless(Word32Equal(Word32And(instance_type,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(kConsStringTag)),
+ &end);
+
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string).
+ Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
+ Branch(WordEqual(rhs, EmptyStringConstant()), &is_flat_in_cons, &is_cons);
+ }
+
+ // Bail out to the runtime.
+ Bind(&is_cons);
+ {
+ var_result.Bind(
+ CallRuntime(Runtime::kFlattenString, NoContextConstant(), string));
+ Goto(&end);
+ }
+
+ Bind(&is_flat_in_cons);
+ {
+ var_result.Bind(LoadObjectField(string, ConsString::kFirstOffset));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
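In scalar form the check above looks like this sketch: only a cons whose second half is the canonical empty string can be flattened for free by unwrapping its first half; everything else needs the runtime to copy characters. Comparing against a single empty-string object mirrors the WordEqual against EmptyStringConstant().

    #include <string>

    struct VStr {
      std::string seq;    // leaf payload
      const VStr* first;  // non-null when this node is a cons pair
      const VStr* second;
    };

    std::string Chars(const VStr* s) {
      return s->first ? Chars(s->first) + Chars(s->second) : s->seq;
    }

    // Stand-in for the Runtime::kFlattenString call: copy all characters.
    const VStr* FlattenSlow(const VStr* s) {
      static VStr out;  // sketch only; real code allocates a fresh string
      out = VStr{Chars(s), nullptr, nullptr};
      return &out;
    }

    const VStr* Flatten(const VStr* s, const VStr* empty_string) {
      if (s->first == nullptr) return s;               // already flat
      if (s->second == empty_string) return s->first;  // flat-in-cons: unwrap
      return FlattenSlow(s);                           // characters truly split
    }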
+Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
+ Label if_isreceiver(this, Label::kDeferred), if_isnotreceiver(this);
+ Variable result(this, MachineRepresentation::kTagged);
+ Label done(this, &result);
+
+ BranchIfJSReceiver(input, &if_isreceiver, &if_isnotreceiver);
+
+ Bind(&if_isreceiver);
+ {
+    // Convert {input} to a primitive first, passing the Number hint.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ result.Bind(CallStub(callable, context, input));
+ Goto(&done);
+ }
+
+ Bind(&if_isnotreceiver);
+ {
+ result.Bind(input);
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
+
Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
ToIntegerTruncationMode mode) {
// We might need to loop once for ToNumber conversion.
@@ -3098,7 +3930,7 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
Node* arg = var_arg.value();
// Check if {arg} is a Smi.
- GotoIf(WordIsSmi(arg), &out);
+ GotoIf(TaggedIsSmi(arg), &out);
// Check if {arg} is a HeapNumber.
Label if_argisheapnumber(this),
@@ -3135,7 +3967,7 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
}
Bind(&return_zero);
- var_arg.Bind(SmiConstant(Smi::FromInt(0)));
+ var_arg.Bind(SmiConstant(Smi::kZero));
Goto(&out);
}
@@ -3143,12 +3975,16 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
return var_arg.value();
}
-Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
- uint32_t mask) {
+Node* CodeStubAssembler::DecodeWord32(Node* word32, uint32_t shift,
+ uint32_t mask) {
return Word32Shr(Word32And(word32, Int32Constant(mask)),
static_cast<int>(shift));
}
+Node* CodeStubAssembler::DecodeWord(Node* word, uint32_t shift, uint32_t mask) {
+ return WordShr(WordAnd(word, IntPtrConstant(mask)), static_cast<int>(shift));
+}
+
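Both decoders are the same mask-then-shift idiom, differing only in word width. A standalone illustration; the field layout is invented for the example:

    #include <cassert>
    #include <cstdint>

    // Mask out a bit field, then shift it down to bit zero.
    uint32_t Decode(uint32_t word, uint32_t shift, uint32_t mask) {
      return (word & mask) >> shift;
    }

    int main() {
      const uint32_t kShift = 4;
      const uint32_t kMask = 0x7u << kShift;   // a 3-bit field at bits 4..6
      uint32_t word = (5u << kShift) | 0xFu;   // field value 5, noise below
      assert(Decode(word, kShift, kMask) == 5u);
    }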
void CodeStubAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
Node* counter_address = ExternalConstant(ExternalReference(counter));
@@ -3218,7 +4054,7 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
Goto(if_keyisunique);
Bind(&if_hascachedindex);
- var_index->Bind(BitFieldDecode<Name::ArrayIndexValueBits>(hash));
+ var_index->Bind(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash));
Goto(if_keyisindex);
}
@@ -3229,12 +4065,27 @@ Node* CodeStubAssembler::EntryToIndex(Node* entry, int field_index) {
field_index));
}
+template Node* CodeStubAssembler::EntryToIndex<NameDictionary>(Node*, int);
+template Node* CodeStubAssembler::EntryToIndex<GlobalDictionary>(Node*, int);
+
+Node* CodeStubAssembler::HashTableComputeCapacity(Node* at_least_space_for) {
+ Node* capacity = IntPtrRoundUpToPowerOfTwo32(
+ WordShl(at_least_space_for, IntPtrConstant(1)));
+ return IntPtrMax(capacity, IntPtrConstant(HashTableBase::kMinCapacity));
+}
+
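The capacity rule in plain arithmetic, as a sketch (kMinCapacity stands in for HashTableBase::kMinCapacity): doubling the requested element count keeps the table at most half full, and rounding up to a power of two keeps index masking cheap.

    #include <cstdint>

    constexpr uint32_t kMinCapacity = 4;  // stand-in value

    uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
      v -= 1;
      v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
      v |= v >> 8;  v |= v >> 16;
      return v + 1;
    }

    uint32_t ComputeCapacity(uint32_t at_least_space_for) {
      uint32_t capacity = RoundUpToPowerOfTwo32(at_least_space_for * 2);
      return capacity > kMinCapacity ? capacity : kMinCapacity;
    }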
+Node* CodeStubAssembler::IntPtrMax(Node* left, Node* right) {
+ return Select(IntPtrGreaterThanOrEqual(left, right), left, right,
+ MachineType::PointerRepresentation());
+}
+
template <typename Dictionary>
void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Node* unique_name, Label* if_found,
Variable* var_name_index,
Label* if_not_found,
int inlined_probes) {
+ CSA_ASSERT(this, IsDictionary(dictionary));
DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
Comment("NameDictionaryLookup");
@@ -3319,6 +4170,7 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
Label* if_found,
Variable* var_entry,
Label* if_not_found) {
+ CSA_ASSERT(this, IsDictionary(dictionary));
DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
Comment("NumberDictionaryLookup");
@@ -3361,7 +4213,7 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
Label next_probe(this);
{
Label if_currentissmi(this), if_currentisnotsmi(this);
- Branch(WordIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
+ Branch(TaggedIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
Bind(&if_currentissmi);
{
Node* current_value = SmiUntag(current);
@@ -3393,25 +4245,22 @@ void CodeStubAssembler::DescriptorLookupLinear(Node* unique_name,
Label* if_found,
Variable* var_name_index,
Label* if_not_found) {
- Variable var_descriptor(this, MachineType::PointerRepresentation());
- Label loop(this, &var_descriptor);
- var_descriptor.Bind(IntPtrConstant(0));
- Goto(&loop);
-
- Bind(&loop);
- {
- Node* index = var_descriptor.value();
- Node* name_offset = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
- Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
- GotoIf(WordEqual(index, nof), if_not_found);
- Node* name_index = IntPtrAdd(name_offset, IntPtrMul(index, factor));
- Node* candidate_name =
- LoadFixedArrayElement(descriptors, name_index, 0, INTPTR_PARAMETERS);
- var_name_index->Bind(name_index);
- GotoIf(WordEqual(candidate_name, unique_name), if_found);
- var_descriptor.Bind(IntPtrAdd(index, IntPtrConstant(1)));
- Goto(&loop);
- }
+ Node* first_inclusive = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
+ Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
+ Node* last_exclusive = IntPtrAdd(first_inclusive, IntPtrMul(nof, factor));
+
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), last_exclusive, first_inclusive,
+ [descriptors, unique_name, if_found, var_name_index](
+ CodeStubAssembler* assembler, Node* name_index) {
+ Node* candidate_name = assembler->LoadFixedArrayElement(
+ descriptors, name_index, 0, INTPTR_PARAMETERS);
+ var_name_index->Bind(name_index);
+ assembler->GotoIf(assembler->WordEqual(candidate_name, unique_name),
+ if_found);
+ },
+ -DescriptorArray::kDescriptorSize, IndexAdvanceMode::kPre);
+ Goto(if_not_found);
}
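Note the negative increment and pre-advance mode: the rewritten lookup walks the descriptor array from its last entry back to the first. The equivalent scalar loop, with placeholder constants and an int standing in for the Name pointer:

    // Each descriptor occupies kDescriptorSize consecutive slots, with the
    // key in the first slot. Constants here are illustrative only.
    constexpr int kDescriptorSize = 3;
    constexpr int kFirstKeyIndex = 2;  // stand-in for ToKeyIndex(0)

    int FindKeyIndex(const int* slots, int nof, int unique_name) {
      int first = kFirstKeyIndex;
      int last_exclusive = first + nof * kDescriptorSize;
      for (int i = last_exclusive - kDescriptorSize; i >= first;
           i -= kDescriptorSize) {
        if (slots[i] == unique_name) return i;  // index of the key slot
      }
      return -1;  // caller jumps to if_not_found
    }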
void CodeStubAssembler::TryLookupProperty(
@@ -3428,19 +4277,20 @@ void CodeStubAssembler::TryLookupProperty(
Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
&if_objectisspecial);
- Node* bit_field = LoadMapBitField(map);
- Node* mask = Int32Constant(1 << Map::kHasNamedInterceptor |
- 1 << Map::kIsAccessCheckNeeded);
- Assert(Word32Equal(Word32And(bit_field, mask), Int32Constant(0)));
+ uint32_t mask =
+ 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ CSA_ASSERT(this, Word32BinaryNot(IsSetWord32(LoadMapBitField(map), mask)));
+ USE(mask);
Node* bit_field3 = LoadMapBitField3(map);
- Node* bit = BitFieldDecode<Map::DictionaryMap>(bit_field3);
Label if_isfastmap(this), if_isslowmap(this);
- Branch(Word32Equal(bit, Int32Constant(0)), &if_isfastmap, &if_isslowmap);
+ Branch(IsSetWord32<Map::DictionaryMap>(bit_field3), &if_isslowmap,
+ &if_isfastmap);
Bind(&if_isfastmap);
{
Comment("DescriptorArrayLookup");
- Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+ Node* nof =
+ DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3);
// Bail out to the runtime for large numbers of own descriptors. The stub
// only does linear search, which becomes too expensive in that case.
{
@@ -3528,7 +4378,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
name_to_details_offset);
var_details->Bind(details);
- Node* location = BitFieldDecode<PropertyDetails::LocationField>(details);
+ Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
Label if_in_field(this), if_in_descriptor(this), done(this);
Branch(Word32Equal(location, Int32Constant(kField)), &if_in_field,
@@ -3536,17 +4386,17 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Bind(&if_in_field);
{
Node* field_index =
- BitFieldDecodeWord<PropertyDetails::FieldIndexField>(details);
+ DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
Node* representation =
- BitFieldDecode<PropertyDetails::RepresentationField>(details);
+ DecodeWord32<PropertyDetails::RepresentationField>(details);
Node* inobject_properties = LoadMapInobjectProperties(map);
Label if_inobject(this), if_backing_store(this);
Variable var_double_value(this, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
- BranchIfUintPtrLessThan(field_index, inobject_properties, &if_inobject,
- &if_backing_store);
+ Branch(UintPtrLessThan(field_index, inobject_properties), &if_inobject,
+ &if_backing_store);
Bind(&if_inobject);
{
Comment("if_inobject");
@@ -3556,9 +4406,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
IntPtrConstant(kPointerSize));
Label if_double(this), if_tagged(this);
- BranchIfWord32NotEqual(representation,
- Int32Constant(Representation::kDouble), &if_tagged,
- &if_double);
+ Branch(Word32NotEqual(representation,
+ Int32Constant(Representation::kDouble)),
+ &if_tagged, &if_double);
Bind(&if_tagged);
{
var_value->Bind(LoadObjectField(object, field_offset));
@@ -3584,9 +4434,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Node* value = LoadFixedArrayElement(properties, field_index);
Label if_double(this), if_tagged(this);
- BranchIfWord32NotEqual(representation,
- Int32Constant(Representation::kDouble), &if_tagged,
- &if_double);
+ Branch(Word32NotEqual(representation,
+ Int32Constant(Representation::kDouble)),
+ &if_tagged, &if_double);
Bind(&if_tagged);
{
var_value->Bind(value);
@@ -3623,7 +4473,7 @@ void CodeStubAssembler::LoadPropertyFromNameDictionary(Node* dictionary,
Variable* var_details,
Variable* var_value) {
Comment("LoadPropertyFromNameDictionary");
-
+ CSA_ASSERT(this, IsDictionary(dictionary));
const int name_to_details_offset =
(NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
kPointerSize;
@@ -3647,6 +4497,7 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
Variable* var_value,
Label* if_deleted) {
Comment("[ LoadPropertyFromGlobalDictionary");
+ CSA_ASSERT(this, IsDictionary(dictionary));
const int name_to_value_offset =
(GlobalDictionary::kEntryValueIndex - GlobalDictionary::kEntryKeyIndex) *
@@ -3677,7 +4528,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
var_value.Bind(value);
Label done(this);
- Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+ Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
GotoIf(Word32Equal(kind, Int32Constant(kData)), &done);
// Accessor case.
@@ -3686,7 +4537,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
Int32Constant(ACCESSOR_INFO_TYPE)),
if_bailout);
- AssertInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE);
+ CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
Node* getter = LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
Node* getter_map = LoadMap(getter);
Node* instance_type = LoadMapInstanceType(getter_map);
@@ -3697,10 +4548,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
// Return undefined if the {getter} is not callable.
var_value.Bind(UndefinedConstant());
- GotoIf(Word32Equal(Word32And(LoadMapBitField(getter_map),
- Int32Constant(1 << Map::kIsCallable)),
- Int32Constant(0)),
- &done);
+ GotoUnless(IsCallableMap(getter_map), &done);
// Call the accessor.
Callable callable = CodeFactory::Call(isolate());
@@ -3837,6 +4685,9 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
Bind(&if_isdictionary);
{
+ // Negative keys must be converted to property names.
+ GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), if_bailout);
+
Variable var_entry(this, MachineType::PointerRepresentation());
Node* elements = LoadElements(object);
NumberDictionaryLookup<SeededNumberDictionary>(
@@ -3844,18 +4695,18 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
Bind(&if_isfaststringwrapper);
{
- AssertInstanceType(object, JS_VALUE_TYPE);
+ CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
Node* string = LoadJSValueValue(object);
- Assert(IsStringInstanceType(LoadInstanceType(string)));
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
Node* length = LoadStringLength(string);
GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
Goto(&if_isobjectorsmi);
}
Bind(&if_isslowstringwrapper);
{
- AssertInstanceType(object, JS_VALUE_TYPE);
+ CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
Node* string = LoadJSValueValue(object);
- Assert(IsStringInstanceType(LoadInstanceType(string)));
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
Node* length = LoadStringLength(string);
GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
Goto(&if_isdictionary);
@@ -3881,7 +4732,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
Label* if_bailout) {
// Ensure receiver is JSReceiver, otherwise bailout.
Label if_objectisnotsmi(this);
- Branch(WordIsSmi(receiver), if_bailout, &if_objectisnotsmi);
+ Branch(TaggedIsSmi(receiver), if_bailout, &if_objectisnotsmi);
Bind(&if_objectisnotsmi);
Node* map = LoadMap(receiver);
@@ -3991,7 +4842,7 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
return_runtime(this, Label::kDeferred), return_result(this);
// Goto runtime if {object} is a Smi.
- GotoIf(WordIsSmi(object), &return_runtime);
+ GotoIf(TaggedIsSmi(object), &return_runtime);
// Load map of {object}.
Node* object_map = LoadMap(object);
@@ -4014,7 +4865,7 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
}
// Goto runtime if {callable} is a Smi.
- GotoIf(WordIsSmi(callable), &return_runtime);
+ GotoIf(TaggedIsSmi(callable), &return_runtime);
// Load map of {callable}.
Node* callable_map = LoadMap(callable);
@@ -4131,8 +4982,10 @@ compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
bool constant_index = false;
if (mode == SMI_PARAMETERS) {
element_size_shift -= kSmiShiftBits;
- constant_index = ToIntPtrConstant(index_node, index);
- index = index >> kSmiShiftBits;
+ Smi* smi_index;
+ constant_index = ToSmiConstant(index_node, smi_index);
+ if (constant_index) index = smi_index->value();
+ index_node = BitcastTaggedToWord(index_node);
} else if (mode == INTEGER_PARAMETERS) {
int32_t temp = 0;
constant_index = ToInt32Constant(index_node, temp);
@@ -4147,16 +5000,14 @@ compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
if (Is64() && mode == INTEGER_PARAMETERS) {
index_node = ChangeInt32ToInt64(index_node);
}
- if (base_size == 0) {
- return (element_size_shift >= 0)
- ? WordShl(index_node, IntPtrConstant(element_size_shift))
- : WordShr(index_node, IntPtrConstant(-element_size_shift));
- }
- return IntPtrAdd(
- IntPtrConstant(base_size),
- (element_size_shift >= 0)
- ? WordShl(index_node, IntPtrConstant(element_size_shift))
- : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+
+ Node* shifted_index =
+ (element_size_shift == 0)
+ ? index_node
+ : ((element_size_shift > 0)
+ ? WordShl(index_node, IntPtrConstant(element_size_shift))
+ : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+ return IntPtrAddFoldConstants(IntPtrConstant(base_size), shifted_index);
}
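The value being computed is offset = base_size + (index << element_size_shift), where a negative shift means shifting right instead (for example, scaling a smi-tagged index back down). A scalar sketch, using an unsigned type so the right shift is logical like WordShr:

    #include <cstdint>

    uintptr_t ElementOffset(uintptr_t index, int element_size_shift,
                            uintptr_t base_size) {
      uintptr_t shifted =
          (element_size_shift == 0)  ? index
          : (element_size_shift > 0) ? index << element_size_shift
                                     : index >> -element_size_shift;
      return base_size + shifted;
    }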
compiler::Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
@@ -4183,11 +5034,10 @@ void CodeStubAssembler::UpdateFeedback(compiler::Node* feedback,
compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
Variable var_receiver_map(this, MachineRepresentation::kTagged);
- // TODO(ishell): defer blocks when it works.
- Label load_smi_map(this /*, Label::kDeferred*/), load_receiver_map(this),
+ Label load_smi_map(this, Label::kDeferred), load_receiver_map(this),
if_result(this);
- Branch(WordIsSmi(receiver), &load_smi_map, &load_receiver_map);
+ Branch(TaggedIsSmi(receiver), &load_smi_map, &load_receiver_map);
Bind(&load_smi_map);
{
var_receiver_map.Bind(LoadRoot(Heap::kHeapNumberMapRootIndex));
@@ -4205,22 +5055,29 @@ compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
compiler::Node* CodeStubAssembler::TryMonomorphicCase(
compiler::Node* slot, compiler::Node* vector, compiler::Node* receiver_map,
Label* if_handler, Variable* var_handler, Label* if_miss) {
+ Comment("TryMonomorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
// TODO(ishell): add helper class that hides offset computations for a series
// of loads.
int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS,
- SMI_PARAMETERS, header_size);
- Node* feedback = Load(MachineType::AnyTagged(), vector, offset);
+ // Adding |header_size| with a separate IntPtrAdd rather than passing it
+ // into ElementOffsetFromIndex() allows it to be folded into a single
+ // [base, index, offset] indirect memory access on x64.
+ Node* offset =
+ ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
+ Node* feedback = Load(MachineType::AnyTagged(), vector,
+ IntPtrAdd(offset, IntPtrConstant(header_size)));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
- GotoUnless(WordEqual(receiver_map, LoadWeakCellValue(feedback)), if_miss);
+ GotoIf(WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(feedback)),
+ if_miss);
- Node* handler = Load(MachineType::AnyTagged(), vector,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)));
+ Node* handler =
+ Load(MachineType::AnyTagged(), vector,
+ IntPtrAdd(offset, IntPtrConstant(header_size + kPointerSize)));
var_handler->Bind(handler);
Goto(if_handler);
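A sketch of the folding argument made in the comment above (the types and layout are stand-ins, not V8's): both loads share one scaled index and differ only by a constant displacement, which is exactly what an x64 base+index*scale+disp operand encodes.

    #include <cstdint>

    struct Pair { void* feedback; void* handler; };

    Pair LoadFeedbackPair(char* vector, intptr_t scaled_index,
                          intptr_t header_size) {
      // Two adjacent tagged slots: {weak cell, handler}.
      void* feedback = *reinterpret_cast<void**>(
          vector + scaled_index + header_size);
      void* handler = *reinterpret_cast<void**>(
          vector + scaled_index + header_size + sizeof(void*));
      return {feedback, handler};
    }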
@@ -4230,6 +5087,7 @@ compiler::Node* CodeStubAssembler::TryMonomorphicCase(
void CodeStubAssembler::HandlePolymorphicCase(
compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
Variable* var_handler, Label* if_miss, int unroll_count) {
+ Comment("HandlePolymorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
// Iterate {feedback} array.
@@ -4249,34 +5107,70 @@ void CodeStubAssembler::HandlePolymorphicCase(
Bind(&next_entry);
}
- Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
// Loop from {unroll_count}*kEntrySize to {length}.
- Variable var_index(this, MachineType::PointerRepresentation());
- Label loop(this, &var_index);
- var_index.Bind(IntPtrConstant(unroll_count * kEntrySize));
- Goto(&loop);
- Bind(&loop);
- {
- Node* index = var_index.value();
- GotoIf(UintPtrGreaterThanOrEqual(index, length), if_miss);
-
- Node* cached_map = LoadWeakCellValue(
- LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
-
- Label next_entry(this);
- GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+ Node* init = IntPtrConstant(unroll_count * kEntrySize);
+ Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), init, length,
+ [receiver_map, feedback, if_handler, var_handler](CodeStubAssembler* csa,
+ Node* index) {
+ Node* cached_map = csa->LoadWeakCellValue(
+ csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
+
+ Label next_entry(csa);
+ csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
+
+ // Found, now call handler.
+ Node* handler = csa->LoadFixedArrayElement(
+ feedback, index, kPointerSize, INTPTR_PARAMETERS);
+ var_handler->Bind(handler);
+ csa->Goto(if_handler);
+
+ csa->Bind(&next_entry);
+ },
+ kEntrySize, IndexAdvanceMode::kPost);
+ // The loop falls through if no handler was found.
+ Goto(if_miss);
+}
+
+void CodeStubAssembler::HandleKeyedStorePolymorphicCase(
+ compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
+ Variable* var_handler, Label* if_transition_handler,
+ Variable* var_transition_map_cell, Label* if_miss) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_transition_map_cell->rep());
- // Found, now call handler.
- Node* handler =
- LoadFixedArrayElement(feedback, index, kPointerSize, INTPTR_PARAMETERS);
- var_handler->Bind(handler);
- Goto(if_handler);
+ const int kEntrySize = 3;
- Bind(&next_entry);
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(kEntrySize)));
- Goto(&loop);
- }
+ Node* init = IntPtrConstant(0);
+ Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), init, length,
+ [receiver_map, feedback, if_handler, var_handler, if_transition_handler,
+ var_transition_map_cell](CodeStubAssembler* csa, Node* index) {
+ Node* cached_map = csa->LoadWeakCellValue(
+ csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
+ Label next_entry(csa);
+ csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
+
+ Node* maybe_transition_map_cell = csa->LoadFixedArrayElement(
+ feedback, index, kPointerSize, INTPTR_PARAMETERS);
+
+ var_handler->Bind(csa->LoadFixedArrayElement(
+ feedback, index, 2 * kPointerSize, INTPTR_PARAMETERS));
+ csa->GotoIf(
+ csa->WordEqual(maybe_transition_map_cell,
+ csa->LoadRoot(Heap::kUndefinedValueRootIndex)),
+ if_handler);
+ var_transition_map_cell->Bind(maybe_transition_map_cell);
+ csa->Goto(if_transition_handler);
+
+ csa->Bind(&next_entry);
+ },
+ kEntrySize, IndexAdvanceMode::kPost);
+ // The loop falls through if no handler was found.
+ Goto(if_miss);
}
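The keyed-store feedback array is walked in strides of three: {cached map weak cell, transition map cell or undefined, handler}. A scalar sketch of that walk; the types are illustrative stand-ins:

    #include <cstddef>

    struct Entry { const void* map; const void* transition;
                   const void* handler; };
    constexpr const void* kUndefined = nullptr;  // stand-in for the root

    const void* FindHandler(const Entry* feedback, size_t count,
                            const void* receiver_map,
                            const void** transition_out) {
      for (size_t i = 0; i < count; ++i) {  // kEntrySize == 3 in the array
        if (feedback[i].map != receiver_map) continue;
        if (feedback[i].transition == kUndefined) return feedback[i].handler;
        *transition_out = feedback[i].transition;  // transition path
        return feedback[i].handler;
      }
      return nullptr;  // miss
    }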
compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
@@ -4285,9 +5179,10 @@ compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
Node* hash_field = LoadNameHashField(name);
- Assert(Word32Equal(
- Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
- Int32Constant(0)));
+ CSA_ASSERT(this,
+ Word32Equal(Word32And(hash_field,
+ Int32Constant(Name::kHashNotComputedMask)),
+ Int32Constant(0)));
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
@@ -4352,11 +5247,11 @@ void CodeStubAssembler::TryProbeStubCacheTable(
DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
stub_cache->key_reference(table).address());
- Node* code = Load(MachineType::Pointer(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
+ Node* handler = Load(MachineType::TaggedPointer(), key_base,
+ IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
// We found the handler.
- var_handler->Bind(code);
+ var_handler->Bind(handler);
Goto(if_handler);
}
@@ -4369,7 +5264,7 @@ void CodeStubAssembler::TryProbeStubCache(
IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
// Check that the {receiver} isn't a smi.
- GotoIf(WordIsSmi(receiver), &miss);
+ GotoIf(TaggedIsSmi(receiver), &miss);
Node* receiver_map = LoadMap(receiver);
@@ -4396,7 +5291,7 @@ void CodeStubAssembler::TryProbeStubCache(
Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
Variable var_intptr_key(this, MachineType::PointerRepresentation());
Label done(this, &var_intptr_key), key_is_smi(this);
- GotoIf(WordIsSmi(key), &key_is_smi);
+ GotoIf(TaggedIsSmi(key), &key_is_smi);
// Try to convert a heap number to a Smi.
GotoUnless(WordEqual(LoadMap(key), HeapNumberMapConstant()), miss);
{
@@ -4423,6 +5318,7 @@ void CodeStubAssembler::EmitFastElementsBoundsCheck(Node* object,
Node* is_jsarray_condition,
Label* miss) {
Variable var_length(this, MachineType::PointerRepresentation());
+ Comment("Fast elements bounds check");
Label if_array(this), length_loaded(this, &var_length);
GotoIf(is_jsarray_condition, &if_array);
{
@@ -4447,7 +5343,7 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
Label* out_of_bounds, Label* miss) {
Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
- if_dictionary(this), unreachable(this);
+ if_dictionary(this);
GotoIf(
IntPtrGreaterThan(elements_kind, IntPtrConstant(LAST_FAST_ELEMENTS_KIND)),
&if_nonfast);
@@ -4532,7 +5428,7 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
var_entry.value(), SeededNumberDictionary::kEntryDetailsIndex);
Node* details = SmiToWord32(
LoadFixedArrayElement(elements, details_index, 0, INTPTR_PARAMETERS));
- Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+ Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
// TODO(jkummerow): Support accessors without missing?
GotoUnless(Word32Equal(kind, Int32Constant(kData)), miss);
// Finally, load the value.
@@ -4576,13 +5472,13 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
- const int kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
- 1;
+ const size_t kTypedElementsKindCount =
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
- static_cast<size_t>(kTypedElementsKindCount));
+ kTypedElementsKindCount);
Bind(&uint8_elements);
{
Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
@@ -4642,114 +5538,370 @@ void CodeStubAssembler::HandleLoadICHandlerCase(
const LoadICParameters* p, Node* handler, Label* miss,
ElementSupport support_elements) {
Comment("have_handler");
- Label call_handler(this);
- GotoUnless(WordIsSmi(handler), &call_handler);
+ Variable var_holder(this, MachineRepresentation::kTagged);
+ var_holder.Bind(p->receiver);
+ Variable var_smi_handler(this, MachineRepresentation::kTagged);
+ var_smi_handler.Bind(handler);
+
+ Variable* vars[] = {&var_holder, &var_smi_handler};
+ Label if_smi_handler(this, 2, vars);
+ Label try_proto_handler(this), call_handler(this);
+
+ Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
- // |handler| is a Smi, encoding what to do. See handler-configuration.h
+ // |handler| is a Smi, encoding what to do. See SmiHandler methods
// for the encoding format.
+ Bind(&if_smi_handler);
{
- Variable var_double_value(this, MachineRepresentation::kFloat64);
- Label rebox_double(this, &var_double_value);
+ HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
+ miss, support_elements);
+ }
- Node* handler_word = SmiUntag(handler);
- if (support_elements == kSupportElements) {
- Label property(this);
- Node* handler_type =
- WordAnd(handler_word, IntPtrConstant(LoadHandlerTypeBit::kMask));
- GotoUnless(
- WordEqual(handler_type, IntPtrConstant(kLoadICHandlerForElements)),
- &property);
-
- Comment("element_load");
- Node* intptr_index = TryToIntptr(p->name, miss);
- Node* elements = LoadElements(p->receiver);
- Node* is_jsarray =
- WordAnd(handler_word, IntPtrConstant(KeyedLoadIsJsArray::kMask));
- Node* is_jsarray_condition = WordNotEqual(is_jsarray, IntPtrConstant(0));
- Node* elements_kind = BitFieldDecode<KeyedLoadElementsKind>(handler_word);
- Label if_hole(this), unimplemented_elements_kind(this);
- Label* out_of_bounds = miss;
- EmitElementLoad(p->receiver, elements, elements_kind, intptr_index,
- is_jsarray_condition, &if_hole, &rebox_double,
- &var_double_value, &unimplemented_elements_kind,
- out_of_bounds, miss);
-
- Bind(&unimplemented_elements_kind);
- {
- // Smi handlers should only be installed for supported elements kinds.
- // Crash if we get here.
- DebugBreak();
- Goto(miss);
- }
+ Bind(&try_proto_handler);
+ {
+ GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+ HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
+ &if_smi_handler, miss);
+ }
- Bind(&if_hole);
- {
- Comment("convert hole");
- Node* convert_hole =
- WordAnd(handler_word, IntPtrConstant(KeyedLoadConvertHole::kMask));
- GotoIf(WordEqual(convert_hole, IntPtrConstant(0)), miss);
- Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
- DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- GotoUnless(
- WordEqual(
- LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Smi::FromInt(Isolate::kArrayProtectorValid))),
- miss);
- Return(UndefinedConstant());
- }
+ Bind(&call_handler);
+ {
+ typedef LoadWithVectorDescriptor Descriptor;
+ TailCallStub(Descriptor(isolate()), handler, p->context,
+ Arg(Descriptor::kReceiver, p->receiver),
+ Arg(Descriptor::kName, p->name),
+ Arg(Descriptor::kSlot, p->slot),
+ Arg(Descriptor::kVector, p->vector));
+ }
+}
- Bind(&property);
- Comment("property_load");
+void CodeStubAssembler::HandleLoadICSmiHandlerCase(
+ const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
+ ElementSupport support_elements) {
+ Variable var_double_value(this, MachineRepresentation::kFloat64);
+ Label rebox_double(this, &var_double_value);
+
+ Node* handler_word = SmiUntag(smi_handler);
+ Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
+ if (support_elements == kSupportElements) {
+ Label property(this);
+ GotoUnless(
+ WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForElements)),
+ &property);
+
+ Comment("element_load");
+ Node* intptr_index = TryToIntptr(p->name, miss);
+ Node* elements = LoadElements(holder);
+ Node* is_jsarray_condition =
+ IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
+ Node* elements_kind =
+ DecodeWord<LoadHandler::ElementsKindBits>(handler_word);
+ Label if_hole(this), unimplemented_elements_kind(this);
+ Label* out_of_bounds = miss;
+ EmitElementLoad(holder, elements, elements_kind, intptr_index,
+ is_jsarray_condition, &if_hole, &rebox_double,
+ &var_double_value, &unimplemented_elements_kind,
+ out_of_bounds, miss);
+
+ Bind(&unimplemented_elements_kind);
+ {
+ // Smi handlers should only be installed for supported elements kinds.
+ // Crash if we get here.
+ DebugBreak();
+ Goto(miss);
}
- // |handler_word| is a field index as obtained by
- // FieldIndex.GetLoadByFieldOffset():
- Label inobject_double(this), out_of_object(this),
- out_of_object_double(this);
- Node* inobject_bit =
- WordAnd(handler_word, IntPtrConstant(FieldOffsetIsInobject::kMask));
- Node* double_bit =
- WordAnd(handler_word, IntPtrConstant(FieldOffsetIsDouble::kMask));
- Node* offset =
- WordSar(handler_word, IntPtrConstant(FieldOffsetOffset::kShift));
-
- GotoIf(WordEqual(inobject_bit, IntPtrConstant(0)), &out_of_object);
-
- GotoUnless(WordEqual(double_bit, IntPtrConstant(0)), &inobject_double);
- Return(LoadObjectField(p->receiver, offset));
-
- Bind(&inobject_double);
- if (FLAG_unbox_double_fields) {
- var_double_value.Bind(
- LoadObjectField(p->receiver, offset, MachineType::Float64()));
- } else {
- Node* mutable_heap_number = LoadObjectField(p->receiver, offset);
- var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+ Bind(&if_hole);
+ {
+ Comment("convert hole");
+ GotoUnless(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ GotoUnless(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+ miss);
+ Return(UndefinedConstant());
}
- Goto(&rebox_double);
- Bind(&out_of_object);
- Node* properties = LoadProperties(p->receiver);
- Node* value = LoadObjectField(properties, offset);
- GotoUnless(WordEqual(double_bit, IntPtrConstant(0)), &out_of_object_double);
- Return(value);
+ Bind(&property);
+ Comment("property_load");
+ }
+
+ Label constant(this), field(this);
+ Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForFields)),
+ &field, &constant);
+
+ Bind(&field);
+ {
+ Comment("field_load");
+ Node* offset = DecodeWord<LoadHandler::FieldOffsetBits>(handler_word);
+
+ Label inobject(this), out_of_object(this);
+ Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
+ &out_of_object);
+
+ Bind(&inobject);
+ {
+ Label is_double(this);
+ GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+ Return(LoadObjectField(holder, offset));
+
+ Bind(&is_double);
+ if (FLAG_unbox_double_fields) {
+ var_double_value.Bind(
+ LoadObjectField(holder, offset, MachineType::Float64()));
+ } else {
+ Node* mutable_heap_number = LoadObjectField(holder, offset);
+ var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+ }
+ Goto(&rebox_double);
+ }
- Bind(&out_of_object_double);
- var_double_value.Bind(LoadHeapNumberValue(value));
- Goto(&rebox_double);
+ Bind(&out_of_object);
+ {
+ Label is_double(this);
+ Node* properties = LoadProperties(holder);
+ Node* value = LoadObjectField(properties, offset);
+ GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+ Return(value);
+
+ Bind(&is_double);
+ var_double_value.Bind(LoadHeapNumberValue(value));
+ Goto(&rebox_double);
+ }
Bind(&rebox_double);
Return(AllocateHeapNumberWithValue(var_double_value.value()));
}
- // |handler| is a heap object. Must be code, call it.
- Bind(&call_handler);
- typedef LoadWithVectorDescriptor Descriptor;
- TailCallStub(Descriptor(isolate()), handler, p->context,
- Arg(Descriptor::kReceiver, p->receiver),
- Arg(Descriptor::kName, p->name),
- Arg(Descriptor::kSlot, p->slot),
- Arg(Descriptor::kVector, p->vector));
+ Bind(&constant);
+ {
+ Comment("constant_load");
+ Node* descriptors = LoadMapDescriptors(LoadMap(holder));
+ Node* descriptor =
+ DecodeWord<LoadHandler::DescriptorValueIndexBits>(handler_word);
+ CSA_ASSERT(this,
+ UintPtrLessThan(descriptor,
+ LoadAndUntagFixedArrayBaseLength(descriptors)));
+ Node* value =
+ LoadFixedArrayElement(descriptors, descriptor, 0, INTPTR_PARAMETERS);
+
+ Label if_accessor_info(this);
+ GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
+ &if_accessor_info);
+ Return(value);
+
+ Bind(&if_accessor_info);
+ Callable callable = CodeFactory::ApiGetter(isolate());
+ TailCallStub(callable, p->context, p->receiver, holder, value);
+ }
+}
+
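For the field-load path above, the smi handler is just a packed word of bit fields. A sketch of the decoding; the bit positions below are invented for illustration and are not LoadHandler's actual layout:

    #include <cstdint>

    constexpr uint32_t kKindShift = 0;
    constexpr uint32_t kKindMask = 0xFu;
    constexpr uint32_t kInobjectBit = 1u << 4;
    constexpr uint32_t kDoubleBit = 1u << 5;
    constexpr uint32_t kOffsetShift = 6;

    uint32_t DecodeKind(uint32_t handler_word) {
      return (handler_word >> kKindShift) & kKindMask;
    }

    struct FieldLoad { bool inobject; bool is_double; uint32_t offset; };

    FieldLoad DecodeFieldHandler(uint32_t handler_word) {
      return {(handler_word & kInobjectBit) != 0,
              (handler_word & kDoubleBit) != 0,
              handler_word >> kOffsetShift};
    }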
+void CodeStubAssembler::HandleLoadICProtoHandler(
+ const LoadICParameters* p, Node* handler, Variable* var_holder,
+ Variable* var_smi_handler, Label* if_smi_handler, Label* miss) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
+
+ // IC dispatchers rely on these assumptions to be held.
+ STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kHolderCellOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
+ LoadHandler::kSmiHandlerOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
+ LoadHandler::kValidityCellOffset);
+
+ // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
+ Label validity_cell_check_done(this);
+ Node* validity_cell =
+ LoadObjectField(handler, LoadHandler::kValidityCellOffset);
+ GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+ &validity_cell_check_done);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(WordNotEqual(cell_value,
+ SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ miss);
+ Goto(&validity_cell_check_done);
+
+ Bind(&validity_cell_check_done);
+ Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+ CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+ Node* handler_flags = SmiUntag(smi_handler);
+
+ Label check_prototypes(this);
+ GotoUnless(
+ IsSetWord<LoadHandler::DoNegativeLookupOnReceiverBits>(handler_flags),
+ &check_prototypes);
+ {
+ CSA_ASSERT(this, Word32BinaryNot(
+ HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+ // We have a dictionary receiver, do a negative lookup check.
+ NameDictionaryNegativeLookup(p->receiver, p->name, miss);
+ Goto(&check_prototypes);
+ }
+
+ Bind(&check_prototypes);
+ Node* maybe_holder_cell =
+ LoadObjectField(handler, LoadHandler::kHolderCellOffset);
+ Label array_handler(this), tuple_handler(this);
+ Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
+
+ Bind(&tuple_handler);
+ {
+ Label load_existent(this);
+ GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+ // This is a handler for a load of a non-existent value.
+ Return(UndefinedConstant());
+
+ Bind(&load_existent);
+ Node* holder = LoadWeakCellValue(maybe_holder_cell);
+ // The |holder| is guaranteed to be alive at this point since we passed
+ // both the receiver map check and the validity cell check.
+ CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+
+ var_holder->Bind(holder);
+ var_smi_handler->Bind(smi_handler);
+ Goto(if_smi_handler);
+ }
+
+ Bind(&array_handler);
+ {
+ typedef LoadICProtoArrayDescriptor Descriptor;
+ LoadICProtoArrayStub stub(isolate());
+ Node* target = HeapConstant(stub.GetCode());
+ TailCallStub(Descriptor(isolate()), target, p->context,
+ Arg(Descriptor::kReceiver, p->receiver),
+ Arg(Descriptor::kName, p->name),
+ Arg(Descriptor::kSlot, p->slot),
+ Arg(Descriptor::kVector, p->vector),
+ Arg(Descriptor::kHandler, handler));
+ }
+}
+
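The proto-handler protocol assumed above, sketched as a plain struct (the field names are stand-ins mirroring the LoadHandler accessors): a validity cell guards the whole prototype chain, a smi handler describes the final load, and the holder is referenced weakly.

    #include <cstdint>

    struct WeakCell { void* value; };  // cleared cell => value == nullptr
    struct Cell { intptr_t value; };

    constexpr intptr_t kPrototypeChainValid = 0;  // stand-in constant

    struct ProtoHandler {      // Tuple3-style handler, sketched
      Cell* validity_cell;     // null means "nothing to check"
      intptr_t smi_handler;    // encodes the final field/constant load
      WeakCell* holder_cell;   // weak reference to the holder object
    };

    // Returns the holder on success, nullptr for a miss.
    void* CheckProtoHandler(const ProtoHandler& h) {
      if (h.validity_cell != nullptr &&
          h.validity_cell->value != kPrototypeChainValid) {
        return nullptr;  // some map in the prototype chain changed: miss
      }
      return h.holder_cell->value;
    }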
+void CodeStubAssembler::LoadICProtoArray(const LoadICParameters* p,
+ Node* handler) {
+ Label miss(this);
+ CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+ CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
+
+ Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+ Node* handler_flags = SmiUntag(smi_handler);
+
+ Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
+
+ Node* holder = EmitLoadICProtoArrayCheck(p, handler, handler_length,
+ handler_flags, &miss);
+
+ HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, kOnlyProperties);
+
+ Bind(&miss);
+ {
+ TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+ p->slot, p->vector);
+ }
+}
+
+Node* CodeStubAssembler::EmitLoadICProtoArrayCheck(const LoadICParameters* p,
+ Node* handler,
+ Node* handler_length,
+ Node* handler_flags,
+ Label* miss) {
+ Variable start_index(this, MachineType::PointerRepresentation());
+ start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
+
+ Label can_access(this);
+ GotoUnless(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
+ &can_access);
+ {
+    // Skip the handler's first prototype entry; it holds the expected
+    // native context, which is checked here instead.
+ start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
+
+ int offset =
+ FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
+ Node* expected_native_context =
+ LoadWeakCellValue(LoadObjectField(handler, offset), miss);
+ CSA_ASSERT(this, IsNativeContext(expected_native_context));
+
+ Node* native_context = LoadNativeContext(p->context);
+ GotoIf(WordEqual(expected_native_context, native_context), &can_access);
+ // If the receiver is not a JSGlobalProxy then we miss.
+ GotoUnless(IsJSGlobalProxy(p->receiver), miss);
+ // For a JSGlobalProxy receiver, try to compare the security tokens of the
+ // current and expected native contexts.
+ Node* expected_token = LoadContextElement(expected_native_context,
+ Context::SECURITY_TOKEN_INDEX);
+ Node* current_token =
+ LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
+ Branch(WordEqual(expected_token, current_token), &can_access, miss);
+ }
+ Bind(&can_access);
+
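+ // Walk the prototype cells recorded in the handler and verify that the
+ // name is still absent from every prototype on the chain.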
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), start_index.value(), handler_length,
+ [this, p, handler, miss](CodeStubAssembler*, Node* current) {
+ Node* prototype_cell =
+ LoadFixedArrayElement(handler, current, 0, INTPTR_PARAMETERS);
+ CheckPrototype(prototype_cell, p->name, miss);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ Node* maybe_holder_cell = LoadFixedArrayElement(
+ handler, IntPtrConstant(LoadHandler::kHolderCellIndex), 0,
+ INTPTR_PARAMETERS);
+ Label load_existent(this);
+ GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+ // This is a handler for a load of a non-existent value.
+ Return(UndefinedConstant());
+
+ Bind(&load_existent);
+ Node* holder = LoadWeakCellValue(maybe_holder_cell);
+ // The |holder| is guaranteed to be alive at this point since we passed
+ // the receiver map check, the validity cell check and the prototype chain
+ // check.
+ CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+ return holder;
+}
+
+void CodeStubAssembler::CheckPrototype(Node* prototype_cell, Node* name,
+ Label* miss) {
+ Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
+
+ Label done(this);
+ Label if_property_cell(this), if_dictionary_object(this);
+
+ // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
+ Branch(WordEqual(LoadMap(maybe_prototype),
+ LoadRoot(Heap::kGlobalPropertyCellMapRootIndex)),
+ &if_property_cell, &if_dictionary_object);
+
+ Bind(&if_dictionary_object);
+ {
+ CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
+ NameDictionaryNegativeLookup(maybe_prototype, name, miss);
+ Goto(&done);
+ }
+
+ Bind(&if_property_cell);
+ {
+ // Ensure the property cell still contains the hole.
+ Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
+ GotoIf(WordNotEqual(value, LoadRoot(Heap::kTheHoleValueRootIndex)), miss);
+ Goto(&done);
+ }
+
+ Bind(&done);
+}
+
+void CodeStubAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
+ Label* miss) {
+ CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
+ Node* properties = LoadProperties(object);
+ // Ensure the property does not exist in a dictionary-mode object.
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label done(this);
+ NameDictionaryLookup<NameDictionary>(properties, name, miss, &var_name_index,
+ &done);
+ Bind(&done);
}
void CodeStubAssembler::LoadIC(const LoadICParameters* p) {
@@ -4868,7 +6020,7 @@ void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
if_property_dictionary(this), if_found_on_receiver(this);
Node* receiver = p->receiver;
- GotoIf(WordIsSmi(receiver), &slow);
+ GotoIf(TaggedIsSmi(receiver), &slow);
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
// Receivers requiring non-standard element accesses (interceptors, access
@@ -4943,7 +6095,8 @@ void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
const int32_t kMaxLinear = 210;
Label stub_cache(this);
Node* bitfield3 = LoadMapBitField3(receiver_map);
- Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+ Node* nof =
+ DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), &stub_cache);
Node* descriptors = LoadMapDescriptors(receiver_map);
Variable var_name_index(this, MachineType::PointerRepresentation());
@@ -5014,6 +6167,262 @@ void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
}
}
+void CodeStubAssembler::HandleStoreFieldAndReturn(Node* handler_word,
+ Node* holder,
+ Representation representation,
+ Node* value, Node* transition,
+ Label* miss) {
+ bool transition_to_field = transition != nullptr;
+ Node* prepared_value = PrepareValueForWrite(value, representation, miss);
+
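+ // For a transitioning store, the out-of-object properties backing store
+ // may first have to grow to make room for the new field.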
+ if (transition_to_field) {
+ Label storage_extended(this);
+ GotoUnless(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
+ &storage_extended);
+ Comment("[ Extend storage");
+ ExtendPropertiesBackingStore(holder);
+ Comment("] Extend storage");
+ Goto(&storage_extended);
+
+ Bind(&storage_extended);
+ }
+
+ Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
+ Label if_inobject(this), if_out_of_object(this);
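+ // The handler word encodes whether the field lives directly in the object
+ // or in its properties backing store.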
+ Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
+ &if_out_of_object);
+
+ Bind(&if_inobject);
+ {
+ StoreNamedField(holder, offset, true, representation, prepared_value,
+ transition_to_field);
+ if (transition_to_field) {
+ StoreObjectField(holder, JSObject::kMapOffset, transition);
+ }
+ Return(value);
+ }
+
+ Bind(&if_out_of_object);
+ {
+ StoreNamedField(holder, offset, false, representation, prepared_value,
+ transition_to_field);
+ if (transition_to_field) {
+ StoreObjectField(holder, JSObject::kMapOffset, transition);
+ }
+ Return(value);
+ }
+}
+
+void CodeStubAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
+ Node* holder, Node* value,
+ Node* transition,
+ Label* miss) {
+ Comment(transition ? "transitioning field store" : "field store");
+
+#ifdef DEBUG
+ Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+ if (transition) {
+ CSA_ASSERT(
+ this,
+ WordOr(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToField)),
+ WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToConstant))));
+ } else {
+ CSA_ASSERT(this, WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kStoreField)));
+ }
+#endif
+
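+ // Dispatch on the field representation encoded in the handler word:
+ // tagged, heap object, double or Smi.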
+ Node* field_representation =
+ DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
+
+ Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
+ if_tagged_field(this);
+
+ GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)),
+ &if_tagged_field);
+ GotoIf(WordEqual(field_representation,
+ IntPtrConstant(StoreHandler::kHeapObject)),
+ &if_heap_object_field);
+ GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)),
+ &if_double_field);
+ CSA_ASSERT(this, WordEqual(field_representation,
+ IntPtrConstant(StoreHandler::kSmi)));
+ Goto(&if_smi_field);
+
+ Bind(&if_tagged_field);
+ {
+ Comment("store tagged field");
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
+ value, transition, miss);
+ }
+
+ Bind(&if_double_field);
+ {
+ Comment("store double field");
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
+ value, transition, miss);
+ }
+
+ Bind(&if_heap_object_field);
+ {
+ Comment("store heap object field");
+ // Generate full field type check here and then store value as Tagged.
+ Node* prepared_value =
+ PrepareValueForWrite(value, Representation::HeapObject(), miss);
+ Node* value_index_in_descriptor =
+ DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+ Node* descriptors =
+ LoadMapDescriptors(transition ? transition : LoadMap(holder));
+ Node* maybe_field_type = LoadFixedArrayElement(
+ descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
+ Label do_store(this);
+ GotoIf(TaggedIsSmi(maybe_field_type), &do_store);
+ // Check that value type matches the field type.
+ {
+ Node* field_type = LoadWeakCellValue(maybe_field_type, miss);
+ Branch(WordEqual(LoadMap(prepared_value), field_type), &do_store, miss);
+ }
+ Bind(&do_store);
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
+ prepared_value, transition, miss);
+ }
+
+ Bind(&if_smi_field);
+ {
+ Comment("store smi field");
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
+ value, transition, miss);
+ }
+}
+
+void CodeStubAssembler::HandleStoreICHandlerCase(const StoreICParameters* p,
+ Node* handler, Label* miss) {
+ Label if_smi_handler(this);
+ Label try_proto_handler(this), call_handler(this);
+
+ Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+
+ // |handler| is a Smi, encoding what to do. See SmiHandler methods
+ // for the encoding format.
+ Bind(&if_smi_handler);
+ {
+ Node* holder = p->receiver;
+ Node* handler_word = SmiUntag(handler);
+
+ // Handle non-transitioning field stores.
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
+ }
+
+ Bind(&try_proto_handler);
+ {
+ GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+ HandleStoreICProtoHandler(p, handler, miss);
+ }
+
+ // |handler| is a heap object. Must be code, call it.
+ Bind(&call_handler);
+ {
+ StoreWithVectorDescriptor descriptor(isolate());
+ TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
+ p->value, p->slot, p->vector);
+ }
+}
+
+void CodeStubAssembler::HandleStoreICProtoHandler(const StoreICParameters* p,
+ Node* handler, Label* miss) {
+ // IC dispatchers rely on these assumptions to be held.
+ STATIC_ASSERT(FixedArray::kLengthOffset ==
+ StoreHandler::kTransitionCellOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
+ StoreHandler::kSmiHandlerOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
+ StoreHandler::kValidityCellOffset);
+
+ // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
+ Label validity_cell_check_done(this);
+ Node* validity_cell =
+ LoadObjectField(handler, StoreHandler::kValidityCellOffset);
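+ // A zero validity cell is a sentinel meaning that no prototype chain
+ // validity check is required for this handler.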
+ GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+ &validity_cell_check_done);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(WordNotEqual(cell_value,
+ SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ miss);
+ Goto(&validity_cell_check_done);
+
+ Bind(&validity_cell_check_done);
+ Node* smi_handler = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
+ CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+
+ Node* maybe_transition_cell =
+ LoadObjectField(handler, StoreHandler::kTransitionCellOffset);
+ Label array_handler(this), tuple_handler(this);
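+ // Per the STATIC_ASSERT above, a FixedArray-based handler aliases its Smi
+ // length into the transition cell slot, so a Smi here selects the array
+ // case.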
+ Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
+
+ Variable var_transition(this, MachineRepresentation::kTagged);
+ Label if_transition(this), if_transition_to_constant(this);
+ Bind(&tuple_handler);
+ {
+ Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+ var_transition.Bind(transition);
+ Goto(&if_transition);
+ }
+
+ Bind(&array_handler);
+ {
+ Node* length = SmiUntag(maybe_transition_cell);
+ BuildFastLoop(MachineType::PointerRepresentation(),
+ IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
+ [this, p, handler, miss](CodeStubAssembler*, Node* current) {
+ Node* prototype_cell = LoadFixedArrayElement(
+ handler, current, 0, INTPTR_PARAMETERS);
+ CheckPrototype(prototype_cell, p->name, miss);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ Node* maybe_transition_cell = LoadFixedArrayElement(
+ handler, IntPtrConstant(StoreHandler::kTransitionCellIndex), 0,
+ INTPTR_PARAMETERS);
+ Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+ var_transition.Bind(transition);
+ Goto(&if_transition);
+ }
+
+ Bind(&if_transition);
+ {
+ Node* holder = p->receiver;
+ Node* transition = var_transition.value();
+ Node* handler_word = SmiUntag(smi_handler);
+
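+ // Deprecated maps are abandoned and must not be installed on objects, so
+ // take the miss path instead.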
+ GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(transition)), miss);
+
+ Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+ GotoIf(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToConstant)),
+ &if_transition_to_constant);
+
+ // Handle transitioning field stores.
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition,
+ miss);
+
+ Bind(&if_transition_to_constant);
+ {
+ // Check that constant matches value.
+ Node* value_index_in_descriptor =
+ DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+ Node* descriptors = LoadMapDescriptors(transition);
+ Node* constant = LoadFixedArrayElement(
+ descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
+ GotoIf(WordNotEqual(p->value, constant), miss);
+
+ StoreObjectField(p->receiver, JSObject::kMapOffset, transition);
+ Return(p->value);
+ }
+ }
+}
+
void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
Variable var_handler(this, MachineRepresentation::kTagged);
// TODO(ishell): defer blocks when it works.
@@ -5030,9 +6439,7 @@ void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
Bind(&if_handler);
{
Comment("StoreIC_if_handler");
- StoreWithVectorDescriptor descriptor(isolate());
- TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
- p->name, p->value, p->slot, p->vector);
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss);
}
Bind(&try_polymorphic);
@@ -5063,15 +6470,95 @@ void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
}
}
+void CodeStubAssembler::KeyedStoreIC(const StoreICParameters* p,
+ LanguageMode language_mode) {
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ // Bind var_handler up front so that the |miss| label sees it bound on
+ // all paths.
+ var_handler.Bind(IntPtrConstant(0));
+
+ // TODO(ishell): defer blocks when it works.
+ Label if_handler(this, &var_handler), try_polymorphic(this),
+ try_megamorphic(this /*, Label::kDeferred*/),
+ try_polymorphic_name(this /*, Label::kDeferred*/),
+ miss(this /*, Label::kDeferred*/);
+
+ Node* receiver_map = LoadReceiverMap(p->receiver);
+
+ // Check monomorphic case.
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
+ Bind(&if_handler);
+ {
+ Comment("KeyedStoreIC_if_handler");
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss);
+ }
+
+ Bind(&try_polymorphic);
+ {
+ // Check polymorphic case.
+ Comment("KeyedStoreIC_try_polymorphic");
+ GotoUnless(
+ WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &try_megamorphic);
+ Label if_transition_handler(this);
+ Variable var_transition_map_cell(this, MachineRepresentation::kTagged);
+ HandleKeyedStorePolymorphicCase(receiver_map, feedback, &if_handler,
+ &var_handler, &if_transition_handler,
+ &var_transition_map_cell, &miss);
+ Bind(&if_transition_handler);
+ Comment("KeyedStoreIC_polymorphic_transition");
+ Node* transition_map =
+ LoadWeakCellValue(var_transition_map_cell.value(), &miss);
+ StoreTransitionDescriptor descriptor(isolate());
+ TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
+ p->name, transition_map, p->value, p->slot, p->vector);
+ }
+
+ Bind(&try_megamorphic);
+ {
+ // Check megamorphic case.
+ Comment("KeyedStoreIC_try_megamorphic");
+ GotoUnless(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &try_polymorphic_name);
+ TailCallStub(
+ CodeFactory::KeyedStoreIC_Megamorphic(isolate(), language_mode),
+ p->context, p->receiver, p->name, p->value, p->slot, p->vector);
+ }
+
+ Bind(&try_polymorphic_name);
+ {
+ // We might have a name in feedback, and a fixed array in the next slot.
+ Comment("KeyedStoreIC_try_polymorphic_name");
+ GotoUnless(WordEqual(feedback, p->name), &miss);
+ // If the name comparison succeeded, we know we have a FixedArray with
+ // at least one map/handler pair.
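+ // That FixedArray lives in the vector slot right after the name, hence
+ // the extra kPointerSize in the offset computation below.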
+ Node* offset = ElementOffsetFromIndex(
+ p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+ FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+ Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+ HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
+ 1);
+ }
+
+ Bind(&miss);
+ {
+ Comment("KeyedStoreIC_miss");
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+ p->vector, p->receiver, p->name);
+ }
+}
+
void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
Label try_handler(this), miss(this);
Node* weak_cell =
LoadFixedArrayElement(p->vector, p->slot, 0, SMI_PARAMETERS);
- AssertInstanceType(weak_cell, WEAK_CELL_TYPE);
+ CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
// Load value or try handler case if the {weak_cell} is cleared.
Node* property_cell = LoadWeakCellValue(weak_cell, &try_handler);
- AssertInstanceType(property_cell, PROPERTY_CELL_TYPE);
+ CSA_ASSERT(this, HasInstanceType(property_cell, PROPERTY_CELL_TYPE));
Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(value, TheHoleConstant()), &miss);
@@ -5085,7 +6572,7 @@ void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
&miss);
// In this case {handler} must be a Code object.
- AssertInstanceType(handler, CODE_TYPE);
+ CSA_ASSERT(this, HasInstanceType(handler, CODE_TYPE));
LoadWithVectorDescriptor descriptor(isolate());
Node* native_context = LoadNativeContext(p->context);
Node* receiver =
@@ -5117,8 +6604,9 @@ void CodeStubAssembler::ExtendPropertiesBackingStore(compiler::Node* object) {
FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
// The size of a new properties backing store is guaranteed to be small
// enough that the new backing store will be allocated in new space.
- Assert(UintPtrLessThan(new_capacity, IntPtrConstant(kMaxNumberOfDescriptors +
- JSObject::kFieldsAdded)));
+ CSA_ASSERT(this, UintPtrLessThan(new_capacity,
+ IntPtrConstant(kMaxNumberOfDescriptors +
+ JSObject::kFieldsAdded)));
Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
@@ -5137,30 +6625,13 @@ Node* CodeStubAssembler::PrepareValueForWrite(Node* value,
Representation representation,
Label* bailout) {
if (representation.IsDouble()) {
- Variable var_value(this, MachineRepresentation::kFloat64);
- Label if_smi(this), if_heap_object(this), done(this);
- Branch(WordIsSmi(value), &if_smi, &if_heap_object);
- Bind(&if_smi);
- {
- var_value.Bind(SmiToFloat64(value));
- Goto(&done);
- }
- Bind(&if_heap_object);
- {
- GotoUnless(
- Word32Equal(LoadInstanceType(value), Int32Constant(HEAP_NUMBER_TYPE)),
- bailout);
- var_value.Bind(LoadHeapNumberValue(value));
- Goto(&done);
- }
- Bind(&done);
- value = var_value.value();
+ value = TryTaggedToFloat64(value, bailout);
} else if (representation.IsHeapObject()) {
// Field type is checked by the handler, here we only check if the value
// is a heap object.
- GotoIf(WordIsSmi(value), bailout);
+ GotoIf(TaggedIsSmi(value), bailout);
} else if (representation.IsSmi()) {
- GotoUnless(WordIsSmi(value), bailout);
+ GotoUnless(TaggedIsSmi(value), bailout);
} else {
DCHECK(representation.IsTagged());
}
@@ -5242,7 +6713,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
bool is_load = value == nullptr;
- GotoUnless(WordIsSmi(key), bailout);
+ GotoUnless(TaggedIsSmi(key), bailout);
key = SmiUntag(key);
GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
@@ -5265,7 +6736,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
Bind(&if_mapped);
{
- Assert(WordIsSmi(mapped_index));
+ CSA_ASSERT(this, TaggedIsSmi(mapped_index));
mapped_index = SmiUntag(mapped_index);
Node* the_context = LoadFixedArrayElement(elements, IntPtrConstant(0), 0,
INTPTR_PARAMETERS);
@@ -5277,7 +6748,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
if (is_load) {
Node* result = LoadFixedArrayElement(the_context, mapped_index, 0,
INTPTR_PARAMETERS);
- Assert(WordNotEqual(result, TheHoleConstant()));
+ CSA_ASSERT(this, WordNotEqual(result, TheHoleConstant()));
var_result.Bind(result);
} else {
StoreFixedArrayElement(the_context, mapped_index, value,
@@ -5357,9 +6828,8 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
ParameterMode mode) {
if (IsFixedTypedArrayElementsKind(kind)) {
if (kind == UINT8_CLAMPED_ELEMENTS) {
-#ifdef DEBUG
- Assert(Word32Equal(value, Word32And(Int32Constant(0xff), value)));
-#endif
+ CSA_ASSERT(this,
+ Word32Equal(value, Word32And(Int32Constant(0xff), value)));
}
Node* offset = ElementOffsetFromIndex(index, kind, mode, 0);
MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
@@ -5437,7 +6907,7 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
Variable var_result(this, rep);
Label done(this, &var_result), if_smi(this);
- GotoIf(WordIsSmi(input), &if_smi);
+ GotoIf(TaggedIsSmi(input), &if_smi);
// Try to convert a heap number to a Smi.
GotoUnless(IsHeapNumberMap(LoadMap(input)), bailout);
{
@@ -5552,9 +7022,9 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// a smi before manipulating the backing store. Otherwise the backing store
// may be left in an invalid state.
if (IsFastSmiElementsKind(elements_kind)) {
- GotoUnless(WordIsSmi(value), bailout);
+ GotoUnless(TaggedIsSmi(value), bailout);
} else if (IsFastDoubleElementsKind(elements_kind)) {
- value = PrepareValueForWrite(value, Representation::Double(), bailout);
+ value = TryTaggedToFloat64(value, bailout);
}
if (IsGrowStoreMode(store_mode)) {
@@ -5692,42 +7162,44 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
Node* new_space_top_address = ExternalConstant(
ExternalReference::new_space_allocation_top_address(isolate()));
- const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoMapOffset = JSArray::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
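+ // Using the memento's last word rather than its one-past-the-end address
+ // keeps every check within the candidate memento, so a memento ending
+ // exactly at a page boundary never touches the following page.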
// Bail out if the object is not in new space.
Node* object_page = PageFromAddress(object);
{
- const int mask =
- (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
- Node* page_flags = Load(MachineType::IntPtr(), object_page);
- GotoIf(
- WordEqual(WordAnd(page_flags, IntPtrConstant(mask)), IntPtrConstant(0)),
- &no_memento_found);
+ Node* page_flags = Load(MachineType::IntPtr(), object_page,
+ IntPtrConstant(Page::kFlagsOffset));
+ GotoIf(WordEqual(WordAnd(page_flags,
+ IntPtrConstant(MemoryChunk::kIsInNewSpaceMask)),
+ IntPtrConstant(0)),
+ &no_memento_found);
}
- Node* memento_end = IntPtrAdd(object, IntPtrConstant(kMementoEndOffset));
- Node* memento_end_page = PageFromAddress(memento_end);
+ Node* memento_last_word = IntPtrAdd(
+ object, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag));
+ Node* memento_last_word_page = PageFromAddress(memento_last_word);
Node* new_space_top = Load(MachineType::Pointer(), new_space_top_address);
Node* new_space_top_page = PageFromAddress(new_space_top);
- // If the object is in new space, we need to check whether it is and
- // respective potential memento object on the same page as the current top.
- GotoIf(WordEqual(memento_end_page, new_space_top_page), &top_check);
+ // If the object is in new space, we need to check whether the respective
+ // potential memento object is on the same page as the current top.
+ GotoIf(WordEqual(memento_last_word_page, new_space_top_page), &top_check);
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- Branch(WordEqual(object_page, memento_end_page), &map_check,
+ Branch(WordEqual(object_page, memento_last_word_page), &map_check,
&no_memento_found);
// If top is on the same page as the current object, we need to check whether
// we are below top.
Bind(&top_check);
{
- Branch(UintPtrGreaterThan(memento_end, new_space_top), &no_memento_found,
- &map_check);
+ Branch(UintPtrGreaterThanOrEqual(memento_last_word, new_space_top),
+ &no_memento_found, &map_check);
}
// Memento map check.
@@ -5747,8 +7219,9 @@ Node* CodeStubAssembler::PageFromAddress(Node* address) {
}
Node* CodeStubAssembler::EnumLength(Node* map) {
+ CSA_ASSERT(this, IsMap(map));
Node* bitfield_3 = LoadMapBitField3(map);
- Node* enum_length = BitFieldDecode<Map::EnumLengthBits>(bitfield_3);
+ Node* enum_length = DecodeWordFromWord32<Map::EnumLengthBits>(bitfield_3);
return SmiTag(enum_length);
}
@@ -5770,8 +7243,8 @@ void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
Node* invalid_enum_cache_sentinel =
SmiConstant(Smi::FromInt(kInvalidEnumCacheSentinel));
Node* enum_length = EnumLength(current_map.value());
- BranchIfWordEqual(enum_length, invalid_enum_cache_sentinel, use_runtime,
- &loop);
+ Branch(WordEqual(enum_length, invalid_enum_cache_sentinel), use_runtime,
+ &loop);
}
// Check that there are no elements. |current_js_object| contains
@@ -5782,24 +7255,24 @@ void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
Node* elements = LoadElements(current_js_object.value());
Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
// Check that there are no elements.
- BranchIfWordEqual(elements, empty_fixed_array, &if_no_elements,
- &if_elements);
+ Branch(WordEqual(elements, empty_fixed_array), &if_no_elements,
+ &if_elements);
Bind(&if_elements);
{
// Second chance, the object may be using the empty slow element
// dictionary.
Node* slow_empty_dictionary =
LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex);
- BranchIfWordNotEqual(elements, slow_empty_dictionary, use_runtime,
- &if_no_elements);
+ Branch(WordNotEqual(elements, slow_empty_dictionary), use_runtime,
+ &if_no_elements);
}
Bind(&if_no_elements);
{
// Update map prototype.
current_js_object.Bind(LoadMapPrototype(current_map.value()));
- BranchIfWordEqual(current_js_object.value(), NullConstant(), use_cache,
- &next);
+ Branch(WordEqual(current_js_object.value(), NullConstant()), use_cache,
+ &next);
}
}
@@ -5808,8 +7281,8 @@ void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
// For all objects but the receiver, check that the cache is empty.
current_map.Bind(LoadMap(current_js_object.value()));
Node* enum_length = EnumLength(current_map.value());
- Node* zero_constant = SmiConstant(Smi::FromInt(0));
- BranchIf(WordEqual(enum_length, zero_constant), &loop, use_runtime);
+ Node* zero_constant = SmiConstant(Smi::kZero);
+ Branch(WordEqual(enum_length, zero_constant), &loop, use_runtime);
}
}
@@ -5877,5 +7350,1999 @@ Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
return cell;
}
+void CodeStubAssembler::BuildFastLoop(
+ const CodeStubAssembler::VariableList& vars,
+ MachineRepresentation index_rep, Node* start_index, Node* end_index,
+ std::function<void(CodeStubAssembler* assembler, Node* index)> body,
+ int increment, IndexAdvanceMode mode) {
+ Variable var(this, index_rep);
+ VariableList vars_copy(vars, zone());
+ vars_copy.Add(&var, zone());
+ var.Bind(start_index);
+ Label loop(this, vars_copy);
+ Label after_loop(this);
+ // Introduce an explicit second check of the termination condition before the
+ // loop that helps TurboFan generate better code. If there's only a single
+ // check, then the CodeStubAssembler forces it to be at the beginning of the
+ // loop, requiring a backwards branch at the end of the loop (it's not
+ // possible to force the loop header check at the end of the loop and branch
+ // forward to it from the pre-header). The extra branch is slower in the case
+ // that the loop actually iterates.
+ Branch(WordEqual(var.value(), end_index), &after_loop, &loop);
+ Bind(&loop);
+ {
+ if (mode == IndexAdvanceMode::kPre) {
+ var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+ }
+ body(this, var.value());
+ if (mode == IndexAdvanceMode::kPost) {
+ var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+ }
+ Branch(WordNotEqual(var.value(), end_index), &loop, &after_loop);
+ }
+ Bind(&after_loop);
+}
+
+void CodeStubAssembler::BuildFastFixedArrayForEach(
+ compiler::Node* fixed_array, ElementsKind kind,
+ compiler::Node* first_element_inclusive,
+ compiler::Node* last_element_exclusive,
+ std::function<void(CodeStubAssembler* assembler,
+ compiler::Node* fixed_array, compiler::Node* offset)>
+ body,
+ ParameterMode mode, ForEachDirection direction) {
+ STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+ int32_t first_val;
+ bool constant_first = ToInt32Constant(first_element_inclusive, first_val);
+ int32_t last_val;
+ bool constant_last = ToInt32Constant(last_element_exclusive, last_val);
+ if (constant_first && constant_last) {
+ int delta = last_val - first_val;
+ DCHECK(delta >= 0);
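+ // Small, statically-known ranges are fully unrolled: emit the body once
+ // per element instead of generating a loop.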
+ if (delta <= kElementLoopUnrollThreshold) {
+ if (direction == ForEachDirection::kForward) {
+ for (int i = first_val; i < last_val; ++i) {
+ Node* index = IntPtrConstant(i);
+ Node* offset =
+ ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ body(this, fixed_array, offset);
+ }
+ } else {
+ for (int i = last_val - 1; i >= first_val; --i) {
+ Node* index = IntPtrConstant(i);
+ Node* offset =
+ ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ body(this, fixed_array, offset);
+ }
+ }
+ return;
+ }
+ }
+
+ Node* start =
+ ElementOffsetFromIndex(first_element_inclusive, kind, mode,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ Node* limit =
+ ElementOffsetFromIndex(last_element_exclusive, kind, mode,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ if (direction == ForEachDirection::kReverse) std::swap(start, limit);
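+ // Reverse iteration swaps the bounds and steps by a negative increment in
+ // pre-advance mode, so the body sees the element below the cursor.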
+
+ int increment = IsFastDoubleElementsKind(kind) ? kDoubleSize : kPointerSize;
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), start, limit,
+ [fixed_array, body](CodeStubAssembler* assembler, Node* offset) {
+ body(assembler, fixed_array, offset);
+ },
+ direction == ForEachDirection::kReverse ? -increment : increment,
+ direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
+ : IndexAdvanceMode::kPost);
+}
+
+void CodeStubAssembler::BranchIfNumericRelationalComparison(
+ RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
+ Label* if_true, Label* if_false) {
+ typedef compiler::Node Node;
+
+ Label end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ // Shared entry for floating point comparison.
+ Label do_fcmp(this);
+ Variable var_fcmp_lhs(this, MachineRepresentation::kFloat64),
+ var_fcmp_rhs(this, MachineRepresentation::kFloat64);
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ Bind(&if_lhsissmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
+ switch (mode) {
+ case kLessThan:
+ BranchIfSmiLessThan(lhs, rhs, if_true, if_false);
+ break;
+ case kLessThanOrEqual:
+ BranchIfSmiLessThanOrEqual(lhs, rhs, if_true, if_false);
+ break;
+ case kGreaterThan:
+ BranchIfSmiLessThan(rhs, lhs, if_true, if_false);
+ break;
+ case kGreaterThanOrEqual:
+ BranchIfSmiLessThanOrEqual(rhs, lhs, if_true, if_false);
+ break;
+ }
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+ }
+
+ Bind(&if_lhsisnotsmi);
+ {
+ CSA_ASSERT(this, WordEqual(LoadMap(lhs), HeapNumberMapConstant()));
+
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+ }
+
+ Bind(&do_fcmp);
+ {
+ // Load the {lhs} and {rhs} floating point values.
+ Node* lhs = var_fcmp_lhs.value();
+ Node* rhs = var_fcmp_rhs.value();
+
+ // Perform a fast floating point comparison.
+ switch (mode) {
+ case kLessThan:
+ Branch(Float64LessThan(lhs, rhs), if_true, if_false);
+ break;
+ case kLessThanOrEqual:
+ Branch(Float64LessThanOrEqual(lhs, rhs), if_true, if_false);
+ break;
+ case kGreaterThan:
+ Branch(Float64GreaterThan(lhs, rhs), if_true, if_false);
+ break;
+ case kGreaterThanOrEqual:
+ Branch(Float64GreaterThanOrEqual(lhs, rhs), if_true, if_false);
+ break;
+ }
+ }
+}
+
+void CodeStubAssembler::GotoUnlessNumberLessThan(compiler::Node* lhs,
+ compiler::Node* rhs,
+ Label* if_false) {
+ Label if_true(this);
+ BranchIfNumericRelationalComparison(kLessThan, lhs, rhs, &if_true, if_false);
+ Bind(&if_true);
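+ // Falls through when lhs < rhs; otherwise control left through |if_false|.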
+}
+
+compiler::Node* CodeStubAssembler::RelationalComparison(
+ RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
+ typedef compiler::Node Node;
+
+ Label return_true(this), return_false(this), end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ // Shared entry for floating point comparison.
+ Label do_fcmp(this);
+ Variable var_fcmp_lhs(this, MachineRepresentation::kFloat64),
+ var_fcmp_rhs(this, MachineRepresentation::kFloat64);
+
+ // We might need to loop several times due to ToPrimitive and/or ToNumber
+ // conversions.
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged);
+ Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+ Label loop(this, 2, loop_vars);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {lhs} and {rhs} values.
+ lhs = var_lhs.value();
+ rhs = var_rhs.value();
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ Bind(&if_lhsissmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
+ switch (mode) {
+ case kLessThan:
+ BranchIfSmiLessThan(lhs, rhs, &return_true, &return_false);
+ break;
+ case kLessThanOrEqual:
+ BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true, &return_false);
+ break;
+ case kGreaterThan:
+ BranchIfSmiLessThan(rhs, lhs, &return_true, &return_false);
+ break;
+ case kGreaterThanOrEqual:
+ BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true, &return_false);
+ break;
+ }
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ {
+ // Convert the {rhs} to a Number; we don't need to perform the
+ // dedicated ToPrimitive(rhs, hint Number) operation, as the
+ // ToNumber(rhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+ }
+
+ Bind(&if_lhsisnotsmi);
+ {
+ // Load the HeapNumber map for later comparisons.
+ Node* number_map = HeapNumberMapConstant();
+
+ // Load the map of {lhs}.
+ Node* lhs_map = LoadMap(lhs);
+
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Check if the {lhs} is a HeapNumber.
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
+ Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
+ &if_lhsisnotnumber);
+
+ Bind(&if_lhsisnumber);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_lhsisnotnumber);
+ {
+ // Convert the {lhs} to a Number; we don't need to perform the
+ // dedicated ToPrimitive(lhs, hint Number) operation, as the
+ // ToNumber(lhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if {lhs} is a HeapNumber.
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this);
+ Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
+ &if_lhsisnotnumber);
+
+ Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(WordEqual(lhs_map, rhs_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ {
+ // Convert the {rhs} to a Number; we don't need to perform
+ // dedicated ToPrimitive(rhs, hint Number) operation, as the
+ // ToNumber(rhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_lhsisnotnumber);
+ {
+ // Load the instance type of {lhs}.
+ Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+
+ // Check if {lhs} is a String.
+ Label if_lhsisstring(this), if_lhsisnotstring(this, Label::kDeferred);
+ Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+ &if_lhsisnotstring);
+
+ Bind(&if_lhsisstring);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this, Label::kDeferred);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ // Both {lhs} and {rhs} are strings.
+ switch (mode) {
+ case kLessThan:
+ result.Bind(CallStub(CodeFactory::StringLessThan(isolate()),
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ case kLessThanOrEqual:
+ result.Bind(
+ CallStub(CodeFactory::StringLessThanOrEqual(isolate()),
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ case kGreaterThan:
+ result.Bind(
+ CallStub(CodeFactory::StringGreaterThan(isolate()),
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ case kGreaterThanOrEqual:
+ result.Bind(
+ CallStub(CodeFactory::StringGreaterThanOrEqual(isolate()),
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ }
+ }
+
+ Bind(&if_rhsisnotstring);
+ {
+ // The {lhs} is a String, while {rhs} is neither a Number nor a
+ // String, so we need to call ToPrimitive(rhs, hint Number) if
+ // {rhs} is a receiver or ToNumber(lhs) and ToNumber(rhs) in the
+ // other cases.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first passing Number hint.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ isolate(), ToPrimitiveHint::kNumber);
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // Convert both {lhs} and {rhs} to Number.
+ Callable callable = CodeFactory::ToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+ }
+
+ Bind(&if_lhsisnotstring);
+ {
+ // The {lhs} is neither a Number nor a String, so we need to call
+ // ToPrimitive(lhs, hint Number) if {lhs} is a receiver or
+ // ToNumber(lhs) and ToNumber(rhs) in the other cases.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_lhsisreceiver(this, Label::kDeferred),
+ if_lhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+ Bind(&if_lhsisreceiver);
+ {
+ // Convert {lhs} to a primitive first passing Number hint.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ isolate(), ToPrimitiveHint::kNumber);
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
+ }
+
+ Bind(&if_lhsisnotreceiver);
+ {
+ // Convert both {lhs} and {rhs} to Number.
+ Callable callable = CodeFactory::ToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Bind(&do_fcmp);
+ {
+ // Load the {lhs} and {rhs} floating point values.
+ Node* lhs = var_fcmp_lhs.value();
+ Node* rhs = var_fcmp_rhs.value();
+
+ // Perform a fast floating point comparison.
+ switch (mode) {
+ case kLessThan:
+ Branch(Float64LessThan(lhs, rhs), &return_true, &return_false);
+ break;
+ case kLessThanOrEqual:
+ Branch(Float64LessThanOrEqual(lhs, rhs), &return_true, &return_false);
+ break;
+ case kGreaterThan:
+ Branch(Float64GreaterThan(lhs, rhs), &return_true, &return_false);
+ break;
+ case kGreaterThanOrEqual:
+ Branch(Float64GreaterThanOrEqual(lhs, rhs), &return_true,
+ &return_false);
+ break;
+ }
+ }
+
+ Bind(&return_true);
+ {
+ result.Bind(BooleanConstant(true));
+ Goto(&end);
+ }
+
+ Bind(&return_false);
+ {
+ result.Bind(BooleanConstant(false));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+namespace {
+
+void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
+ CodeStubAssembler::Label* if_equal,
+ CodeStubAssembler::Label* if_notequal) {
+ // In case of abstract or strict equality checks, we need additional checks
+ // for NaN values because they are not considered equal, even if both the
+ // left and the right hand side reference exactly the same value.
+ // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
+ // seems to be what is tested in the current SIMD.js testsuite.
+
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ // Check if {value} is a Smi or a HeapObject.
+ Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(value), &if_valueissmi,
+ &if_valueisnotsmi);
+
+ assembler->Bind(&if_valueisnotsmi);
+ {
+ // Load the map of {value}.
+ Node* value_map = assembler->LoadMap(value);
+
+ // Check if {value} (and therefore {rhs}) is a HeapNumber.
+ Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
+ assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber,
+ &if_valueisnotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Convert {value} (and therefore {rhs}) to floating point value.
+ Node* value_value = assembler->LoadHeapNumberValue(value);
+
+ // Check if the HeapNumber value is a NaN.
+ assembler->BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
+ }
+
+ assembler->Bind(&if_valueisnotnumber);
+ assembler->Goto(if_equal);
+ }
+
+ assembler->Bind(&if_valueissmi);
+ assembler->Goto(if_equal);
+}
+
+void GenerateEqual_Simd128Value_HeapObject(
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
+ compiler::Node* rhs, compiler::Node* rhs_map,
+ CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
+ assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
+ if_notequal);
+}
+
+} // namespace
+
+// ES6 section 7.2.12 Abstract Equality Comparison
+compiler::Node* CodeStubAssembler::Equal(ResultMode mode, compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ // This is a slightly optimized version of Object::Equals, represented as a
+ // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
+ // change something functionality-wise in here, remember to update the
+ // Object::Equals method as well.
+ typedef compiler::Node Node;
+
+ Label if_equal(this), if_notequal(this),
+ do_rhsstringtonumber(this, Label::kDeferred), end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ // Shared entry for floating point comparison.
+ Label do_fcmp(this);
+ Variable var_fcmp_lhs(this, MachineRepresentation::kFloat64),
+ var_fcmp_rhs(this, MachineRepresentation::kFloat64);
+
+ // We might need to loop several times due to ToPrimitive and/or ToNumber
+ // conversions.
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged);
+ Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+ Label loop(this, 2, loop_vars);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {lhs} and {rhs} values.
+ lhs = var_lhs.value();
+ rhs = var_rhs.value();
+
+ // Check if {lhs} and {rhs} refer to the same object.
+ Label if_same(this), if_notsame(this);
+ Branch(WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ Bind(&if_same);
+ {
+ // The {lhs} and {rhs} reference the exact same value, yet we need special
+ // treatment for HeapNumber, as NaN is not equal to NaN.
+ GenerateEqual_Same(this, lhs, &if_equal, &if_notequal);
+ }
+
+ Bind(&if_notsame);
+ {
+ // Check if {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ Bind(&if_lhsissmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ // We have already checked for {lhs} and {rhs} being the same value, so
+ // if both are Smis when we get here they must not be equal.
+ Goto(&if_notequal);
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ Node* number_map = HeapNumberMapConstant();
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+ Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ {
+ // Load the instance type of the {rhs}.
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+ // Check if the {rhs} is a String.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ // The {rhs} is a String and the {lhs} is a Smi; we need
+ // to convert the {rhs} to a Number and compare the output to
+ // the Number on the {lhs}.
+ Goto(&do_rhsstringtonumber);
+ }
+
+ Bind(&if_rhsisnotstring);
+ {
+ // Check if the {rhs} is a Boolean.
+ Label if_rhsisboolean(this), if_rhsisnotboolean(this);
+ Branch(IsBooleanMap(rhs_map), &if_rhsisboolean,
+ &if_rhsisnotboolean);
+
+ Bind(&if_rhsisboolean);
+ {
+ // The {rhs} is a Boolean, load its number value.
+ var_rhs.Bind(LoadObjectField(rhs, Oddball::kToNumberOffset));
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotboolean);
+ {
+ // Check if the {rhs} is a Receiver.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first (passing no hint).
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ Goto(&if_notequal);
+ }
+ }
+ }
+ }
+ }
+
+ Bind(&if_lhsisnotsmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // The {lhs} is a HeapObject and the {rhs} is a Smi; swapping {lhs}
+ // and {rhs} is not observable and doesn't matter for the result, so
+ // we can just swap them and use the Smi handling above (for {lhs}
+ // being a Smi).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ Label if_lhsisstring(this), if_lhsisnumber(this),
+ if_lhsissymbol(this), if_lhsissimd128value(this),
+ if_lhsisoddball(this), if_lhsisreceiver(this);
+
+ // Both {lhs} and {rhs} are HeapObjects, load their maps
+ // and their instance types.
+ Node* lhs_map = LoadMap(lhs);
+ Node* rhs_map = LoadMap(rhs);
+
+ // Load the instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+ // Dispatch based on the instance type of {lhs}.
+ size_t const kNumCases = FIRST_NONSTRING_TYPE + 4;
+ Label* case_labels[kNumCases];
+ int32_t case_values[kNumCases];
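+ // All instance types below FIRST_NONSTRING_TYPE are strings; give each a
+ // label here and funnel them into the common string case below.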
+ for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+ case_labels[i] = new Label(this);
+ case_values[i] = i;
+ }
+ case_labels[FIRST_NONSTRING_TYPE + 0] = &if_lhsisnumber;
+ case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
+ case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsissimd128value;
+ case_values[FIRST_NONSTRING_TYPE + 2] = SIMD128_VALUE_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 3] = &if_lhsisoddball;
+ case_values[FIRST_NONSTRING_TYPE + 3] = ODDBALL_TYPE;
+ Switch(lhs_instance_type, &if_lhsisreceiver, case_values, case_labels,
+ arraysize(case_values));
+ for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+ Bind(case_labels[i]);
+ Goto(&if_lhsisstring);
+ delete case_labels[i];
+ }
+
+ Bind(&if_lhsisstring);
+ {
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ // Both {lhs} and {rhs} are of type String, just do the
+ // string comparison then.
+ Callable callable = (mode == kDontNegateResult)
+ ? CodeFactory::StringEqual(isolate())
+ : CodeFactory::StringNotEqual(isolate());
+ result.Bind(CallStub(callable, context, lhs, rhs));
+ Goto(&end);
+ }
+
+ Bind(&if_rhsisnotstring);
+ {
+ // The {lhs} is a String and the {rhs} is some other HeapObject.
+ // Swapping {lhs} and {rhs} is not observable and doesn't matter
+ // for the result, so we can just swap them and use the String
+ // handling below (for {rhs} being a String).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+ Branch(Word32Equal(lhs_instance_type, rhs_instance_type),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ {
+ // The {lhs} is a Number, the {rhs} is some other HeapObject.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ // The {rhs} is a String and the {lhs} is a HeapNumber; we need
+ // to convert the {rhs} to a Number and compare the output to
+ // the Number on the {lhs}.
+ Goto(&do_rhsstringtonumber);
+ }
+
+ Bind(&if_rhsisnotstring);
+ {
+ // Check if the {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+ // Swapping {lhs} and {rhs} is not observable and doesn't
+ // matter for the result, so we can just swap them and use
+ // the JSReceiver handling below (for {lhs} being a
+ // JSReceiver).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // Check if {rhs} is a Boolean.
+ Label if_rhsisboolean(this), if_rhsisnotboolean(this);
+ Branch(IsBooleanMap(rhs_map), &if_rhsisboolean,
+ &if_rhsisnotboolean);
+
+ Bind(&if_rhsisboolean);
+ {
+ // The {rhs} is a Boolean, convert it to a Smi first.
+ var_rhs.Bind(
+ LoadObjectField(rhs, Oddball::kToNumberOffset));
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotboolean);
+ Goto(&if_notequal);
+ }
+ }
+ }
+ }
+
+ Bind(&if_lhsisoddball);
+ {
+ // The {lhs} is an Oddball and {rhs} is some other HeapObject.
+ Label if_lhsisboolean(this), if_lhsisnotboolean(this);
+ Node* boolean_map = BooleanMapConstant();
+ Branch(WordEqual(lhs_map, boolean_map), &if_lhsisboolean,
+ &if_lhsisnotboolean);
+
+ Bind(&if_lhsisboolean);
+ {
+ // The {lhs} is a Boolean, check if {rhs} is also a Boolean.
+ Label if_rhsisboolean(this), if_rhsisnotboolean(this);
+ Branch(WordEqual(rhs_map, boolean_map), &if_rhsisboolean,
+ &if_rhsisnotboolean);
+
+ Bind(&if_rhsisboolean);
+ {
+ // Both {lhs} and {rhs} are distinct Boolean values.
+ Goto(&if_notequal);
+ }
+
+ Bind(&if_rhsisnotboolean);
+ {
+ // Convert the {lhs} to a Number first.
+ var_lhs.Bind(LoadObjectField(lhs, Oddball::kToNumberOffset));
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_lhsisnotboolean);
+ {
+ // The {lhs} is either Null or Undefined; check if the {rhs} is
+ // undetectable (i.e. either also Null or Undefined or some
+ // undetectable JSReceiver).
+ Node* rhs_bitfield = LoadMapBitField(rhs_map);
+ Branch(Word32Equal(
+ Word32And(rhs_bitfield,
+ Int32Constant(1 << Map::kIsUndetectable)),
+ Int32Constant(0)),
+ &if_notequal, &if_equal);
+ }
+ }
+
+ Bind(&if_lhsissymbol);
+ {
+ // Check if the {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+ // Swapping {lhs} and {rhs} is not observable and doesn't
+ // matter for the result, so we can just swap them and use
+ // the JSReceiver handling below (for {lhs} being a JSReceiver).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // The {rhs} is not a JSReceiver and also not the same Symbol
+ // as the {lhs}, so this equality check is considered false.
+ Goto(&if_notequal);
+ }
+ }
+
+ Bind(&if_lhsissimd128value);
+ {
+ // Check if the {rhs} is also a Simd128Value.
+ Label if_rhsissimd128value(this), if_rhsisnotsimd128value(this);
+ Branch(Word32Equal(lhs_instance_type, rhs_instance_type),
+ &if_rhsissimd128value, &if_rhsisnotsimd128value);
+
+ Bind(&if_rhsissimd128value);
+ {
+ // Both {lhs} and {rhs} are Simd128Values.
+ GenerateEqual_Simd128Value_HeapObject(
+ this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
+ }
+
+ Bind(&if_rhsisnotsimd128value);
+ {
+ // Check if the {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+ // Swapping {lhs} and {rhs} is not observable and doesn't
+ // matter for the result, so we can just swap them and use
+ // the JSReceiver handling below (for {lhs} being a JSReceiver).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // The {rhs} is some other Primitive.
+ Goto(&if_notequal);
+ }
+ }
+ }
+
+ Bind(&if_lhsisreceiver);
+ {
+ // Check if the {rhs} is also a JSReceiver.
+ Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // Both {lhs} and {rhs} are different JSReceiver references, so
+ // this cannot be considered equal.
+ Goto(&if_notequal);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // Check if {rhs} is Null or Undefined (an undetectable check
+ // is sufficient here, since we already know that {rhs} is not
+ // a JSReceiver).
+ Label if_rhsisundetectable(this),
+ if_rhsisnotundetectable(this, Label::kDeferred);
+ Node* rhs_bitfield = LoadMapBitField(rhs_map);
+ Branch(Word32Equal(
+ Word32And(rhs_bitfield,
+ Int32Constant(1 << Map::kIsUndetectable)),
+ Int32Constant(0)),
+ &if_rhsisnotundetectable, &if_rhsisundetectable);
+
+ Bind(&if_rhsisundetectable);
+ {
+ // Check if {lhs} is an undetectable JSReceiver.
+ Node* lhs_bitfield = LoadMapBitField(lhs_map);
+ Branch(Word32Equal(
+ Word32And(lhs_bitfield,
+ Int32Constant(1 << Map::kIsUndetectable)),
+ Int32Constant(0)),
+ &if_notequal, &if_equal);
+ }
+
+ Bind(&if_rhsisnotundetectable);
+ {
+ // The {rhs} is some Primitive different from Null and
+ // Undefined, need to convert {lhs} to Primitive first.
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Bind(&do_rhsstringtonumber);
+ {
+ Callable callable = CodeFactory::StringToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+
+ Bind(&do_fcmp);
+ {
+ // Load the {lhs} and {rhs} floating point values.
+ Node* lhs = var_fcmp_lhs.value();
+ Node* rhs = var_fcmp_rhs.value();
+
+ // Perform a fast floating point comparison.
+ Branch(Float64Equal(lhs, rhs), &if_equal, &if_notequal);
+ }
+
+ Bind(&if_equal);
+ {
+ result.Bind(BooleanConstant(mode == kDontNegateResult));
+ Goto(&end);
+ }
+
+ Bind(&if_notequal);
+ {
+ result.Bind(BooleanConstant(mode == kNegateResult));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+  // Here's pseudo-code for the algorithm below in the case of
+  // kDontNegateResult mode; for kNegateResult mode we properly negate the
+  // result.
+ //
+ // if (lhs == rhs) {
+  //   if (lhs->IsHeapNumber()) return !isnan(HeapNumber::cast(lhs)->value());
+ // return true;
+ // }
+ // if (!lhs->IsSmi()) {
+ // if (lhs->IsHeapNumber()) {
+ // if (rhs->IsSmi()) {
+ // return Smi::cast(rhs)->value() == HeapNumber::cast(lhs)->value();
+ // } else if (rhs->IsHeapNumber()) {
+ // return HeapNumber::cast(rhs)->value() ==
+ // HeapNumber::cast(lhs)->value();
+ // } else {
+ // return false;
+ // }
+ // } else {
+ // if (rhs->IsSmi()) {
+ // return false;
+ // } else {
+ // if (lhs->IsString()) {
+ // if (rhs->IsString()) {
+ // return %StringEqual(lhs, rhs);
+ // } else {
+ // return false;
+ // }
+  //       } else if (lhs->IsSimd128()) {
+  //         if (rhs->IsSimd128()) {
+  //           return %StrictEqual(lhs, rhs);
+  //         } else {
+  //           return false;
+  //         }
+  //       } else {
+  //         return false;
+  //       }
+ // }
+ // }
+ // } else {
+ // if (rhs->IsSmi()) {
+ // return false;
+ // } else {
+ // if (rhs->IsHeapNumber()) {
+ // return Smi::cast(lhs)->value() == HeapNumber::cast(rhs)->value();
+ // } else {
+ // return false;
+ // }
+ // }
+ // }
+
+ typedef compiler::Node Node;
+
+ Label if_equal(this), if_notequal(this), end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ // Check if {lhs} and {rhs} refer to the same object.
+ Label if_same(this), if_notsame(this);
+ Branch(WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ Bind(&if_same);
+ {
+ // The {lhs} and {rhs} reference the exact same value, yet we need special
+ // treatment for HeapNumber, as NaN is not equal to NaN.
+ GenerateEqual_Same(this, lhs, &if_equal, &if_notequal);
+ }
+
+ Bind(&if_notsame);
+ {
+ // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
+ // String and Simd128Value they can still be considered equal.
+ Node* number_map = HeapNumberMapConstant();
+
+ // Check if {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ Bind(&if_lhsisnotsmi);
+ {
+ // Load the map of {lhs}.
+ Node* lhs_map = LoadMap(lhs);
+
+ // Check if {lhs} is a HeapNumber.
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this);
+ Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
+ &if_lhsisnotnumber);
+
+ Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Convert {lhs} and {rhs} to floating point values.
+ Node* lhs_value = LoadHeapNumberValue(lhs);
+ Node* rhs_value = SmiToFloat64(rhs);
+
+ // Perform a floating point comparison of {lhs} and {rhs}.
+ Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+ Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values.
+ Node* lhs_value = LoadHeapNumberValue(lhs);
+ Node* rhs_value = LoadHeapNumberValue(rhs);
+
+ // Perform a floating point comparison of {lhs} and {rhs}.
+ Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ Goto(&if_notequal);
+ }
+ }
+
+ Bind(&if_lhsisnotnumber);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ Goto(&if_notequal);
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the instance type of {lhs}.
+ Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+
+ // Check if {lhs} is a String.
+ Label if_lhsisstring(this), if_lhsisnotstring(this);
+ Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+ &if_lhsisnotstring);
+
+ Bind(&if_lhsisstring);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ Callable callable = (mode == kDontNegateResult)
+ ? CodeFactory::StringEqual(isolate())
+ : CodeFactory::StringNotEqual(isolate());
+ result.Bind(CallStub(callable, context, lhs, rhs));
+ Goto(&end);
+ }
+
+ Bind(&if_rhsisnotstring);
+ Goto(&if_notequal);
+ }
+
+ Bind(&if_lhsisnotstring);
+ {
+ // Check if {lhs} is a Simd128Value.
+ Label if_lhsissimd128value(this), if_lhsisnotsimd128value(this);
+ Branch(Word32Equal(lhs_instance_type,
+ Int32Constant(SIMD128_VALUE_TYPE)),
+ &if_lhsissimd128value, &if_lhsisnotsimd128value);
+
+ Bind(&if_lhsissimd128value);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if {rhs} is also a Simd128Value that is equal to {lhs}.
+ GenerateEqual_Simd128Value_HeapObject(
+ this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
+ }
+
+ Bind(&if_lhsisnotsimd128value);
+ Goto(&if_notequal);
+ }
+ }
+ }
+ }
+
+ Bind(&if_lhsissmi);
+ {
+ // We already know that {lhs} and {rhs} are not reference equal, and {lhs}
+ // is a Smi; so {lhs} and {rhs} can only be strictly equal if {rhs} is a
+ // HeapNumber with an equal floating point value.
+
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ Goto(&if_notequal);
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of the {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // The {rhs} could be a HeapNumber with the same value as {lhs}.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+ Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values.
+ Node* lhs_value = SmiToFloat64(lhs);
+ Node* rhs_value = LoadHeapNumberValue(rhs);
+
+ // Perform a floating point comparison of {lhs} and {rhs}.
+ Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ Goto(&if_notequal);
+ }
+ }
+ }
+
+ Bind(&if_equal);
+ {
+ result.Bind(BooleanConstant(mode == kDontNegateResult));
+ Goto(&end);
+ }
+
+ Bind(&if_notequal);
+ {
+ result.Bind(BooleanConstant(mode == kNegateResult));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+// ECMA#sec-samevalue
+// This algorithm differs from the Strict Equality Comparison Algorithm in its
+// treatment of signed zeroes and NaNs.
+compiler::Node* CodeStubAssembler::SameValue(compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ Variable var_result(this, MachineType::PointerRepresentation());
+ Label strict_equal(this), out(this);
+
+ Node* const int_false = IntPtrConstant(0);
+ Node* const int_true = IntPtrConstant(1);
+
+ Label if_equal(this), if_notequal(this);
+ Branch(WordEqual(lhs, rhs), &if_equal, &if_notequal);
+
+ Bind(&if_equal);
+ {
+ // This covers the case when {lhs} == {rhs}. We can simply return true
+ // because SameValue considers two NaNs to be equal.
+
+ var_result.Bind(int_true);
+ Goto(&out);
+ }
+
+ Bind(&if_notequal);
+ {
+ // This covers the case when {lhs} != {rhs}. We only handle numbers here
+ // and defer to StrictEqual for the rest.
+
+ Node* const lhs_float = TryTaggedToFloat64(lhs, &strict_equal);
+ Node* const rhs_float = TryTaggedToFloat64(rhs, &strict_equal);
+
+ Label if_lhsisnan(this), if_lhsnotnan(this);
+ BranchIfFloat64IsNaN(lhs_float, &if_lhsisnan, &if_lhsnotnan);
+
+ Bind(&if_lhsisnan);
+ {
+ // Return true iff {rhs} is NaN.
+
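+      // Float64Equal(x, x) is false exactly when x is NaN, so the Select
+      // below inverts that check.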
+ Node* const result =
+ Select(Float64Equal(rhs_float, rhs_float), int_false, int_true,
+ MachineType::PointerRepresentation());
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&if_lhsnotnan);
+ {
+ Label if_floatisequal(this), if_floatnotequal(this);
+ Branch(Float64Equal(lhs_float, rhs_float), &if_floatisequal,
+ &if_floatnotequal);
+
+ Bind(&if_floatisequal);
+ {
+ // We still need to handle the case when {lhs} and {rhs} are -0.0 and
+ // 0.0 (or vice versa). Compare the high word to
+ // distinguish between the two.
+
+ Node* const lhs_hi_word = Float64ExtractHighWord32(lhs_float);
+ Node* const rhs_hi_word = Float64ExtractHighWord32(rhs_float);
+
+ // If x is +0 and y is -0, return false.
+ // If x is -0 and y is +0, return false.
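+        // The sign bit of an IEEE 754 double lives in the high 32-bit word,
+        // and +0.0 and -0.0 differ only in that bit.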
+
+ Node* const result = Word32Equal(lhs_hi_word, rhs_hi_word);
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&if_floatnotequal);
+ {
+ var_result.Bind(int_false);
+ Goto(&out);
+ }
+ }
+ }
+
+ Bind(&strict_equal);
+ {
+ Node* const is_equal = StrictEqual(kDontNegateResult, lhs, rhs, context);
+ Node* const result = WordEqual(is_equal, TrueConstant());
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::ForInFilter(compiler::Node* key,
+ compiler::Node* object,
+ compiler::Node* context) {
+ Label return_undefined(this, Label::kDeferred), return_to_name(this),
+ end(this);
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ Node* has_property =
+ HasProperty(object, key, context, Runtime::kForInHasProperty);
+
+ Branch(WordEqual(has_property, BooleanConstant(true)), &return_to_name,
+ &return_undefined);
+
+ Bind(&return_to_name);
+ {
+ var_result.Bind(ToName(context, key));
+ Goto(&end);
+ }
+
+ Bind(&return_undefined);
+ {
+ var_result.Bind(UndefinedConstant());
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::HasProperty(
+ compiler::Node* object, compiler::Node* key, compiler::Node* context,
+ Runtime::FunctionId fallback_runtime_function_id) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label call_runtime(this, Label::kDeferred), return_true(this),
+ return_false(this), end(this);
+
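+  // Both lookup helpers jump straight to {return_true} once the property or
+  // element is found on the current holder; otherwise the prototype chain
+  // walk continues with the next holder.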
+ CodeStubAssembler::LookupInHolder lookup_property_in_holder =
+ [this, &return_true](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* unique_name,
+ Label* next_holder, Label* if_bailout) {
+ TryHasOwnProperty(holder, holder_map, holder_instance_type, unique_name,
+ &return_true, next_holder, if_bailout);
+ };
+
+ CodeStubAssembler::LookupInHolder lookup_element_in_holder =
+ [this, &return_true](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* index,
+ Label* next_holder, Label* if_bailout) {
+ TryLookupElement(holder, holder_map, holder_instance_type, index,
+ &return_true, next_holder, if_bailout);
+ };
+
+ TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ lookup_element_in_holder, &return_false,
+ &call_runtime);
+
+ Variable result(this, MachineRepresentation::kTagged);
+ Bind(&return_true);
+ {
+ result.Bind(BooleanConstant(true));
+ Goto(&end);
+ }
+
+ Bind(&return_false);
+ {
+ result.Bind(BooleanConstant(false));
+ Goto(&end);
+ }
+
+ Bind(&call_runtime);
+ {
+ result.Bind(
+ CallRuntime(fallback_runtime_function_id, context, object, key));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+compiler::Node* CodeStubAssembler::Typeof(compiler::Node* value,
+ compiler::Node* context) {
+ Variable result_var(this, MachineRepresentation::kTagged);
+
+ Label return_number(this, Label::kDeferred), if_oddball(this),
+ return_function(this), return_undefined(this), return_object(this),
+ return_string(this), return_result(this);
+
+ GotoIf(TaggedIsSmi(value), &return_number);
+
+ Node* map = LoadMap(value);
+
+ GotoIf(IsHeapNumberMap(map), &return_number);
+
+ Node* instance_type = LoadMapInstanceType(map);
+
+ GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &if_oddball);
+
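+  // A map that is callable and not undetectable yields "function"; any map
+  // with the undetectable bit set (e.g. document.all) yields "undefined".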
+ Node* callable_or_undetectable_mask = Word32And(
+ LoadMapBitField(map),
+ Int32Constant(1 << Map::kIsCallable | 1 << Map::kIsUndetectable));
+
+ GotoIf(Word32Equal(callable_or_undetectable_mask,
+ Int32Constant(1 << Map::kIsCallable)),
+ &return_function);
+
+ GotoUnless(Word32Equal(callable_or_undetectable_mask, Int32Constant(0)),
+ &return_undefined);
+
+ GotoIf(IsJSReceiverInstanceType(instance_type), &return_object);
+
+ GotoIf(IsStringInstanceType(instance_type), &return_string);
+
+#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type) \
+ Label return_##type(this); \
+ Node* type##_map = HeapConstant(factory()->type##_map()); \
+ GotoIf(WordEqual(map, type##_map), &return_##type);
+ SIMD128_TYPES(SIMD128_BRANCH)
+#undef SIMD128_BRANCH
+
+ CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
+ result_var.Bind(HeapConstant(isolate()->factory()->symbol_string()));
+ Goto(&return_result);
+
+ Bind(&return_number);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->number_string()));
+ Goto(&return_result);
+ }
+
+ Bind(&if_oddball);
+ {
+ Node* type = LoadObjectField(value, Oddball::kTypeOfOffset);
+ result_var.Bind(type);
+ Goto(&return_result);
+ }
+
+ Bind(&return_function);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->function_string()));
+ Goto(&return_result);
+ }
+
+ Bind(&return_undefined);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->undefined_string()));
+ Goto(&return_result);
+ }
+
+ Bind(&return_object);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->object_string()));
+ Goto(&return_result);
+ }
+
+ Bind(&return_string);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->string_string()));
+ Goto(&return_result);
+ }
+
+#define SIMD128_BIND_RETURN(TYPE, Type, type, lane_count, lane_type) \
+ Bind(&return_##type); \
+ { \
+ result_var.Bind(HeapConstant(isolate()->factory()->type##_string())); \
+ Goto(&return_result); \
+ }
+ SIMD128_TYPES(SIMD128_BIND_RETURN)
+#undef SIMD128_BIND_RETURN
+
+ Bind(&return_result);
+ return result_var.value();
+}
+
+compiler::Node* CodeStubAssembler::InstanceOf(compiler::Node* object,
+ compiler::Node* callable,
+ compiler::Node* context) {
+ Label return_runtime(this, Label::kDeferred), end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+  // Check that no one has installed a custom @@hasInstance handler anywhere,
+  // i.e. that the @@hasInstance protector cell is still valid.
+ GotoUnless(
+ WordEqual(LoadObjectField(LoadRoot(Heap::kHasInstanceProtectorRootIndex),
+ PropertyCell::kValueOffset),
+ SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+ &return_runtime);
+
+ // Check if {callable} is a valid receiver.
+ GotoIf(TaggedIsSmi(callable), &return_runtime);
+ GotoUnless(IsCallableMap(LoadMap(callable)), &return_runtime);
+
+ // Use the inline OrdinaryHasInstance directly.
+ result.Bind(OrdinaryHasInstance(context, callable, object));
+ Goto(&end);
+
+ // TODO(bmeurer): Use GetPropertyStub here once available.
+ Bind(&return_runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kInstanceOf, context, object, callable));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+compiler::Node* CodeStubAssembler::NumberInc(compiler::Node* value) {
+ Variable var_result(this, MachineRepresentation::kTagged),
+ var_finc_value(this, MachineRepresentation::kFloat64);
+ Label if_issmi(this), if_isnotsmi(this), do_finc(this), end(this);
+ Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ Bind(&if_issmi);
+ {
+ // Try fast Smi addition first.
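+    // Smi tag bits are zero, so adding the bitcast words adds the payloads
+    // directly; the overflow projection catches results outside Smi range.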
+ Node* one = SmiConstant(Smi::FromInt(1));
+ Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(value),
+ BitcastTaggedToWord(one));
+ Node* overflow = Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_overflow(this), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+
+ Bind(&if_notoverflow);
+ var_result.Bind(Projection(0, pair));
+ Goto(&end);
+
+ Bind(&if_overflow);
+ {
+ var_finc_value.Bind(SmiToFloat64(value));
+ Goto(&do_finc);
+ }
+ }
+
+ Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value)));
+
+ // Load the HeapNumber value.
+ var_finc_value.Bind(LoadHeapNumberValue(value));
+ Goto(&do_finc);
+ }
+
+ Bind(&do_finc);
+ {
+ Node* finc_value = var_finc_value.value();
+ Node* one = Float64Constant(1.0);
+ Node* finc_result = Float64Add(finc_value, one);
+ var_result.Bind(AllocateHeapNumberWithValue(finc_result));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::CreateArrayIterator(
+ compiler::Node* array, compiler::Node* array_map,
+ compiler::Node* array_type, compiler::Node* context, IterationKind mode) {
+ int kBaseMapIndex = 0;
+ switch (mode) {
+ case IterationKind::kKeys:
+ kBaseMapIndex = Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX;
+ break;
+ case IterationKind::kValues:
+ kBaseMapIndex = Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+ break;
+ case IterationKind::kEntries:
+ kBaseMapIndex = Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
+ break;
+ }
+
+ // Fast Array iterator map index:
+ // (kBaseIndex + kFastIteratorOffset) + ElementsKind (for JSArrays)
+ // kBaseIndex + (ElementsKind - UINT8_ELEMENTS) (for JSTypedArrays)
+ const int kFastIteratorOffset =
+ Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX -
+ Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+ STATIC_ASSERT(kFastIteratorOffset ==
+ (Context::FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX -
+ Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX));
+
+ // Slow Array iterator map index: (kBaseIndex + kSlowIteratorOffset)
+ const int kSlowIteratorOffset =
+ Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX -
+ Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+ STATIC_ASSERT(kSlowIteratorOffset ==
+ (Context::GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX -
+ Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX));
+
+ // Assert: Type(array) is Object
+ CSA_ASSERT(this, IsJSReceiverInstanceType(array_type));
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Variable var_map_index(this, MachineType::PointerRepresentation());
+ Variable var_array_map(this, MachineRepresentation::kTagged);
+
+ Label return_result(this);
+ Label allocate_iterator(this);
+
+ if (mode == IterationKind::kKeys) {
+    // There are only two key iterator maps; branch depending on whether the
+    // receiver is a TypedArray.
+
+ Label if_istypedarray(this), if_isgeneric(this);
+
+ Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ &if_istypedarray, &if_isgeneric);
+
+ Bind(&if_isgeneric);
+ {
+ Label if_isfast(this), if_isslow(this);
+ BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+
+ Bind(&if_isfast);
+ {
+ var_map_index.Bind(
+ IntPtrConstant(Context::FAST_ARRAY_KEY_ITERATOR_MAP_INDEX));
+ var_array_map.Bind(array_map);
+ Goto(&allocate_iterator);
+ }
+
+ Bind(&if_isslow);
+ {
+ var_map_index.Bind(
+ IntPtrConstant(Context::GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX));
+ var_array_map.Bind(UndefinedConstant());
+ Goto(&allocate_iterator);
+ }
+ }
+
+ Bind(&if_istypedarray);
+ {
+ var_map_index.Bind(
+ IntPtrConstant(Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX));
+ var_array_map.Bind(UndefinedConstant());
+ Goto(&allocate_iterator);
+ }
+ } else {
+ Label if_istypedarray(this), if_isgeneric(this);
+ Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ &if_istypedarray, &if_isgeneric);
+
+ Bind(&if_isgeneric);
+ {
+ Label if_isfast(this), if_isslow(this);
+ BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+
+ Bind(&if_isfast);
+ {
+ Label if_ispacked(this), if_isholey(this);
+ Node* elements_kind = LoadMapElementsKind(array_map);
+ Branch(IsHoleyFastElementsKind(elements_kind), &if_isholey,
+ &if_ispacked);
+
+ Bind(&if_isholey);
+ {
+ // Fast holey JSArrays can treat the hole as undefined if the
+ // protector cell is valid, and the prototype chain is unchanged from
+            // its initial state (because the protector cell is only tracked for
+            // the initial Array and Object prototypes). Check these conditions
+            // here, and take the slow path if any of them fail.
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ GotoUnless(
+ WordEqual(
+ LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+ &if_isslow);
+
+ Node* native_context = LoadNativeContext(context);
+
+ Node* prototype = LoadMapPrototype(array_map);
+ Node* array_prototype = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ GotoUnless(WordEqual(prototype, array_prototype), &if_isslow);
+
+ Node* map = LoadMap(prototype);
+ prototype = LoadMapPrototype(map);
+ Node* object_prototype = LoadContextElement(
+ native_context, Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
+ GotoUnless(WordEqual(prototype, object_prototype), &if_isslow);
+
+ map = LoadMap(prototype);
+ prototype = LoadMapPrototype(map);
+ Branch(IsNull(prototype), &if_ispacked, &if_isslow);
+ }
+ Bind(&if_ispacked);
+ {
+ Node* map_index =
+ IntPtrAdd(IntPtrConstant(kBaseMapIndex + kFastIteratorOffset),
+ LoadMapElementsKind(array_map));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
+ map_index, IntPtrConstant(kBaseMapIndex +
+ kFastIteratorOffset)));
+ CSA_ASSERT(this, IntPtrLessThan(map_index,
+ IntPtrConstant(kBaseMapIndex +
+ kSlowIteratorOffset)));
+
+ var_map_index.Bind(map_index);
+ var_array_map.Bind(array_map);
+ Goto(&allocate_iterator);
+ }
+ }
+
+ Bind(&if_isslow);
+ {
+ Node* map_index = IntPtrAdd(IntPtrConstant(kBaseMapIndex),
+ IntPtrConstant(kSlowIteratorOffset));
+ var_map_index.Bind(map_index);
+ var_array_map.Bind(UndefinedConstant());
+ Goto(&allocate_iterator);
+ }
+ }
+
+ Bind(&if_istypedarray);
+ {
+ Node* map_index =
+ IntPtrAdd(IntPtrConstant(kBaseMapIndex - UINT8_ELEMENTS),
+ LoadMapElementsKind(array_map));
+ CSA_ASSERT(
+ this, IntPtrLessThan(map_index, IntPtrConstant(kBaseMapIndex +
+ kFastIteratorOffset)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(map_index,
+ IntPtrConstant(kBaseMapIndex)));
+ var_map_index.Bind(map_index);
+ var_array_map.Bind(UndefinedConstant());
+ Goto(&allocate_iterator);
+ }
+ }
+
+ Bind(&allocate_iterator);
+ {
+ Node* map =
+ LoadFixedArrayElement(LoadNativeContext(context), var_map_index.value(),
+ 0, CodeStubAssembler::INTPTR_PARAMETERS);
+ var_result.Bind(AllocateJSArrayIterator(array, var_array_map.value(), map));
+ Goto(&return_result);
+ }
+
+ Bind(&return_result);
+ return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::AllocateJSArrayIterator(
+ compiler::Node* array, compiler::Node* array_map, compiler::Node* map) {
+ Node* iterator = Allocate(JSArrayIterator::kSize);
+ StoreMapNoWriteBarrier(iterator, map);
+ StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(iterator,
+ JSArrayIterator::kIteratedObjectOffset, array);
+ StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
+ SmiConstant(Smi::FromInt(0)));
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSArrayIterator::kIteratedObjectMapOffset, array_map);
+ return iterator;
+}
+
+compiler::Node* CodeStubAssembler::IsDetachedBuffer(compiler::Node* buffer) {
+ CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
+
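+  // Detaching (neutering) a buffer sets the WasNeutered bit in its bit
+  // field.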
+ Node* buffer_bit_field = LoadObjectField(
+ buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
+ Node* was_neutered_mask = Int32Constant(JSArrayBuffer::WasNeutered::kMask);
+
+ return Word32NotEqual(Word32And(buffer_bit_field, was_neutered_mask),
+ Int32Constant(0));
+}
+
+CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
+ compiler::Node* argc,
+ CodeStubAssembler::ParameterMode mode)
+ : assembler_(assembler),
+ argc_(argc),
+ arguments_(nullptr),
+ fp_(assembler->LoadFramePointer()) {
+ compiler::Node* offset = assembler->ElementOffsetFromIndex(
+ argc_, FAST_ELEMENTS, mode,
+ (StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kPointerSize);
+ arguments_ = assembler_->IntPtrAddFoldConstants(fp_, offset);
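+  // {arguments_} now points at the first JS argument; the receiver sits one
+  // slot above it (see GetReceiver), further arguments at decreasing
+  // addresses (see AtIndex).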
+ if (mode == CodeStubAssembler::INTEGER_PARAMETERS) {
+ argc_ = assembler->ChangeInt32ToIntPtr(argc_);
+ } else if (mode == CodeStubAssembler::SMI_PARAMETERS) {
+ argc_ = assembler->SmiUntag(argc_);
+ }
+}
+
+compiler::Node* CodeStubArguments::GetReceiver() {
+ return assembler_->Load(MachineType::AnyTagged(), arguments_,
+ assembler_->IntPtrConstant(kPointerSize));
+}
+
+compiler::Node* CodeStubArguments::AtIndex(
+ compiler::Node* index, CodeStubAssembler::ParameterMode mode) {
+ typedef compiler::Node Node;
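+  // Arguments are laid out at decreasing addresses below {arguments_}, so
+  // negate the index to turn it into a downward byte offset.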
+ Node* negated_index = assembler_->IntPtrSubFoldConstants(
+ assembler_->IntPtrOrSmiConstant(0, mode), index);
+ Node* offset =
+ assembler_->ElementOffsetFromIndex(negated_index, FAST_ELEMENTS, mode, 0);
+ return assembler_->Load(MachineType::AnyTagged(), arguments_, offset);
+}
+
+compiler::Node* CodeStubArguments::AtIndex(int index) {
+ return AtIndex(assembler_->IntPtrConstant(index));
+}
+
+void CodeStubArguments::ForEach(const CodeStubAssembler::VariableList& vars,
+ CodeStubArguments::ForEachBodyFunction body,
+ compiler::Node* first, compiler::Node* last,
+ CodeStubAssembler::ParameterMode mode) {
+ assembler_->Comment("CodeStubArguments::ForEach");
+ DCHECK_IMPLIES(first == nullptr || last == nullptr,
+ mode == CodeStubAssembler::INTPTR_PARAMETERS);
+ if (first == nullptr) {
+ first = assembler_->IntPtrOrSmiConstant(0, mode);
+ }
+ if (last == nullptr) {
+ last = argc_;
+ }
+ compiler::Node* start = assembler_->IntPtrSubFoldConstants(
+ arguments_,
+ assembler_->ElementOffsetFromIndex(first, FAST_ELEMENTS, mode));
+ compiler::Node* end = assembler_->IntPtrSubFoldConstants(
+ arguments_,
+ assembler_->ElementOffsetFromIndex(last, FAST_ELEMENTS, mode));
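+  // Higher argument indices live at lower addresses, so the loop walks from
+  // {start} down to {end} in steps of -kPointerSize.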
+ assembler_->BuildFastLoop(
+ vars, MachineType::PointerRepresentation(), start, end,
+ [body](CodeStubAssembler* assembler, compiler::Node* current) {
+ Node* arg = assembler->Load(MachineType::AnyTagged(), current);
+ body(assembler, arg);
+ },
+ -kPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
+}
+
+void CodeStubArguments::PopAndReturn(compiler::Node* value) {
+ assembler_->PopAndReturn(
+ assembler_->IntPtrAddFoldConstants(argc_, assembler_->IntPtrConstant(1)),
+ value);
+}
+
+compiler::Node* CodeStubAssembler::IsFastElementsKind(
+ compiler::Node* elements_kind) {
+ return Uint32LessThanOrEqual(elements_kind,
+ Int32Constant(LAST_FAST_ELEMENTS_KIND));
+}
+
+compiler::Node* CodeStubAssembler::IsHoleyFastElementsKind(
+ compiler::Node* elements_kind) {
+ CSA_ASSERT(this, IsFastElementsKind(elements_kind));
+
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
+
+  // Holey fast kinds differ from their packed counterparts only in bit 0
+  // (see the STATIC_ASSERTs above), so testing that bit suffices.
+ Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
+ return Word32Equal(holey_elements, Int32Constant(1));
+}
+
} // namespace internal
} // namespace v8