Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc          35
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc        44
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc           55
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc      69
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc           46
-rw-r--r--  deps/v8/src/arm/lithium-arm.h            93
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc  163
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc   66
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h     8
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc          4
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc       190
11 files changed, 613 insertions, 160 deletions
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 50b6bce30b..993addca9e 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -313,7 +313,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element;
+ has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments or one.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -418,6 +418,8 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ bind(&entry);
__ cmp(r4, r5);
__ b(lt, &loop);
+
+ __ bind(&finish);
__ mov(sp, r7);
// Remove caller arguments and receiver from the stack, setup return value and
@@ -430,8 +432,39 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ Jump(lr);
__ bind(&has_non_smi_element);
+ // Double values are handled by the runtime.
+ __ CheckMap(
+ r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
+ __ bind(&cant_transition_map);
__ UndoAllocationInNewSpace(r3, r4);
__ b(call_generic_code);
+
+ __ bind(&not_double);
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // r3: JSArray
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ r2,
+ r9,
+ &cant_transition_map);
+ __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ RecordWriteField(r3,
+ HeapObject::kMapOffset,
+ r2,
+ r9,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ Label loop2;
+ __ sub(r7, r7, Operand(kPointerSize));
+ __ bind(&loop2);
+ __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
+ __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+ __ cmp(r4, r5);
+ __ b(lt, &loop2);
+ __ b(&finish);
}
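
What this hunk changes: when ArrayNativeCode hits a non-smi element while filling a FAST_SMI_ONLY_ELEMENTS array, heap numbers still undo the allocation and fall back to generic code (the runtime can then pick a double representation), while any other heap object transitions the array's map to FAST_ELEMENTS and resumes copying at loop2. A minimal C++ sketch of that three-way decision; the names are illustrative, not V8 declarations:

    #include <cstdio>

    // Outcomes of storing one argument into a smi-only array.
    enum class Outcome { kKeepSmiOnly, kCallRuntime, kTransitionToObject };

    struct Value { bool is_smi; bool is_heap_number; };

    // Smis keep the smi-only store; heap numbers bail to the runtime
    // (&not_double is taken only for non-numbers); any other heap object
    // forces the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS map transition.
    Outcome Classify(Value v) {
      if (v.is_smi) return Outcome::kKeepSmiOnly;
      if (v.is_heap_number) return Outcome::kCallRuntime;
      return Outcome::kTransitionToObject;
    }

    int main() {
      Value string_like{false, false};
      printf("%d\n", static_cast<int>(Classify(string_like)));  // 2: transition
    }
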
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index c65f5bdf84..62e6c80271 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -3439,6 +3439,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
+void InterruptStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
void MathPowStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope vfp3_scope(VFP3);
const Register base = r1;
@@ -3674,17 +3679,6 @@ void CEntryStub::GenerateAheadOfTime() {
}
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- __ Throw(r0);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, r0);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -3865,13 +3859,27 @@ void CEntryStub::Generate(MacroAssembler* masm) {
true);
__ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate);
+ __ mov(r0, Operand(false, RelocInfo::NONE));
+ __ mov(r2, Operand(external_caught));
+ __ str(r0, MemOperand(r2));
+
+ // Set pending exception and r0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ str(r0, MemOperand(r2));
+ // Fall through to the next label.
__ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
+ __ ThrowUncatchable(r0);
__ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
+ __ Throw(r0);
}
@@ -4912,10 +4920,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label termination_exception;
__ b(eq, &termination_exception);
- __ Throw(r0); // Expects thrown value in r0.
+ __ Throw(r0);
__ bind(&termination_exception);
- __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
+ __ ThrowUncatchable(r0);
__ bind(&failure);
// For failure and exception return null.
@@ -7059,11 +7067,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ r2, r1, r3, EMIT_REMEMBERED_SET },
{ r3, r1, r2, EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { r4, r2, r3, EMIT_REMEMBERED_SET },
+ { r3, r2, r4, EMIT_REMEMBERED_SET },
+ { r2, r3, r4, EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ r2, r3, r9, EMIT_REMEMBERED_SET },
+ { r2, r3, r9, OMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateDoubleToObject
{ r6, r2, r0, EMIT_REMEMBERED_SET },
{ r2, r6, r9, EMIT_REMEMBERED_SET },
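
The CEntryStub change inlines what used to be the OUT_OF_MEMORY branch of MacroAssembler::ThrowUncatchable: the out-of-memory label records the failure in isolate state, then deliberately falls through into the termination label. A compilable sketch of that control flow, with hypothetical stand-ins for the isolate slots the hunk writes:

    #include <cstdio>

    // Stand-ins for the two isolate slots touched by the hunk.
    static bool external_caught = true;              // kExternalCaughtExceptionAddress
    static const char* pending_exception = nullptr;  // kPendingExceptionAddress

    static void ThrowUncatchable() { puts("unwind to the top JS entry handler"); }
    static void Throw()            { puts("unwind to the nearest try handler"); }

    // throw_out_of_memory_exception falls through into
    // throw_termination_exception, which now only needs the generic
    // ThrowUncatchable(r0); throw_normal_exception stays a plain Throw.
    void HandleFailure(bool out_of_memory, bool termination) {
      if (out_of_memory) {
        external_caught = false;
        pending_exception = "Failure::OutOfMemoryException()";
        termination = true;  // fall through to the next label
      }
      if (termination) { ThrowUncatchable(); return; }
      Throw();
    }

    int main() { HandleFailure(true, false); }
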
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index ce35b97c18..506f9b2d5d 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -90,11 +90,16 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
- Label loop, entry, convert_hole, gc_required;
+ Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
- __ push(lr);
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ b(eq, &only_change_map);
+
+ __ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedArray
// r5: number of elements (smi-tagged)
@@ -117,7 +122,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
r9,
kLRHasBeenSaved,
kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
+ OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ add(r3, r6, Operand(kHeapObjectTag));
@@ -146,6 +151,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ b(&entry);
+ __ bind(&only_change_map);
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ b(&done);
+
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(lr);
@@ -194,6 +211,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
if (!vfp3_supported) __ Pop(r1, r0);
__ pop(lr);
+ __ bind(&done);
}
@@ -207,10 +225,15 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
- Label entry, loop, convert_hole, gc_required;
+ Label entry, loop, convert_hole, gc_required, only_change_map;
- __ push(lr);
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ b(eq, &only_change_map);
+
+ __ push(lr);
__ Push(r3, r2, r1, r0);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedDoubleArray
@@ -280,16 +303,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ b(lt, &loop);
__ Pop(r3, r2, r1, r0);
- // Update receiver's map.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray.
__ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
__ RecordWriteField(r2,
@@ -301,6 +314,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(lr);
+
+ __ bind(&only_change_map);
+ // Update receiver's map.
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
}
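
Both transition generators now short-circuit arrays whose backing store is the canonical empty FixedArray: only the map changes, and since nothing else is written the remembered set can be skipped (OMIT_REMEMBERED_SET). A reduced sketch of the shape of that fast path; the types and helper are illustrative, not V8's:

    // Reduced model of the only_change_map fast path.
    struct Map {};
    struct FixedArrayBase { int length; };
    struct JSObject { Map* map; FixedArrayBase* elements; };

    static FixedArrayBase empty_fixed_array{0};  // Heap::kEmptyFixedArrayRootIndex

    void TransitionElementsKind(JSObject* obj, Map* target_map) {
      if (obj->elements == &empty_fixed_array) {
        obj->map = target_map;  // map swap only; no backing store to convert
        return;
      }
      // Otherwise: allocate a new backing store, convert every element
      // (smi -> double or double -> object), then install map and store.
    }

    int main() {
      Map target;
      JSObject o{nullptr, &empty_fixed_array};
      TransitionElementsKind(&o, &target);
    }
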
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 2adddef111..b48e842be7 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -123,10 +123,8 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
- ASSERT(info_ == NULL);
- info_ = info;
- scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
SetFunctionPosition(function());
@@ -142,7 +140,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// We can optionally optimize based on counters rather than statistical
// sampling.
if (info->ShouldSelfOptimize()) {
- if (FLAG_trace_opt) {
+ if (FLAG_trace_opt_verbose) {
PrintF("[adding self-optimization header to %s]\n",
*info->function()->debug_name()->ToCString());
}
@@ -331,7 +329,8 @@ void FullCodeGenerator::ClearAccumulator() {
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
@@ -935,6 +934,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r0, null_value);
__ b(eq, &exit);
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r0, &convert);
@@ -956,48 +957,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next;
- // Preload a couple of values used in the loop.
- Register empty_fixed_array_value = r6;
- __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r7;
- __ LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- __ mov(r1, r0);
- __ bind(&next);
-
- // Check that there are no elements. Register r1 contains the
- // current JS object we've reached through the prototype chain.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ cmp(r2, empty_fixed_array_value);
- __ b(ne, &call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in r2 for the subsequent
- // prototype load.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
- __ JumpIfSmi(r3, &call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (r3). This is the case if the next enumeration
- // index field does not contain a smi.
- __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
- __ JumpIfSmi(r3, &call_runtime);
-
- // For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- __ cmp(r1, r0);
- __ b(eq, &check_prototype);
- __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(r3, empty_fixed_array_value);
- __ b(ne, &call_runtime);
-
- // Load the prototype from the map and loop if non-null.
- __ bind(&check_prototype);
- __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
- __ cmp(r1, null_value);
- __ b(ne, &next);
+ __ CheckEnumCache(null_value, &call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -1050,6 +1010,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
// Load the current count to r0, load the length to r1.
__ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
@@ -1093,7 +1054,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(result_register(), r3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->AssignmentId());
+ EmitAssignment(stmt->each());
}
// Generate code for the body of the loop.
@@ -1106,7 +1067,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
@@ -1114,6 +1075,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Drop(5);
// Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1524,7 +1486,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Smi::FromInt(0)));
__ push(r1);
VisitForStackValue(value);
- __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ mov(r0, Operand(Smi::FromInt(NONE)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
break;
}
}
@@ -1875,7 +1839,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@@ -1927,7 +1891,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
break;
}
}
- PrepareForBailoutForId(bailout_ast_id, TOS_REG);
context()->Plug(r0);
}
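
The for-in rewrite moves the enum-cache validity walk into MacroAssembler::CheckEnumCache (see that file's diff below) and brackets the loop with bailout ids so the optimizing compiler can deoptimize into it. Roughly, the fast path captures the receiver's map and cached key list once and re-validates the map on every iteration; a sketch under those assumptions, with stand-in types:

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct MapSketch { int id; };
    struct ObjSketch {
      MapSketch* map;
      std::vector<std::string> enum_cache;  // keys cached on the descriptors
    };

    void ForInFast(ObjSketch& obj) {
      MapSketch* cached_map = obj.map;            // captured at PrepareId
      const std::vector<std::string>& keys = obj.enum_cache;
      for (std::size_t i = 0; i < keys.size(); ++i) {
        if (obj.map != cached_map) {
          // The object changed during iteration; the real code filters
          // the key through the runtime instead of skipping it.
          continue;
        }
        printf("%s\n", keys[i].c_str());          // loop body
      }
    }

    int main() {
      MapSketch m{1};
      ObjSketch o{&m, {"a", "b"}};
      ForInFast(o);
    }
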
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 1111c67faf..a934aacd36 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -1125,6 +1125,11 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
}
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ return MarkAsCall(new LDeclareGlobals, instr);
+}
+
+
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LGlobalObject(context));
@@ -2088,19 +2093,18 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LFastLiteral, r0), instr);
}
-LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteralFast, r0), instr);
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
}
-LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
- HObjectLiteralGeneric* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, r0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
}
@@ -2264,4 +2268,32 @@ LInstruction* LChunkBuilder::DoIn(HIn* instr) {
}
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* object = UseFixed(instr->enumerable(), r0);
+ LForInPrepareMap* result = new LForInPrepareMap(object);
+ return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(
+ new LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseRegister(instr->index());
+ return DefineAsRegister(new LLoadFieldByIndex(object, index));
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 45043593bd..1846922dbc 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -87,11 +87,13 @@ class LCodeGen;
V(ConstantI) \
V(ConstantT) \
V(Context) \
+ V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(ElementsKind) \
+ V(FastLiteral) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@@ -134,8 +136,7 @@ class LCodeGen;
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
- V(ObjectLiteralFast) \
- V(ObjectLiteralGeneric) \
+ V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -171,7 +172,12 @@ class LCodeGen;
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
- V(ValueOf)
+ V(ValueOf) \
+ V(ForInPrepareMap) \
+ V(ForInCacheArray) \
+ V(CheckMapValue) \
+ V(LoadFieldByIndex)
+
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -1346,6 +1352,13 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
+class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
@@ -1909,24 +1922,24 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
};
-class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+ DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};
-class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
};
@@ -2056,6 +2069,62 @@ class LIn: public LTemplateInstruction<1, 2, 0> {
};
+class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInPrepareMap(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
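
For reading the new instruction classes above: the LTemplateInstruction<R, I, T> parameters count result slots, inputs, and temps, so LCheckMapValue's <0, 2, 0> means "consumes a value and a map, produces nothing". A compilable reduction of that pattern (V8 stores the operands in embedded vectors; plain arrays stand in here):

    struct LOperand {};

    // R result slots, I inputs, T temps; the ternary avoids
    // zero-length arrays.
    template <int R, int I, int T>
    struct LTemplateInstruction {
      LOperand* results_[R == 0 ? 1 : R];
      LOperand* inputs_[I == 0 ? 1 : I];
      LOperand* temps_[T == 0 ? 1 : T];
    };

    // Mirrors LCheckMapValue: two inputs, no result, no temps.
    struct CheckMapValueSketch : LTemplateInstruction<0, 2, 0> {
      CheckMapValueSketch(LOperand* value, LOperand* map) {
        inputs_[0] = value;
        inputs_[1] = map;
      }
    };

    int main() {
      LOperand value, map;
      CheckMapValueSketch check(&value, &map);
      (void)check;
    }
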
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 64ca1a37bf..8045556406 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -2873,6 +2873,16 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
}
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ __ push(cp); // The context is the first argument.
+ __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+ __ push(scratch0());
+ __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+ __ push(scratch0());
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
@@ -4370,26 +4380,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
ASSERT(!source.is(r2));
ASSERT(!result.is(r2));
+ // Only elements backing stores for non-COW arrays need to be copied.
+ Handle<FixedArrayBase> elements(object->elements());
+ bool has_elements = elements->length() > 0 &&
+ elements->map() != isolate()->heap()->fixed_cow_array_map();
+
// Increase the offset so that subsequent objects end up right after
- // this one.
- int current_offset = *offset;
- int size = object->map()->instance_size();
- *offset += size;
+ // this object and its backing store.
+ int object_offset = *offset;
+ int object_size = object->map()->instance_size();
+ int elements_offset = *offset + object_size;
+ int elements_size = has_elements ? elements->Size() : 0;
+ *offset += object_size + elements_size;
// Copy object header.
ASSERT(object->properties()->length() == 0);
- ASSERT(object->elements()->length() == 0 ||
- object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
int inobject_properties = object->map()->inobject_properties();
- int header_size = size - inobject_properties * kPointerSize;
+ int header_size = object_size - inobject_properties * kPointerSize;
for (int i = 0; i < header_size; i += kPointerSize) {
- __ ldr(r2, FieldMemOperand(source, i));
- __ str(r2, FieldMemOperand(result, current_offset + i));
+ if (has_elements && i == JSObject::kElementsOffset) {
+ __ add(r2, result, Operand(elements_offset));
+ } else {
+ __ ldr(r2, FieldMemOperand(source, i));
+ }
+ __ str(r2, FieldMemOperand(result, object_offset + i));
}
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
- int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+ int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
@@ -4405,10 +4424,41 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
__ str(r2, FieldMemOperand(result, total_offset));
}
}
+
+ // Copy elements backing store header.
+ ASSERT(!has_elements || elements->IsFixedArray());
+ if (has_elements) {
+ __ LoadHeapObject(source, elements);
+ for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+ __ ldr(r2, FieldMemOperand(source, i));
+ __ str(r2, FieldMemOperand(result, elements_offset + i));
+ }
+ }
+
+ // Copy elements backing store content.
+ ASSERT(!has_elements || elements->IsFixedArray());
+ int elements_length = has_elements ? elements->length() : 0;
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ add(r2, result, Operand(*offset));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ } else {
+ __ mov(r2, Operand(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ }
+ }
}
-void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
int size = instr->hydrogen()->total_size();
// Allocate all objects that are part of the literal in one big
@@ -4430,12 +4480,13 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
}
-void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
- __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+ // Set up the parameters to the stub/runtime call.
+ __ LoadHeapObject(r4, literals);
__ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
__ mov(r2, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements()
@@ -4444,7 +4495,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
__ mov(r1, Operand(Smi::FromInt(flags)));
__ Push(r4, r3, r2, r1);
- // Pick the right runtime function to call.
+ // Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
@@ -4799,6 +4850,88 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
}
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, ip);
+ DeoptimizeIf(eq, instr->environment());
+
+ Register null_value = r5;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ cmp(r0, null_value);
+ DeoptimizeIf(eq, instr->environment());
+
+ __ tst(r0, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment());
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
+ DeoptimizeIf(le, instr->environment());
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(null_value, &call_runtime);
+
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ b(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(r0);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+ __ cmp(r1, ip);
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ __ LoadInstanceDescriptors(map, result);
+ __ ldr(result,
+ FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ __ ldr(result,
+ FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ cmp(result, Operand(0));
+ DeoptimizeIf(eq, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ cmp(map, scratch0());
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ Label out_of_object, done;
+ __ cmp(index, Operand(0));
+ __ b(lt, &out_of_object);
+
+ STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+ __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+ __ b(&done);
+
+ __ bind(&out_of_object);
+ __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ // Index is equal to the negated out-of-object property index plus 1.
+ __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(result, FieldMemOperand(scratch,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ bind(&done);
+}
#undef __
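
DoLoadFieldByIndex encodes the field location in the sign of the index: a non-negative (smi) index selects an in-object field, while a negative index selects a slot in the out-of-object properties array, with the index equal to the negated property index plus 1. A sketch of that decoding, with containers standing in for the object layout:

    #include <cassert>
    #include <vector>

    struct ObjectSketch {
      std::vector<int> in_object;      // fields inside the JSObject
      std::vector<int> out_of_object;  // the properties backing store
    };

    int LoadFieldByIndex(const ObjectSketch& o, int index) {
      if (index >= 0) return o.in_object[index];   // JSObject::kHeaderSize + ...
      return o.out_of_object[-index - 1];          // decode the negated index
    }

    int main() {
      ObjectSketch o{{10, 11}, {20, 21}};
      assert(LoadFieldByIndex(o, 1) == 11);   // in-object field 1
      assert(LoadFieldByIndex(o, -2) == 21);  // out-of-object property 1
    }
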
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 2f0e5fa459..45dd80ffb7 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1281,8 +1281,7 @@ void MacroAssembler::Throw(Register value) {
}
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -1292,24 +1291,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in r0.
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate());
- mov(r0, Operand(false, RelocInfo::NONE));
- mov(r2, Operand(external_caught));
- str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- str(r0, MemOperand(r2));
- } else if (!value.is(r0)) {
+ if (!value.is(r0)) {
mov(r0, value);
}
-
// Drop the stack pointer to the top of the top stack handler.
mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ldr(sp, MemOperand(r3));
@@ -3680,6 +3664,52 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+ Label next;
+ // Preload a couple of values used in the loop.
+ Register empty_fixed_array_value = r6;
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = r7;
+ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ mov(r1, r0);
+ bind(&next);
+
+ // Check that there are no elements. Register r1 contains the
+ // current JS object we've reached through the prototype chain.
+ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+ cmp(r2, empty_fixed_array_value);
+ b(ne, call_runtime);
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in r2 for the subsequent
+ // prototype load.
+ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
+ JumpIfSmi(r3, call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (r3). This is the case if the next enumeration
+ // index field does not contain a smi.
+ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
+ JumpIfSmi(r3, call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ cmp(r1, r0);
+ b(eq, &check_prototype);
+ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ cmp(r3, empty_fixed_array_value);
+ b(ne, call_runtime);
+
+ // Load the prototype from the map and loop if non-null.
+ bind(&check_prototype);
+ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
+ cmp(r1, null_value);
+ b(ne, &next);
+}
+
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
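
The new CheckEnumCache walks the prototype chain and gives up (branches to call_runtime) unless every object has an empty elements array and a usable enum cache, a non-empty cache being tolerated only on the receiver itself. The same predicate, reduced to plain C++ with illustrative types:

    struct Obj {
      bool has_elements;       // elements != empty_fixed_array
      bool has_enum_cache;     // descriptors non-empty and cache present
      bool enum_cache_empty;   // bridge cache == empty_fixed_array
      const Obj* prototype;    // nullptr plays the role of null_value
    };

    bool CheckEnumCache(const Obj* receiver) {
      for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
        if (o->has_elements) return false;           // -> call_runtime
        if (!o->has_enum_cache) return false;        // -> call_runtime
        if (o != receiver && !o->enum_cache_empty) {
          return false;  // non-receiver caches must be empty
        }
      }
      return true;  // cache is valid; for-in may use it
    }

    int main() {
      Obj proto{false, true, true, nullptr};
      Obj recv{false, true, false, &proto};
      return CheckEnumCache(&recv) ? 0 : 1;
    }
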
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 45cca9042a..47afa93a6e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -588,12 +588,12 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopTryHandler();
- // Passes thrown value (in r0) to the handler of top of the try handler chain.
+ // Passes thrown value to the handler at the top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ void ThrowUncatchable(Register value);
// ---------------------------------------------------------------------------
// Inline caching support
@@ -1259,6 +1259,10 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+ // Expects object in r0 and returns map with validated enum cache
+ // in r0. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Register null_value, Label* call_runtime);
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 1ae172c008..629c209ea2 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1277,9 +1277,9 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 512 bytes to prevent overrunning the stack when
+ // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
// pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 512;
+ return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
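
For context on the constant bump: the simulator reports a stack limit slightly inside its allocated stack so that pushes issued between checks cannot overrun the allocation; this change doubles that margin to 1024 bytes. A reduced sketch of how such a limit is consulted (the class here is illustrative):

    #include <cstdint>
    #include <cstdio>

    class SimulatorSketch {
     public:
      explicit SimulatorSketch(char* stack) : stack_(stack) {}
      // Leave a safety margin of 1024 bytes above the low end of the
      // simulated stack (stacks grow downward).
      std::uintptr_t StackLimit() const {
        return reinterpret_cast<std::uintptr_t>(stack_) + 1024;
      }
      bool WouldOverflow(std::uintptr_t sp) const { return sp < StackLimit(); }
     private:
      char* stack_;  // lowest address of the simulated stack
    };

    int main() {
      static char stack[4096];
      SimulatorSketch sim(stack);
      std::uintptr_t sp = reinterpret_cast<std::uintptr_t>(stack) + 512;
      printf("%d\n", sim.WouldOverflow(sp));  // 1: inside the new margin
    }
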
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 2f2c5a838d..9a0793e12f 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -3076,7 +3076,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+ KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
__ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
@@ -4121,7 +4121,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4130,13 +4131,16 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
- Register scratch = r3;
- Register elements_reg = r4;
+ Register scratch = r4;
+ Register elements_reg = r3;
+ Register length_reg = r5;
+ Register scratch2 = r6;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4144,16 +4148,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic);
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+ }
// Check that the key is within bounds.
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
if (is_js_array) {
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
@@ -4161,10 +4162,21 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
}
// Compare smis.
__ cmp(key_reg, scratch);
- __ b(hs, &miss_force_generic);
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ b(hs, &grow);
+ } else {
+ __ b(hs, &miss_force_generic);
+ }
+ // Make sure elements is a fast element array, not 'cow'.
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ __ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4202,12 +4214,80 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element, anything else
+ // must be handled by the runtime. Flags already set by previous compare.
+ __ b(ne, &miss_force_generic);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ ldr(length_reg,
+ FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+ __ b(ne, &check_capacity);
+
+ int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
+ TAG_OBJECT);
+
+ __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+ __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+ __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+ __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
+ }
+
+ // Store the element at index zero.
+ __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
+
+ // Install the new backing store in the JSArray.
+ __ str(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+ scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ Ret();
+
+ __ bind(&check_capacity);
+ // Check for COW elements; in general they are not handled by this stub.
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedCOWArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ cmp(length_reg, scratch);
+ __ b(hs, &slow);
+
+ // Grow the array and finish the store.
+ __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
- bool is_js_array) {
+ bool is_js_array,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4217,7 +4297,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
@@ -4227,6 +4308,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = r5;
Register scratch3 = r6;
Register scratch4 = r7;
+ Register length_reg = r7;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4245,8 +4327,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
__ cmp(key_reg, scratch1);
- __ b(hs, &miss_force_generic);
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ b(hs, &grow);
+ } else {
+ __ b(hs, &miss_force_generic);
+ }
+ __ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
@@ -4267,6 +4354,73 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element, anything else
+ // must be handled by the runtime. Flags already set by previous compare.
+ __ b(ne, &miss_force_generic);
+
+ // Transition on values that can't be stored in a FixedDoubleArray.
+ Label value_is_smi;
+ __ JumpIfSmi(value_reg, &value_is_smi);
+ __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+ __ b(ne, &transition_elements_kind);
+ __ bind(&value_is_smi);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ ldr(length_reg,
+ FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+ __ b(ne, &check_capacity);
+
+ int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
+ TAG_OBJECT);
+
+ // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+ // efficiency; they are guaranteed to be initialized before use.
+ __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
+ __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+ __ mov(scratch1,
+ Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ str(scratch1,
+ FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+
+ // Install the new backing store in the JSArray.
+ __ str(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+ scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&check_capacity);
+ // Make sure that the backing store can hold additional elements.
+ __ ldr(scratch1,
+ FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+ __ cmp(length_reg, scratch1);
+ __ b(hs, &slow);
+
+ // Grow the array and finish the store.
+ __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
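
Both store stubs gain the same ALLOW_JSARRAY_GROWTH shape: a keyed store exactly one past the current length grows the array in place when capacity allows, preallocating a small backing store (JSArray::kPreallocatedArrayElements; a value of 4 is assumed below) when the array is empty, and deferring everything else to KeyedStoreIC_Slow or _Miss. A sketch of that policy, with a vector standing in for the backing store:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    constexpr std::size_t kPreallocated = 4;  // assumed kPreallocatedArrayElements

    enum class Result { kStored, kGrown, kRuntime };

    struct ArraySketch {
      std::size_t length = 0;     // JSArray length
      std::vector<int> elements;  // backing store; size() is its capacity
    };

    Result KeyedStore(ArraySketch& a, std::size_t key, int value) {
      if (key < a.length) {            // in bounds: finish_store
        a.elements[key] = value;
        return Result::kStored;
      }
      if (key != a.length) return Result::kRuntime;  // growing by >1: miss
      if (a.elements.empty()) {
        a.elements.resize(kPreallocated);  // preallocate small backing store
      } else if (a.length == a.elements.size()) {
        return Result::kRuntime;       // at capacity: slow stub reallocates
      }
      a.elements[key] = value;
      a.length++;                      // grow by exactly one element
      return Result::kGrown;
    }

    int main() {
      ArraySketch a;
      assert(KeyedStore(a, 0, 7) == Result::kGrown);    // empty -> preallocate
      assert(KeyedStore(a, 1, 8) == Result::kGrown);
      assert(KeyedStore(a, 5, 9) == Result::kRuntime);  // hole: runtime path
    }
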