Diffstat (limited to 'chromium/v8/src/arm')
-rw-r--r--  chromium/v8/src/arm/builtins-arm.cc              |  20
-rw-r--r--  chromium/v8/src/arm/code-stubs-arm.cc            |  48
-rw-r--r--  chromium/v8/src/arm/codegen-arm.cc               |   4
-rw-r--r--  chromium/v8/src/arm/debug-arm.cc                 |   6
-rw-r--r--  chromium/v8/src/arm/full-codegen-arm.cc          |  43
-rw-r--r--  chromium/v8/src/arm/lithium-arm.cc               | 149
-rw-r--r--  chromium/v8/src/arm/lithium-arm.h                | 106
-rw-r--r--  chromium/v8/src/arm/lithium-codegen-arm.cc       | 395
-rw-r--r--  chromium/v8/src/arm/lithium-codegen-arm.h        |  36
-rw-r--r--  chromium/v8/src/arm/lithium-gap-resolver-arm.cc  |   2
-rw-r--r--  chromium/v8/src/arm/macro-assembler-arm.cc       |  70
-rw-r--r--  chromium/v8/src/arm/macro-assembler-arm.h        |   8
12 files changed, 498 insertions(+), 389 deletions(-)
diff --git a/chromium/v8/src/arm/builtins-arm.cc b/chromium/v8/src/arm/builtins-arm.cc
index 5f3a999f561..eff47e2692b 100644
--- a/chromium/v8/src/arm/builtins-arm.cc
+++ b/chromium/v8/src/arm/builtins-arm.cc
@@ -119,9 +119,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
     // Initial map for the builtin InternalArray functions should be maps.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
     __ SmiTst(r2);
-    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+    __ Assert(ne, "Unexpected initial map for InternalArray function");
     __ CompareObjectType(r2, r3, r4, MAP_TYPE);
-    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+    __ Assert(eq, "Unexpected initial map for InternalArray function");
   }

   // Run the native code for the InternalArray function called as a normal
@@ -147,9 +147,9 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
     // Initial map for the builtin Array functions should be maps.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
     __ SmiTst(r2);
-    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(ne, "Unexpected initial map for Array function");
     __ CompareObjectType(r2, r3, r4, MAP_TYPE);
-    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(eq, "Unexpected initial map for Array function");
   }

   // Run the native code for the Array function called as a normal function.
@@ -178,7 +178,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r2);
     __ cmp(function, Operand(r2));
-    __ Assert(eq, kUnexpectedStringFunction);
+    __ Assert(eq, "Unexpected String function");
   }

   // Load the first arguments in r0 and get rid of the rest.
@@ -224,10 +224,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     __ ldrb(r4, FieldMemOperand(map, Map::kInstanceSizeOffset));
     __ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
-    __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
+    __ Assert(eq, "Unexpected string wrapper instance size");
     __ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
     __ cmp(r4, Operand::Zero());
-    __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+    __ Assert(eq, "Unexpected unused properties of string wrapper");
   }

   __ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -471,7 +471,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         // r0: offset of first field after pre-allocated fields
         if (FLAG_debug_code) {
           __ cmp(r0, r6);
-          __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+          __ Assert(le, "Unexpected number of pre-allocated property fields.");
         }
         __ InitializeFieldsWithFiller(r5, r0, r7);
         // To allow for truncation.
@@ -503,7 +503,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       // Done if no extra properties are to be allocated.
       __ b(eq, &allocated);
-      __ Assert(pl, kPropertyAllocationCountFailed);
+      __ Assert(pl, "Property allocation count failed.");

       // Scale the number of elements by pointer size and add the header for
       // FixedArrays to the start of the next object calculation from above.
@@ -547,7 +547,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       } else if (FLAG_debug_code) {
         __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
         __ cmp(r7, r8);
-        __ Assert(eq, kUndefinedValueNotLoaded);
+        __ Assert(eq, "Undefined value not loaded.");
       }
       __ b(&entry);
       __ bind(&loop);
diff --git a/chromium/v8/src/arm/code-stubs-arm.cc b/chromium/v8/src/arm/code-stubs-arm.cc
index 98a835fd1a5..ba98b963153 100644
--- a/chromium/v8/src/arm/code-stubs-arm.cc
+++ b/chromium/v8/src/arm/code-stubs-arm.cc
@@ -246,6 +246,17 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
 }


+void UnaryOpStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
 void StoreGlobalStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -509,8 +520,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   Label after_sentinel;
   __ JumpIfNotSmi(r3, &after_sentinel);
   if (FLAG_debug_code) {
+    const char* message = "Expected 0 as a Smi sentinel";
     __ cmp(r3, Operand::Zero());
-    __ Assert(eq, kExpected0AsASmiSentinel);
+    __ Assert(eq, message);
   }
   __ ldr(r3, GlobalObjectOperand());
   __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
@@ -3905,9 +3917,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
   if (FLAG_debug_code) {
     __ SmiTst(regexp_data);
-    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+    __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
     __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
-    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+    __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
   }

   // regexp_data: RegExp data (FixedArray)
@@ -4249,7 +4261,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
     // Assert that we do not have a cons or slice (indirect strings) here.
     // Sequential strings have already been ruled out.
     __ tst(r0, Operand(kIsIndirectStringMask));
-    __ Assert(eq, kExternalStringExpectedButNotFound);
+    __ Assert(eq, "external string expected, but not found");
   }
   __ ldr(subject,
          FieldMemOperand(subject, ExternalString::kResourceDataOffset));
@@ -4631,7 +4643,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
 void StringCharCodeAtGenerator::GenerateSlow(
     MacroAssembler* masm,
     const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+  __ Abort("Unexpected fallthrough to CharCodeAt slow case");

   // Index is not a smi.
   __ bind(&index_not_smi_);
@@ -4676,7 +4688,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
   call_helper.AfterCall(masm);
   __ jmp(&exit_);

-  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
 }


@@ -4706,7 +4718,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
 void StringCharFromCodeGenerator::GenerateSlow(
     MacroAssembler* masm,
     const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+  __ Abort("Unexpected fallthrough to CharFromCode slow case");

   __ bind(&slow_case_);
   call_helper.BeforeCall(masm);
@@ -4716,7 +4728,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
   call_helper.AfterCall(masm);
   __ jmp(&exit_);

-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+  __ Abort("Unexpected fallthrough from CharFromCode slow case");
 }


@@ -4773,7 +4785,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
     // Check that destination is actually word aligned if the flag says
     // that it is.
     __ tst(dest, Operand(kPointerAlignmentMask));
-    __ Check(eq, kDestinationOfCopyNotAligned);
+    __ Check(eq, "Destination of copy not aligned.");
   }

   const int kReadAlignment = 4;
@@ -5002,7 +5014,7 @@ void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
     if (FLAG_debug_code) {
       __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
       __ cmp(ip, candidate);
-      __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole);
+      __ Assert(eq, "oddball in string table is not undefined or the hole");
     }
     __ jmp(&next_probe[i]);
@@ -6900,7 +6912,7 @@ static void CreateArrayDispatch(MacroAssembler* masm) {
   }

   // If we reached this point there is a problem.
-  __ Abort(kUnexpectedElementsKindInArrayConstructor);
+  __ Abort("Unexpected ElementsKind in array constructor");
 }


@@ -6957,7 +6969,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
   }

   // If we reached this point there is a problem.
-  __ Abort(kUnexpectedElementsKindInArrayConstructor);
+  __ Abort("Unexpected ElementsKind in array constructor");
 }


@@ -7018,9 +7030,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
     __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
     __ tst(r3, Operand(kSmiTagMask));
-    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(ne, "Unexpected initial map for Array function");
     __ CompareObjectType(r3, r3, r4, MAP_TYPE);
-    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(eq, "Unexpected initial map for Array function");

     // We should either have undefined in ebx or a valid cell
     Label okay_here;
@@ -7029,7 +7041,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
     __ b(eq, &okay_here);
     __ ldr(r3, FieldMemOperand(r2, 0));
     __ cmp(r3, Operand(cell_map));
-    __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
+    __ Assert(eq, "Expected property cell in register ebx");
     __ bind(&okay_here);
   }

@@ -7132,9 +7144,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
     __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
     __ tst(r3, Operand(kSmiTagMask));
-    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(ne, "Unexpected initial map for Array function");
     __ CompareObjectType(r3, r3, r4, MAP_TYPE);
-    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(eq, "Unexpected initial map for Array function");
   }

   // Figure out the right elements kind
@@ -7151,7 +7163,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
     __ b(eq, &done);
     __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
     __ Assert(eq,
-              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+              "Invalid ElementsKind for InternalArray or InternalPackedArray");
     __ bind(&done);
   }
diff --git a/chromium/v8/src/arm/codegen-arm.cc b/chromium/v8/src/arm/codegen-arm.cc
index 1bcf3e3a605..7559373ee9a 100644
--- a/chromium/v8/src/arm/codegen-arm.cc
+++ b/chromium/v8/src/arm/codegen-arm.cc
@@ -532,7 +532,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
       __ SmiTag(r9);
       __ orr(r9, r9, Operand(1));
       __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
-      __ Assert(eq, kObjectFoundInSmiOnlyArray);
+      __ Assert(eq, "object found in smi-only array");
     }
     __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
@@ -728,7 +728,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
     // Assert that we do not have a cons or slice (indirect strings) here.
     // Sequential strings have already been ruled out.
     __ tst(result, Operand(kIsIndirectStringMask));
-    __ Assert(eq, kExternalStringExpectedButNotFound);
+    __ Assert(eq, "external string expected, but not found");
   }

   // Rule out short external strings.
   STATIC_CHECK(kShortExternalStringTag != 0);
diff --git a/chromium/v8/src/arm/debug-arm.cc b/chromium/v8/src/arm/debug-arm.cc
index 108435f0a9f..7faea08034b 100644
--- a/chromium/v8/src/arm/debug-arm.cc
+++ b/chromium/v8/src/arm/debug-arm.cc
@@ -130,7 +130,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
       if ((non_object_regs & (1 << r)) != 0) {
         if (FLAG_debug_code) {
           __ tst(reg, Operand(0xc0000000));
-          __ Assert(eq, kUnableToEncodeValueAsSmi);
+          __ Assert(eq, "Unable to encode value as smi");
         }
         __ SmiTag(reg);
       }
@@ -313,12 +313,12 @@ void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {


 void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
-  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
+  masm->Abort("LiveEdit frame dropping is not supported on arm");
 }


 void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
+  masm->Abort("LiveEdit frame dropping is not supported on arm");
 }

 const bool Debug::kFrameDropperSupported = false;
diff --git a/chromium/v8/src/arm/full-codegen-arm.cc b/chromium/v8/src/arm/full-codegen-arm.cc
index b73006a17d9..ea7b73f2fe9 100644
--- a/chromium/v8/src/arm/full-codegen-arm.cc
+++ b/chromium/v8/src/arm/full-codegen-arm.cc
@@ -786,9 +786,9 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
     // Check that we're not inside a with or catch context.
     __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
     __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
-    __ Check(ne, kDeclarationInWithContext);
+    __ Check(ne, "Declaration in with context.");
     __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
-    __ Check(ne, kDeclarationInCatchContext);
+    __ Check(ne, "Declaration in catch context.");
   }
 }


@@ -2512,7 +2512,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
       // Check for an uninitialized let binding.
       __ ldr(r2, location);
       __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
-      __ Check(eq, kLetBindingReInitialization);
+      __ Check(eq, "Let binding re-initialization.");
     }
     // Perform the assignment.
     __ str(r0, location);
@@ -3473,23 +3473,23 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
                                                   Register value,
                                                   uint32_t encoding_mask) {
   __ SmiTst(index);
-  __ Check(eq, kNonSmiIndex);
+  __ Check(eq, "Non-smi index");
   __ SmiTst(value);
-  __ Check(eq, kNonSmiValue);
+  __ Check(eq, "Non-smi value");

   __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
   __ cmp(index, ip);
-  __ Check(lt, kIndexIsTooLarge);
+  __ Check(lt, "Index is too large");

   __ cmp(index, Operand(Smi::FromInt(0)));
-  __ Check(ge, kIndexIsNegative);
+  __ Check(ge, "Index is negative");

   __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
   __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));

   __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
   __ cmp(ip, Operand(encoding_mask));
-  __ Check(eq, kUnexpectedStringType);
+  __ Check(eq, "Unexpected string type");
 }


@@ -3849,7 +3849,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   Handle<FixedArray> jsfunction_result_caches(
       isolate()->native_context()->jsfunction_result_caches());
   if (jsfunction_result_caches->length() <= cache_id) {
-    __ Abort(kAttemptToUseUndefinedCache);
+    __ Abort("Attempt to use undefined cache.");
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
     context()->Plug(r0);
     return;
@@ -4030,7 +4030,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   //   elements_end: Array end.
   if (generate_debug_code_) {
     __ cmp(array_length, Operand::Zero());
-    __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+    __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
   }
   __ bind(&loop);
   __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
@@ -4349,12 +4349,35 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
       break;
     }

+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
+      break;
+
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
+      break;
+
     default:
       UNREACHABLE();
   }
 }


+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+  Comment cmt(masm_, comment);
+  UnaryOpStub stub(expr->op());
+  // UnaryOpStub expects the argument to be in the
+  // accumulator register r0.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
+         expr->UnaryOperationFeedbackId());
+  context()->Plug(r0);
+}
+
+
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
diff --git a/chromium/v8/src/arm/lithium-arm.cc b/chromium/v8/src/arm/lithium-arm.cc
index 998b73b62e9..e9ae04a1ee8 100644
--- a/chromium/v8/src/arm/lithium-arm.cc
+++ b/chromium/v8/src/arm/lithium-arm.cc
@@ -437,7 +437,7 @@ LPlatformChunk* LChunkBuilder::Build() {
 }


-void LChunkBuilder::Abort(BailoutReason reason) {
+void LChunkBuilder::Abort(const char* reason) {
   info()->set_bailout_reason(reason);
   status_ = ABORTED;
 }
@@ -593,10 +593,8 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
   int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
   instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator,
-                                           &objects_to_materialize));
+                                           &argument_index_accumulator));
   return instr;
 }
@@ -647,7 +645,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
       new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
   int vreg = allocator_->GetVirtualRegister();
   if (!allocator_->AllocationOk()) {
-    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    Abort("Out of virtual registers while trying to allocate temp register.");
     vreg = 0;
   }
   operand->set_virtual_register(vreg);
@@ -885,7 +883,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
   }
 #endif

-  instr->set_position(position_);
   if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
     instr = AssignPointerMap(instr);
   }
@@ -901,13 +898,11 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {

 LEnvironment* LChunkBuilder::CreateEnvironment(
     HEnvironment* hydrogen_env,
-    int* argument_index_accumulator,
-    ZoneList<HValue*>* objects_to_materialize) {
+    int* argument_index_accumulator) {
   if (hydrogen_env == NULL) return NULL;

-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
-                                          argument_index_accumulator,
-                                          objects_to_materialize);
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   BailoutId ast_id = hydrogen_env->ast_id();
   ASSERT(!ast_id.IsNone() || hydrogen_env->frame_type() != JS_FUNCTION);
@@ -922,16 +917,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
       outer,
       hydrogen_env->entry(),
       zone());
+  bool needs_arguments_object_materialization = false;
   int argument_index = *argument_index_accumulator;
-  int object_index = objects_to_materialize->length();
   for (int i = 0; i < hydrogen_env->length(); ++i) {
     if (hydrogen_env->is_special_index(i)) continue;

-    LOperand* op;
     HValue* value = hydrogen_env->values()->at(i);
-    if (value->IsArgumentsObject() || value->IsCapturedObject()) {
-      objects_to_materialize->Add(value, zone());
-      op = LEnvironment::materialization_marker();
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      needs_arguments_object_materialization = true;
+      op = NULL;
     } else if (value->IsPushArgument()) {
       op = new(zone()) LArgument(argument_index++);
     } else {
@@ -942,33 +937,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
         value->CheckFlag(HInstruction::kUint32));
   }

-  for (int i = object_index; i < objects_to_materialize->length(); ++i) {
-    HValue* object_to_materialize = objects_to_materialize->at(i);
-    int previously_materialized_object = -1;
-    for (int prev = 0; prev < i; ++prev) {
-      if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
-        previously_materialized_object = prev;
-        break;
-      }
-    }
-    int length = object_to_materialize->OperandCount();
-    bool is_arguments = object_to_materialize->IsArgumentsObject();
-    if (previously_materialized_object >= 0) {
-      result->AddDuplicateObject(previously_materialized_object);
-      continue;
-    } else {
-      result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
-    }
-    for (int i = is_arguments ? 1 : 0; i < length; ++i) {
-      LOperand* op;
-      HValue* value = object_to_materialize->OperandAt(i);
-      if (value->IsArgumentsObject() || value->IsCapturedObject()) {
-        objects_to_materialize->Add(value, zone());
-        op = LEnvironment::materialization_marker();
-      } else {
-        ASSERT(!value->IsPushArgument());
-        op = UseAny(value);
-      }
+  if (needs_arguments_object_materialization) {
+    HArgumentsObject* arguments = hydrogen_env->entry() == NULL
+        ? graph()->GetArgumentsObject()
+        : hydrogen_env->entry()->arguments_object();
+    ASSERT(arguments->IsLinked());
+    for (int i = 1; i < arguments->arguments_count(); ++i) {
+      HValue* value = arguments->arguments_values()->at(i);
+      ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
+      LOperand* op = UseAny(value);
       result->AddValue(op,
                        value->representation(),
                        value->CheckFlag(HInstruction::kUint32));
@@ -1348,6 +1325,15 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
 }


+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  if (instr->HasNoUses()) return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LBitNotI(value));
+}
+
+
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
@@ -1709,8 +1695,9 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
     HCompareNumericAndBranch* instr) {
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(r));
-    ASSERT(instr->right()->representation().Equals(r));
+    ASSERT(instr->left()->representation().IsSmiOrInteger32());
+    ASSERT(instr->left()->representation().Equals(
+        instr->right()->representation()));
     LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseRegisterOrConstantAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1733,13 +1720,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
 }


-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
-    HCompareHoleAndBranch* instr) {
-  LOperand* object = UseRegisterAtStart(instr->object());
-  return new(zone()) LCmpHoleAndBranch(object);
-}
-
-
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1852,6 +1832,17 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
 }


+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
+    HInductionVariableAnnotation* instr) {
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
   LOperand* value = UseRegisterOrConstantAtStart(instr->index());
   LOperand* length = UseRegister(instr->length());
@@ -2025,6 +2016,19 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
 }


+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  LUnallocated* temp1 = NULL;
+  LOperand* temp2 = NULL;
+  if (!instr->CanOmitPrototypeChecks()) {
+    temp1 = TempRegister();
+    temp2 = TempRegister();
+  }
+  LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+  if (instr->CanOmitPrototypeChecks()) return result;
+  return AssignEnvironment(result);
+}
+
+
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -2033,16 +2037,10 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {

 LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
   LOperand* value = NULL;
-  if (!instr->CanOmitMapChecks()) {
-    value = UseRegisterAtStart(instr->value());
-    if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
-  }
-  LCheckMaps* result = new(zone()) LCheckMaps(value);
-  if (!instr->CanOmitMapChecks()) {
-    AssignEnvironment(result);
-    if (instr->has_migration_target()) return AssignPointerMap(result);
-  }
-  return result;
+  if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckMaps(value);
+  if (instr->CanOmitMapChecks()) return result;
+  return AssignEnvironment(result);
 }
@@ -2153,6 +2151,23 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
 }


+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+    HLoadNamedFieldPolymorphic* instr) {
+  ASSERT(instr->representation().IsTagged());
+  if (instr->need_generic()) {
+    LOperand* obj = UseFixed(instr->object(), r0);
+    LLoadNamedFieldPolymorphic* result =
+        new(zone()) LLoadNamedFieldPolymorphic(obj);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  } else {
+    LOperand* obj = UseRegisterAtStart(instr->object());
+    LLoadNamedFieldPolymorphic* result =
+        new(zone()) LLoadNamedFieldPolymorphic(obj);
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object = UseFixed(instr->object(), r0);
   LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
@@ -2307,7 +2322,7 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(

 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool is_in_object = instr->access().IsInobject();
   bool needs_write_barrier = instr->NeedsWriteBarrier();
-  bool needs_write_barrier_for_map = instr->has_transition() &&
+  bool needs_write_barrier_for_map = !instr->transition().is_null() &&
       instr->NeedsWriteBarrierForMap();

   LOperand* obj;
@@ -2427,7 +2442,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {

 LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
   int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
   if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
-    Abort(kTooManySpillSlotsNeededForOSR);
+    Abort("Too many spill slots needed for OSR");
     spill_index = 0;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
@@ -2449,12 +2464,6 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
 }


-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
-  // There are no real uses of a captured object.
-  return NULL;
-}
-
-
 LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
   info()->MarkAsRequiresFrame();
   LOperand* args = UseRegister(instr->arguments());
diff --git a/chromium/v8/src/arm/lithium-arm.h b/chromium/v8/src/arm/lithium-arm.h
index d81dc0f57cd..c568ad6f95e 100644
--- a/chromium/v8/src/arm/lithium-arm.h
+++ b/chromium/v8/src/arm/lithium-arm.h
@@ -50,6 +50,7 @@ class LCodeGen;
   V(ArithmeticD)                                \
   V(ArithmeticT)                                \
   V(BitI)                                       \
+  V(BitNotI)                                    \
   V(BoundsCheck)                                \
   V(Branch)                                     \
   V(CallConstantFunction)                       \
@@ -67,6 +68,7 @@ class LCodeGen;
   V(CheckNonSmi)                                \
   V(CheckMaps)                                  \
   V(CheckMapValue)                              \
+  V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
   V(ClampDToUint8)                              \
   V(ClampIToUint8)                              \
@@ -74,7 +76,6 @@ class LCodeGen;
   V(ClassOfTestAndBranch)                       \
   V(CompareNumericAndBranch)                    \
   V(CmpObjectEqAndBranch)                       \
-  V(CmpHoleAndBranch)                           \
   V(CmpMapAndBranch)                            \
   V(CmpT)                                       \
   V(ConstantD)                                  \
@@ -127,6 +128,7 @@ class LCodeGen;
   V(LoadKeyed)                                  \
   V(LoadKeyedGeneric)                           \
   V(LoadNamedField)                             \
+  V(LoadNamedFieldPolymorphic)                  \
   V(LoadNamedGeneric)                           \
   V(MapEnumLength)                              \
   V(MathAbs)                                    \
@@ -208,12 +210,9 @@ class LCodeGen;
 class LInstruction: public ZoneObject {
  public:
   LInstruction()
-      : environment_(NULL),
-        hydrogen_value_(NULL),
-        bit_field_(IsCallBits::encode(false)) {
-    set_position(RelocInfo::kNoPosition);
-  }
-
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        is_call_(false) { }
   virtual ~LInstruction() { }

   virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -252,30 +251,20 @@ class LInstruction: public ZoneObject {
   LPointerMap* pointer_map() const { return pointer_map_.get(); }
   bool HasPointerMap() const { return pointer_map_.is_set(); }

-  // The 31 bits PositionBits is used to store the int position value. And the
-  // position value may be RelocInfo::kNoPosition (-1). The accessor always
-  // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
-  // and can fit into the 31 bits PositionBits.
-  void set_position(int pos) {
-    bit_field_ = PositionBits::update(bit_field_, pos + 1);
-  }
-  int position() { return PositionBits::decode(bit_field_) - 1; }
-
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }

   virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }

-  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
-  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+  void MarkAsCall() { is_call_ = true; }

   // Interface to the register allocator and iterators.
-  bool ClobbersTemps() const { return IsCall(); }
-  bool ClobbersRegisters() const { return IsCall(); }
-  bool ClobbersDoubleRegisters() const { return IsCall(); }
+  bool ClobbersTemps() const { return is_call_; }
+  bool ClobbersRegisters() const { return is_call_; }
+  bool ClobbersDoubleRegisters() const { return is_call_; }

   // Interface to the register allocator and iterators.
-  bool IsMarkedAsCall() const { return IsCall(); }
+  bool IsMarkedAsCall() const { return is_call_; }

   virtual bool HasResult() const = 0;
   virtual LOperand* result() const = 0;
@@ -299,13 +288,10 @@ class LInstruction: public ZoneObject {
   virtual int TempCount() = 0;
   virtual LOperand* TempAt(int i) = 0;

-  class IsCallBits: public BitField<bool, 0, 1> {};
-  class PositionBits: public BitField<int, 1, 31> {};
-
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
-  int bit_field_;
+  bool is_call_;
 };
@@ -895,24 +881,12 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }

-  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+                               "cmp-object-eq-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
 };


-class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
- public:
-  explicit LCmpHoleAndBranch(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
 class LIsObjectAndBranch: public LControlInstruction<1, 1> {
  public:
   LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -1404,6 +1378,18 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
 };


+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LBitNotI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
 class LAddI: public LTemplateInstruction<1, 2, 0> {
  public:
   LAddI(LOperand* left, LOperand* right) {
@@ -1539,6 +1525,19 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
 };


+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+};
+
+
 class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadNamedGeneric(LOperand* object) {
@@ -2151,7 +2150,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {

   virtual void PrintDataTo(StringStream* stream);

-  Handle<Map> transition() const { return hydrogen()->transition_map(); }
+  Handle<Map> transition() const { return hydrogen()->transition(); }
   Representation representation() const {
     return hydrogen()->field_representation();
   }
@@ -2353,6 +2352,26 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
 };


+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+ public:
+  LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+  ZoneList<Handle<JSObject> >* prototypes() const {
+    return hydrogen()->prototypes();
+  }
+  ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
+};
+
+
 class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCheckSmi(LOperand* value) {
@@ -2651,7 +2670,7 @@ class LChunkBuilder BASE_EMBEDDED {
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }

-  void Abort(BailoutReason reason);
+  void Abort(const char* reason);

   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
@@ -2733,8 +2752,7 @@ class LChunkBuilder BASE_EMBEDDED {
       CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);

   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
-                                  int* argument_index_accumulator,
-                                  ZoneList<HValue*>* objects_to_materialize);
+                                  int* argument_index_accumulator);

   void VisitInstruction(HInstruction* current);
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.cc b/chromium/v8/src/arm/lithium-codegen-arm.cc
index 9ec80f819a0..cf1e7c70f5f 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.cc
+++ b/chromium/v8/src/arm/lithium-codegen-arm.cc
@@ -91,7 +91,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
 }


-void LCodeGen::Abort(BailoutReason reason) {
+void LCodeGen::Abort(const char* reason) {
   info()->set_bailout_reason(reason);
   status_ = ABORTED;
 }
@@ -274,8 +274,6 @@ bool LCodeGen::GenerateBody() {
                instr->Mnemonic());
     }

-    RecordAndUpdatePosition(instr->position());
-
     instr->CompileToNative(this);
   }
   EnsureSpaceForLazyDeopt();
@@ -289,10 +287,6 @@ bool LCodeGen::GenerateDeferredCode() {
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
-
-      int pos = instructions_->at(code->instruction_index())->position();
-      RecordAndUpdatePosition(pos);
-
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
              code->instruction_index(),
@@ -340,7 +334,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
   // 32bit data after it.
   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
       deopt_jump_table_.length() * 7)) {
-    Abort(kGeneratedCodeIsTooLarge);
+    Abort("Generated code is too large");
   }

   if (deopt_jump_table_.length() > 0) {
@@ -429,7 +423,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
       ASSERT(literal->IsNumber());
       __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
     } else if (r.IsDouble()) {
-      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+      Abort("EmitLoadRegister: Unsupported double immediate.");
     } else {
       ASSERT(r.IsTagged());
       __ LoadObject(scratch, literal);
@@ -467,9 +461,9 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
       __ vcvt_f64_s32(dbl_scratch, flt_scratch);
       return dbl_scratch;
     } else if (r.IsDouble()) {
-      Abort(kUnsupportedDoubleImmediate);
+      Abort("unsupported double immediate");
    } else if (r.IsTagged()) {
-      Abort(kUnsupportedTaggedImmediate);
+      Abort("unsupported tagged immediate");
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
@@ -540,14 +534,14 @@ Operand LCodeGen::ToOperand(LOperand* op) {
     ASSERT(constant->HasInteger32Value());
     return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
-     Abort(kToOperandUnsupportedDoubleImmediate);
+     Abort("ToOperand Unsupported double immediate.");
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle());
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
-   Abort(kToOperandIsDoubleRegisterUnimplemented);
+   Abort("ToOperand IsDoubleRegister unimplemented");
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
@@ -611,57 +605,37 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, break; } - int object_index = 0; - int dematerialized_index = 0; for (int i = 0; i < translation_size; ++i) { LOperand* value = environment->values()->at(i); - AddToTranslation(environment, - translation, + + // TODO(mstarzinger): Introduce marker operands to indicate that this value + // is not present and must be reconstructed from the deoptimizer. Currently + // this is only used for the arguments object. + if (value == NULL) { + int arguments_count = environment->values()->length() - translation_size; + translation->BeginArgumentsObject(arguments_count); + for (int i = 0; i < arguments_count; ++i) { + LOperand* value = environment->values()->at(translation_size + i); + AddToTranslation(translation, + value, + environment->HasTaggedValueAt(translation_size + i), + environment->HasUint32ValueAt(translation_size + i)); + } + continue; + } + + AddToTranslation(translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i), - &object_index, - &dematerialized_index); + environment->HasUint32ValueAt(i)); } } -void LCodeGen::AddToTranslation(LEnvironment* environment, - Translation* translation, +void LCodeGen::AddToTranslation(Translation* translation, LOperand* op, bool is_tagged, - bool is_uint32, - int* object_index_pointer, - int* dematerialized_index_pointer) { - if (op == LEnvironment::materialization_marker()) { - int object_index = (*object_index_pointer)++; - if (environment->ObjectIsDuplicateAt(object_index)) { - int dupe_of = environment->ObjectDuplicateOfAt(object_index); - translation->DuplicateObject(dupe_of); - return; - } - int object_length = environment->ObjectLengthAt(object_index); - if (environment->ObjectIsArgumentsAt(object_index)) { - translation->BeginArgumentsObject(object_length); - } else { - translation->BeginCapturedObject(object_length); - } - int dematerialized_index = *dematerialized_index_pointer; - int env_offset = environment->translation_size() + dematerialized_index; - *dematerialized_index_pointer += object_length; - for (int i = 0; i < object_length; ++i) { - LOperand* value = environment->values()->at(env_offset + i); - AddToTranslation(environment, - translation, - value, - environment->HasTaggedValueAt(env_offset + i), - environment->HasUint32ValueAt(env_offset + i), - object_index_pointer, - dematerialized_index_pointer); - } - return; - } - + bool is_uint32) { if (op->IsStackSlot()) { if (is_tagged) { translation->StoreStackSlot(op->index()); @@ -788,7 +762,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, } -void LCodeGen::DeoptimizeIf(Condition condition, +void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment, Deoptimizer::BailoutType bailout_type) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); @@ -798,7 +772,7 @@ void LCodeGen::DeoptimizeIf(Condition condition, Address entry = Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { - Abort(kBailoutWasNotPrepared); + Abort("bailout was not prepared"); return; } @@ -811,12 +785,12 @@ void LCodeGen::DeoptimizeIf(Condition condition, return; } - if (info()->ShouldTrapOnDeopt()) { - __ stop("trap_on_deopt", condition); + if (FLAG_trap_on_deopt && info()->IsOptimizing()) { + __ stop("trap_on_deopt", cc); } ASSERT(info()->IsStub() || frame_is_built_); - if (condition == al && frame_is_built_) { + if (cc == al && frame_is_built_) { __ Call(entry, RelocInfo::RUNTIME_ENTRY); } else { // We 
often have several deopts to the same entry, reuse the last @@ -830,17 +804,17 @@ void LCodeGen::DeoptimizeIf(Condition condition, !frame_is_built_); deopt_jump_table_.Add(table_entry, zone()); } - __ b(condition, &deopt_jump_table_.last().label); + __ b(cc, &deopt_jump_table_.last().label); } } -void LCodeGen::DeoptimizeIf(Condition condition, +void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { Deoptimizer::BailoutType bailout_type = info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; - DeoptimizeIf(condition, environment, bailout_type); + DeoptimizeIf(cc, environment, bailout_type); } @@ -1003,14 +977,6 @@ void LCodeGen::RecordPosition(int position) { } -void LCodeGen::RecordAndUpdatePosition(int position) { - if (position >= 0 && position != old_position_) { - masm()->positions_recorder()->RecordPosition(position); - old_position_ = position; - } -} - - static const char* LabelType(LLabel* label) { if (label->is_loop_header()) return " (loop header)"; if (label->is_osr_entry()) return " (OSR entry)"; @@ -1703,11 +1669,7 @@ void LCodeGen::DoBitI(LBitI* instr) { __ orr(result, left, right); break; case Token::BIT_XOR: - if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { - __ mvn(result, Operand(left)); - } else { - __ eor(result, left, right); - } + __ eor(result, left, right); break; default: UNREACHABLE(); @@ -1974,7 +1936,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type : two_byte_seq_type)); - __ Check(eq, kUnexpectedStringType); + __ Check(eq, "Unexpected string type"); } __ add(ip, @@ -1991,6 +1953,13 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { } +void LCodeGen::DoBitNotI(LBitNotI* instr) { + Register input = ToRegister(instr->value()); + Register result = ToRegister(instr->result()); + __ mvn(result, Operand(input)); +} + + void LCodeGen::DoThrow(LThrow* instr) { Register input_reg = EmitLoadRegister(instr->value(), ip); __ push(input_reg); @@ -2152,32 +2121,25 @@ int LCodeGen::GetNextEmittedBlock() const { } template<class InstrType> -void LCodeGen::EmitBranch(InstrType instr, Condition condition) { +void LCodeGen::EmitBranch(InstrType instr, Condition cc) { int left_block = instr->TrueDestination(chunk_); int right_block = instr->FalseDestination(chunk_); int next_block = GetNextEmittedBlock(); - if (right_block == left_block || condition == al) { + if (right_block == left_block || cc == al) { EmitGoto(left_block); } else if (left_block == next_block) { - __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block)); + __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); } else if (right_block == next_block) { - __ b(condition, chunk_->GetAssemblyLabel(left_block)); + __ b(cc, chunk_->GetAssemblyLabel(left_block)); } else { - __ b(condition, chunk_->GetAssemblyLabel(left_block)); + __ b(cc, chunk_->GetAssemblyLabel(left_block)); __ b(chunk_->GetAssemblyLabel(right_block)); } } -template<class InstrType> -void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) { - int false_block = instr->FalseDestination(chunk_); - __ b(condition, chunk_->GetAssemblyLabel(false_block)); -} - - void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); } @@ -2433,26 +2395,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { } -void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { - if 
(instr->hydrogen()->representation().IsTagged()) { - Register input_reg = ToRegister(instr->object()); - __ mov(ip, Operand(factory()->the_hole_value())); - __ cmp(input_reg, ip); - EmitBranch(instr, eq); - return; - } - - DwVfpRegister input_reg = ToDoubleRegister(instr->object()); - __ VFPCompareAndSetFlags(input_reg, input_reg); - EmitFalseBranch(instr, vc); - - Register scratch = scratch0(); - __ VmovHigh(scratch, input_reg); - __ cmp(scratch, Operand(kHoleNanUpper32)); - EmitBranch(instr, eq); -} - - Condition LCodeGen::EmitIsObject(Register input, Register temp1, Label* is_not_object, @@ -3079,6 +3021,91 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { } +void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, + Register object, + Handle<Map> type, + Handle<String> name, + LEnvironment* env) { + LookupResult lookup(isolate()); + type->LookupDescriptor(NULL, *name, &lookup); + ASSERT(lookup.IsFound() || lookup.IsCacheable()); + if (lookup.IsField()) { + int index = lookup.GetLocalFieldIndexFromMap(*type); + int offset = index * kPointerSize; + if (index < 0) { + // Negative property indices are in-object properties, indexed + // from the end of the fixed part of the object. + __ ldr(result, FieldMemOperand(object, offset + type->instance_size())); + } else { + // Non-negative property indices are in the properties array. + __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); + __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize)); + } + } else if (lookup.IsConstant()) { + Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate()); + __ LoadObject(result, constant); + } else { + // Negative lookup. + // Check prototypes. + Handle<HeapObject> current(HeapObject::cast((*type)->prototype())); + Heap* heap = type->GetHeap(); + while (*current != heap->null_value()) { + __ LoadHeapObject(result, current); + __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset)); + __ cmp(result, Operand(Handle<Map>(current->map()))); + DeoptimizeIf(ne, env); + current = + Handle<HeapObject>(HeapObject::cast(current->map()->prototype())); + } + __ LoadRoot(result, Heap::kUndefinedValueRootIndex); + } +} + + +void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { + Register object = ToRegister(instr->object()); + Register result = ToRegister(instr->result()); + Register object_map = scratch0(); + + int map_count = instr->hydrogen()->types()->length(); + bool need_generic = instr->hydrogen()->need_generic(); + + if (map_count == 0 && !need_generic) { + DeoptimizeIf(al, instr->environment()); + return; + } + Handle<String> name = instr->hydrogen()->name(); + Label done; + __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); + for (int i = 0; i < map_count; ++i) { + bool last = (i == map_count - 1); + Handle<Map> map = instr->hydrogen()->types()->at(i); + Label check_passed; + __ CompareMap(object_map, map, &check_passed); + if (last && !need_generic) { + DeoptimizeIf(ne, instr->environment()); + __ bind(&check_passed); + EmitLoadFieldOrConstantFunction( + result, object, map, name, instr->environment()); + } else { + Label next; + __ b(ne, &next); + __ bind(&check_passed); + EmitLoadFieldOrConstantFunction( + result, object, map, name, instr->environment()); + __ b(&done); + __ bind(&next); + } + } + if (need_generic) { + __ mov(r2, Operand(name)); + Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); + CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); + } + __ 
bind(&done); +} + + void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); @@ -3173,7 +3200,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { if (key_is_constant) { constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); + Abort("array index constant value too big."); } } else { key = ToRegister(instr->key()); @@ -3257,7 +3284,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { if (key_is_constant) { constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); + Abort("array index constant value too big."); } } else { key = ToRegister(instr->key()); @@ -3518,7 +3545,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { void LCodeGen::DoPushArgument(LPushArgument* instr) { LOperand* argument = instr->value(); if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { - Abort(kDoPushArgumentNotImplementedForDoubleType); + Abort("DoPushArgument not implemented for double type."); } else { Register argument_reg = EmitLoadRegister(argument, ip); __ push(argument_reg); @@ -3738,7 +3765,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) { DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); __ vabs(result, input); - } else if (r.IsSmiOrInteger32()) { + } else if (r.IsInteger32()) { EmitIntegerMathAbs(instr); } else { // Representation is tagged. @@ -4251,14 +4278,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { } -void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) { +void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { if (FLAG_debug_code && check->hydrogen()->skip_check()) { Label done; - __ b(NegateCondition(condition), &done); + __ b(NegateCondition(cc), &done); __ stop("eliminated bounds check failed"); __ bind(&done); } else { - DeoptimizeIf(condition, check->environment()); + DeoptimizeIf(cc, check->environment()); } } @@ -4292,7 +4319,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { if (key_is_constant) { constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); + Abort("array index constant value too big."); } } else { key = ToRegister(instr->key()); @@ -4365,7 +4392,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { if (key_is_constant) { constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xF0000000) { - Abort(kArrayIndexConstantValueTooBig); + Abort("array index constant value too big."); } } else { key = ToRegister(instr->key()); @@ -4388,7 +4415,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { if (masm()->emit_debug_code()) { __ vmrs(ip); __ tst(ip, Operand(kVFPDefaultNaNModeControlBit)); - __ Assert(ne, kDefaultNaNModeNotSet); + __ Assert(ne, "Default NaN mode not set"); } __ VFPCanonicalizeNaN(value); } @@ -4489,13 +4516,12 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, scratch, GetLinkRegisterState(), kDontSaveFPRegs); } else { - PushSafepointRegistersScope scope( - this, Safepoint::kWithRegistersAndDoubles); + PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); __ Move(r0, 
             object_reg);
     __ Move(r1, to_map);
     TransitionElementsKindStub stub(from_kind, to_kind);
     __ CallStub(&stub);
-    RecordSafepointWithRegistersAndDoubles(
+    RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   }
   __ bind(&not_applicable);
@@ -4783,6 +4809,29 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   Register temp1 = ToRegister(instr->temp());
   Register temp2 = ToRegister(instr->temp2());
 
+  bool convert_hole = false;
+  HValue* change_input = instr->hydrogen()->value();
+  if (change_input->IsLoadKeyed()) {
+    HLoadKeyed* load = HLoadKeyed::cast(change_input);
+    convert_hole = load->UsesMustHandleHole();
+  }
+
+  Label no_special_nan_handling;
+  Label done;
+  if (convert_hole) {
+    DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+    __ VFPCompareAndSetFlags(input_reg, input_reg);
+    __ b(vc, &no_special_nan_handling);
+    __ VmovHigh(scratch, input_reg);
+    __ cmp(scratch, Operand(kHoleNanUpper32));
+    // If not the hole NaN, force the NaN to be canonical.
+    __ VFPCanonicalizeNaN(input_reg, ne);
+    __ b(ne, &no_special_nan_handling);
+    __ Move(reg, factory()->the_hole_value());
+    __ b(&done);
+  }
+
+  __ bind(&no_special_nan_handling);
   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
@@ -4796,6 +4845,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   __ vstr(input_reg, reg, HeapNumber::kValueOffset);
   // Now that we have finished with the object's real address tag it
   __ add(reg, reg, Operand(kHeapObjectTag));
+  __ bind(&done);
 }
 
 
@@ -4835,7 +4885,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
 
 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 DwVfpRegister result_reg,
-                                bool can_convert_undefined_to_nan,
+                                bool allow_undefined_as_nan,
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
@@ -4845,7 +4895,9 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
 
   Label load_smi, heap_number, done;
 
-  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+  STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
+                NUMBER_CANDIDATE_IS_ANY_TAGGED);
+  if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
 
@@ -4853,7 +4905,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
     __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
     __ cmp(scratch, Operand(ip));
-    if (!can_convert_undefined_to_nan) {
+    if (!allow_undefined_as_nan) {
       DeoptimizeIf(ne, env);
     } else {
       Label heap_number, convert;
@@ -4862,6 +4914,11 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
       // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
       __ cmp(input_reg, Operand(ip));
+      if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
+        __ b(eq, &convert);
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ cmp(input_reg, Operand(ip));
+      }
       DeoptimizeIf(ne, env);
 
       __ bind(&convert);
@@ -5002,12 +5059,21 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   Register input_reg = ToRegister(input);
   DwVfpRegister result_reg = ToDoubleRegister(result);
 
+  NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
   HValue* value = instr->hydrogen()->value();
-  NumberUntagDMode mode = value->representation().IsSmi()
-      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+  if (value->type().IsSmi()) {
+    mode = NUMBER_CANDIDATE_IS_SMI;
+  } else if (value->IsLoadKeyed()) {
+    HLoadKeyed* load = HLoadKeyed::cast(value);
+    if (load->UsesMustHandleHole()) {
+      if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+        mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
+      }
+    }
+  }
 
   EmitNumberUntagD(input_reg, result_reg,
-                   instr->hydrogen()->can_convert_undefined_to_nan(),
+                   instr->hydrogen()->allow_undefined_as_nan(),
                    instr->hydrogen()->deoptimize_on_minus_zero(),
                    instr->environment(),
                    mode);
@@ -5137,7 +5203,7 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
   AllowDeferredHandleDereference smi_check;
   if (isolate()->heap()->InNewSpace(*target)) {
     Register reg = ToRegister(instr->value());
-    Handle<Cell> cell = isolate()->factory()->NewCell(target);
+    Handle<Cell> cell = isolate()->factory()->NewPropertyCell(target);
     __ mov(ip, Operand(Handle<Object>(cell)));
     __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
     __ cmp(reg, ip);
@@ -5148,67 +5214,33 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
 }
 
 
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
-  {
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-    __ push(object);
-    CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
-    __ StoreToSafepointRegisterSlot(r0, scratch0());
-  }
-  __ tst(scratch0(), Operand(kSmiTagMask));
-  DeoptimizeIf(eq, instr->environment());
+void LCodeGen::DoCheckMapCommon(Register map_reg,
+                                Handle<Map> map,
+                                LEnvironment* env) {
+  Label success;
+  __ CompareMap(map_reg, map, &success);
+  DeoptimizeIf(ne, env);
+  __ bind(&success);
 }
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  class DeferredCheckMaps: public LDeferredCode {
-   public:
-    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
-        : LDeferredCode(codegen), instr_(instr), object_(object) {
-      SetExit(check_maps());
-    }
-    virtual void Generate() {
-      codegen()->DoDeferredInstanceMigration(instr_, object_);
-    }
-    Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LCheckMaps* instr_;
-    Label check_maps_;
-    Register object_;
-  };
-
   if (instr->hydrogen()->CanOmitMapChecks()) return;
   Register map_reg = scratch0();
-
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
 
+  Label success;
   SmallMapList* map_set = instr->hydrogen()->map_set();
   __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-
-  DeferredCheckMaps* deferred = NULL;
-  if (instr->hydrogen()->has_migration_target()) {
-    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
-    __ bind(deferred->check_maps());
-  }
-
-  Label success;
   for (int i = 0; i < map_set->length() - 1; i++) {
     Handle<Map> map = map_set->at(i);
     __ CompareMap(map_reg, map, &success);
     __ b(eq, &success);
   }
-
   Handle<Map> map = map_set->last();
-  __ CompareMap(map_reg, map, &success);
-  if (instr->hydrogen()->has_migration_target()) {
-    __ b(ne, deferred->entry());
-  } else {
-    DeoptimizeIf(ne, instr->environment());
-  }
-
+  DoCheckMapCommon(map_reg, map, instr->environment());
   __ bind(&success);
 }
 
@@ -5263,6 +5295,25 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
 }
 
 
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+
+  Register prototype_reg = ToRegister(instr->temp());
+  Register map_reg = ToRegister(instr->temp2());
+
+  ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
+  ZoneList<Handle<Map> >* maps = instr->maps();
+
+  ASSERT(prototypes->length() == maps->length());
+
+  for (int i = 0; i < prototypes->length(); i++) {
+    __ LoadHeapObject(prototype_reg, prototypes->at(i));
+    __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+    DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
+  }
+}
+
+
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate: public LDeferredCode {
    public:
@@ -5597,8 +5648,6 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
   if (info()->IsStub() && type == Deoptimizer::EAGER) {
     type = Deoptimizer::LAZY;
   }
-
-  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
   DeoptimizeIf(al, instr->environment(), type);
 }
 
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.h b/chromium/v8/src/arm/lithium-codegen-arm.h
index d0bfcbbb94e..21f792153ba 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.h
+++ b/chromium/v8/src/arm/lithium-codegen-arm.h
@@ -66,8 +66,7 @@ class LCodeGen BASE_EMBEDDED {
         frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
-        expected_safepoint_kind_(Safepoint::kSimple),
-        old_position_(RelocInfo::kNoPosition) {
+        expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
@@ -116,7 +115,7 @@ class LCodeGen BASE_EMBEDDED {
   DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
                                        SwVfpRegister flt_scratch,
                                        DwVfpRegister dbl_scratch);
-  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+  int ToRepresentation(LConstantOperand* op, const Representation& r) const;
   int32_t ToInteger32(LConstantOperand* op) const;
   Smi* ToSmi(LConstantOperand* op) const;
   double ToDouble(LConstantOperand* op) const;
@@ -155,7 +154,8 @@ class LCodeGen BASE_EMBEDDED {
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
-  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+
+  void DoCheckMapCommon(Register map_reg, Handle<Map> map, LEnvironment* env);
 
   // Parallel move support.
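
Note: the DoNumberTagD change above hinges on the bit pattern of the "hole" NaN: a NaN whose upper 32 bits equal kHoleNanUpper32 marks an elided array element, and any other NaN is first forced to its canonical encoding. Below is a minimal host-side C++ sketch of the check that the emitted VmovHigh/cmp pair performs at run time; the constant's exact value lives in V8's headers and is only assumed here.

    #include <cstdint>
    #include <cstring>

    // Assumed stand-in for V8's kHoleNanUpper32; only the comparison
    // logic is illustrated, not the real bit pattern.
    static const uint32_t kAssumedHoleNanUpper32 = 0x7FF7FFFF;

    // True when 'd' is the special hole NaN rather than an ordinary NaN.
    bool IsHoleNan(double d) {
      if (d == d) return false;  // ordinary numbers are never the hole
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // safe type pun
      return static_cast<uint32_t>(bits >> 32) == kAssumedHoleNanUpper32;
    }
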
   void DoParallelMove(LParallelMove* move);
@@ -214,7 +214,7 @@ class LCodeGen BASE_EMBEDDED {
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
 
-  void Abort(BailoutReason reason);
+  void Abort(const char* reason);
   void FPRINTF_CHECKING Comment(const char* format, ...);
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -281,19 +281,16 @@ class LCodeGen BASE_EMBEDDED {
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition condition,
+  void DeoptimizeIf(Condition cc,
                     LEnvironment* environment,
                     Deoptimizer::BailoutType bailout_type);
-  void DeoptimizeIf(Condition condition, LEnvironment* environment);
-  void ApplyCheckIf(Condition condition, LBoundsCheck* check);
+  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+  void ApplyCheckIf(Condition cc, LBoundsCheck* check);
 
-  void AddToTranslation(LEnvironment* environment,
-                        Translation* translation,
+  void AddToTranslation(Translation* translation,
                         LOperand* op,
                         bool is_tagged,
-                        bool is_uint32,
-                        int* object_index_pointer,
-                        int* dematerialized_index_pointer);
+                        bool is_uint32);
   void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
   void PopulateDeoptimizationData(Handle<Code> code);
   int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -319,14 +316,11 @@ class LCodeGen BASE_EMBEDDED {
                                     int arguments,
                                     Safepoint::DeoptMode mode);
   void RecordPosition(int position);
-  void RecordAndUpdatePosition(int position);
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   template<class InstrType>
-  void EmitBranch(InstrType instr, Condition condition);
-  template<class InstrType>
-  void EmitFalseBranch(InstrType instr, Condition condition);
+  void EmitBranch(InstrType instr, Condition cc);
   void EmitNumberUntagD(Register input,
                         DwVfpRegister result,
                         bool allow_undefined_as_nan,
@@ -362,6 +356,12 @@ class LCodeGen BASE_EMBEDDED {
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp1, Register temp2);
 
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name,
+                                       LEnvironment* env);
+
   // Emits optimized code to deep-copy the contents of statically known
   // object graphs (e.g. object literal boilerplate).
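
Note: the STATIC_ASSERT added to EmitNumberUntagD (see the .cc hunk above) leans on the declaration order of NumberUntagDMode so that one `mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED` comparison covers both tagged variants. A sketch of the idea; the explicit enumerator values are an assumption made here to make the ordering visible:

    // Names mirror the V8 enum; values are illustrative.
    enum NumberUntagDMode {
      NUMBER_CANDIDATE_IS_SMI = 0,
      NUMBER_CANDIDATE_IS_ANY_TAGGED = 1,
      NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE = 2
    };

    // One comparison selects the tagged path for both ANY_TAGGED modes;
    // the hole-specific branch is then gated with an equality test.
    bool NeedsTaggedPath(NumberUntagDMode mode) {
      return mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED;
    }
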
   void EmitDeepCopy(Handle<JSObject> object,
@@ -418,8 +418,6 @@ class LCodeGen BASE_EMBEDDED {
 
   Safepoint::Kind expected_safepoint_kind_;
 
-  int old_position_;
-
   class PushSafepointRegistersScope BASE_EMBEDDED {
    public:
     PushSafepointRegistersScope(LCodeGen* codegen,
diff --git a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc b/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
index 88ac7a2a21d..7a3c96892c2 100644
--- a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -254,7 +254,7 @@ void LGapResolver::EmitMove(int index) {
       } else {
         __ LoadObject(dst, cgen_->ToHandle(constant_source));
       }
-    } else if (destination->IsDoubleRegister()) {
+    } else if (source->IsDoubleRegister()) {
      DwVfpRegister result = cgen_->ToDoubleRegister(destination);
       double v = cgen_->ToDouble(constant_source);
       __ Vmov(result, v, ip);
diff --git a/chromium/v8/src/arm/macro-assembler-arm.cc b/chromium/v8/src/arm/macro-assembler-arm.cc
index a56744bf597..cd124610f97 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.cc
+++ b/chromium/v8/src/arm/macro-assembler-arm.cc
@@ -375,13 +375,16 @@ void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond) {
   if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
-      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
+      !Heap::RootCanBeWrittenAfterInitialization(index) &&
       !predictable_code_size()) {
-    // The CPU supports fast immediate values, and this root will never
-    // change. We will load it as a relocatable immediate value.
-    Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
-    mov(destination, Operand(root), LeaveCC, cond);
-    return;
+    Handle<Object> root(isolate()->heap()->roots_array_start()[index],
+                        isolate());
+    if (!isolate()->heap()->InNewSpace(*root)) {
+      // The CPU supports fast immediate values, and this root will never
+      // change. We will load it as a relocatable immediate value.
+      mov(destination, Operand(root), LeaveCC, cond);
+      return;
+    }
   }
   ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
 }
@@ -486,7 +489,7 @@ void MacroAssembler::RecordWrite(Register object,
   if (emit_debug_code()) {
     ldr(ip, MemOperand(address));
     cmp(ip, value);
-    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+    Check(eq, "Wrong address or value passed to RecordWrite");
   }
 
   Label done;
@@ -1487,7 +1490,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   // In debug mode, make sure the lexical context is set.
 #ifdef DEBUG
   cmp(scratch, Operand::Zero());
-  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+  Check(ne, "we should not have an empty lexical context");
 #endif
 
   // Load the native context of the current context.
@@ -1505,7 +1508,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
     ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
     LoadRoot(ip, Heap::kNativeContextMapRootIndex);
     cmp(holder_reg, ip);
-    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+    Check(eq, "JSGlobalObject::native_context should be a native context.");
     pop(holder_reg);  // Restore holder.
   }
 
@@ -1522,12 +1525,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   mov(holder_reg, ip);  // Move ip to its holding place.
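
Note: the LoadRoot hunk above narrows the fast path to roots that are genuinely constant: the CPU must support movw/movt immediate loads, the root must never be rewritten after initialization, it must not live in new space (so it cannot move), and predictable code size must not be requested; otherwise the root is read from the roots table via kRootRegister. Condensed into a predicate, with illustrative parameter names rather than V8's:

    // Illustrative predicate mirroring LoadRoot's condition; when it
    // returns false the code falls back to an indexed table load.
    bool CanInlineRootAsImmediate(bool cpu_has_movw_movt,
                                  bool writable_after_init,
                                  bool in_new_space,
                                  bool predictable_code_size) {
      return cpu_has_movw_movt && !writable_after_init &&
             !in_new_space && !predictable_code_size;
    }
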
   LoadRoot(ip, Heap::kNullValueRootIndex);
   cmp(holder_reg, ip);
-  Check(ne, kJSGlobalProxyContextShouldNotBeNull);
+  Check(ne, "JSGlobalProxy::context() should not be null.");
   ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
   LoadRoot(ip, Heap::kNativeContextMapRootIndex);
   cmp(holder_reg, ip);
-  Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+  Check(eq, "JSGlobalObject::native_context should be a native context.");
   // Restore ip is not needed. ip is reloaded below.
   pop(holder_reg);  // Restore holder.
   // Restore ip to holder's context.
@@ -1724,7 +1727,7 @@ void MacroAssembler::Allocate(int object_size,
     // respect to register content between debug and release mode.
     ldr(ip, MemOperand(topaddr));
     cmp(result, ip);
-    Check(eq, kUnexpectedAllocationTop);
+    Check(eq, "Unexpected allocation top");
   }
   // Load allocation limit into ip. Result already contains allocation top.
   ldr(ip, MemOperand(topaddr, limit - top));
@@ -1822,7 +1825,7 @@ void MacroAssembler::Allocate(Register object_size,
     // respect to register content between debug and release mode.
     ldr(ip, MemOperand(topaddr));
     cmp(result, ip);
-    Check(eq, kUnexpectedAllocationTop);
+    Check(eq, "Unexpected allocation top");
   }
   // Load allocation limit into ip. Result already contains allocation top.
   ldr(ip, MemOperand(topaddr, limit - top));
@@ -1856,7 +1859,7 @@ void MacroAssembler::Allocate(Register object_size,
   // Update allocation top. result temporarily holds the new top.
   if (emit_debug_code()) {
     tst(scratch2, Operand(kObjectAlignmentMask));
-    Check(eq, kUnalignedAllocationInNewSpace);
+    Check(eq, "Unaligned allocation in new space");
   }
   str(scratch2, MemOperand(topaddr));
 
@@ -1879,7 +1882,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object,
   mov(scratch, Operand(new_space_allocation_top));
   ldr(scratch, MemOperand(scratch));
   cmp(object, scratch);
-  Check(lt, kUndoAllocationOfNonAllocatedMemory);
+  Check(lt, "Undo allocation of non allocated memory");
 #endif
   // Write the address of the object to un-allocate as the current top.
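
Note: the Allocate and UndoAllocationInNewSpace hunks above all guard the same bump-pointer protocol: top grows toward limit, stays object-aligned, and an undo may only rewind memory below the current top. A compact host-side model of that protocol, with illustrative names rather than V8's:

    #include <cstdint>

    struct AllocationArea {
      uintptr_t top;    // next free, object-aligned address
      uintptr_t limit;  // end of the allocation area
    };

    // Returns the old top on success, or 0 when the caller must take the
    // gc_required slow path (the branch the emitted code jumps to).
    uintptr_t BumpAllocate(AllocationArea* area, uintptr_t size) {
      uintptr_t result = area->top;
      if (size > area->limit - result) return 0;  // would overrun limit
      area->top = result + size;                  // publish the new top
      return result;
    }
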
   mov(scratch, Operand(new_space_allocation_top));
@@ -2128,7 +2131,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
     if (emit_debug_code()) {
       vmrs(ip);
       tst(ip, Operand(kVFPDefaultNaNModeControlBit));
-      Assert(ne, kDefaultNaNModeNotSet);
+      Assert(ne, "Default NaN mode not set");
     }
     VFPCanonicalizeNaN(double_scratch);
     b(&store);
@@ -2378,7 +2381,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
   if (emit_debug_code()) {
     ldr(r1, MemOperand(r7, kLevelOffset));
     cmp(r1, r6);
-    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+    Check(eq, "Unexpected level after return from api call");
   }
   sub(r6, r6, Operand(1));
   str(r6, MemOperand(r7, kLevelOffset));
@@ -2779,9 +2782,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
 }
 
 
-void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
+void MacroAssembler::Assert(Condition cond, const char* msg) {
   if (emit_debug_code())
-    Check(cond, reason);
+    Check(cond, msg);
 }
 
 
@@ -2800,23 +2803,23 @@ void MacroAssembler::AssertFastElements(Register elements) {
     LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
     cmp(elements, ip);
     b(eq, &ok);
-    Abort(kJSObjectWithFastElementsMapHasSlowElements);
+    Abort("JSObject with fast elements map has slow elements");
     bind(&ok);
     pop(elements);
   }
 }
 
 
-void MacroAssembler::Check(Condition cond, BailoutReason reason) {
+void MacroAssembler::Check(Condition cond, const char* msg) {
   Label L;
   b(cond, &L);
-  Abort(reason);
+  Abort(msg);
   // will not return here
   bind(&L);
 }
 
 
-void MacroAssembler::Abort(BailoutReason reason) {
+void MacroAssembler::Abort(const char* msg) {
   Label abort_start;
   bind(&abort_start);
   // We want to pass the msg string like a smi to avoid GC
@@ -2824,7 +2827,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
   // properly. Instead, we pass an aligned pointer that is
   // a proper v8 smi, but also pass the alignment difference
   // from the real pointer as a smi.
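
Note: the Abort comment spanning this hunk describes a small trick: the raw message pointer cannot be pushed as-is, because the GC would try to interpret it as a tagged value, so it is split into an aligned pointer that already reads as a valid smi plus the alignment remainder. A host-side sketch using the same arithmetic as the code below; the tag constants assume the usual one-bit smi encoding:

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // assumed 1-bit smi tag, kSmiTag == 0
    const intptr_t kSmiTag = 0;

    // Splits msg into p0 (a smi-shaped aligned pointer) and delta (the
    // masked-off bits), so both halves survive a GC-scanned stack; the
    // runtime side recovers the pointer as p0 + delta.
    void EncodeAbortMessage(const char* msg, intptr_t* p0, intptr_t* delta) {
      intptr_t p1 = reinterpret_cast<intptr_t>(msg);
      *p0 = (p1 & ~kSmiTagMask) + kSmiTag;
      *delta = p1 - *p0;
      assert((*p0 & kSmiTagMask) == kSmiTag);  // p0 passes the smi check
    }
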
-  const char* msg = GetBailoutReason(reason);
   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
@@ -2967,7 +2969,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
     b(&ok);
     bind(&fail);
-    Abort(kGlobalFunctionsMustHaveInitialMap);
+    Abort("Global functions must have initial map");
     bind(&ok);
   }
 }
@@ -3036,7 +3038,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
     tst(object, Operand(kSmiTagMask));
-    Check(ne, kOperandIsASmi);
+    Check(ne, "Operand is a smi");
   }
 }
 
 
@@ -3045,7 +3047,7 @@ void MacroAssembler::AssertSmi(Register object) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
     tst(object, Operand(kSmiTagMask));
-    Check(eq, kOperandIsNotSmi);
+    Check(eq, "Operand is not smi");
   }
 }
 
 
@@ -3054,12 +3056,12 @@ void MacroAssembler::AssertString(Register object) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
     tst(object, Operand(kSmiTagMask));
-    Check(ne, kOperandIsASmiAndNotAString);
+    Check(ne, "Operand is a smi and not a string");
     push(object);
     ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
     CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
     pop(object);
-    Check(lo, kOperandIsNotAString);
+    Check(lo, "Operand is not a string");
   }
 }
 
 
@@ -3068,12 +3070,12 @@ void MacroAssembler::AssertName(Register object) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
     tst(object, Operand(kSmiTagMask));
-    Check(ne, kOperandIsASmiAndNotAName);
+    Check(ne, "Operand is a smi and not a name");
     push(object);
     ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
     CompareInstanceType(object, object, LAST_NAME_TYPE);
     pop(object);
-    Check(le, kOperandIsNotAName);
+    Check(le, "Operand is not a name");
   }
 }
 
 
@@ -3082,7 +3084,7 @@ void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
   if (emit_debug_code()) {
     CompareRoot(reg, index);
-    Check(eq, kHeapNumberMapRegisterClobbered);
+    Check(eq, "HeapNumberMap register clobbered.");
   }
 }
 
@@ -3228,7 +3230,7 @@ void MacroAssembler::CopyBytes(Register src,
   bind(&word_loop);
   if (emit_debug_code()) {
     tst(src, Operand(kPointerSize - 1));
-    Assert(eq, kExpectingAlignmentForCopyBytes);
+    Assert(eq, "Expecting alignment for CopyBytes");
   }
   cmp(length, Operand(kPointerSize));
   b(lt, &byte_loop);
@@ -3492,7 +3494,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
   // Check that the instruction is a ldr reg, [pc + offset] .
   and_(result, result, Operand(kLdrPCPattern));
   cmp(result, Operand(kLdrPCPattern));
-  Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+  Check(eq, "The instruction to patch should be a load from pc.");
   // Result was clobbered. Restore it.
   ldr(result, MemOperand(ldr_location));
 }
diff --git a/chromium/v8/src/arm/macro-assembler-arm.h b/chromium/v8/src/arm/macro-assembler-arm.h
index 8b9fa2b2216..38308e5cdef 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.h
+++ b/chromium/v8/src/arm/macro-assembler-arm.h
@@ -144,8 +144,6 @@ class MacroAssembler: public Assembler {
             Condition cond = al);
   void Call(Label* target);
 
-  void Push(Register src) { push(src); }
-  void Pop(Register dst) { pop(dst); }
 
   // Register move. May do nothing if the registers are identical.
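
Note: the Assert* helpers above all reduce to the same one-instruction test: with kSmiTag == 0, `tst object, #kSmiTagMask` sets the Z flag exactly for smis, so Check(eq, ...) asserts "is a smi" and Check(ne, ...) asserts "is not a smi". The equivalent C++ predicate, with constants assuming the standard one-bit encoding:

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // assumed standard 1-bit smi tag

    // 'eq' after 'tst object, #kSmiTagMask' corresponds to this being true.
    bool IsSmi(intptr_t tagged_value) {
      return (tagged_value & kSmiTagMask) == 0;
    }
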
   void Move(Register dst, Handle<Object> value);
@@ -1138,14 +1136,14 @@ class MacroAssembler: public Assembler {
 
   // Calls Abort(msg) if the condition cond is not satisfied.
   // Use --debug_code to enable.
-  void Assert(Condition cond, BailoutReason reason);
+  void Assert(Condition cond, const char* msg);
   void AssertFastElements(Register elements);
 
   // Like Assert(), but always enabled.
-  void Check(Condition cond, BailoutReason reason);
+  void Check(Condition cond, const char* msg);
 
   // Print a message to stdout and abort execution.
-  void Abort(BailoutReason msg);
+  void Abort(const char* msg);
 
   // Verify restrictions about code generated in stubs.
   void set_generating_stub(bool value) { generating_stub_ = value; }
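
Note: for reference, the contract restored by these declarations is layered: Assert only emits its test when debug code is enabled, while Check always tests and never returns on failure. A behavioral sketch, where the flag variable is a stand-in for V8's --debug-code:

    #include <cstdio>
    #include <cstdlib>

    static bool emit_debug_code = false;  // stand-in for --debug-code

    void Abort(const char* msg) {         // prints and never returns
      std::fprintf(stderr, "abort: %s\n", msg);
      std::abort();
    }

    void Check(bool cond, const char* msg) {
      if (!cond) Abort(msg);              // always emitted
    }

    void Assert(bool cond, const char* msg) {
      if (emit_debug_code) Check(cond, msg);  // debug-code builds only
    }
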