author     Ben Noordhuis <info@bnoordhuis.nl>  2013-07-29 21:21:03 +0200
committer  Ben Noordhuis <info@bnoordhuis.nl>  2013-07-29 21:21:03 +0200
commit     1bd711c8a09e327946f2eca5030e9710dc0e1e6e
tree       6233c588fca458165ad6e448c5d3fbaa1648f805 /deps/v8/src/mips
parent     17fbd6cd66453da565d77ab557188eab479dab15
v8: upgrade to v8 3.20.9
Diffstat (limited to 'deps/v8/src/mips')
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc             38
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc            51
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc       151
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h         10
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.cc   16
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc                82
-rw-r--r--  deps/v8/src/mips/lithium-mips.h                 41
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc        11
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc             15
9 files changed, 197 insertions(+), 218 deletions(-)
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index f984b3a7b7..0e1b224ead 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -39,6 +39,16 @@
 namespace v8 {
 namespace internal {
 
+void ToNumberStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = NULL;
+}
+
+
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -287,16 +297,6 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                            Register rhs);
 
 
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
-                                   Register scratch1, Register scratch2,
-                                   Label* not_a_heap_number) {
-  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
-  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
-  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
-}
-
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
   // Update the static counter each time a new code stub is generated.
   Isolate* isolate = masm->isolate();
@@ -321,24 +321,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
 }
 
 
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in a0.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(a0, &check_heap_number);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-
-  __ bind(&check_heap_number);
-  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-
-  __ bind(&call_builtin);
-  __ push(a0);
-  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in cp.
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 840462e43f..57d3880ede 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -43,22 +43,8 @@ int Deoptimizer::patch_size() {
 }
 
 
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
-    JSFunction* function) {
-  Isolate* isolate = function->GetIsolate();
-  HandleScope scope(isolate);
-  DisallowHeapAllocation nha;
-
-  ASSERT(function->IsOptimized());
-  ASSERT(function->FunctionsInFunctionListShareSameCode());
-
-  // Get the optimized code.
-  Code* code = function->code();
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   Address code_start_address = code->instruction_start();
-
-  // The optimized code is going to be patched, so we cannot use it any more.
-  function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
-
   // Invalidate the relocation information, as it will become invalid by the
   // code patching below, and is not needed any more.
   code->InvalidateRelocation();
@@ -89,30 +75,6 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
     prev_call_address = call_address;
 #endif
   }
-
-  // Add the deoptimizing code to the list.
-  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = isolate->deoptimizer_data();
-  node->set_next(data->deoptimizing_code_list_);
-  data->deoptimizing_code_list_ = node;
-
-  // We might be in the middle of incremental marking with compaction.
-  // Tell collector to treat this code object in a special way and
-  // ignore all slots that might have been recorded on it.
-  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
-  ReplaceCodeForRelatedFunctions(function, code);
-
-  if (FLAG_trace_deopt) {
-    PrintF("[forced deoptimization: ");
-    function->PrintName();
-    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
-    if (FLAG_print_code) {
-      code->PrintLn();
-    }
-#endif
-  }
 }
@@ -648,6 +610,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
       count() * table_entry_size_);
 }
 
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
 #undef __
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 65b4a575f7..5cf1d59e49 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -271,6 +271,7 @@ bool LCodeGen::GenerateBody() {
     instr->CompileToNative(this);
   }
   EnsureSpaceForLazyDeopt();
+  last_lazy_deopt_pc_ = masm()->pc_offset();
   return !is_aborted();
 }
@@ -410,11 +411,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
       Abort("EmitLoadRegister: Unsupported double immediate.");
     } else {
       ASSERT(r.IsTagged());
-      if (literal->IsSmi()) {
-        __ li(scratch, Operand(literal));
-      } else {
-        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
-      }
+      __ LoadObject(scratch, literal);
     }
     return scratch;
   } else if (op->IsStackSlot() || op->IsArgument()) {
@@ -480,9 +477,18 @@ bool LCodeGen::IsSmi(LConstantOperand* op) const {
 }
 
 
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                   const Representation& r) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  return constant->Integer32Value();
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  ASSERT(r.IsSmiOrTagged());
+  return reinterpret_cast<int32_t>(Smi::FromInt(value));
 }
@@ -504,7 +510,10 @@ Operand LCodeGen::ToOperand(LOperand* op) {
     LConstantOperand* const_op = LConstantOperand::cast(op);
     HConstant* constant = chunk()->LookupConstant(const_op);
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
-    if (r.IsInteger32()) {
+    if (r.IsSmi()) {
+      ASSERT(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
       ASSERT(constant->HasInteger32Value());
       return Operand(constant->Integer32Value());
     } else if (r.IsDouble()) {
@@ -789,14 +798,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
 }
 
 
-void LCodeGen::SoftDeoptimize(LEnvironment* environment,
-                              Register src1,
-                              const Operand& src2) {
-  ASSERT(!info()->IsStub());
-  DeoptimizeIf(al, environment, Deoptimizer::SOFT, src1, src2);
-}
-
-
 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
   ZoneList<Handle<Map> > maps(1, zone());
   int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1378,7 +1379,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
   if (right_op->IsConstantOperand() && !can_overflow) {
     // Use optimized code for specific constants.
-    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+    int32_t constant = ToRepresentation(
+        LConstantOperand::cast(right_op),
+        instr->hydrogen()->right()->representation());
 
     if (bailout_on_minus_zero && (constant < 0)) {
       // The case of a null constant will be handled separately.
@@ -1445,13 +1448,25 @@ void LCodeGen::DoMulI(LMulI* instr) {
 
     if (can_overflow) {
       // hi:lo = left * right.
-      __ mult(left, right);
-      __ mfhi(scratch);
-      __ mflo(result);
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ mult(result, right);
+        __ mfhi(scratch);
+        __ mflo(result);
+      } else {
+        __ mult(left, right);
+        __ mfhi(scratch);
+        __ mflo(result);
+      }
       __ sra(at, result, 31);
       DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
     } else {
-      __ Mul(result, left, right);
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ Mul(result, result, right);
+      } else {
+        __ Mul(result, left, right);
+      }
     }
 
     if (bailout_on_minus_zero) {
@@ -1635,12 +1650,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
 void LCodeGen::DoConstantT(LConstantT* instr) {
   Handle<Object> value = instr->value();
   AllowDeferredHandleDereference smi_check;
-  if (value->IsSmi()) {
-    __ li(ToRegister(instr->result()), Operand(value));
-  } else {
-    __ LoadHeapObject(ToRegister(instr->result()),
-                      Handle<HeapObject>::cast(value));
-  }
+  __ LoadObject(ToRegister(instr->result()), value);
 }
@@ -1819,7 +1829,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   LOperand* right = instr->right();
   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
-  if (instr->hydrogen()->representation().IsInteger32()) {
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
     Register left_reg = ToRegister(left);
     Operand right_op = (right->IsRegister() || right->IsConstantOperand())
         ? ToOperand(right)
@@ -2239,13 +2249,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
 }
 
 
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
-  Register left = ToRegister(instr->left());
-
-  EmitBranch(instr, eq, left, Operand(instr->hydrogen()->right()));
-}
-
-
 Condition LCodeGen::EmitIsObject(Register input,
                                  Register temp1,
                                  Register temp2,
@@ -2900,9 +2903,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
       __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
       __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
     }
-  } else if (lookup.IsConstantFunction()) {
-    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    __ LoadHeapObject(result, function);
+  } else if (lookup.IsConstant()) {
+    Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+    __ LoadObject(result, constant);
   } else {
     // Negative lookup.
     // Check prototypes.
@@ -4186,9 +4189,25 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
 }
 
 
+void LCodeGen::ApplyCheckIf(Condition cc,
+                            LBoundsCheck* check,
+                            Register src1,
+                            const Operand& src2) {
+  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+    Label done;
+    __ Branch(&done, NegateCondition(cc), src1, src2);
+    __ stop("eliminated bounds check failed");
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, check->environment(), src1, src2);
+  }
+}
+
+
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   if (instr->hydrogen()->skip_check()) return;
 
+  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
   if (instr->index()->IsConstantOperand()) {
     int constant_index =
         ToInteger32(LConstantOperand::cast(instr->index()));
@@ -4197,13 +4216,13 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
     } else {
       __ li(at, Operand(constant_index));
     }
-    DeoptimizeIf(hs,
-                 instr->environment(),
+    ApplyCheckIf(condition,
+                 instr,
                  at,
                  Operand(ToRegister(instr->length())));
   } else {
-    DeoptimizeIf(hs,
-                 instr->environment(),
+    ApplyCheckIf(condition,
+                 instr,
                  ToRegister(instr->index()),
                  Operand(ToRegister(instr->length())));
   }
@@ -5194,6 +5213,7 @@ void LCodeGen::DoCheckMapCommon(Register map_reg,
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  if (instr->hydrogen()->CanOmitMapChecks()) return;
   Register map_reg = scratch0();
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
@@ -5262,6 +5282,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
 
 
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+
   Register prototype_reg = ToRegister(instr->temp());
   Register map_reg = ToRegister(instr->temp2());
@@ -5270,12 +5292,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
 
   ASSERT(prototypes->length() == maps->length());
 
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
-    for (int i = 0; i < prototypes->length(); i++) {
-      __ LoadHeapObject(prototype_reg, prototypes->at(i));
-      __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
-      DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
-    }
+  for (int i = 0; i < prototypes->length(); i++) {
+    __ LoadHeapObject(prototype_reg, prototypes->at(i));
+    __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+    DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
   }
 }
@@ -5323,6 +5343,25 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
   }
 
   __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ li(scratch, Operand(size));
+    } else {
+      scratch = ToRegister(instr->size());
+    }
+    __ Subu(scratch, scratch, Operand(kPointerSize));
+    __ Subu(result, result, Operand(kHeapObjectTag));
+    Label loop;
+    __ bind(&loop);
+    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    __ Addu(at, result, Operand(scratch));
+    __ sw(scratch2, MemOperand(at));
+    __ Subu(scratch, scratch, Operand(kPointerSize));
+    __ Branch(&loop, ge, scratch, Operand(zero_reg));
+    __ Addu(result, result, Operand(kHeapObjectTag));
+  }
 }
@@ -5615,12 +5654,12 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
       padding_size -= Assembler::kInstrSize;
     }
   }
-  last_lazy_deopt_pc_ = masm()->pc_offset();
 }
 
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   EnsureSpaceForLazyDeopt();
+  last_lazy_deopt_pc_ = masm()->pc_offset();
   ASSERT(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5629,11 +5668,15 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
 }
 
 
 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  if (instr->hydrogen_value()->IsSoftDeoptimize()) {
-    SoftDeoptimize(instr->environment(), zero_reg, Operand(zero_reg));
-  } else {
-    DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
   }
+  DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
 }
@@ -5676,6 +5719,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
     StackCheckStub stub;
     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
     EnsureSpaceForLazyDeopt();
+    last_lazy_deopt_pc_ = masm()->pc_offset();
     __ bind(&done);
     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
     safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5687,6 +5731,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
     __ LoadRoot(at, Heap::kStackLimitRootIndex);
     __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
     EnsureSpaceForLazyDeopt();
+    last_lazy_deopt_pc_ = masm()->pc_offset();
     __ bind(instr->done_label());
     deferred_stack_check->SetExit(instr->done_label());
     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 1cba8cf468..a485b67db9 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -114,7 +114,8 @@ class LCodeGen BASE_EMBEDDED {
   DoubleRegister EmitLoadDoubleRegister(LOperand* op,
                                         FloatRegister flt_scratch,
                                         DoubleRegister dbl_scratch);
-  int ToInteger32(LConstantOperand* op) const;
+  int ToRepresentation(LConstantOperand* op, const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
   Smi* ToSmi(LConstantOperand* op) const;
   double ToDouble(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op);
@@ -284,9 +285,10 @@ class LCodeGen BASE_EMBEDDED {
                     LEnvironment* environment,
                     Register src1 = zero_reg,
                     const Operand& src2 = Operand(zero_reg));
-  void SoftDeoptimize(LEnvironment* environment,
-                      Register src1 = zero_reg,
-                      const Operand& src2 = Operand(zero_reg));
+  void ApplyCheckIf(Condition cc,
+                    LBoundsCheck* check,
+                    Register src1 = zero_reg,
+                    const Operand& src2 = Operand(zero_reg));
 
   void AddToTranslation(Translation* translation,
                         LOperand* op,
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index 9705e1f41a..771b22862e 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -251,10 +251,10 @@ void LGapResolver::EmitMove(int index) {
     LConstantOperand* constant_source = LConstantOperand::cast(source);
     if (destination->IsRegister()) {
       Register dst = cgen_->ToRegister(destination);
-      if (cgen_->IsSmi(constant_source)) {
-        __ li(dst, Operand(cgen_->ToSmi(constant_source)));
-      } else if (cgen_->IsInteger32(constant_source)) {
-        __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
+      if (cgen_->IsInteger32(constant_source)) {
+        __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
       } else {
         __ LoadObject(dst, cgen_->ToHandle(constant_source));
       }
@@ -265,11 +265,11 @@ void LGapResolver::EmitMove(int index) {
     } else {
       ASSERT(destination->IsStackSlot());
       ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
-      if (cgen_->IsSmi(constant_source)) {
-        __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
-      } else if (cgen_->IsInteger32(constant_source)) {
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
+      if (cgen_->IsInteger32(constant_source)) {
         __ li(kLithiumScratchReg,
-              Operand(cgen_->ToInteger32(constant_source)));
+              Operand(cgen_->ToRepresentation(constant_source, r)));
       } else {
         __ LoadObject(kLithiumScratchReg,
                       cgen_->ToHandle(constant_source));
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index c64533cdfc..b03cea44cb 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -706,11 +706,6 @@ LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
 }
 
 
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
   return AssignEnvironment(new(zone()) LDeoptimize);
 }
@@ -788,8 +783,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
          op == Token::SUB);
   HValue* left = instr->left();
   HValue* right = instr->right();
-  ASSERT(left->representation().IsSmiOrTagged());
-  ASSERT(right->representation().IsSmiOrTagged());
+  ASSERT(left->representation().IsTagged());
+  ASSERT(right->representation().IsTagged());
   LOperand* left_operand = UseFixed(left, a1);
   LOperand* right_operand = UseFixed(right, a0);
   LArithmeticT* result =
@@ -1320,17 +1315,17 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
 
 
 LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
 
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
     return DefineAsRegister(new(zone()) LBitI(left, right));
   } else {
-    ASSERT(instr->representation().IsSmiOrTagged());
-    ASSERT(instr->left()->representation().IsSmiOrTagged());
-    ASSERT(instr->right()->representation().IsSmiOrTagged());
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
 
     LOperand* left = UseFixed(instr->left(), a1);
     LOperand* right = UseFixed(instr->right(), a0);
@@ -1352,7 +1347,9 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
-  } else if (instr->representation().IsInteger32()) {
+  } else if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
     LOperand* dividend = UseRegister(instr->left());
     LOperand* divisor = UseRegister(instr->right());
     LDivI* div = new(zone()) LDivI(dividend, divisor);
@@ -1419,9 +1416,9 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   HValue* left = instr->left();
   HValue* right = instr->right();
-  if (instr->representation().IsInteger32()) {
-    ASSERT(left->representation().IsInteger32());
-    ASSERT(right->representation().IsInteger32());
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
     if (instr->HasPowerOf2Divisor()) {
       ASSERT(!right->CanBeZero());
       LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
@@ -1449,7 +1446,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
           ? AssignEnvironment(result)
          : result;
     }
-  } else if (instr->representation().IsSmiOrTagged()) {
+  } else if (instr->representation().IsTagged()) {
    return DoArithmeticT(Token::MOD, instr);
   } else {
     ASSERT(instr->representation().IsDouble());
@@ -1465,9 +1462,9 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
 
 
 LInstruction* LChunkBuilder::DoMul(HMul* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
     LOperand* left;
     LOperand* right = UseOrConstant(instr->BetterRightOperand());
     LOperand* temp = NULL;
@@ -1510,9 +1507,9 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
 
 
 LInstruction* LChunkBuilder::DoSub(HSub* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LSubI* sub = new(zone()) LSubI(left, right);
@@ -1539,9 +1536,9 @@ LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
 
 
 LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
     LAddI* add = new(zone()) LAddI(left, right);
@@ -1562,7 +1559,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
     }
     return DoArithmeticD(Token::ADD, instr);
   } else {
-    ASSERT(instr->representation().IsSmiOrTagged());
+    ASSERT(instr->representation().IsTagged());
     return DoArithmeticT(Token::ADD, instr);
   }
 }
@@ -1571,9 +1568,9 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
 LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
   LOperand* left = NULL;
   LOperand* right = NULL;
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
     left = UseRegisterAtStart(instr->BetterLeftOperand());
     right = UseOrConstantAtStart(instr->BetterRightOperand());
   } else {
@@ -1652,13 +1649,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
 }
 
 
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
-    HCompareConstantEqAndBranch* instr) {
-  return new(zone()) LCmpConstantEqAndBranch(
-      UseRegisterAtStart(instr->value()));
-}
-
-
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
@@ -1956,9 +1946,14 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
-  LUnallocated* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
+  LUnallocated* temp1 = NULL;
+  LOperand* temp2 = NULL;
+  if (!instr->CanOmitPrototypeChecks()) {
+    temp1 = TempRegister();
+    temp2 = TempRegister();
+  }
   LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+  if (instr->CanOmitPrototypeChecks()) return result;
   return AssignEnvironment(result);
 }
 
@@ -1970,8 +1965,10 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
 
 
 LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = NULL;
+  if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
   LInstruction* result = new(zone()) LCheckMaps(value);
+  if (instr->CanOmitMapChecks()) return result;
   return AssignEnvironment(result);
 }
 
@@ -2128,8 +2125,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
 
 
 LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
-  ASSERT(instr->key()->representation().IsInteger32() ||
-         instr->key()->representation().IsSmi());
+  ASSERT(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyed* result = NULL;
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 83a37c6230..2b55906352 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -40,12 +40,6 @@ namespace internal {
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_ALL_INSTRUCTION_LIST(V)  \
-  V(ControlInstruction)                  \
-  V(Call)                                \
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
   V(AccessArgumentsAt)                       \
   V(AddI)                                    \
@@ -72,6 +66,7 @@ class LCodeGen;
   V(CheckFunction)                           \
   V(CheckInstanceType)                       \
   V(CheckMaps)                               \
+  V(CheckMapValue)                           \
   V(CheckNonSmi)                             \
   V(CheckPrototypeMaps)                      \
   V(CheckSmi)                                \
@@ -79,7 +74,6 @@ class LCodeGen;
   V(ClampIToUint8)                           \
   V(ClampTToUint8)                           \
   V(ClassOfTestAndBranch)                    \
-  V(CmpConstantEqAndBranch)                  \
   V(CompareNumericAndBranch)                 \
   V(CmpObjectEqAndBranch)                    \
   V(CmpMapAndBranch)                         \
@@ -89,14 +83,18 @@ class LCodeGen;
   V(ConstantS)                               \
   V(ConstantT)                               \
   V(Context)                                 \
+  V(DateField)                               \
   V(DebugBreak)                              \
   V(DeclareGlobals)                          \
   V(Deoptimize)                              \
   V(DivI)                                    \
   V(DoubleToI)                               \
   V(DoubleToSmi)                             \
+  V(Drop)                                    \
   V(DummyUse)                                \
   V(ElementsKind)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
   V(FunctionLiteral)                         \
   V(GetCachedArrayIndex)                     \
   V(GlobalObject)                            \
@@ -104,13 +102,13 @@ class LCodeGen;
   V(Goto)                                    \
   V(HasCachedArrayIndexAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
   V(InstanceOf)                              \
   V(InstanceOfKnownGlobal)                   \
   V(InstanceSize)                            \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(Integer32ToSmi)                          \
-  V(Uint32ToDouble)                          \
   V(InvokeFunction)                          \
   V(IsConstructCallAndBranch)                \
   V(IsObjectAndBranch)                       \
@@ -123,6 +121,7 @@ class LCodeGen;
   V(LinkObjectInList)                        \
   V(LoadContextSlot)                         \
   V(LoadExternalArrayPointer)                \
+  V(LoadFieldByIndex)                        \
   V(LoadFunctionPrototype)                   \
   V(LoadGlobalCell)                          \
   V(LoadGlobalGeneric)                       \
@@ -185,17 +184,10 @@ class LCodeGen;
   V(TrapAllocationMemento)                   \
   V(Typeof)                                  \
   V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
   V(UnknownOSRValue)                         \
   V(ValueOf)                                 \
-  V(ForInPrepareMap)                         \
-  V(ForInCacheArray)                         \
-  V(CheckMapValue)                           \
-  V(LoadFieldByIndex)                        \
-  V(DateField)                               \
-  V(WrapReceiver)                            \
-  V(Drop)                                    \
-  V(InnerAllocatedObject)
-
+  V(WrapReceiver)
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
   virtual Opcode opcode() const { return LInstruction::k##type; } \
@@ -431,6 +423,7 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
 class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
@@ -888,20 +881,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
 };
 
 
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
-  explicit LCmpConstantEqAndBranch(LOperand* left) {
-    inputs_[0] = left;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
-                               "cmp-constant-eq-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
 class LIsObjectAndBranch: public LControlInstruction<1, 1> {
  public:
   LIsObjectAndBranch(LOperand* value, LOperand* temp) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 8a44185ed7..ea08a552be 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -2882,6 +2882,7 @@ void MacroAssembler::Allocate(int object_size,
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
+  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -4968,9 +4969,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
     Register scratch1,
     Register scratch2,
     Label* failure) {
-  int kFlatAsciiStringMask =
+  const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
   ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
   andi(scratch1, first, kFlatAsciiStringMask);
   Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
@@ -4982,9 +4984,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                             Register scratch,
                                                             Label* failure) {
-  int kFlatAsciiStringMask =
+  const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
   And(scratch, type, Operand(kFlatAsciiStringMask));
   Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
 }
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 89d8e68d5e..c4b1ee57a7 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -470,10 +470,9 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
   Representation representation = details.representation();
   ASSERT(!representation.IsNone());
 
-  if (details.type() == CONSTANT_FUNCTION) {
-    Handle<HeapObject> constant(
-        HeapObject::cast(descriptors->GetValue(descriptor)));
-    __ LoadHeapObject(scratch1, constant);
+  if (details.type() == CONSTANT) {
+    Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+    __ LoadObject(scratch1, constant);
     __ Branch(miss_label, ne, value_reg, Operand(scratch1));
   } else if (FLAG_track_fields && representation.IsSmi()) {
     __ JumpIfNotSmi(value_reg, miss_label);
@@ -532,7 +531,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
 
-  if (details.type() == CONSTANT_FUNCTION) {
+  if (details.type() == CONSTANT) {
     ASSERT(value_reg.is(a0));
     __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a0);
@@ -1404,9 +1403,9 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
 }
 
 
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
   // Return the constant value.
-  __ LoadHeapObject(v0, value);
+  __ LoadObject(v0, value);
   __ Ret();
 }
@@ -2709,7 +2708,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
     Handle<Code> code = CompileCustomCall(object, holder,
                                           Handle<Cell>::null(),
                                           function, Handle<String>::cast(name),
-                                          Code::CONSTANT_FUNCTION);
+                                          Code::CONSTANT);
     // A null handle means bail out to the regular compiler code below.
     if (!code.is_null()) return code;
   }