author     Ryan Dahl <ry@tinyclouds.org>  2010-07-14 11:16:20 -0700
committer  Ryan Dahl <ry@tinyclouds.org>  2010-07-14 11:16:20 -0700
commit     8e2530c320d19717ffd3a2685a41e277e35235fd
tree       f11fd572cc3e50fc1ca9db2eac88ccd32b52589c /deps/v8
parent     870aa3d97f65a58f80e8347c796f5ba685410462
download   node-new-8e2530c320d19717ffd3a2685a41e277e35235fd.tar.gz
Upgrade V8 to 2.2.24
Diffstat (limited to 'deps/v8')
81 files changed, 6627 insertions, 5752 deletions
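The headline change in this upgrade is the API for capturing stack traces for uncaught exceptions, added in the v8.h and api.cc hunks below (Message::GetStackTrace() and V8::SetCaptureStackTraceForUncaughtExceptions()). As a minimal embedder-side sketch, not part of this commit: only those two calls are defined in this diff, and the rest (AddMessageListener, the StackTrace/StackFrame accessors) is assumed to behave as the v8 2.2 public API did at the time.

    // Sketch: enable stack-trace capture for uncaught exceptions and print the
    // frames from a message listener. GetStackTrace() and
    // SetCaptureStackTraceForUncaughtExceptions() are introduced in this commit;
    // everything else is assumed from the existing v8 2.2 embedder API.
    #include <v8.h>
    #include <stdio.h>

    static void OnUncaught(v8::Handle<v8::Message> message,
                           v8::Handle<v8::Value> /*data*/) {
      v8::HandleScope scope;
      v8::Handle<v8::StackTrace> trace = message->GetStackTrace();
      if (trace.IsEmpty()) return;  // Capture not enabled, or no frames recorded.
      for (int i = 0; i < trace->GetFrameCount(); i++) {
        v8::Local<v8::StackFrame> frame = trace->GetFrame(i);
        v8::String::Utf8Value name(frame->GetFunctionName());
        printf("  at %s (line %d)\n", *name ? *name : "<anonymous>",
               frame->GetLineNumber());
      }
    }

    int main() {
      // Off by default; capture up to 10 frames in overview mode.
      v8::V8::SetCaptureStackTraceForUncaughtExceptions(
          true, 10, v8::StackTrace::kOverview);
      v8::V8::AddMessageListener(OnUncaught);

      v8::HandleScope scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      // The uncaught exception reaches the listener with a stack trace attached.
      v8::Script::Compile(v8::String::New(
          "function f() { throw new Error('boom'); } f();"))->Run();
      context.Dispose();
      return 0;
    }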
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 602ad80488..cdadfba3a2 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,18 @@ +2010-07-14: Version 2.2.24 + + Added API for capturing stack traces for uncaught exceptions. + + Fixed crash bug when preparsing from a non-external V8 string + (issue 775). + + Fixed JSON.parse bug causing input not to be converted to string + (issue 764). + + Added ES5 Object.freeze and Object.isFrozen. + + Performance improvements on all platforms. + + 2010-07-07: Version 2.2.23 API change: Convert Unicode code points outside the basic multilingual @@ -11,6 +26,7 @@ Performance improvements on all platforms. + 2010-07-05: Version 2.2.22 Added ES5 Object.isExtensible and Object.preventExtensions. diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index ca4a247fe8..9e4cebb73a 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -694,6 +694,13 @@ class V8EXPORT Message { Handle<Value> GetScriptData() const; /** + * Exception stack trace. By default stack traces are not captured for + * uncaught exceptions. SetCaptureStackTraceForUncaughtExceptions allows + * to change this option. + */ + Handle<StackTrace> GetStackTrace() const; + + /** * Returns the number, 1-based, of the line where the error occurred. */ int GetLineNumber() const; @@ -2459,6 +2466,15 @@ class V8EXPORT V8 { static void RemoveMessageListeners(MessageCallback that); /** + * Tells V8 to capture current stack trace when uncaught exception occurs + * and report it to the message listeners. The option is off by default. + */ + static void SetCaptureStackTraceForUncaughtExceptions( + bool capture, + int frame_limit = 10, + StackTrace::StackTraceOptions options = StackTrace::kOverview); + + /** * Sets V8 flags from a string. */ static void SetFlagsFromString(const char* str, int length); diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index e41db94730..9fbfe56dae 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -549,7 +549,7 @@ Object* Accessors::FunctionGetArguments(Object* object, void*) { if (frame->function() != *function) continue; // If there is an arguments variable in the stack, we return that. 
- int index = ScopeInfo<>::StackSlotIndex(frame->code(), + int index = ScopeInfo<>::StackSlotIndex(function->shared()->scope_info(), Heap::arguments_symbol()); if (index >= 0) { Handle<Object> arguments = Handle<Object>(frame->GetExpression(index)); diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 0f64dd45ec..07d9eb0ac7 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -1438,6 +1438,22 @@ v8::Handle<Value> Message::GetScriptData() const { } +v8::Handle<v8::StackTrace> Message::GetStackTrace() const { + if (IsDeadCheck("v8::Message::GetStackTrace()")) { + return Local<v8::StackTrace>(); + } + ENTER_V8; + HandleScope scope; + i::Handle<i::JSObject> obj = + i::Handle<i::JSObject>::cast(Utils::OpenHandle(this)); + i::Handle<i::Object> stackFramesObj = GetProperty(obj, "stackFrames"); + if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>(); + i::Handle<i::JSArray> stackTrace = + i::Handle<i::JSArray>::cast(stackFramesObj); + return scope.Close(Utils::StackTraceToLocal(stackTrace)); +} + + static i::Handle<i::Object> CallV8HeapFunction(const char* name, i::Handle<i::Object> recv, int argc, @@ -1583,7 +1599,9 @@ Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit, StackTraceOptions options) { if (IsDeadCheck("v8::StackTrace::CurrentStackTrace()")) Local<StackTrace>(); ENTER_V8; - return i::Top::CaptureCurrentStackTrace(frame_limit, options); + i::Handle<i::JSArray> stackTrace = + i::Top::CaptureCurrentStackTrace(frame_limit, options); + return Utils::StackTraceToLocal(stackTrace); } @@ -3782,6 +3800,17 @@ void V8::RemoveMessageListeners(MessageCallback that) { } +void V8::SetCaptureStackTraceForUncaughtExceptions( + bool capture, + int frame_limit, + StackTrace::StackTraceOptions options) { + i::Top::SetCaptureStackTraceForUncaughtExceptions( + capture, + frame_limit, + options); +} + + void V8::SetCounterFunction(CounterLookupCallback callback) { if (IsDeadCheck("v8::V8::SetCounterFunction()")) return; i::StatsTable::SetCounterFunction(callback); diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index f5ff43a656..0dc6b77106 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -1801,11 +1801,119 @@ void Assembler::vstr(const DwVfpRegister src, } +static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { + uint64_t i; + memcpy(&i, &d, 8); + + *lo = i & 0xffffffff; + *hi = i >> 32; +} + +// Only works for little endian floating point formats. +// We don't support VFP on the mixed endian floating point platform. +static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { + ASSERT(CpuFeatures::IsEnabled(VFP3)); + + // VMOV can accept an immediate of the form: + // + // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7 + // + // The immediate is encoded using an 8-bit quantity, comprised of two + // 4-bit fields. For an 8-bit immediate of the form: + // + // [abcdefgh] + // + // where a is the MSB and h is the LSB, an immediate 64-bit double can be + // created of the form: + // + // [aBbbbbbb,bbcdefgh,00000000,00000000, + // 00000000,00000000,00000000,00000000] + // + // where B = ~b. + // + + uint32_t lo, hi; + DoubleAsTwoUInt32(d, &lo, &hi); + + // The most obvious constraint is the long block of zeroes. + if ((lo != 0) || ((hi & 0xffff) != 0)) { + return false; + } + + // Bits 62:55 must be all clear or all set. + if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) { + return false; + } + + // Bit 63 must be NOT bit 62. 
+ if (((hi ^ (hi << 1)) & (0x40000000)) == 0) { + return false; + } + + // Create the encoded immediate in the form: + // [00000000,0000abcd,00000000,0000efgh] + *encoding = (hi >> 16) & 0xf; // Low nybble. + *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble. + *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble. + + return true; +} + + +void Assembler::vmov(const DwVfpRegister dst, + double imm, + const Condition cond) { + // Dd = immediate + // Instruction details available in ARM DDI 0406B, A8-640. + ASSERT(CpuFeatures::IsEnabled(VFP3)); + + uint32_t enc; + if (FitsVMOVDoubleImmediate(imm, &enc)) { + // The double can be encoded in the instruction. + emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc); + } else { + // Synthesise the double from ARM immediates. This could be implemented + // using vldr from a constant pool. + uint32_t lo, hi; + DoubleAsTwoUInt32(imm, &lo, &hi); + + if (lo == hi) { + // If the lo and hi parts of the double are equal, the literal is easier + // to create. This is the case with 0.0. + mov(ip, Operand(lo)); + vmov(dst, ip, ip); + } else { + // Move the low part of the double into the lower of the corresponsing S + // registers of D register dst. + mov(ip, Operand(lo)); + vmov(dst.low(), ip, cond); + + // Move the high part of the double into the higher of the corresponsing S + // registers of D register dst. + mov(ip, Operand(hi)); + vmov(dst.high(), ip, cond); + } + } +} + + +void Assembler::vmov(const SwVfpRegister dst, + const SwVfpRegister src, + const Condition cond) { + // Sd = Sm + // Instruction details available in ARM DDI 0406B, A8-642. + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | 0xB*B20 | + dst.code()*B12 | 0x5*B9 | B6 | src.code()); +} + + void Assembler::vmov(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { // Dd = Dm // Instruction details available in ARM DDI 0406B, A8-642. + ASSERT(CpuFeatures::IsEnabled(VFP3)); emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code()); } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 6a4fb23e85..226fb87403 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -130,6 +130,20 @@ struct DwVfpRegister { // Supporting d0 to d15, can be later extended to d31. bool is_valid() const { return 0 <= code_ && code_ < 16; } bool is(DwVfpRegister reg) const { return code_ == reg.code_; } + SwVfpRegister low() const { + SwVfpRegister reg; + reg.code_ = code_ * 2; + + ASSERT(reg.is_valid()); + return reg; + } + SwVfpRegister high() const { + SwVfpRegister reg; + reg.code_ = (code_ * 2) + 1; + + ASSERT(reg.is_valid()); + return reg; + } int code() const { ASSERT(is_valid()); return code_; @@ -932,6 +946,12 @@ class Assembler : public Malloced { const Condition cond = al); void vmov(const DwVfpRegister dst, + double imm, + const Condition cond = al); + void vmov(const SwVfpRegister dst, + const SwVfpRegister src, + const Condition cond = al); + void vmov(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond = al); void vmov(const DwVfpRegister dst, diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index fa6efcd3c1..6e0604bc6e 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -4343,9 +4343,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { __ bind(&powi); // Load 1.0 into d0. 
- __ mov(scratch2, Operand(0x3ff00000)); - __ mov(scratch1, Operand(0)); - __ vmov(d0, scratch1, scratch2); + __ vmov(d0, 1.0); // Get the absolute untagged value of the exponent and use that for the // calculation. @@ -4405,9 +4403,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { AVOID_NANS_AND_INFINITIES); // Load 1.0 into d2. - __ mov(scratch2, Operand(0x3ff00000)); - __ mov(scratch1, Operand(0)); - __ vmov(d2, scratch1, scratch2); + __ vmov(d2, 1.0); // Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x). __ vdiv(d0, d2, d0); @@ -4874,12 +4870,8 @@ void CodeGenerator::GenerateRandomHeapNumber( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - // To allocate a heap number, and ensure that it is not a smi, we - // call the runtime function FUnaryMinus on 0, returning the double - // -0.0. A new, distinct heap number is returned each time. - __ mov(r0, Operand(Smi::FromInt(0))); - __ push(r0); - __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + // Allocate a heap number. + __ CallRuntime(Runtime::kNumberAlloc, 0); __ mov(r4, Operand(r0)); __ bind(&heapnumber_allocated); diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc index 002e4c1368..3df7b4e08e 100644 --- a/deps/v8/src/arm/constants-arm.cc +++ b/deps/v8/src/arm/constants-arm.cc @@ -37,6 +37,26 @@ namespace arm { namespace v8i = v8::internal; +double Instr::DoubleImmedVmov() const { + // Reconstruct a double from the immediate encoded in the vmov instruction. + // + // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh] + // double: [aBbbbbbb,bbcdefgh,00000000,00000000, + // 00000000,00000000,00000000,00000000] + // + // where B = ~b. Only the high 16 bits are affected. + uint64_t high16; + high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh. + high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx. + high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx. + high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx. + + uint64_t imm = high16 << 48; + double d; + memcpy(&d, &imm, 8); + return d; +} + // These register names are defined in a way to match the native disassembler // formatting. See for example the command "objdump -d <binary file>". diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index fa9adbd704..2ac9a41326 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -333,6 +333,9 @@ class Instr { inline bool HasH() const { return HField() == 1; } inline bool HasLink() const { return LinkField() == 1; } + // Decoding the double immediate in the vmov instruction. + double DoubleImmedVmov() const; + // Instructions are read of out a code stream. The only way to get a // reference to an instruction is to convert a pointer. There is no way // to allocate or create instances of class Instr. diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index a52417beef..37401ed28f 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -412,6 +412,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) { PrintCondition(instr); return 4; } + case 'd': { // 'd: vmov double immediate. + double d = instr->DoubleImmedVmov(); + out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, + "#%g", d); + return 1; + } case 'f': { // 'f: bitfield instructions - v7 and above. 
uint32_t lsbit = instr->Bits(11, 7); uint32_t width = instr->Bits(20, 16) + 1; @@ -1052,7 +1058,7 @@ void Decoder::DecodeTypeVFP(Instr* instr) { if (instr->SzField() == 0x1) { Format(instr, "vmov.f64'cond 'Dd, 'Dm"); } else { - Unknown(instr); // Not used by V8. + Format(instr, "vmov.f32'cond 'Sd, 'Sm"); } } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) { DecodeVCVTBetweenDoubleAndSingle(instr); @@ -1066,6 +1072,12 @@ void Decoder::DecodeTypeVFP(Instr* instr) { DecodeVCMP(instr); } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) { Format(instr, "vsqrt.f64'cond 'Dd, 'Dm"); + } else if (instr->Opc3Field() == 0x0) { + if (instr->SzField() == 0x1) { + Format(instr, "vmov.f64'cond 'Dd, 'd"); + } else { + Unknown(instr); // Not used by V8. + } } else { Unknown(instr); // Not used by V8. } diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 080cb83392..3fb946aa65 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -2161,12 +2161,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) { __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - // To allocate a heap number, and ensure that it is not a smi, we - // call the runtime function FUnaryMinus on 0, returning the double - // -0.0. A new, distinct heap number is returned each time. - __ mov(r0, Operand(Smi::FromInt(0))); - __ push(r0); - __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + // Allocate a heap number. + __ CallRuntime(Runtime::kNumberAlloc, 0); __ mov(r4, Operand(r0)); __ bind(&heapnumber_allocated); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 2896cc96e7..f251b31f0b 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -873,88 +873,6 @@ void MacroAssembler::PopTryHandler() { } -Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg, - JSObject* holder, Register holder_reg, - Register scratch, - int save_at_depth, - Label* miss) { - // Make sure there's no overlap between scratch and the other - // registers. - ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg)); - - // Keep track of the current object in register reg. - Register reg = object_reg; - int depth = 0; - - if (save_at_depth == depth) { - str(reg, MemOperand(sp)); - } - - // Check the maps in the prototype chain. - // Traverse the prototype chain from the object and do map checks. - while (object != holder) { - depth++; - - // Only global objects and objects that do not require access - // checks are allowed in stubs. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - - // Get the map of the current object. - ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); - cmp(scratch, Operand(Handle<Map>(object->map()))); - - // Branch on the result of the map check. - b(ne, miss); - - // Check access rights to the global object. This has to happen - // after the map check so that we know that the object is - // actually a global object. - if (object->IsJSGlobalProxy()) { - CheckAccessGlobalProxy(reg, scratch, miss); - // Restore scratch register to be the map of the object. In the - // new space case below, we load the prototype from the map in - // the scratch register. 
- ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); - } - - reg = holder_reg; // from now the object is in holder_reg - JSObject* prototype = JSObject::cast(object->GetPrototype()); - if (Heap::InNewSpace(prototype)) { - // The prototype is in new space; we cannot store a reference - // to it in the code. Load it from the map. - ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset)); - } else { - // The prototype is in old space; load it directly. - mov(reg, Operand(Handle<JSObject>(prototype))); - } - - if (save_at_depth == depth) { - str(reg, MemOperand(sp)); - } - - // Go to the next object in the prototype chain. - object = prototype; - } - - // Check the holder map. - ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); - cmp(scratch, Operand(Handle<Map>(object->map()))); - b(ne, miss); - - // Log the check depth. - LOG(IntEvent("check-maps-depth", depth + 1)); - - // Perform security check for access to the global object and return - // the holder register. - ASSERT(object == holder); - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - if (object->IsJSGlobalProxy()) { - CheckAccessGlobalProxy(reg, scratch, miss); - } - return reg; -} - - void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label* miss) { diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index f1f7de7fe4..156e132698 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -316,24 +316,6 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Inline caching support - // Generates code that verifies that the maps of objects in the - // prototype chain of object hasn't changed since the code was - // generated and branches to the miss label if any map has. If - // necessary the function also generates code for security check - // in case of global object holders. The scratch and holder - // registers are always clobbered, but the object register is only - // clobbered if it the same as the holder register. The function - // returns a register containing the holder - either object_reg or - // holder_reg. - // The function can optionally (when save_at_depth != - // kInvalidProtoDepth) save the object at the given depth by moving - // it to [sp]. - Register CheckMaps(JSObject* object, Register object_reg, - JSObject* holder, Register holder_reg, - Register scratch, - int save_at_depth, - Label* miss); - // Generate code for checking access rights - used for security checks // on access to global objects across environments. The holder register // is left untouched, whereas both scratch registers are clobbered. 
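The assembler-arm.cc hunk above adds vmov with a double immediate: FitsVMOVDoubleImmediate packs a double of the form +/- m * 2^(-n) (16 <= m <= 31, 0 <= n <= 7) into the 8-bit VFP immediate field, Instr::DoubleImmedVmov in constants-arm.cc reverses the packing for the disassembler and simulator, and codegen now loads constants such as 1.0 with a single vmov instead of two movs. A standalone sketch of the predicate — the two helpers are copied from the diff, while the driver and the sample values are illustrative only:

    // Stand-alone check of the VFP "vmov Dd, #imm" encoding predicate.
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t i;
      memcpy(&i, &d, 8);
      *lo = i & 0xffffffff;
      *hi = i >> 32;
    }

    // Returns true if d fits the 8-bit immediate [abcdefgh] and writes the
    // encoded field (abcd in bits 19:16, efgh in bits 3:0) into *encoding.
    static bool FitsVMOVDoubleImmediate(double d, uint32_t* encoding) {
      uint32_t lo, hi;
      DoubleAsTwoUInt32(d, &lo, &hi);
      // The low 48 bits of the mantissa must be zero.
      if ((lo != 0) || ((hi & 0xffff) != 0)) return false;
      // The next 8 exponent/mantissa bits must be all clear or all set.
      if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
        return false;
      }
      // Bit 63 must be NOT bit 62.
      if (((hi ^ (hi << 1)) & 0x40000000) == 0) return false;
      *encoding = (hi >> 16) & 0xf;       // efgh (low nybble).
      *encoding |= (hi >> 4) & 0x70000;   // bcd (low three bits of high nybble).
      *encoding |= (hi >> 12) & 0x80000;  // a (top bit of high nybble).
      return true;
    }

    int main() {
      static const double samples[] = { 1.0, 1.5, -2.0, 0.1, 1e10 };
      for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        uint32_t enc;
        if (FitsVMOVDoubleImmediate(samples[i], &enc)) {
          // e.g. 1.0 -> 0x70000 (abcd = 0111, efgh = 0000).
          printf("%-6g fits, encoded immediate bits = 0x%05x\n", samples[i], enc);
        } else {
          printf("%-6g takes the mov/vmov fallback path\n", samples[i]);
        }
      }
      return 0;
    }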
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index e8910f4860..c67c7aacaa 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -799,7 +799,6 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { CodeDesc code_desc; masm_->GetCode(&code_desc); Handle<Code> code = Factory::NewCode(code_desc, - NULL, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject()); PROFILE(RegExpCodeCreateEvent(*code, *source)); diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index f09ce0035f..3345e4559b 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -2281,7 +2281,7 @@ void Simulator::DecodeTypeVFP(Instr* instr) { if (instr->SzField() == 0x1) { set_d_register_from_double(vd, get_double_from_d_register(vm)); } else { - UNREACHABLE(); // Not used by V8. + set_s_register_from_float(vd, get_float_from_s_register(vm)); } } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) { DecodeVCVTBetweenDoubleAndSingle(instr); @@ -2298,6 +2298,13 @@ void Simulator::DecodeTypeVFP(Instr* instr) { double dm_value = get_double_from_d_register(vm); double dd_value = sqrt(dm_value); set_d_register_from_double(vd, dd_value); + } else if (instr->Opc3Field() == 0x0) { + // vmov immediate. + if (instr->SzField() == 0x1) { + set_d_register_from_double(vd, instr->DoubleImmedVmov()); + } else { + UNREACHABLE(); // Not used by v8. + } } else { UNREACHABLE(); // Not used by V8. } diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 0e649ccd13..a0b6bdb413 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -83,6 +83,112 @@ static void ProbeTable(MacroAssembler* masm, } +// Helper function used to check that the dictionary doesn't contain +// the property. This function may return false negatives, so miss_label +// must always call a backup property check that is complete. +// This function is safe to call if the receiver has fast properties. +// Name must be a symbol and receiver must be a heap object. +static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, + Label* miss_label, + Register receiver, + String* name, + Register scratch0, + Register scratch1) { + ASSERT(name->IsSymbol()); + __ IncrementCounter(&Counters::negative_lookups, 1, scratch0, scratch1); + __ IncrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1); + + Label done; + + const int kInterceptorOrAccessCheckNeededMask = + (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); + + // Bail out if the receiver has a named interceptor or requires access checks. + Register map = scratch1; + __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); + __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); + __ b(ne, miss_label); + + // Check that receiver is a JSObject. + __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE)); + __ b(lt, miss_label); + + // Load properties array. + Register properties = scratch0; + __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + // Check that the properties array is a dictionary. 
+ __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); + Register tmp = properties; + __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); + __ cmp(map, tmp); + __ b(ne, miss_label); + + // Restore the temporarily used register. + __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + + // Compute the capacity mask. + const int kCapacityOffset = + StringDictionary::kHeaderSize + + StringDictionary::kCapacityIndex * kPointerSize; + + // Generate an unrolled loop that performs a few probes before + // giving up. + static const int kProbes = 4; + const int kElementsStartOffset = + StringDictionary::kHeaderSize + + StringDictionary::kElementsStartIndex * kPointerSize; + + // If names of slots in range from 1 to kProbes - 1 for the hash value are + // not equal to the name and kProbes-th slot is not used (its name is the + // undefined value), it guarantees the hash table doesn't contain the + // property. It's true even if some slots represent deleted properties + // (their names are the null value). + for (int i = 0; i < kProbes; i++) { + // scratch0 points to properties hash. + // Compute the masked index: (hash + i + i * i) & mask. + Register index = scratch1; + // Capacity is smi 2^n. + __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); + __ sub(index, index, Operand(1)); + __ and_(index, index, Operand( + Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); + + // Scale the index by multiplying by the entry size. + ASSERT(StringDictionary::kEntrySize == 3); + __ add(index, index, Operand(index, LSL, 1)); // index *= 3. + + Register entity_name = scratch1; + // Having undefined at this place means the name is not contained. + ASSERT_EQ(kSmiTagSize, 1); + Register tmp = properties; + __ add(tmp, properties, Operand(index, LSL, 1)); + __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); + + ASSERT(!tmp.is(entity_name)); + __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); + __ cmp(entity_name, tmp); + if (i != kProbes - 1) { + __ b(eq, &done); + + // Stop if found the property. + __ cmp(entity_name, Operand(Handle<String>(name))); + __ b(eq, miss_label); + + // Restore the properties. + __ ldr(properties, + FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + } else { + // Give up probing if still not found the undefined value. 
+ __ b(ne, miss_label); + } + } + __ bind(&done); + __ DecrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1); +} + + void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, Register receiver, @@ -517,6 +623,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, Label* miss) { ASSERT(holder->HasNamedInterceptor()); ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); @@ -532,6 +639,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { receiver, scratch1, scratch2, + scratch3, holder, lookup, name, @@ -543,6 +651,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { receiver, scratch1, scratch2, + scratch3, name, holder, miss); @@ -555,6 +664,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, JSObject* interceptor_holder, LookupResult* lookup, String* name, @@ -596,7 +706,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register holder = stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, scratch1, - scratch2, name, depth1, miss); + scratch2, scratch3, name, depth1, miss); // Invoke an interceptor and if it provides a value, // branch to |regular_invoke|. @@ -612,7 +722,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { if (interceptor_holder != lookup->holder()) { stub_compiler_->CheckPrototypes(interceptor_holder, receiver, lookup->holder(), scratch1, - scratch2, name, depth2, miss); + scratch2, scratch3, name, depth2, miss); } else { // CheckPrototypes has a side effect of fetching a 'holder' // for API (object which is instanceof for the signature). It's @@ -648,12 +758,13 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, String* name, JSObject* interceptor_holder, Label* miss_label) { Register holder = stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, + scratch1, scratch2, scratch3, name, miss_label); // Call a runtime function to load the interceptor property. @@ -738,36 +849,134 @@ Register StubCompiler::CheckPrototypes(JSObject* object, Register object_reg, JSObject* holder, Register holder_reg, - Register scratch, + Register scratch1, + Register scratch2, String* name, int save_at_depth, - Label* miss, - Register extra) { - // Check that the maps haven't changed. - Register result = - masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, - save_at_depth, miss); + Label* miss) { + // Make sure there's no overlap between holder and object registers. + ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + && !scratch2.is(scratch1)); + + // Keep track of the current object in register reg. + Register reg = object_reg; + int depth = 0; + + if (save_at_depth == depth) { + __ str(reg, MemOperand(sp)); + } + + // Check the maps in the prototype chain. + // Traverse the prototype chain from the object and do map checks. + JSObject* current = object; + while (current != holder) { + depth++; + + // Only global objects and objects that do not require access + // checks are allowed in stubs. 
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); + + JSObject* prototype = JSObject::cast(current->GetPrototype()); + if (!current->HasFastProperties() && + !current->IsJSGlobalObject() && + !current->IsJSGlobalProxy()) { + if (!name->IsSymbol()) { + Object* lookup_result = Heap::LookupSymbol(name); + if (lookup_result->IsFailure()) { + set_failure(Failure::cast(lookup_result)); + return reg; + } else { + name = String::cast(lookup_result); + } + } + ASSERT(current->property_dictionary()->FindEntry(name) == + StringDictionary::kNotFound); + + GenerateDictionaryNegativeLookup(masm(), + miss, + reg, + name, + scratch1, + scratch2); + __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); + reg = holder_reg; // from now the object is in holder_reg + __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); + } else { + // Get the map of the current object. + __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); + __ cmp(scratch1, Operand(Handle<Map>(current->map()))); + + // Branch on the result of the map check. + __ b(ne, miss); + + // Check access rights to the global object. This has to happen + // after the map check so that we know that the object is + // actually a global object. + if (current->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(reg, scratch1, miss); + // Restore scratch register to be the map of the object. In the + // new space case below, we load the prototype from the map in + // the scratch register. + __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); + } + + reg = holder_reg; // from now the object is in holder_reg + if (Heap::InNewSpace(prototype)) { + // The prototype is in new space; we cannot store a reference + // to it in the code. Load it from the map. + __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); + } else { + // The prototype is in old space; load it directly. + __ mov(reg, Operand(Handle<JSObject>(prototype))); + } + } + + if (save_at_depth == depth) { + __ str(reg, MemOperand(sp)); + } + + // Go to the next object in the prototype chain. + current = prototype; + } + + // Check the holder map. + __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); + __ cmp(scratch1, Operand(Handle<Map>(current->map()))); + __ b(ne, miss); + + // Log the check depth. + LOG(IntEvent("check-maps-depth", depth + 1)); + + // Perform security check for access to the global object and return + // the holder register. + ASSERT(current == holder); + ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); + if (current->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(reg, scratch1, miss); + } // If we've skipped any global objects, it's not enough to verify // that their maps haven't changed. We also need to check that the // property cell for the property is still empty. - while (object != holder) { - if (object->IsGlobalObject()) { + current = object; + while (current != holder) { + if (current->IsGlobalObject()) { Object* cell = GenerateCheckPropertyCell(masm(), - GlobalObject::cast(object), + GlobalObject::cast(current), name, - scratch, + scratch1, miss); if (cell->IsFailure()) { set_failure(Failure::cast(cell)); - return result; + return reg; } } - object = JSObject::cast(object->GetPrototype()); + current = JSObject::cast(current->GetPrototype()); } // Return the register containing the holder. 
- return result; + return reg; } @@ -776,6 +985,7 @@ void StubCompiler::GenerateLoadField(JSObject* object, Register receiver, Register scratch1, Register scratch2, + Register scratch3, int index, String* name, Label* miss) { @@ -785,7 +995,8 @@ void StubCompiler::GenerateLoadField(JSObject* object, // Check that the maps haven't changed. Register reg = - CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, + name, miss); GenerateFastPropertyLoad(masm(), r0, reg, holder, index); __ Ret(); } @@ -796,6 +1007,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, Register receiver, Register scratch1, Register scratch2, + Register scratch3, Object* value, String* name, Label* miss) { @@ -805,7 +1017,8 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, // Check that the maps haven't changed. Register reg = - CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + CheckPrototypes(object, receiver, holder, + scratch1, scratch2, scratch3, name, miss); // Return the constant value. __ mov(r0, Operand(Handle<Object>(value))); @@ -819,6 +1032,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, Register name_reg, Register scratch1, Register scratch2, + Register scratch3, AccessorInfo* callback, String* name, Label* miss, @@ -829,7 +1043,8 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, // Check that the maps haven't changed. Register reg = - CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, + name, miss); // Push the arguments on the JS stack of the caller. __ push(receiver); // Receiver. @@ -854,6 +1069,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, Register name_reg, Register scratch1, Register scratch2, + Register scratch3, String* name, Label* miss) { ASSERT(interceptor_holder->HasNamedInterceptor()); @@ -881,7 +1097,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, + name, miss); ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1)); // Save necessary data before invoking an interceptor. @@ -930,6 +1147,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, lookup->holder(), scratch1, scratch2, + scratch3, name, miss); } @@ -975,7 +1193,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Call the runtime system to load the interceptor. // Check that the maps haven't changed. Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, + name, miss); PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, interceptor_holder); @@ -1053,7 +1272,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object, __ b(eq, &miss); // Do the right check and compute the holder register. 
- Register reg = CheckPrototypes(object, r0, holder, r1, r3, name, &miss); + Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss); GenerateFastPropertyLoad(masm(), r1, reg, holder, index); GenerateCallFunction(masm(), object, arguments(), &miss); @@ -1098,7 +1317,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, __ b(eq, &miss); // Check that the maps haven't changed. - CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss); + CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss); if (object->IsGlobalObject()) { __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); @@ -1149,7 +1368,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, __ b(eq, &miss); // Check that the maps haven't changed. - CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss); + CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss); if (object->IsGlobalObject()) { __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); @@ -1246,7 +1465,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, } // Check that the maps haven't changed. - CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, name, + CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, depth, &miss); // Patch the receiver on the stack with the global proxy if @@ -1270,7 +1489,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::STRING_FUNCTION_INDEX, r0); CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3, - r1, name, &miss); + r1, r4, name, &miss); } break; @@ -1290,7 +1509,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::NUMBER_FUNCTION_INDEX, r0); CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3, - r1, name, &miss); + r1, r4, name, &miss); } break; } @@ -1313,7 +1532,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::BOOLEAN_FUNCTION_INDEX, r0); CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3, - r1, name, &miss); + r1, r4, name, &miss); } break; } @@ -1372,6 +1591,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object, r1, r3, r4, + r0, &miss); // Move returned value, the function to call, to r1. @@ -1418,7 +1638,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, } // Check that the maps haven't changed. - CheckPrototypes(object, r0, holder, r3, r1, name, &miss); + CheckPrototypes(object, r0, holder, r3, r1, r4, name, &miss); // Get the value from the cell. __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell))); @@ -1642,7 +1862,7 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name, __ b(eq, &miss); // Check the maps of the full prototype chain. - CheckPrototypes(object, r0, last, r3, r1, name, &miss); + CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss); // If the last object in the prototype chain is a global object, // check that the global property cell is empty. 
@@ -1679,7 +1899,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object, // ----------------------------------- Label miss; - GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss); + GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1700,7 +1920,7 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name, Label miss; Failure* failure = Failure::InternalError(); - bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, + bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name, &miss, &failure); if (!success) return failure; @@ -1723,7 +1943,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object, // ----------------------------------- Label miss; - GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss); + GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1751,6 +1971,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object, r2, r3, r1, + r4, name, &miss); __ bind(&miss); @@ -1782,7 +2003,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, } // Check that the map of the global has not changed. - CheckPrototypes(object, r0, holder, r3, r4, name, &miss); + CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss); // Get the value from the cell. __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell))); @@ -1823,7 +2044,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, __ cmp(r0, Operand(Handle<String>(name))); __ b(ne, &miss); - GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss); + GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1847,7 +2068,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, __ b(ne, &miss); Failure* failure = Failure::InternalError(); - bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, + bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name, &miss, &failure); if (!success) return failure; @@ -1873,7 +2094,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ cmp(r0, Operand(Handle<String>(name))); __ b(ne, &miss); - GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss); + GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1905,6 +2126,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, r0, r2, r3, + r4, name, &miss); __ bind(&miss); diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index bbd69ecaba..e1d4489d44 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -812,6 +812,9 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, initial_map->set_instance_size( initial_map->instance_size() + 5 * kPointerSize); initial_map->set_instance_descriptors(*descriptors); + initial_map->set_scavenger( + Heap::GetScavenger(initial_map->instance_type(), + initial_map->instance_size())); } { // -- J S O N diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index ad52ea18b8..3a0393efbc 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -1475,7 +1475,7 @@ void Builtins::Setup(bool create_heap_objects) { // During startup it's OK to always allocate and defer GC to 
later. // This simplifies things because we don't need to retry. AlwaysAllocateScope __scope__; - code = Heap::CreateCode(desc, NULL, flags, masm.CodeObject()); + code = Heap::CreateCode(desc, flags, masm.CodeObject()); if (code->IsFailure()) { v8::internal::V8::FatalProcessOutOfMemory("CreateCode"); } diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 9d5969bb46..78062b4036 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -102,8 +102,7 @@ Handle<Code> CodeStub::GetCode() { static_cast<Code::Kind>(GetCodeKind()), InLoop(), GetICState()); - Handle<Code> new_object = - Factory::NewCode(desc, NULL, flags, masm.CodeObject()); + Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject()); RecordCodeGeneration(*new_object, &masm); if (has_custom_cache()) { @@ -140,8 +139,7 @@ Object* CodeStub::TryGetCode() { static_cast<Code::Kind>(GetCodeKind()), InLoop(), GetICState()); - Object* new_object = - Heap::CreateCode(desc, NULL, flags, masm.CodeObject()); + Object* new_object = Heap::CreateCode(desc, flags, masm.CodeObject()); if (new_object->IsFailure()) return new_object; code = Code::cast(new_object); RecordCodeGeneration(code, &masm); diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 8864c95a6f..84b73a4ecb 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -162,9 +162,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm, // Allocate and install the code. CodeDesc desc; masm->GetCode(&desc); - ZoneScopeInfo sinfo(info->scope()); - Handle<Code> code = - Factory::NewCode(desc, &sinfo, flags, masm->CodeObject()); + Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject()); #ifdef ENABLE_DISASSEMBLER bool print_code = Bootstrapper::IsActive() diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index ebb9743562..ec6b5ffb4a 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -40,6 +40,7 @@ #include "oprofile-agent.h" #include "rewriter.h" #include "scopes.h" +#include "scopeinfo.h" namespace v8 { namespace internal { @@ -156,7 +157,12 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) { #ifdef ENABLE_DEBUGGER_SUPPORT Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) { Handle<Context> context = Handle<Context>::null(); - return MakeCode(context, info); + Handle<Code> code = MakeCode(context, info); + if (!info->shared_info().is_null()) { + info->shared_info()->set_scope_info( + *ScopeInfo<>::CreateHeapObject(info->scope())); + } + return code; } #endif @@ -252,9 +258,11 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global, // Allocate function. Handle<SharedFunctionInfo> result = - Factory::NewSharedFunctionInfo(lit->name(), - lit->materialized_literal_count(), - code); + Factory::NewSharedFunctionInfo( + lit->name(), + lit->materialized_literal_count(), + code, + ScopeInfo<>::CreateHeapObject(info.scope())); ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); Compiler::SetFunctionInfo(result, lit, true, script); @@ -275,9 +283,6 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global, } -static StaticResource<SafeStringInputBuffer> safe_string_input_buffer; - - Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source, Handle<Object> script_name, int line_offset, @@ -306,9 +311,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source, // No cache entry found. Do pre-parsing and compile the script. 
ScriptDataImpl* pre_data = input_pre_data; if (pre_data == NULL && source_length >= FLAG_min_preparse_length) { - Access<SafeStringInputBuffer> buf(&safe_string_input_buffer); - buf->Reset(source.location()); - pre_data = PreParse(source, buf.value(), extension); + pre_data = PreParse(source, NULL, extension); } // Create a script object describing the script to be compiled. @@ -445,8 +448,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) { info->script(), code); - // Update the shared function info with the compiled code. + // Update the shared function info with the compiled code and the scope info. shared->set_code(*code); + shared->set_scope_info(*ScopeInfo<>::CreateHeapObject(info->scope())); // Set the expected number of properties for instances. SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count()); @@ -481,6 +485,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, bool allow_lazy = literal->AllowsLazyCompilation() && !LiveEditFunctionTracker::IsActive(); + Handle<Object> scope_info(ScopeInfo<>::EmptyHeapObject()); + // Generate code Handle<Code> code; if (FLAG_lazy && allow_lazy) { @@ -562,13 +568,15 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, literal->start_position(), script, code); + scope_info = ScopeInfo<>::CreateHeapObject(info.scope()); } // Create a shared function info object. Handle<SharedFunctionInfo> result = Factory::NewSharedFunctionInfo(literal->name(), literal->materialized_literal_count(), - code); + code, + scope_info); SetFunctionInfo(result, literal, false, script); // Set the expected number of properties for instances and return diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc index 19920d22ee..1eab24c28e 100644 --- a/deps/v8/src/contexts.cc +++ b/deps/v8/src/contexts.cc @@ -120,9 +120,9 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags, // we have context-local slots // check non-parameter locals in context - Handle<Code> code(context->closure()->code()); + Handle<Object> scope_info(context->closure()->shared()->scope_info()); Variable::Mode mode; - int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode); + int index = ScopeInfo<>::ContextSlotIndex(*scope_info, *name, &mode); ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS); if (index >= 0) { // slot found @@ -150,11 +150,11 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags, } // check parameter locals in context - int param_index = ScopeInfo<>::ParameterIndex(*code, *name); + int param_index = ScopeInfo<>::ParameterIndex(*scope_info, *name); if (param_index >= 0) { // slot found. int index = - ScopeInfo<>::ContextSlotIndex(*code, + ScopeInfo<>::ContextSlotIndex(*scope_info, Heap::arguments_shadow_symbol(), NULL); ASSERT(index >= 0); // arguments must exist and be in the heap context @@ -170,7 +170,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags, // check intermediate context (holding only the function name variable) if (follow_context_chain) { - int index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name); + int index = ScopeInfo<>::FunctionContextSlotIndex(*scope_info, *name); if (index >= 0) { // slot found if (FLAG_trace_contexts) { @@ -216,18 +216,18 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) { ASSERT(context->is_function_context()); // Check non-parameter locals. 
- Handle<Code> code(context->closure()->code()); + Handle<Object> scope_info(context->closure()->shared()->scope_info()); Variable::Mode mode; - int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode); + int index = ScopeInfo<>::ContextSlotIndex(*scope_info, *name, &mode); ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS); if (index >= 0) return false; // Check parameter locals. - int param_index = ScopeInfo<>::ParameterIndex(*code, *name); + int param_index = ScopeInfo<>::ParameterIndex(*scope_info, *name); if (param_index >= 0) return false; // Check context only holding the function name variable. - index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name); + index = ScopeInfo<>::FunctionContextSlotIndex(*scope_info, *name); if (index >= 0) return false; context = Context::cast(context->closure()->context()); } diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index b8e0252aae..4922a62860 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -759,7 +759,7 @@ bool Debug::CompileDebuggerScript(int index) { if (caught_exception) { Handle<Object> message = MessageHandler::MakeMessageObject( "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(), - Handle<String>()); + Handle<String>(), Handle<JSArray>()); MessageHandler::ReportMessage(NULL, message); return false; } diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index 39e881ac3d..18be639f39 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -277,6 +277,8 @@ Handle<Map> Factory::CopyMap(Handle<Map> src, copy->set_inobject_properties(inobject_properties); copy->set_unused_property_fields(inobject_properties); copy->set_instance_size(copy->instance_size() + instance_size_delta); + copy->set_scavenger(Heap::GetScavenger(copy->instance_type(), + copy->instance_size())); return copy; } @@ -541,10 +543,9 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name, Handle<Code> Factory::NewCode(const CodeDesc& desc, - ZoneScopeInfo* sinfo, Code::Flags flags, Handle<Object> self_ref) { - CALL_HEAP_FUNCTION(Heap::CreateCode(desc, sinfo, flags, self_ref), Code); + CALL_HEAP_FUNCTION(Heap::CreateCode(desc, flags, self_ref), Code); } @@ -680,9 +681,13 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements, Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo( - Handle<String> name, int number_of_literals, Handle<Code> code) { + Handle<String> name, + int number_of_literals, + Handle<Code> code, + Handle<Object> scope_info) { Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name); shared->set_code(*code); + shared->set_scope_info(*scope_info); int literals_array_size = number_of_literals; // If the function contains object, regexp or array literals, // allocate extra space for a literals array prefix containing the diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 56deda5ab5..0576d74a6e 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -34,9 +34,6 @@ namespace v8 { namespace internal { -// Forward declarations. -class ZoneScopeInfo; - // Interface for handle based allocation. 
class Factory : public AllStatic { @@ -241,7 +238,6 @@ class Factory : public AllStatic { PretenureFlag pretenure = TENURED); static Handle<Code> NewCode(const CodeDesc& desc, - ZoneScopeInfo* sinfo, Code::Flags flags, Handle<Object> self_reference); @@ -352,7 +348,10 @@ class Factory : public AllStatic { } static Handle<SharedFunctionInfo> NewSharedFunctionInfo( - Handle<String> name, int number_of_literals, Handle<Code> code); + Handle<String> name, + int number_of_literals, + Handle<Code> code, + Handle<Object> scope_info); static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name); static Handle<NumberDictionary> DictionaryAtNumberPut( diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index 67a20d3cb8..8b601b67b7 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -532,8 +532,11 @@ void JavaScriptFrame::Print(StringStream* accumulator, if (IsConstructor()) accumulator->Add("new "); accumulator->PrintFunction(function, receiver, &code); + Handle<Object> scope_info(ScopeInfo<>::EmptyHeapObject()); + if (function->IsJSFunction()) { Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared()); + scope_info = Handle<Object>(shared->scope_info()); Object* script_obj = shared->script(); if (script_obj->IsScript()) { Handle<Script> script(Script::cast(script_obj)); @@ -561,7 +564,7 @@ void JavaScriptFrame::Print(StringStream* accumulator, // Get scope information for nicer output, if possible. If code is // NULL, or doesn't contain scope info, info will return 0 for the // number of parameters, stack slots, or context slots. - ScopeInfo<PreallocatedStorage> info(code); + ScopeInfo<PreallocatedStorage> info(*scope_info); // Print the parameters. int parameters_count = ComputeParametersCount(); diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index aea88586fb..7bcc964425 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -345,7 +345,6 @@ class ObjectGroup; class TickSample; class VirtualMemory; class Mutex; -class ZoneScopeInfo; typedef bool (*WeakSlotCallback)(Object** pointer); diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 1b625897d1..ab0d13fc84 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -799,34 +799,34 @@ class ScavengeVisitor: public ObjectVisitor { }; -// A queue of pointers and maps of to-be-promoted objects during a -// scavenge collection. +// A queue of objects promoted during scavenge. Each object is accompanied +// by it's size to avoid dereferencing a map pointer for scanning. class PromotionQueue { public: void Initialize(Address start_address) { - front_ = rear_ = reinterpret_cast<HeapObject**>(start_address); + front_ = rear_ = reinterpret_cast<intptr_t*>(start_address); } bool is_empty() { return front_ <= rear_; } - void insert(HeapObject* object, Map* map) { - *(--rear_) = object; - *(--rear_) = map; + void insert(HeapObject* target, int size) { + *(--rear_) = reinterpret_cast<intptr_t>(target); + *(--rear_) = size; // Assert no overflow into live objects. ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top()); } - void remove(HeapObject** object, Map** map) { - *object = *(--front_); - *map = Map::cast(*(--front_)); + void remove(HeapObject** target, int* size) { + *target = reinterpret_cast<HeapObject*>(*(--front_)); + *size = static_cast<int>(*(--front_)); // Assert no underflow. ASSERT(front_ >= rear_); } private: // The front of the queue is higher in memory than the rear. 
- HeapObject** front_; - HeapObject** rear_; + intptr_t* front_; + intptr_t* rear_; }; @@ -1041,31 +1041,26 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, // queue is empty. while (new_space_front < new_space_.top()) { HeapObject* object = HeapObject::FromAddress(new_space_front); - object->Iterate(scavenge_visitor); - new_space_front += object->Size(); + Map* map = object->map(); + int size = object->SizeFromMap(map); + object->IterateBody(map->instance_type(), size, scavenge_visitor); + new_space_front += size; } // Promote and process all the to-be-promoted objects. while (!promotion_queue.is_empty()) { - HeapObject* source; - Map* map; - promotion_queue.remove(&source, &map); - // Copy the from-space object to its new location (given by the - // forwarding address) and fix its map. - HeapObject* target = source->map_word().ToForwardingAddress(); - int size = source->SizeFromMap(map); - CopyBlock(target->address(), source->address(), size); - target->set_map(map); - -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) - // Update NewSpace stats if necessary. - RecordCopiedObject(target); -#endif - // Visit the newly copied object for pointers to new space. + HeapObject* target; + int size; + promotion_queue.remove(&target, &size); + + // Promoted object might be already partially visited + // during dirty regions iteration. Thus we search specificly + // for pointers to from semispace instead of looking for pointers + // to new space. ASSERT(!target->IsMap()); - IterateAndMarkPointersToNewSpace(target->address(), - target->address() + size, - &ScavengePointer); + IterateAndMarkPointersToFromSpace(target->address(), + target->address() + size, + &ScavengePointer); } // Take another spin if there are now unswept objects in new space @@ -1077,7 +1072,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) -void Heap::RecordCopiedObject(HeapObject* obj) { +static void RecordCopiedObject(HeapObject* obj) { bool should_record = false; #ifdef DEBUG should_record = FLAG_heap_stats; @@ -1086,22 +1081,24 @@ void Heap::RecordCopiedObject(HeapObject* obj) { should_record = should_record || FLAG_log_gc; #endif if (should_record) { - if (new_space_.Contains(obj)) { - new_space_.RecordAllocation(obj); + if (Heap::new_space()->Contains(obj)) { + Heap::new_space()->RecordAllocation(obj); } else { - new_space_.RecordPromotion(obj); + Heap::new_space()->RecordPromotion(obj); } } } #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) - -HeapObject* Heap::MigrateObject(HeapObject* source, - HeapObject* target, - int size) { +// Helper function used by CopyObject to copy a source object to an +// allocated target object and update the forwarding pointer in the source +// object. Returns the target object. +inline static HeapObject* MigrateObject(HeapObject* source, + HeapObject* target, + int size) { // Copy the content of source to target. - CopyBlock(target->address(), source->address(), size); + Heap::CopyBlock(target->address(), source->address(), size); // Set the forwarding address. 
source->set_map_word(MapWord::FromForwardingAddress(target)); @@ -1115,117 +1112,281 @@ HeapObject* Heap::MigrateObject(HeapObject* source, } -static inline bool IsShortcutCandidate(HeapObject* object, Map* map) { - STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0); - ASSERT(object->map() == map); - InstanceType type = map->instance_type(); - if ((type & kShortcutTypeMask) != kShortcutTypeTag) return false; - ASSERT(object->IsString() && !object->IsSymbol()); - return ConsString::cast(object)->unchecked_second() == Heap::empty_string(); +enum ObjectContents { DATA_OBJECT, POINTER_OBJECT }; +enum SizeRestriction { SMALL, UNKNOWN_SIZE }; + + +template<ObjectContents object_contents, SizeRestriction size_restriction> +static inline void EvacuateObject(Map* map, + HeapObject** slot, + HeapObject* object, + int object_size) { + ASSERT((size_restriction != SMALL) || + (object_size <= Page::kMaxHeapObjectSize)); + ASSERT(object->Size() == object_size); + + if (Heap::ShouldBePromoted(object->address(), object_size)) { + Object* result; + + if ((size_restriction != SMALL) && + (object_size > Page::kMaxHeapObjectSize)) { + result = Heap::lo_space()->AllocateRawFixedArray(object_size); + } else { + if (object_contents == DATA_OBJECT) { + result = Heap::old_data_space()->AllocateRaw(object_size); + } else { + result = Heap::old_pointer_space()->AllocateRaw(object_size); + } + } + + if (!result->IsFailure()) { + HeapObject* target = HeapObject::cast(result); + *slot = MigrateObject(object, target, object_size); + + if (object_contents == POINTER_OBJECT) { + promotion_queue.insert(target, object_size); + } + + Heap::tracer()->increment_promoted_objects_size(object_size); + return; + } + } + Object* result = Heap::new_space()->AllocateRaw(object_size); + ASSERT(!result->IsFailure()); + *slot = MigrateObject(object, HeapObject::cast(result), object_size); + return; } -void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { - ASSERT(InFromSpace(object)); - MapWord first_word = object->map_word(); - ASSERT(!first_word.IsForwardingAddress()); +template<int object_size_in_words, ObjectContents object_contents> +static inline void EvacuateObjectOfFixedSize(Map* map, + HeapObject** slot, + HeapObject* object) { + const int object_size = object_size_in_words << kPointerSizeLog2; + EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); +} + + +template<ObjectContents object_contents> +static inline void EvacuateObjectOfFixedSize(Map* map, + HeapObject** slot, + HeapObject* object) { + int object_size = map->instance_size(); + EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); +} + + +static inline void EvacuateFixedArray(Map* map, + HeapObject** slot, + HeapObject* object) { + int object_size = FixedArray::cast(object)->FixedArraySize(); + EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); +} + + +static inline void EvacuateByteArray(Map* map, + HeapObject** slot, + HeapObject* object) { + int object_size = ByteArray::cast(object)->ByteArraySize(); + EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); +} + + +static Scavenger GetScavengerForSize(int object_size, + ObjectContents object_contents) { + ASSERT(IsAligned(object_size, kPointerSize)); + ASSERT(object_size < Page::kMaxHeapObjectSize); + + switch (object_size >> kPointerSizeLog2) { +#define CASE(n) \ + case n: \ + if (object_contents == DATA_OBJECT) { \ + return static_cast<Scavenger>( \ + &EvacuateObjectOfFixedSize<n, DATA_OBJECT>); \ + } else { \ + 
return static_cast<Scavenger>( \ + &EvacuateObjectOfFixedSize<n, POINTER_OBJECT>); \ + } + + CASE(1); + CASE(2); + CASE(3); + CASE(4); + CASE(5); + CASE(6); + CASE(7); + CASE(8); + CASE(9); + CASE(10); + CASE(11); + CASE(12); + CASE(13); + CASE(14); + CASE(15); + CASE(16); + default: + if (object_contents == DATA_OBJECT) { + return static_cast<Scavenger>(&EvacuateObjectOfFixedSize<DATA_OBJECT>); + } else { + return static_cast<Scavenger>( + &EvacuateObjectOfFixedSize<POINTER_OBJECT>); + } + +#undef CASE + } +} + + +static inline void EvacuateSeqAsciiString(Map* map, + HeapObject** slot, + HeapObject* object) { + int object_size = SeqAsciiString::cast(object)-> + SeqAsciiStringSize(map->instance_type()); + EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); +} + + +static inline void EvacuateSeqTwoByteString(Map* map, + HeapObject** slot, + HeapObject* object) { + int object_size = SeqTwoByteString::cast(object)-> + SeqTwoByteStringSize(map->instance_type()); + EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); +} - // Optimization: Bypass flattened ConsString objects. - if (IsShortcutCandidate(object, first_word.ToMap())) { - object = HeapObject::cast(ConsString::cast(object)->unchecked_first()); - *p = object; - // After patching *p we have to repeat the checks that object is in the - // active semispace of the young generation and not already copied. - if (!InNewSpace(object)) return; - first_word = object->map_word(); + +static inline bool IsShortcutCandidate(int type) { + return ((type & kShortcutTypeMask) == kShortcutTypeTag); +} + + +static inline void EvacuateShortcutCandidate(Map* map, + HeapObject** slot, + HeapObject* object) { + ASSERT(IsShortcutCandidate(map->instance_type())); + + if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) { + HeapObject* first = + HeapObject::cast(ConsString::cast(object)->unchecked_first()); + + *slot = first; + + if (!Heap::InNewSpace(first)) { + object->set_map_word(MapWord::FromForwardingAddress(first)); + return; + } + + MapWord first_word = first->map_word(); if (first_word.IsForwardingAddress()) { - *p = first_word.ToForwardingAddress(); + HeapObject* target = first_word.ToForwardingAddress(); + + *slot = target; + object->set_map_word(MapWord::FromForwardingAddress(target)); return; } + + first->map()->Scavenge(slot, first); + object->set_map_word(MapWord::FromForwardingAddress(*slot)); + return; } - int object_size = object->SizeFromMap(first_word.ToMap()); - // We rely on live objects in new space to be at least two pointers, - // so we can store the from-space address and map pointer of promoted - // objects in the to space. - ASSERT(object_size >= 2 * kPointerSize); + int object_size = ConsString::kSize; + EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size); +} - // If the object should be promoted, we try to copy it to old space. - if (ShouldBePromoted(object->address(), object_size)) { - Object* result; - if (object_size > MaxObjectSizeInPagedSpace()) { - result = lo_space_->AllocateRawFixedArray(object_size); - if (!result->IsFailure()) { - HeapObject* target = HeapObject::cast(result); - - if (object->IsFixedArray()) { - // Save the from-space object pointer and its map pointer at the - // top of the to space to be swept and copied later. Write the - // forwarding address over the map word of the from-space - // object. 
- promotion_queue.insert(object, first_word.ToMap()); - object->set_map_word(MapWord::FromForwardingAddress(target)); - - // Give the space allocated for the result a proper map by - // treating it as a free list node (not linked into the free - // list). - FreeListNode* node = FreeListNode::FromAddress(target->address()); - node->set_size(object_size); - - *p = target; + +Scavenger Heap::GetScavenger(int instance_type, int instance_size) { + if (instance_type < FIRST_NONSTRING_TYPE) { + switch (instance_type & kStringRepresentationMask) { + case kSeqStringTag: + if ((instance_type & kStringEncodingMask) == kAsciiStringTag) { + return &EvacuateSeqAsciiString; } else { - // In large object space only fixed arrays might possibly contain - // intergenerational references. - // All other objects can be copied immediately and not revisited. - *p = MigrateObject(object, target, object_size); + return &EvacuateSeqTwoByteString; } - tracer()->increment_promoted_objects_size(object_size); - return; - } - } else { - OldSpace* target_space = Heap::TargetSpace(object); - ASSERT(target_space == Heap::old_pointer_space_ || - target_space == Heap::old_data_space_); - result = target_space->AllocateRaw(object_size); - if (!result->IsFailure()) { - HeapObject* target = HeapObject::cast(result); - if (target_space == Heap::old_pointer_space_) { - // Save the from-space object pointer and its map pointer at the - // top of the to space to be swept and copied later. Write the - // forwarding address over the map word of the from-space - // object. - promotion_queue.insert(object, first_word.ToMap()); - object->set_map_word(MapWord::FromForwardingAddress(target)); - - // Give the space allocated for the result a proper map by - // treating it as a free list node (not linked into the free - // list). - FreeListNode* node = FreeListNode::FromAddress(target->address()); - node->set_size(object_size); - - *p = target; + case kConsStringTag: + if (IsShortcutCandidate(instance_type)) { + return &EvacuateShortcutCandidate; } else { - // Objects promoted to the data space can be copied immediately - // and not revisited---we will never sweep that space for - // pointers and the copied objects do not contain pointers to - // new space objects. 
- *p = MigrateObject(object, target, object_size); -#ifdef DEBUG - VerifyNonPointerSpacePointersVisitor v; - (*p)->Iterate(&v); -#endif + ASSERT(instance_size == ConsString::kSize); + return GetScavengerForSize(ConsString::kSize, POINTER_OBJECT); } - tracer()->increment_promoted_objects_size(object_size); - return; - } + + case kExternalStringTag: + ASSERT(instance_size == ExternalString::kSize); + return GetScavengerForSize(ExternalString::kSize, DATA_OBJECT); } + UNREACHABLE(); + } + + switch (instance_type) { + case BYTE_ARRAY_TYPE: + return reinterpret_cast<Scavenger>(&EvacuateByteArray); + + case FIXED_ARRAY_TYPE: + return reinterpret_cast<Scavenger>(&EvacuateFixedArray); + + case JS_OBJECT_TYPE: + case JS_CONTEXT_EXTENSION_OBJECT_TYPE: + case JS_VALUE_TYPE: + case JS_ARRAY_TYPE: + case JS_REGEXP_TYPE: + case JS_FUNCTION_TYPE: + case JS_GLOBAL_PROXY_TYPE: + case JS_GLOBAL_OBJECT_TYPE: + case JS_BUILTINS_OBJECT_TYPE: + return GetScavengerForSize(instance_size, POINTER_OBJECT); + + case ODDBALL_TYPE: + return NULL; + + case PROXY_TYPE: + return GetScavengerForSize(Proxy::kSize, DATA_OBJECT); + + case MAP_TYPE: + return NULL; + + case CODE_TYPE: + return NULL; + + case JS_GLOBAL_PROPERTY_CELL_TYPE: + return NULL; + + case HEAP_NUMBER_TYPE: + case FILLER_TYPE: + case PIXEL_ARRAY_TYPE: + case EXTERNAL_BYTE_ARRAY_TYPE: + case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE: + case EXTERNAL_SHORT_ARRAY_TYPE: + case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE: + case EXTERNAL_INT_ARRAY_TYPE: + case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE: + case EXTERNAL_FLOAT_ARRAY_TYPE: + return GetScavengerForSize(instance_size, DATA_OBJECT); + + case SHARED_FUNCTION_INFO_TYPE: + return GetScavengerForSize(SharedFunctionInfo::kAlignedSize, + POINTER_OBJECT); + +#define MAKE_STRUCT_CASE(NAME, Name, name) \ + case NAME##_TYPE: + STRUCT_LIST(MAKE_STRUCT_CASE) +#undef MAKE_STRUCT_CASE + return GetScavengerForSize(instance_size, POINTER_OBJECT); + default: + UNREACHABLE(); + return NULL; } - // The object should remain in new space or the old space allocation failed. - Object* result = new_space_.AllocateRaw(object_size); - // Failed allocation at this point is utterly unexpected. 
- ASSERT(!result->IsFailure()); - *p = MigrateObject(object, HeapObject::cast(result), object_size); +} + + +void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { + ASSERT(InFromSpace(object)); + MapWord first_word = object->map_word(); + ASSERT(!first_word.IsForwardingAddress()); + Map* map = first_word.ToMap(); + map->Scavenge(p, object); } @@ -1243,6 +1404,8 @@ Object* Heap::AllocatePartialMap(InstanceType instance_type, reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map()); reinterpret_cast<Map*>(result)->set_instance_type(instance_type); reinterpret_cast<Map*>(result)->set_instance_size(instance_size); + reinterpret_cast<Map*>(result)-> + set_scavenger(GetScavenger(instance_type, instance_size)); reinterpret_cast<Map*>(result)->set_inobject_properties(0); reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0); reinterpret_cast<Map*>(result)->set_unused_property_fields(0); @@ -1259,6 +1422,7 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) { Map* map = reinterpret_cast<Map*>(result); map->set_map(meta_map()); map->set_instance_type(instance_type); + map->set_scavenger(GetScavenger(instance_type, instance_size)); map->set_prototype(null_value()); map->set_constructor(null_value()); map->set_instance_size(instance_size); @@ -1891,6 +2055,7 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) { share->set_name(name); Code* illegal = Builtins::builtin(Builtins::Illegal); share->set_code(illegal); + share->set_scope_info(ScopeInfo<>::EmptyHeapObject()); Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric); share->set_construct_stub(construct_stub); share->set_expected_nof_properties(0); @@ -2318,12 +2483,13 @@ static void FlushCodeForFunction(SharedFunctionInfo* function_info) { // Check that there are heap allocated locals in the scopeinfo. If // there is, we are potentially using eval and need the scopeinfo // for variable resolution. - if (ScopeInfo<>::HasHeapAllocatedLocals(function_info->code())) + if (ScopeInfo<>::HasHeapAllocatedLocals(function_info->scope_info())) return; HandleScope scope; - // Compute the lazy compilable version of the code. + // Compute the lazy compilable version of the code, clear the scope info. function_info->set_code(*ComputeLazyCompile(function_info->length())); + function_info->set_scope_info(ScopeInfo<>::EmptyHeapObject()); } @@ -2348,7 +2514,6 @@ void Heap::FlushCode() { Object* Heap::CreateCode(const CodeDesc& desc, - ZoneScopeInfo* sinfo, Code::Flags flags, Handle<Object> self_reference) { // Allocate ByteArray before the Code object, so that we do not risk @@ -2358,9 +2523,7 @@ Object* Heap::CreateCode(const CodeDesc& desc, // Compute size int body_size = RoundUp(desc.instr_size, kObjectAlignment); - int sinfo_size = 0; - if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL); - int obj_size = Code::SizeFor(body_size, sinfo_size); + int obj_size = Code::SizeFor(body_size); ASSERT(IsAligned(obj_size, Code::kCodeAlignment)); Object* result; if (obj_size > MaxObjectSizeInPagedSpace()) { @@ -2377,7 +2540,6 @@ Object* Heap::CreateCode(const CodeDesc& desc, ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); code->set_instruction_size(desc.instr_size); code->set_relocation_info(ByteArray::cast(reloc_info)); - code->set_sinfo_size(sinfo_size); code->set_flags(flags); // Allow self references to created code object by patching the handle to // point to the newly allocated Code object. 
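The scavenging rewrite above swaps a per-object type switch for a callback cached on each Map: Heap::GetScavenger selects an evacuation routine from the instance type and size, AllocatePartialMap/AllocateMap install it with set_scavenger, and ScavengeObjectSlow shrinks to a single map->Scavenge(p, object) call. The sketch below is a minimal, self-contained model of that dispatch pattern, not V8 code; ToyMap, ToyObject, ToyScavenger and PickScavenger are invented names used only for illustration.

#include <cstdio>

struct ToyMap;

struct ToyObject {
  ToyMap* map;
};

// Per-map evacuation callback, analogous to the Scavenger typedef added to objects.h.
typedef void (*ToyScavenger)(ToyMap* map, ToyObject** slot, ToyObject* object);

struct ToyMap {
  int instance_type;
  int instance_size;
  ToyScavenger scavenger;  // chosen once when the map is created
  void Scavenge(ToyObject** slot, ToyObject* object) {
    scavenger(this, slot, object);
  }
};

static void EvacuateDataObject(ToyMap* map, ToyObject** slot, ToyObject* object) {
  // Stand-in for evacuating a data object: real code would copy the bytes and
  // leave a forwarding pointer; no rescan is needed because there are no
  // outgoing pointers.
  std::printf("data object, %d bytes: copy, no rescan\n", map->instance_size);
  *slot = object;
}

static void EvacuatePointerObject(ToyMap* map, ToyObject** slot, ToyObject* object) {
  // Stand-in for evacuating a pointer object: real code would also queue the
  // copy so its fields get visited later.
  std::printf("pointer object, %d bytes: copy and enqueue\n", map->instance_size);
  *slot = object;
}

// Decided once per map, analogous to Heap::GetScavenger(type, size).
static ToyScavenger PickScavenger(int instance_type) {
  const int kDataType = 0;
  return (instance_type == kDataType) ? &EvacuateDataObject
                                      : &EvacuatePointerObject;
}

int main() {
  ToyMap data_map = {0, 16, PickScavenger(0)};
  ToyMap pointer_map = {1, 32, PickScavenger(1)};
  ToyObject a = {&data_map};
  ToyObject b = {&pointer_map};
  ToyObject* slot_a = &a;
  ToyObject* slot_b = &b;
  // Equivalent of ScavengeObjectSlow: one indirect call through the map.
  a.map->Scavenge(&slot_a, &a);
  b.map->Scavenge(&slot_b, &b);
  return 0;
}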
@@ -2390,7 +2552,6 @@ Object* Heap::CreateCode(const CodeDesc& desc, // objects. These pointers can include references to the code object itself, // through the self_reference parameter. code->CopyFrom(desc); - if (sinfo != NULL) sinfo->Serialize(code); // write scope info #ifdef DEBUG code->Verify(); @@ -2431,9 +2592,7 @@ Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment); - int sinfo_size = code->sinfo_size(); - - int new_obj_size = Code::SizeFor(new_body_size, sinfo_size); + int new_obj_size = Code::SizeFor(new_body_size); Address old_addr = code->address(); @@ -2460,8 +2619,6 @@ Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { // Copy patched rinfo. memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length()); - // Copy sinfo. - memcpy(new_code->sinfo_start(), code->sinfo_start(), code->sinfo_size()); // Relocate the copy. ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); @@ -3657,7 +3814,7 @@ bool Heap::IteratePointersInDirtyMapsRegion( Max(start, prev_map + Map::kPointerFieldsBeginOffset); Address pointer_fields_end = - Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end); + Min(prev_map + Map::kPointerFieldsEndOffset, end); contains_pointers_to_new_space = IteratePointersInDirtyRegion(pointer_fields_start, @@ -3675,10 +3832,11 @@ bool Heap::IteratePointersInDirtyMapsRegion( if (map_aligned_end != end) { ASSERT(Memory::Object_at(map_aligned_end)->IsMap()); - Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset; + Address pointer_fields_start = + map_aligned_end + Map::kPointerFieldsBeginOffset; Address pointer_fields_end = - Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize); + Min(end, map_aligned_end + Map::kPointerFieldsEndOffset); contains_pointers_to_new_space = IteratePointersInDirtyRegion(pointer_fields_start, @@ -3691,9 +3849,9 @@ bool Heap::IteratePointersInDirtyMapsRegion( } -void Heap::IterateAndMarkPointersToNewSpace(Address start, - Address end, - ObjectSlotCallback callback) { +void Heap::IterateAndMarkPointersToFromSpace(Address start, + Address end, + ObjectSlotCallback callback) { Address slot_address = start; Page* page = Page::FromAddress(start); @@ -3701,7 +3859,7 @@ void Heap::IterateAndMarkPointersToNewSpace(Address start, while (slot_address < end) { Object** slot = reinterpret_cast<Object**>(slot_address); - if (Heap::InNewSpace(*slot)) { + if (Heap::InFromSpace(*slot)) { ASSERT((*slot)->IsHeapObject()); callback(reinterpret_cast<HeapObject**>(slot)); if (Heap::InNewSpace(*slot)) { diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index df3ba0ea2a..18991b4c09 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -36,8 +36,6 @@ namespace v8 { namespace internal { -// Forward declarations. -class ZoneScopeInfo; // Defines all the roots in Heap. #define UNCONDITIONAL_STRONG_ROOT_LIST(V) \ @@ -626,7 +624,6 @@ class Heap : public AllStatic { // object by containing this pointer. // Please note this function does not perform a garbage collection. static Object* CreateCode(const CodeDesc& desc, - ZoneScopeInfo* sinfo, Code::Flags flags, Handle<Object> self_reference); @@ -774,11 +771,12 @@ class Heap : public AllStatic { DirtyRegionCallback visit_dirty_region, ObjectSlotCallback callback); - // Iterate pointers to new space found in memory interval from start to end. + // Iterate pointers to from semispace of new space found in memory interval + // from start to end. 
// Update dirty marks for page containing start address. - static void IterateAndMarkPointersToNewSpace(Address start, - Address end, - ObjectSlotCallback callback); + static void IterateAndMarkPointersToFromSpace(Address start, + Address end, + ObjectSlotCallback callback); // Iterate pointers to new space found in memory interval from start to end. // Return true if pointers to new space was found. @@ -985,6 +983,8 @@ class Heap : public AllStatic { static void RecordStats(HeapStats* stats); + static Scavenger GetScavenger(int instance_type, int instance_size); + // Copy block of memory from src to dst. Size of block should be aligned // by pointer size. static inline void CopyBlock(Address dst, Address src, int byte_size); @@ -1232,17 +1232,7 @@ class Heap : public AllStatic { set_instanceof_cache_function(the_hole_value()); } - // Helper function used by CopyObject to copy a source object to an - // allocated target object and update the forwarding pointer in the source - // object. Returns the target object. - static inline HeapObject* MigrateObject(HeapObject* source, - HeapObject* target, - int size); - #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) - // Record the copy of an object in the NewSpace's statistics. - static void RecordCopiedObject(HeapObject* obj); - // Record statistics before and after garbage collection. static void ReportStatisticsBeforeGC(); static void ReportStatisticsAfterGC(); diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index ce2099da2d..e011237de0 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -121,7 +121,6 @@ void CpuFeatures::Probe() { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Code>::null()); if (!code->IsCode()) return; diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 0f72074711..6080a8291c 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -6678,11 +6678,8 @@ void CodeGenerator::GenerateRandomHeapNumber( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - // To allocate a heap number, and ensure that it is not a smi, we - // call the runtime function FUnaryMinus on 0, returning the double - // -0.0. A new, distinct heap number is returned each time. - __ push(Immediate(Smi::FromInt(0))); - __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + // Allocate a heap number. + __ CallRuntime(Runtime::kNumberAlloc, 0); __ mov(edi, eax); __ bind(&heapnumber_allocated); diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 2ca1105718..244105d8cf 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -2242,11 +2242,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) { __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - // To allocate a heap number, and ensure that it is not a smi, we - // call the runtime function FUnaryMinus on 0, returning the double - // -0.0. A new, distinct heap number is returned each time. - __ push(Immediate(Smi::FromInt(0))); - __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + // Allocate a heap number. 
+ __ CallRuntime(Runtime::kNumberAlloc, 0); __ mov(edi, eax); __ bind(&heapnumber_allocated); diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index b0de82752b..a7930fb1ed 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -872,7 +872,6 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { CodeDesc code_desc; masm_->GetCode(&code_desc); Handle<Code> code = Factory::NewCode(code_desc, - NULL, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject()); PROFILE(RegExpCodeCreateEvent(*code, *source)); diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 26361d10e4..ae33948f3a 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -111,7 +111,7 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, Register receiver, String* name, Register r0, - Register extra) { + Register r1) { ASSERT(name->IsSymbol()); __ IncrementCounter(&Counters::negative_lookups, 1); __ IncrementCounter(&Counters::negative_lookups_miss, 1); @@ -121,11 +121,13 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, const int kInterceptorOrAccessCheckNeededMask = (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); + // Bail out if the receiver has a named interceptor or requires access checks. - __ test(FieldOperand(r0, Map::kBitFieldOffset), - Immediate(kInterceptorOrAccessCheckNeededMask)); + __ test_b(FieldOperand(r0, Map::kBitFieldOffset), + kInterceptorOrAccessCheckNeededMask); __ j(not_zero, miss_label, not_taken); + // Check that receiver is a JSObject. __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE); __ j(below, miss_label, not_taken); @@ -158,10 +160,7 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, for (int i = 0; i < kProbes; i++) { // r0 points to properties hash. // Compute the masked index: (hash + i + i * i) & mask. - if (extra.is(no_reg)) { - __ push(receiver); - } - Register index = extra.is(no_reg) ? receiver : extra; + Register index = r1; // Capacity is smi 2^n. __ mov(index, FieldOperand(properties, kCapacityOffset)); __ dec(index); @@ -173,27 +172,18 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, ASSERT(StringDictionary::kEntrySize == 3); __ lea(index, Operand(index, index, times_2, 0)); // index *= 3. - Register entity_name = extra.is(no_reg) ? properties : extra; + Register entity_name = r1; // Having undefined at this place means the name is not contained. ASSERT_EQ(kSmiTagSize, 1); __ mov(entity_name, Operand(properties, index, times_half_pointer_size, kElementsStartOffset - kHeapObjectTag)); __ cmp(entity_name, Factory::undefined_value()); - if (extra.is(no_reg)) { - // 'receiver' shares a register with 'entity_name'. - __ pop(receiver); - } if (i != kProbes - 1) { __ j(equal, &done, taken); // Stop if found the property. __ cmp(entity_name, Handle<String>(name)); __ j(equal, miss_label, not_taken); - - if (extra.is(no_reg)) { - // Restore the properties if their register was occupied by the name. - __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset)); - } } else { // Give up probing if still not found the undefined value. 
__ j(not_equal, miss_label, not_taken); @@ -525,6 +515,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, Label* miss) { ASSERT(holder->HasNamedInterceptor()); ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); @@ -541,6 +532,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { receiver, scratch1, scratch2, + scratch3, holder, lookup, name, @@ -552,6 +544,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { receiver, scratch1, scratch2, + scratch3, name, holder, miss); @@ -564,6 +557,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, JSObject* interceptor_holder, LookupResult* lookup, String* name, @@ -603,7 +597,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register holder = stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, scratch1, - scratch2, name, depth1, miss); + scratch2, scratch3, name, depth1, miss); // Invoke an interceptor and if it provides a value, // branch to |regular_invoke|. @@ -619,7 +613,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { if (interceptor_holder != lookup->holder()) { stub_compiler_->CheckPrototypes(interceptor_holder, receiver, lookup->holder(), scratch1, - scratch2, name, depth2, miss); + scratch2, scratch3, name, depth2, miss); } else { // CheckPrototypes has a side effect of fetching a 'holder' // for API (object which is instanceof for the signature). It's @@ -655,12 +649,13 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, String* name, JSObject* interceptor_holder, Label* miss_label) { Register holder = stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, + scratch1, scratch2, scratch3, name, miss_label); __ EnterInternalFrame(); @@ -862,14 +857,15 @@ Register StubCompiler::CheckPrototypes(JSObject* object, Register object_reg, JSObject* holder, Register holder_reg, - Register scratch, + Register scratch1, + Register scratch2, String* name, int save_at_depth, - Label* miss, - Register extra) { + Label* miss) { // Make sure there's no overlap between holder and object registers. - ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg)); - ASSERT(!extra.is(object_reg) && !extra.is(holder_reg) && !extra.is(scratch)); + ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + && !scratch2.is(scratch1)); // Keep track of the current object in register reg. Register reg = object_reg; JSObject* current = object; @@ -909,31 +905,31 @@ Register StubCompiler::CheckPrototypes(JSObject* object, miss, reg, name, - scratch, - extra); - __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset)); + scratch1, + scratch2); + __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); reg = holder_reg; // from now the object is in holder_reg - __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset)); + __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); } else if (Heap::InNewSpace(prototype)) { // Get the map of the current object. - __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset)); - __ cmp(Operand(scratch), Immediate(Handle<Map>(current->map()))); + __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); + __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map()))); // Branch on the result of the map check. 
__ j(not_equal, miss, not_taken); // Check access rights to the global object. This has to happen // after the map check so that we know that the object is // actually a global object. if (current->IsJSGlobalProxy()) { - __ CheckAccessGlobalProxy(reg, scratch, miss); + __ CheckAccessGlobalProxy(reg, scratch1, miss); // Restore scratch register to be the map of the object. // We load the prototype from the map in the scratch register. - __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset)); + __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); } // The prototype is in new space; we cannot store a reference // to it in the code. Load it from the map. reg = holder_reg; // from now the object is in holder_reg - __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset)); + __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); } else { // Check the map of the current object. __ cmp(FieldOperand(reg, HeapObject::kMapOffset), @@ -944,7 +940,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object, // after the map check so that we know that the object is // actually a global object. if (current->IsJSGlobalProxy()) { - __ CheckAccessGlobalProxy(reg, scratch, miss); + __ CheckAccessGlobalProxy(reg, scratch1, miss); } // The prototype is in old space; load it directly. reg = holder_reg; // from now the object is in holder_reg @@ -971,7 +967,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object, // Perform security check for access to the global object. ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); if (holder->IsJSGlobalProxy()) { - __ CheckAccessGlobalProxy(reg, scratch, miss); + __ CheckAccessGlobalProxy(reg, scratch1, miss); }; // If we've skipped any global objects, it's not enough to verify @@ -981,7 +977,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object, object, holder, name, - scratch, + scratch1, miss); if (result->IsFailure()) set_failure(Failure::cast(result)); @@ -995,6 +991,7 @@ void StubCompiler::GenerateLoadField(JSObject* object, Register receiver, Register scratch1, Register scratch2, + Register scratch3, int index, String* name, Label* miss) { @@ -1005,7 +1002,7 @@ void StubCompiler::GenerateLoadField(JSObject* object, // Check the prototype chain. Register reg = CheckPrototypes(object, receiver, holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, name, miss); // Get the value from the properties. GenerateFastPropertyLoad(masm(), eax, reg, holder, index); @@ -1019,6 +1016,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, Register name_reg, Register scratch1, Register scratch2, + Register scratch3, AccessorInfo* callback, String* name, Label* miss, @@ -1030,7 +1028,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, // Check that the maps haven't changed. Register reg = CheckPrototypes(object, receiver, holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, name, miss); Handle<AccessorInfo> callback_handle(callback); @@ -1094,6 +1092,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, Register receiver, Register scratch1, Register scratch2, + Register scratch3, Object* value, String* name, Label* miss) { @@ -1104,7 +1103,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, // Check that the maps haven't changed. Register reg = CheckPrototypes(object, receiver, holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, name, miss); // Return the constant value. 
__ mov(eax, Handle<Object>(value)); @@ -1119,6 +1118,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, Register name_reg, Register scratch1, Register scratch2, + Register scratch3, String* name, Label* miss) { ASSERT(interceptor_holder->HasNamedInterceptor()); @@ -1147,7 +1147,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, + name, miss); ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1)); // Save necessary data before invoking an interceptor. @@ -1195,6 +1196,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, lookup->holder(), scratch1, scratch2, + scratch3, name, miss); } @@ -1235,7 +1237,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Check that the maps haven't changed. Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, name, miss); __ pop(scratch2); // save old return address PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, interceptor_holder); @@ -1310,8 +1312,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object, __ j(zero, &miss, not_taken); // Do the right check and compute the holder register. - Register reg = CheckPrototypes(object, edx, holder, ebx, eax, - name, &miss, edi); + Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi, + name, &miss); GenerateFastPropertyLoad(masm(), edi, reg, holder, index); @@ -1373,7 +1375,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, CheckPrototypes(JSObject::cast(object), edx, holder, ebx, - eax, name, &miss, edi); + eax, edi, name, &miss); if (argc == 0) { // Noop, return the length. @@ -1519,7 +1521,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, __ j(zero, &miss); CheckPrototypes(JSObject::cast(object), edx, holder, ebx, - eax, name, &miss, edi); + eax, edi, name, &miss); // Get the elements array of the object. __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset)); @@ -1594,7 +1596,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object, Context::STRING_FUNCTION_INDEX, eax); CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder, - ebx, edx, name, &miss, edi); + ebx, edx, edi, name, &miss); Register receiver = ebx; Register index = edi; @@ -1659,7 +1661,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object, Context::STRING_FUNCTION_INDEX, eax); CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder, - ebx, edx, name, &miss, edi); + ebx, edx, edi, name, &miss); Register receiver = eax; Register index = edi; @@ -1764,7 +1766,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, // Check that the maps haven't changed. CheckPrototypes(JSObject::cast(object), edx, holder, - ebx, eax, name, depth, &miss, edi); + ebx, eax, edi, name, depth, &miss); // Patch the receiver on the stack with the global proxy if // necessary. 
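The stub-compiler changes in this file thread an explicit third scratch register (edi at most call sites) through CheckPrototypes and the GenerateLoad*/Compile* helpers, replacing the earlier optional extra register that, when absent, forced the receiver to be borrowed and saved around the dictionary probe. The rough standalone sketch below models that trade-off; Reg, kNoReg and the Probe* functions are invented stand-ins, not the real macro assembler interface.

#include <cassert>
#include <vector>

typedef int Reg;
static const Reg kNoReg = -1;  // stand-in for the assembler's no_reg sentinel

// Old shape: with no extra register available, borrow the receiver and emit a
// save/restore pair around the probe.
static void ProbeWithOptionalScratch(std::vector<const char*>* code,
                                     Reg receiver,
                                     Reg extra) {
  if (extra == kNoReg) code->push_back("push receiver");
  Reg index = (extra == kNoReg) ? receiver : extra;
  (void)index;  // the probe would use 'index' as its working register
  code->push_back("probe dictionary");
  if (extra == kNoReg) code->push_back("pop receiver");
}

// New shape: the caller always passes a third scratch register, so the probe
// never has to spill the receiver.
static void ProbeWithRequiredScratch(std::vector<const char*>* code,
                                     Reg receiver,
                                     Reg scratch3) {
  (void)receiver;
  (void)scratch3;  // the probe would use 'scratch3' as its working register
  code->push_back("probe dictionary");
}

int main() {
  std::vector<const char*> old_code;
  std::vector<const char*> new_code;
  ProbeWithOptionalScratch(&old_code, /*receiver=*/2, kNoReg);
  ProbeWithRequiredScratch(&new_code, /*receiver=*/2, /*scratch3=*/7);
  assert(old_code.size() == 3);  // push + probe + pop
  assert(new_code.size() == 1);  // probe only
  return 0;
}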
@@ -1787,7 +1789,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::STRING_FUNCTION_INDEX, eax); CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder, - ebx, edx, name, &miss, edi); + ebx, edx, edi, name, &miss); } break; @@ -1807,7 +1809,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::NUMBER_FUNCTION_INDEX, eax); CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder, - ebx, edx, name, &miss, edi); + ebx, edx, edi, name, &miss); } break; } @@ -1828,7 +1830,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::BOOLEAN_FUNCTION_INDEX, eax); CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder, - ebx, edx, name, &miss, edi); + ebx, edx, edi, name, &miss); } break; } @@ -1888,6 +1890,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object, edx, ebx, edi, + eax, &miss); // Restore receiver. @@ -1950,7 +1953,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, } // Check that the maps haven't changed. - CheckPrototypes(object, edx, holder, ebx, eax, name, &miss, edi); + CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss); // Get the value from the cell. __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell))); @@ -2226,7 +2229,7 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name, // Check the maps of the full prototype chain. Also check that // global property cells up to (but not including) the last object // in the prototype chain are empty. - CheckPrototypes(object, eax, last, ebx, edx, name, &miss); + CheckPrototypes(object, eax, last, ebx, edx, edi, name, &miss); // If the last object in the prototype chain is a global object, // check that the global property cell is empty. @@ -2263,7 +2266,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object, // ----------------------------------- Label miss; - GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss); + GenerateLoadField(object, holder, eax, ebx, edx, edi, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -2284,7 +2287,7 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name, Label miss; Failure* failure = Failure::InternalError(); - bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, + bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback, name, &miss, &failure); if (!success) return failure; @@ -2307,7 +2310,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object, // ----------------------------------- Label miss; - GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss); + GenerateLoadConstant(object, holder, eax, ebx, edx, edi, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -2338,6 +2341,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, ecx, edx, ebx, + edi, name, &miss); @@ -2370,7 +2374,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, } // Check that the maps haven't changed. - CheckPrototypes(object, eax, holder, ebx, edx, name, &miss, edi); + CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss); // Get the value from the cell. 
__ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell))); @@ -2415,7 +2419,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, __ cmp(Operand(eax), Immediate(Handle<String>(name))); __ j(not_equal, &miss, not_taken); - GenerateLoadField(receiver, holder, edx, ebx, ecx, index, name, &miss); + GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_field, 1); @@ -2444,7 +2448,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, __ j(not_equal, &miss, not_taken); Failure* failure = Failure::InternalError(); - bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, + bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback, name, &miss, &failure); if (!success) return failure; @@ -2474,7 +2478,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ cmp(Operand(eax), Immediate(Handle<String>(name))); __ j(not_equal, &miss, not_taken); - GenerateLoadConstant(receiver, holder, edx, ebx, ecx, + GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi, value, name, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_constant_function, 1); @@ -2510,6 +2514,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, eax, ecx, ebx, + edi, name, &miss); __ bind(&miss); diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 12332f9fd3..9bb18f7e23 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -525,17 +525,12 @@ void CallICBase::UpdateCaches(LookupResult* lookup, // Bail out if we didn't find a result. if (!lookup->IsProperty() || !lookup->IsCacheable()) return; -#ifndef V8_TARGET_ARCH_IA32 - // Normal objects only implemented for IA32 by now. - if (HasNormalObjectsInPrototypeChain(lookup, *object)) return; -#else if (lookup->holder() != *object && HasNormalObjectsInPrototypeChain(lookup, object->GetPrototype())) { // Suppress optimization for prototype chains with slow properties objects // in the middle. return; } -#endif // Compute the number of arguments. int argc = target()->arguments_count(); diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js index cdb10be1aa..e7ec6100e5 100644 --- a/deps/v8/src/json.js +++ b/deps/v8/src/json.js @@ -29,7 +29,7 @@ var $JSON = global.JSON; function ParseJSONUnfiltered(text) { var s = $String(text); - var f = %CompileString(text, true); + var f = %CompileString(s, true); return f(); } diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 95afb4abe2..e2dd2a3e27 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -425,8 +425,10 @@ void MarkCompactCollector::MarkMapContents(Map* map) { // Since the descriptor array has been marked already, it is fine // that one of these fields contains a pointer to it. MarkingVisitor visitor; // Has no state or contents. 
- visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset), - HeapObject::RawField(map, Map::kSize)); + visitor.VisitPointers(HeapObject::RawField(map, + Map::kPointerFieldsBeginOffset), + HeapObject::RawField(map, + Map::kPointerFieldsEndOffset)); } diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc index 7cb1d20227..ec91cc8779 100644 --- a/deps/v8/src/messages.cc +++ b/deps/v8/src/messages.cc @@ -66,7 +66,8 @@ Handle<Object> MessageHandler::MakeMessageObject( const char* type, MessageLocation* loc, Vector< Handle<Object> > args, - Handle<String> stack_trace) { + Handle<String> stack_trace, + Handle<JSArray> stack_frames) { // Build error message object v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom. Handle<Object> type_str = Factory::LookupAsciiSymbol(type); @@ -90,13 +91,17 @@ Handle<Object> MessageHandler::MakeMessageObject( Handle<Object> stack_trace_val = stack_trace.is_null() ? Factory::undefined_value() : Handle<Object>::cast(stack_trace); - const int argc = 6; + Handle<Object> stack_frames_val = stack_frames.is_null() + ? Factory::undefined_value() + : Handle<Object>::cast(stack_frames); + const int argc = 7; Object** argv[argc] = { type_str.location(), array.location(), start_handle.location(), end_handle.location(), script.location(), - stack_trace_val.location() }; + stack_trace_val.location(), + stack_frames_val.location() }; // Setup a catch handler to catch exceptions in creating the message. This // handler is non-verbose to avoid calling MakeMessage recursively in case of diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h index 80ce8eb9ca..440bde87e9 100644 --- a/deps/v8/src/messages.h +++ b/deps/v8/src/messages.h @@ -96,7 +96,8 @@ class MessageHandler { static Handle<Object> MakeMessageObject(const char* type, MessageLocation* loc, Vector< Handle<Object> > args, - Handle<String> stack_trace); + Handle<String> stack_trace, + Handle<JSArray> stack_frames); // Report a formatted message (needs JS allocation). 
static void ReportMessage(MessageLocation* loc, Handle<Object> message); diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index 99ba45464f..b0f8aa16e4 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -181,7 +181,6 @@ function FormatMessage(message) { // RangeError invalid_array_length: "Invalid array length", stack_overflow: "Maximum call stack size exceeded", - apply_overflow: "Function.prototype.apply cannot support %0 arguments", // SyntaxError unable_to_parse: "Parse error", duplicate_regexp_flag: "Duplicate RegExp flag %0", @@ -601,18 +600,22 @@ function GetPositionInLine(message) { } -function ErrorMessage(type, args, startPos, endPos, script, stackTrace) { +function ErrorMessage(type, args, startPos, endPos, script, stackTrace, + stackFrames) { this.startPos = startPos; this.endPos = endPos; this.type = type; this.args = args; this.script = script; this.stackTrace = stackTrace; + this.stackFrames = stackFrames; } -function MakeMessage(type, args, startPos, endPos, script, stackTrace) { - return new ErrorMessage(type, args, startPos, endPos, script, stackTrace); +function MakeMessage(type, args, startPos, endPos, script, stackTrace, + stackFrames) { + return new ErrorMessage(type, args, startPos, endPos, script, stackTrace, + stackFrames); } diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index 0b5ff9932d..d340e4b598 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -789,6 +789,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() { CHECK(IsSharedFunctionInfo()); VerifyObjectField(kNameOffset); VerifyObjectField(kCodeOffset); + VerifyObjectField(kScopeInfoOffset); VerifyObjectField(kInstanceClassNameOffset); VerifyObjectField(kFunctionDataOffset); VerifyObjectField(kScriptOffset); diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 0e45550845..7b874d527c 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -2060,6 +2060,21 @@ void ExternalFloatArray::set(int index, float value) { ptr[index] = value; } +inline Scavenger Map::scavenger() { + Scavenger callback = reinterpret_cast<Scavenger>( + READ_INTPTR_FIELD(this, kScavengerCallbackOffset)); + + ASSERT(callback == Heap::GetScavenger(instance_type(), + instance_size())); + + return callback; +} + +inline void Map::set_scavenger(Scavenger callback) { + WRITE_INTPTR_FIELD(this, + kScavengerCallbackOffset, + reinterpret_cast<intptr_t>(callback)); +} int Map::instance_size() { return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2; @@ -2496,6 +2511,7 @@ ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex) #endif ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset) +ACCESSORS(SharedFunctionInfo, scope_info, Object, kScopeInfoOffset) ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset) ACCESSORS(SharedFunctionInfo, instance_class_name, Object, kInstanceClassNameOffset) @@ -2808,7 +2824,6 @@ JSValue* JSValue::cast(Object* obj) { INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset) ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset) -INT_ACCESSORS(Code, sinfo_size, kSInfoSizeOffset) byte* Code::instruction_start() { @@ -2852,11 +2867,6 @@ bool Code::contains(byte* pc) { } -byte* Code::sinfo_start() { - return FIELD_ADDR(this, kHeaderSize + body_size()); -} - - ACCESSORS(JSArray, length, Object, kLengthOffset) diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index e79a5505c9..d9efe6cea7 100644 --- 
a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -2190,6 +2190,8 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode, int new_instance_size = map()->instance_size() - instance_size_delta; new_map->set_inobject_properties(0); new_map->set_instance_size(new_instance_size); + new_map->set_scavenger(Heap::GetScavenger(new_map->instance_type(), + new_map->instance_size())); Heap::CreateFillerObjectAt(this->address() + new_instance_size, instance_size_delta); } @@ -5033,7 +5035,7 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) { void Map::MapIterateBody(ObjectVisitor* v) { // Assumes all Object* members are contiguously allocated! - IteratePointers(v, kPrototypeOffset, kCodeCacheOffset + kPointerSize); + IteratePointers(v, kPointerFieldsBeginOffset, kPointerFieldsEndOffset); } @@ -5325,8 +5327,6 @@ void Code::CodeIterateBody(ObjectVisitor* v) { for (; !it.done(); it.next()) { it.rinfo()->Visit(v); } - - ScopeInfo<>::IterateScopeInfo(this, v); } diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index 4a7dee6a83..c2110eabac 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -2744,10 +2744,6 @@ class Code: public HeapObject { inline int relocation_size(); - // [sinfo_size]: Size of scope information. - inline int sinfo_size(); - inline void set_sinfo_size(int value); - // [flags]: Various code flags. inline Flags flags(); inline void set_flags(Flags flags); @@ -2816,9 +2812,6 @@ class Code: public HeapObject { // Returns true if pc is inside this object's instructions. inline bool contains(byte* pc); - // Returns the address of the scope information. - inline byte* sinfo_start(); - // Relocate the code by delta bytes. Called to signal that this code // object has been moved by delta bytes. void Relocate(intptr_t delta); @@ -2826,12 +2819,10 @@ class Code: public HeapObject { // Migrate code described by desc. void CopyFrom(const CodeDesc& desc); - // Returns the object size for a given body and sinfo size (Used for - // allocation). - static int SizeFor(int body_size, int sinfo_size) { + // Returns the object size for a given body (used for allocation). + static int SizeFor(int body_size) { ASSERT_SIZE_TAG_ALIGNED(body_size); - ASSERT_SIZE_TAG_ALIGNED(sinfo_size); - return RoundUp(kHeaderSize + body_size + sinfo_size, kCodeAlignment); + return RoundUp(kHeaderSize + body_size, kCodeAlignment); } // Calculate the size of the code object to report for log events. This takes @@ -2851,7 +2842,7 @@ class Code: public HeapObject { static inline Code* cast(Object* obj); // Dispatched behavior. - int CodeSize() { return SizeFor(body_size(), sinfo_size()); } + int CodeSize() { return SizeFor(body_size()); } void CodeIterateBody(ObjectVisitor* v); #ifdef DEBUG void CodePrint(); @@ -2865,8 +2856,7 @@ class Code: public HeapObject { // Layout description. static const int kInstructionSizeOffset = HeapObject::kHeaderSize; static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize; - static const int kSInfoSizeOffset = kRelocationInfoOffset + kPointerSize; - static const int kFlagsOffset = kSInfoSizeOffset + kIntSize; + static const int kFlagsOffset = kRelocationInfoOffset + kPointerSize; static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize; // Add padding to align the instruction start following right after // the Code object header. 
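With scope info stored on SharedFunctionInfo rather than serialized behind a Code object's instructions, Code::SizeFor reduces to the header plus the instruction body, rounded up to the code alignment. The short self-contained sketch below reproduces that arithmetic; kHeaderSize and kCodeAlignment here are made-up round numbers chosen for illustration, not V8's actual layout constants.

#include <cassert>

static const int kHeaderSize = 32;     // illustrative value only
static const int kCodeAlignment = 32;  // illustrative value; must be a power of two

static int RoundUp(int value, int alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Old layout: a serialized scope-info blob followed the instructions.
static int OldSizeFor(int body_size, int sinfo_size) {
  return RoundUp(kHeaderSize + body_size + sinfo_size, kCodeAlignment);
}

// New layout: only the instruction body is counted.
static int NewSizeFor(int body_size) {
  return RoundUp(kHeaderSize + body_size, kCodeAlignment);
}

int main() {
  assert(NewSizeFor(100) == 160);      // 32 + 100, rounded up to 160
  assert(OldSizeFor(100, 40) == 192);  // the old object also carried 40 bytes of sinfo
  assert(NewSizeFor(100) <= OldSizeFor(100, 40));
  return 0;
}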
@@ -2899,6 +2889,7 @@ class Code: public HeapObject { DISALLOW_IMPLICIT_CONSTRUCTORS(Code); }; +typedef void (*Scavenger)(Map* map, HeapObject** slot, HeapObject* object); // All heap objects have a Map that describes their structure. // A Map contains information about: @@ -3100,6 +3091,13 @@ class Map: public HeapObject { void MapVerify(); #endif + inline Scavenger scavenger(); + inline void set_scavenger(Scavenger callback); + + inline void Scavenge(HeapObject** slot, HeapObject* obj) { + scavenger()(this, slot, obj); + } + static const int kMaxPreAllocatedPropertyFields = 255; // Layout description. @@ -3110,7 +3108,8 @@ class Map: public HeapObject { static const int kInstanceDescriptorsOffset = kConstructorOffset + kPointerSize; static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize; - static const int kPadStart = kCodeCacheOffset + kPointerSize; + static const int kScavengerCallbackOffset = kCodeCacheOffset + kPointerSize; + static const int kPadStart = kScavengerCallbackOffset + kPointerSize; static const int kSize = MAP_POINTER_ALIGN(kPadStart); // Layout of pointer fields. Heap iteration code relies on them @@ -3273,6 +3272,9 @@ class SharedFunctionInfo: public HeapObject { // [code]: Function code. DECL_ACCESSORS(code, Code) + // [scope_info]: Scope info. + DECL_ACCESSORS(scope_info, Object) + // [construct stub]: Code stub for constructing instances of this function. DECL_ACCESSORS(construct_stub, Code) @@ -3426,7 +3428,8 @@ class SharedFunctionInfo: public HeapObject { // Pointer fields. static const int kNameOffset = HeapObject::kHeaderSize; static const int kCodeOffset = kNameOffset + kPointerSize; - static const int kConstructStubOffset = kCodeOffset + kPointerSize; + static const int kScopeInfoOffset = kCodeOffset + kPointerSize; + static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize; static const int kInstanceClassNameOffset = kConstructStubOffset + kPointerSize; static const int kFunctionDataOffset = diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index fb58cfa3f7..e215890639 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -1968,8 +1968,8 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) { const int literals = fun->NumberOfLiterals(); Handle<Code> code = Handle<Code>(fun->shared()->code()); Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub()); - Handle<SharedFunctionInfo> shared = - Factory::NewSharedFunctionInfo(name, literals, code); + Handle<SharedFunctionInfo> shared = Factory::NewSharedFunctionInfo( + name, literals, code, Handle<Object>(fun->shared()->scope_info())); shared->set_construct_stub(*construct_stub); // Copy the function data to the shared function info. diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index e3ae867e1a..58ff15408d 100644 --- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -83,6 +83,12 @@ void OS::Setup() { } +void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { + __asm__ __volatile__("" : : : "memory"); + *ptr = value; +} + + uint64_t OS::CpuFeaturesImpliedByPlatform() { return 0; // OpenBSD runs on anything. 
} diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index 57ff6610e8..b64ee2ec35 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -1555,13 +1555,12 @@ void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj, JSFunction* func = JSFunction::cast(js_obj); Context* context = func->context(); ZoneScope zscope(DELETE_ON_EXIT); - ScopeInfo<ZoneListAllocationPolicy> scope_info( - context->closure()->shared()->code()); - int locals_number = scope_info.NumberOfLocals(); + Object* scope_info = context->closure()->shared()->scope_info(); + ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(scope_info); + int locals_number = zone_scope_info.NumberOfLocals(); for (int i = 0; i < locals_number; ++i) { - String* local_name = *scope_info.LocalName(i); - int idx = ScopeInfo<>::ContextSlotIndex( - context->closure()->shared()->code(), local_name, NULL); + String* local_name = *zone_scope_info.LocalName(i); + int idx = ScopeInfo<>::ContextSlotIndex(scope_info, local_name, NULL); if (idx >= 0 && idx < context->length()) { snapshot_->SetClosureReference(entry, local_name, context->get(idx)); } diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 4a0fe7ae22..535d9a973b 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -1606,9 +1606,10 @@ static Object* Runtime_SetCode(Arguments args) { if (!EnsureCompiled(shared, KEEP_EXCEPTION)) { return Failure::Exception(); } - // Set the code, formal parameter count, and the length of the target - // function. + // Set the code, scope info, formal parameter count, + // and the length of the target function. target->set_code(fun->code()); + target->shared()->set_scope_info(shared->scope_info()); target->shared()->set_length(shared->length()); target->shared()->set_formal_parameter_count( shared->formal_parameter_count()); @@ -5608,6 +5609,14 @@ static Object* Runtime_NumberUnaryMinus(Arguments args) { } +static Object* Runtime_NumberAlloc(Arguments args) { + NoHandleAllocation ha; + ASSERT(args.length() == 0); + + return Heap::NumberFromDouble(9876543210.0); +} + + static Object* Runtime_NumberDiv(Arguments args) { NoHandleAllocation ha; ASSERT(args.length() == 2); @@ -6860,7 +6869,8 @@ static Object* Runtime_NewContext(Arguments args) { ASSERT(args.length() == 1); CONVERT_CHECKED(JSFunction, function, args[0]); - int length = ScopeInfo<>::NumberOfContextSlots(function->code()); + int length = + ScopeInfo<>::NumberOfContextSlots(function->shared()->scope_info()); Object* result = Heap::AllocateFunctionContext(length, function); if (result->IsFailure()) return result; @@ -8480,9 +8490,10 @@ static Object* Runtime_GetFrameDetails(Arguments args) { // Check for constructor frame. bool constructor = it.frame()->IsConstructor(); - // Get code and read scope info from it for local variable information. - Handle<Code> code(it.frame()->code()); - ScopeInfo<> info(*code); + // Get scope info and read from it for local variable information. + Handle<JSFunction> function(JSFunction::cast(it.frame()->function())); + Handle<Object> scope_info(function->shared()->scope_info()); + ScopeInfo<> info(*scope_info); // Get the context. 
Handle<Context> context(Context::cast(it.frame()->context())); @@ -8510,7 +8521,8 @@ static Object* Runtime_GetFrameDetails(Arguments args) { } ASSERT(context->is_function_context()); locals->set(i * 2 + 1, - context->get(ScopeInfo<>::ContextSlotIndex(*code, *name, + context->get(ScopeInfo<>::ContextSlotIndex(*scope_info, + *name, NULL))); } } @@ -8651,7 +8663,7 @@ static Object* Runtime_GetFrameDetails(Arguments args) { // Copy all the context locals into an object used to materialize a scope. -static void CopyContextLocalsToScopeObject(Handle<Code> code, +static void CopyContextLocalsToScopeObject(Handle<SharedFunctionInfo> shared, ScopeInfo<>& scope_info, Handle<Context> context, Handle<JSObject> scope_object) { @@ -8660,7 +8672,7 @@ static void CopyContextLocalsToScopeObject(Handle<Code> code, i < scope_info.number_of_context_slots(); i++) { int context_index = - ScopeInfo<>::ContextSlotIndex(*code, + ScopeInfo<>::ContextSlotIndex(shared->scope_info(), *scope_info.context_slot_name(i), NULL); @@ -8678,8 +8690,8 @@ static void CopyContextLocalsToScopeObject(Handle<Code> code, // frame. static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) { Handle<JSFunction> function(JSFunction::cast(frame->function())); - Handle<Code> code(function->code()); - ScopeInfo<> scope_info(*code); + Handle<SharedFunctionInfo> shared(function->shared()); + ScopeInfo<> scope_info(shared->scope_info()); // Allocate and initialize a JSObject with all the arguments, stack locals // heap locals and extension properties of the debugged function. @@ -8702,7 +8714,7 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) { // Third fill all context locals. Handle<Context> frame_context(Context::cast(frame->context())); Handle<Context> function_context(frame_context->fcontext()); - CopyContextLocalsToScopeObject(code, scope_info, + CopyContextLocalsToScopeObject(shared, scope_info, function_context, local_scope); // Finally copy any properties from the function context extension. This will @@ -8729,8 +8741,8 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) { static Handle<JSObject> MaterializeClosure(Handle<Context> context) { ASSERT(context->is_function_context()); - Handle<Code> code(context->closure()->code()); - ScopeInfo<> scope_info(*code); + Handle<SharedFunctionInfo> shared(context->closure()->shared()); + ScopeInfo<> scope_info(shared->scope_info()); // Allocate and initialize a JSObject with all the content of theis function // closure. @@ -8738,7 +8750,7 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) { // Check whether the arguments shadow object exists. int arguments_shadow_index = - ScopeInfo<>::ContextSlotIndex(*code, + ScopeInfo<>::ContextSlotIndex(shared->scope_info(), Heap::arguments_shadow_symbol(), NULL); if (arguments_shadow_index >= 0) { @@ -8754,7 +8766,7 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) { } // Fill all context locals to the context extension. - CopyContextLocalsToScopeObject(code, scope_info, context, closure_scope); + CopyContextLocalsToScopeObject(shared, scope_info, context, closure_scope); // Finally copy any properties from the function context extension. This will // be variables introduced by eval. @@ -8803,8 +8815,8 @@ class ScopeIterator { // created for evaluating top level code and it is not a real local scope. // Checking for the existence of .result seems fragile, but the scope info // saved with the code object does not otherwise have that information. 
- Handle<Code> code(function_->code()); - int index = ScopeInfo<>::StackSlotIndex(*code, Heap::result_symbol()); + int index = ScopeInfo<>::StackSlotIndex(function_->shared()->scope_info(), + Heap::result_symbol()); at_local_ = index < 0; } else if (context_->is_function_context()) { at_local_ = true; @@ -8918,8 +8930,7 @@ class ScopeIterator { case ScopeIterator::ScopeTypeLocal: { PrintF("Local:\n"); - Handle<Code> code(function_->code()); - ScopeInfo<> scope_info(*code); + ScopeInfo<> scope_info(function_->shared()->scope_info()); scope_info.Print(); if (!CurrentContext().is_null()) { CurrentContext()->Print(); @@ -9443,7 +9454,7 @@ static Handle<Context> CopyWithContextChain(Handle<Context> context_chain, // Runtime_DebugEvaluate. static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame, Handle<JSFunction> function, - Handle<Code> code, + Handle<Object> scope_info, const ScopeInfo<>* sinfo, Handle<Context> function_context) { // Try to find the value of 'arguments' to pass as parameter. If it is not @@ -9451,14 +9462,14 @@ static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame, // does not support eval) then create an 'arguments' object. int index; if (sinfo->number_of_stack_slots() > 0) { - index = ScopeInfo<>::StackSlotIndex(*code, Heap::arguments_symbol()); + index = ScopeInfo<>::StackSlotIndex(*scope_info, Heap::arguments_symbol()); if (index != -1) { return Handle<Object>(frame->GetExpression(index)); } } if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) { - index = ScopeInfo<>::ContextSlotIndex(*code, Heap::arguments_symbol(), + index = ScopeInfo<>::ContextSlotIndex(*scope_info, Heap::arguments_symbol(), NULL); if (index != -1) { return Handle<Object>(function_context->get(index)); @@ -9510,8 +9521,8 @@ static Object* Runtime_DebugEvaluate(Arguments args) { JavaScriptFrameIterator it(id); JavaScriptFrame* frame = it.frame(); Handle<JSFunction> function(JSFunction::cast(frame->function())); - Handle<Code> code(function->code()); - ScopeInfo<> sinfo(*code); + Handle<Object> scope_info(function->shared()->scope_info()); + ScopeInfo<> sinfo(*scope_info); // Traverse the saved contexts chain to find the active context for the // selected frame. @@ -9533,7 +9544,7 @@ static Object* Runtime_DebugEvaluate(Arguments args) { Factory::NewFunction(Factory::empty_string(), Factory::undefined_value()); go_between->set_context(function->context()); #ifdef DEBUG - ScopeInfo<> go_between_sinfo(go_between->shared()->code()); + ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info()); ASSERT(go_between_sinfo.number_of_parameters() == 0); ASSERT(go_between_sinfo.number_of_context_slots() == 0); #endif @@ -9579,8 +9590,8 @@ static Object* Runtime_DebugEvaluate(Arguments args) { &has_pending_exception); if (has_pending_exception) return Failure::Exception(); - Handle<Object> arguments = GetArgumentsObject(frame, function, code, &sinfo, - function_context); + Handle<Object> arguments = GetArgumentsObject(frame, function, scope_info, + &sinfo, function_context); // Invoke the evaluation function and return the result. 
const int argc = 2; diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index 5719fc8955..1c9bb08057 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -115,6 +115,7 @@ namespace internal { F(NumberDiv, 2, 1) \ F(NumberMod, 2, 1) \ F(NumberUnaryMinus, 1, 1) \ + F(NumberAlloc, 0, 1) \ \ F(StringAdd, 2, 1) \ F(StringBuilderConcat, 3, 1) \ diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js index ab6e3e9df1..dfe37f50a2 100644 --- a/deps/v8/src/runtime.js +++ b/deps/v8/src/runtime.js @@ -431,7 +431,7 @@ function APPLY_PREPARE(args) { // big enough, but sanity check the value to avoid overflow when // multiplying with pointer size. if (length > 0x800000) { - throw %MakeRangeError('apply_overflow', [length]); + throw %MakeRangeError('stack_overflow', []); } if (!IS_FUNCTION(this)) { @@ -450,7 +450,7 @@ function APPLY_PREPARE(args) { function APPLY_OVERFLOW(length) { - throw %MakeRangeError('apply_overflow', [length]); + throw %MakeRangeError('stack_overflow', []); } diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc index 286f515b84..ca0e2d86e1 100755 --- a/deps/v8/src/scanner.cc +++ b/deps/v8/src/scanner.cc @@ -341,8 +341,7 @@ Scanner::Scanner(ParserMode pre) void Scanner::Initialize(Handle<String> source, ParserLanguage language) { - safe_string_input_buffer_.Reset(source.location()); - Init(source, &safe_string_input_buffer_, 0, source->length(), language); + Init(source, NULL, 0, source->length(), language); } @@ -357,9 +356,7 @@ void Scanner::Initialize(Handle<String> source, int start_position, int end_position, ParserLanguage language) { - safe_string_input_buffer_.Reset(source.location()); - Init(source, &safe_string_input_buffer_, - start_position, end_position, language); + Init(source, NULL, start_position, end_position, language); } @@ -368,6 +365,10 @@ void Scanner::Init(Handle<String> source, int start_position, int end_position, ParserLanguage language) { + // Either initialize the scanner from a character stream or from a + // string. + ASSERT(source.is_null() || stream == NULL); + // Initialize the source buffer. 
if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) { two_byte_string_buffer_.Initialize( @@ -382,6 +383,10 @@ void Scanner::Init(Handle<String> source, end_position); source_ = &ascii_string_buffer_; } else { + if (!source.is_null()) { + safe_string_input_buffer_.Reset(source.location()); + stream = &safe_string_input_buffer_; + } char_stream_buffer_.Initialize(source, stream, start_position, diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc index 2091ca726c..16c88b5171 100644 --- a/deps/v8/src/scopeinfo.cc +++ b/deps/v8/src/scopeinfo.cc @@ -148,7 +148,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope) } -// Encoding format in the Code object: +// Encoding format in a FixedArray object: // // - function name // @@ -244,22 +244,45 @@ static Object** ReadList(Object** p, template<class Allocator> -ScopeInfo<Allocator>::ScopeInfo(Code* code) +Handle<Object> ScopeInfo<Allocator>::CreateHeapObject(Scope* scope) { + ScopeInfo<ZoneListAllocationPolicy> sinfo(scope); + return sinfo.Serialize(); +} + + +template<class Allocator> +Object* ScopeInfo<Allocator>::EmptyHeapObject() { + return Heap::empty_fixed_array(); +} + + +inline bool IsNotEmpty(Object* data) { + return FixedArray::cast(data)->length() != 0; +} + + +inline Object** GetDataStart(Object* data) { + return FixedArray::cast(data)->data_start(); +} + + +template<class Allocator> +ScopeInfo<Allocator>::ScopeInfo(Object* data) : function_name_(Factory::empty_symbol()), parameters_(4), stack_slots_(8), context_slots_(8), context_modes_(8) { - if (code == NULL || code->sinfo_size() == 0) return; - - Object** p0 = &Memory::Object_at(code->sinfo_start()); - Object** p = p0; - p = ReadSymbol(p, &function_name_); - p = ReadBool(p, &calls_eval_); - p = ReadList<Allocator>(p, &context_slots_, &context_modes_); - p = ReadList<Allocator>(p, ¶meters_); - p = ReadList<Allocator>(p, &stack_slots_); - ASSERT((p - p0) * kPointerSize == code->sinfo_size()); + if (IsNotEmpty(data)) { + Object** p0 = GetDataStart(data); + Object** p = p0; + p = ReadSymbol(p, &function_name_); + p = ReadBool(p, &calls_eval_); + p = ReadList<Allocator>(p, &context_slots_, &context_modes_); + p = ReadList<Allocator>(p, ¶meters_); + p = ReadList<Allocator>(p, &stack_slots_); + ASSERT((p - p0) == FixedArray::cast(data)->length()); + } } @@ -313,57 +336,49 @@ static Object** WriteList(Object** p, template<class Allocator> -int ScopeInfo<Allocator>::Serialize(Code* code) { +Handle<Object> ScopeInfo<Allocator>::Serialize() { // function name, calls eval, length & sentinel for 3 tables: const int extra_slots = 1 + 1 + 2 * 3; - int size = (extra_slots + - context_slots_.length() * 2 + - parameters_.length() + - stack_slots_.length()) * kPointerSize; - - if (code != NULL) { - CHECK(code->sinfo_size() == size); - Object** p0 = &Memory::Object_at(code->sinfo_start()); - Object** p = p0; - p = WriteSymbol(p, function_name_); - p = WriteBool(p, calls_eval_); - p = WriteList(p, &context_slots_, &context_modes_); - p = WriteList(p, ¶meters_); - p = WriteList(p, &stack_slots_); - ASSERT((p - p0) * kPointerSize == size); - } + int length = extra_slots + + context_slots_.length() * 2 + + parameters_.length() + + stack_slots_.length(); - return size; -} + Handle<Object> data(Factory::NewFixedArray(length, TENURED)); + AssertNoAllocation nogc; + Object** p0 = GetDataStart(*data); + Object** p = p0; + p = WriteSymbol(p, function_name_); + p = WriteBool(p, calls_eval_); + p = WriteList(p, &context_slots_, &context_modes_); + p = WriteList(p, ¶meters_); + p = 
WriteList(p, &stack_slots_); + ASSERT((p - p0) == length); -template<class Allocator> -void ScopeInfo<Allocator>::IterateScopeInfo(Code* code, ObjectVisitor* v) { - Object** start = &Memory::Object_at(code->sinfo_start()); - Object** end = &Memory::Object_at(code->sinfo_start() + code->sinfo_size()); - v->VisitPointers(start, end); + return data; } -static Object** ContextEntriesAddr(Code* code) { - ASSERT(code->sinfo_size() > 0); +static Object** ContextEntriesAddr(Object* data) { + ASSERT(IsNotEmpty(data)); // +2 for function name and calls eval: - return &Memory::Object_at(code->sinfo_start()) + 2; + return GetDataStart(data) + 2; } -static Object** ParameterEntriesAddr(Code* code) { - ASSERT(code->sinfo_size() > 0); - Object** p = ContextEntriesAddr(code); +static Object** ParameterEntriesAddr(Object* data) { + ASSERT(IsNotEmpty(data)); + Object** p = ContextEntriesAddr(data); int n; // number of context slots; p = ReadInt(p, &n); return p + n*2 + 1; // *2 for pairs, +1 for sentinel } -static Object** StackSlotEntriesAddr(Code* code) { - ASSERT(code->sinfo_size() > 0); - Object** p = ParameterEntriesAddr(code); +static Object** StackSlotEntriesAddr(Object* data) { + ASSERT(IsNotEmpty(data)); + Object** p = ParameterEntriesAddr(data); int n; // number of parameter slots; p = ReadInt(p, &n); return p + n + 1; // +1 for sentinel @@ -371,10 +386,10 @@ static Object** StackSlotEntriesAddr(Code* code) { template<class Allocator> -bool ScopeInfo<Allocator>::CallsEval(Code* code) { - if (code->sinfo_size() > 0) { +bool ScopeInfo<Allocator>::CallsEval(Object* data) { + if (IsNotEmpty(data)) { // +1 for function name: - Object** p = &Memory::Object_at(code->sinfo_start()) + 1; + Object** p = GetDataStart(data) + 1; bool calls_eval; p = ReadBool(p, &calls_eval); return calls_eval; @@ -384,9 +399,9 @@ bool ScopeInfo<Allocator>::CallsEval(Code* code) { template<class Allocator> -int ScopeInfo<Allocator>::NumberOfStackSlots(Code* code) { - if (code->sinfo_size() > 0) { - Object** p = StackSlotEntriesAddr(code); +int ScopeInfo<Allocator>::NumberOfStackSlots(Object* data) { + if (IsNotEmpty(data)) { + Object** p = StackSlotEntriesAddr(data); int n; // number of stack slots; ReadInt(p, &n); return n; @@ -396,9 +411,9 @@ int ScopeInfo<Allocator>::NumberOfStackSlots(Code* code) { template<class Allocator> -int ScopeInfo<Allocator>::NumberOfContextSlots(Code* code) { - if (code->sinfo_size() > 0) { - Object** p = ContextEntriesAddr(code); +int ScopeInfo<Allocator>::NumberOfContextSlots(Object* data) { + if (IsNotEmpty(data)) { + Object** p = ContextEntriesAddr(data); int n; // number of context slots; ReadInt(p, &n); return n + Context::MIN_CONTEXT_SLOTS; @@ -408,9 +423,9 @@ int ScopeInfo<Allocator>::NumberOfContextSlots(Code* code) { template<class Allocator> -bool ScopeInfo<Allocator>::HasHeapAllocatedLocals(Code* code) { - if (code->sinfo_size() > 0) { - Object** p = ContextEntriesAddr(code); +bool ScopeInfo<Allocator>::HasHeapAllocatedLocals(Object* data) { + if (IsNotEmpty(data)) { + Object** p = ContextEntriesAddr(data); int n; // number of context slots; ReadInt(p, &n); return n > 0; @@ -420,14 +435,14 @@ bool ScopeInfo<Allocator>::HasHeapAllocatedLocals(Code* code) { template<class Allocator> -int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) { +int ScopeInfo<Allocator>::StackSlotIndex(Object* data, String* name) { ASSERT(name->IsSymbol()); - if (code->sinfo_size() > 0) { + if (IsNotEmpty(data)) { // Loop below depends on the NULL sentinel after the stack slot names. 
- ASSERT(NumberOfStackSlots(code) > 0 || - *(StackSlotEntriesAddr(code) + 1) == NULL); + ASSERT(NumberOfStackSlots(data) > 0 || + *(StackSlotEntriesAddr(data) + 1) == NULL); // slots start after length entry - Object** p0 = StackSlotEntriesAddr(code) + 1; + Object** p0 = StackSlotEntriesAddr(data) + 1; Object** p = p0; while (*p != NULL) { if (*p == name) return static_cast<int>(p - p0); @@ -439,19 +454,19 @@ int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) { template<class Allocator> -int ScopeInfo<Allocator>::ContextSlotIndex(Code* code, +int ScopeInfo<Allocator>::ContextSlotIndex(Object* data, String* name, Variable::Mode* mode) { ASSERT(name->IsSymbol()); - int result = ContextSlotCache::Lookup(code, name, mode); + int result = ContextSlotCache::Lookup(data, name, mode); if (result != ContextSlotCache::kNotFound) return result; - if (code->sinfo_size() > 0) { + if (IsNotEmpty(data)) { // Loop below depends on the NULL sentinel after the context slot names. - ASSERT(NumberOfContextSlots(code) >= Context::MIN_CONTEXT_SLOTS || - *(ContextEntriesAddr(code) + 1) == NULL); + ASSERT(NumberOfContextSlots(data) >= Context::MIN_CONTEXT_SLOTS || + *(ContextEntriesAddr(data) + 1) == NULL); // slots start after length entry - Object** p0 = ContextEntriesAddr(code) + 1; + Object** p0 = ContextEntriesAddr(data) + 1; Object** p = p0; // contexts may have no variable slots (in the presence of eval()). while (*p != NULL) { @@ -462,21 +477,21 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code, Variable::Mode mode_value = static_cast<Variable::Mode>(v); if (mode != NULL) *mode = mode_value; result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS; - ContextSlotCache::Update(code, name, mode_value, result); + ContextSlotCache::Update(data, name, mode_value, result); return result; } p += 2; } } - ContextSlotCache::Update(code, name, Variable::INTERNAL, -1); + ContextSlotCache::Update(data, name, Variable::INTERNAL, -1); return -1; } template<class Allocator> -int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) { +int ScopeInfo<Allocator>::ParameterIndex(Object* data, String* name) { ASSERT(name->IsSymbol()); - if (code->sinfo_size() > 0) { + if (IsNotEmpty(data)) { // We must read parameters from the end since for // multiply declared parameters the value of the // last declaration of that parameter is used @@ -487,7 +502,7 @@ int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) { // once, with corresponding index. This requires a new // implementation of the ScopeInfo code. See also other // comments in this file regarding this. 
- Object** p = ParameterEntriesAddr(code); + Object** p = ParameterEntriesAddr(data); int n; // number of parameters Object** p0 = ReadInt(p, &n); p = p0 + n; @@ -501,12 +516,12 @@ int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) { template<class Allocator> -int ScopeInfo<Allocator>::FunctionContextSlotIndex(Code* code, String* name) { +int ScopeInfo<Allocator>::FunctionContextSlotIndex(Object* data, String* name) { ASSERT(name->IsSymbol()); - if (code->sinfo_size() > 0) { - Object** p = &Memory::Object_at(code->sinfo_start()); + if (IsNotEmpty(data)) { + Object** p = GetDataStart(data); if (*p == name) { - p = ContextEntriesAddr(code); + p = ContextEntriesAddr(data); int n; // number of context slots ReadInt(p, &n); ASSERT(n != 0); @@ -544,20 +559,20 @@ int ScopeInfo<Allocator>::NumberOfLocals() const { } -int ContextSlotCache::Hash(Code* code, String* name) { +int ContextSlotCache::Hash(Object* data, String* name) { // Uses only lower 32 bits if pointers are larger. uintptr_t addr_hash = - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(code)) >> 2; + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(data)) >> 2; return static_cast<int>((addr_hash ^ name->Hash()) % kLength); } -int ContextSlotCache::Lookup(Code* code, +int ContextSlotCache::Lookup(Object* data, String* name, Variable::Mode* mode) { - int index = Hash(code, name); + int index = Hash(data, name); Key& key = keys_[index]; - if ((key.code == code) && key.name->Equals(name)) { + if ((key.data == data) && key.name->Equals(name)) { Value result(values_[index]); if (mode != NULL) *mode = result.mode(); return result.index() + kNotFound; @@ -566,28 +581,28 @@ int ContextSlotCache::Lookup(Code* code, } -void ContextSlotCache::Update(Code* code, +void ContextSlotCache::Update(Object* data, String* name, Variable::Mode mode, int slot_index) { String* symbol; ASSERT(slot_index > kNotFound); if (Heap::LookupSymbolIfExists(name, &symbol)) { - int index = Hash(code, symbol); + int index = Hash(data, symbol); Key& key = keys_[index]; - key.code = code; + key.data = data; key.name = symbol; // Please note value only takes a uint as index. values_[index] = Value(mode, slot_index - kNotFound).raw(); #ifdef DEBUG - ValidateEntry(code, name, mode, slot_index); + ValidateEntry(data, name, mode, slot_index); #endif } } void ContextSlotCache::Clear() { - for (int index = 0; index < kLength; index++) keys_[index].code = NULL; + for (int index = 0; index < kLength; index++) keys_[index].data = NULL; } @@ -599,15 +614,15 @@ uint32_t ContextSlotCache::values_[ContextSlotCache::kLength]; #ifdef DEBUG -void ContextSlotCache::ValidateEntry(Code* code, +void ContextSlotCache::ValidateEntry(Object* data, String* name, Variable::Mode mode, int slot_index) { String* symbol; if (Heap::LookupSymbolIfExists(name, &symbol)) { - int index = Hash(code, name); + int index = Hash(data, name); Key& key = keys_[index]; - ASSERT(key.code == code); + ASSERT(key.data == data); ASSERT(key.name->Equals(name)); Value result(values_[index]); ASSERT(result.mode() == mode); diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h index 9fb26d0339..34bbdec47b 100644 --- a/deps/v8/src/scopeinfo.h +++ b/deps/v8/src/scopeinfo.h @@ -37,7 +37,7 @@ namespace internal { // Scope information represents information about a functions's // scopes (currently only one, because we don't do any inlining) // and the allocation of the scope's variables. 
Scope information -// is stored in a compressed form with Code objects and is used +// is stored in a compressed form in FixedArray objects and is used // at runtime (stack dumps, deoptimization, etc.). // // Historical note: In other VMs built by this team, ScopeInfo was @@ -54,23 +54,16 @@ class ScopeInfo BASE_EMBEDDED { // Create a ScopeInfo instance from a scope. explicit ScopeInfo(Scope* scope); - // Create a ScopeInfo instance from a Code object. - explicit ScopeInfo(Code* code); + // Create a ScopeInfo instance from an Object holding the serialized data. + explicit ScopeInfo(Object* data); - // Write the ScopeInfo data into a Code object, and returns the - // amount of space that was needed. If no Code object is provided - // (NULL handle), Serialize() only returns the amount of space needed. - // - // This operations requires that the Code object has the correct amount - // of space for the ScopeInfo data; otherwise the operation fails (fatal - // error). Any existing scope info in the Code object is simply overwritten. - int Serialize(Code* code); + // Creates a heap object holding the serialized scope info. + Handle<Object> Serialize(); - // Garbage collection support for scope info embedded in Code objects. - // This code is in ScopeInfo because only here we should have to know - // about the encoding. - static void IterateScopeInfo(Code* code, ObjectVisitor* v); + static Handle<Object> CreateHeapObject(Scope* scope); + // Serializes empty scope info. + static Object* EmptyHeapObject(); // -------------------------------------------------------------------------- // Lookup @@ -100,44 +93,44 @@ class ScopeInfo BASE_EMBEDDED { // object. // // ScopeInfo is the only class which should have to know about the - // encoding of it's information in a Code object, which is why these + // encoding of it's information in a FixedArray object, which is why these // functions are in this class. // Does this scope call eval. - static bool CallsEval(Code* code); + static bool CallsEval(Object* data); // Return the number of stack slots for code. - static int NumberOfStackSlots(Code* code); + static int NumberOfStackSlots(Object* data); // Return the number of context slots for code. - static int NumberOfContextSlots(Code* code); + static int NumberOfContextSlots(Object* data); // Return if this has context slots besides MIN_CONTEXT_SLOTS; - static bool HasHeapAllocatedLocals(Code* code); + static bool HasHeapAllocatedLocals(Object* data); - // Lookup support for scope info embedded in Code objects. Returns + // Lookup support for serialized scope info. Returns the // the stack slot index for a given slot name if the slot is // present; otherwise returns a value < 0. The name must be a symbol // (canonicalized). - static int StackSlotIndex(Code* code, String* name); + static int StackSlotIndex(Object* data, String* name); - // Lookup support for scope info embedded in Code objects. Returns the + // Lookup support for serialized scope info. Returns the // context slot index for a given slot name if the slot is present; otherwise // returns a value < 0. The name must be a symbol (canonicalized). // If the slot is present and mode != NULL, sets *mode to the corresponding // mode for that variable. - static int ContextSlotIndex(Code* code, String* name, Variable::Mode* mode); + static int ContextSlotIndex(Object* data, String* name, Variable::Mode* mode); - // Lookup support for scope info embedded in Code objects. Returns the + // Lookup support for serialized scope info. 
Returns the // parameter index for a given parameter name if the parameter is present; // otherwise returns a value < 0. The name must be a symbol (canonicalized). - static int ParameterIndex(Code* code, String* name); + static int ParameterIndex(Object* data, String* name); - // Lookup support for scope info embedded in Code objects. Returns the + // Lookup support for serialized scope info. Returns the // function context slot index if the function name is present (named // function expressions, only), otherwise returns a value < 0. The name // must be a symbol (canonicalized). - static int FunctionContextSlotIndex(Code* code, String* name); + static int FunctionContextSlotIndex(Object* data, String* name); // -------------------------------------------------------------------------- // Debugging support @@ -155,32 +148,21 @@ class ScopeInfo BASE_EMBEDDED { List<Variable::Mode, Allocator > context_modes_; }; -class ZoneScopeInfo: public ScopeInfo<ZoneListAllocationPolicy> { - public: - // Create a ZoneScopeInfo instance from a scope. - explicit ZoneScopeInfo(Scope* scope) - : ScopeInfo<ZoneListAllocationPolicy>(scope) {} - - // Create a ZoneScopeInfo instance from a Code object. - explicit ZoneScopeInfo(Code* code) - : ScopeInfo<ZoneListAllocationPolicy>(code) {} -}; - -// Cache for mapping (code, property name) into context slot index. +// Cache for mapping (data, property name) into context slot index. // The cache contains both positive and negative results. // Slot index equals -1 means the property is absent. // Cleared at startup and prior to mark sweep collection. class ContextSlotCache { public: - // Lookup context slot index for (code, name). + // Lookup context slot index for (data, name). // If absent, kNotFound is returned. - static int Lookup(Code* code, + static int Lookup(Object* data, String* name, Variable::Mode* mode); // Update an element in the cache. - static void Update(Code* code, + static void Update(Object* data, String* name, Variable::Mode mode, int slot_index); @@ -190,10 +172,10 @@ class ContextSlotCache { static const int kNotFound = -2; private: - inline static int Hash(Code* code, String* name); + inline static int Hash(Object* data, String* name); #ifdef DEBUG - static void ValidateEntry(Code* code, + static void ValidateEntry(Object* data, String* name, Variable::Mode mode, int slot_index); @@ -201,7 +183,7 @@ class ContextSlotCache { static const int kLength = 256; struct Key { - Code* code; + Object* data; String* name; }; diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index a6a516a76d..e8aed5496f 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -673,6 +673,14 @@ void Deserializer::ReadObject(int space_number, LOG(SnapshotPositionEvent(address, source_->position())); } ReadChunk(current, limit, space_number, address); + + if (space == Heap::map_space()) { + ASSERT(size == Map::kSize); + HeapObject* obj = HeapObject::FromAddress(address); + Map* map = reinterpret_cast<Map*>(obj); + map->set_scavenger(Heap::GetScavenger(map->instance_type(), + map->instance_size())); + } } diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index a654a08624..bc29d06a1c 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -1186,7 +1186,7 @@ Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, const char* name) { // Create code object in the heap. 
CodeDesc desc; masm_.GetCode(&desc); - Object* result = Heap::CreateCode(desc, NULL, flags, masm_.CodeObject()); + Object* result = Heap::CreateCode(desc, flags, masm_.CodeObject()); #ifdef ENABLE_DISASSEMBLER if (FLAG_print_code_stubs && !result->IsFailure()) { Code::cast(result)->Disassemble(name); diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 856904a4a3..8c00ee8308 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -429,23 +429,23 @@ class StubCompiler BASE_EMBEDDED { Register object_reg, JSObject* holder, Register holder_reg, - Register scratch, + Register scratch1, + Register scratch2, String* name, - Label* miss, - Register extra = no_reg) { - return CheckPrototypes(object, object_reg, holder, holder_reg, scratch, - name, kInvalidProtoDepth, miss, extra); + Label* miss) { + return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1, + scratch2, name, kInvalidProtoDepth, miss); } Register CheckPrototypes(JSObject* object, Register object_reg, JSObject* holder, Register holder_reg, - Register scratch, + Register scratch1, + Register scratch2, String* name, int save_at_depth, - Label* miss, - Register extra = no_reg); + Label* miss); protected: Object* GetCodeWithFlags(Code::Flags flags, const char* name); @@ -459,6 +459,7 @@ class StubCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, int index, String* name, Label* miss); @@ -469,6 +470,7 @@ class StubCompiler BASE_EMBEDDED { Register name_reg, Register scratch1, Register scratch2, + Register scratch3, AccessorInfo* callback, String* name, Label* miss, @@ -479,6 +481,7 @@ class StubCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, Object* value, String* name, Label* miss); @@ -490,6 +493,7 @@ class StubCompiler BASE_EMBEDDED { Register name_reg, Register scratch1, Register scratch2, + Register scratch3, String* name, Label* miss); diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc index 516ec67496..2887b7664f 100644 --- a/deps/v8/src/top.cc +++ b/deps/v8/src/top.cc @@ -44,6 +44,11 @@ Mutex* Top::break_access_ = OS::CreateMutex(); NoAllocationStringAllocator* preallocated_message_space = NULL; +bool capture_stack_trace_for_uncaught_exceptions = false; +int stack_trace_for_uncaught_exceptions_frame_limit = 0; +StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options = + StackTrace::kOverview; + Address top_addresses[] = { #define C(name) reinterpret_cast<Address>(Top::name()), TOP_ADDRESS_LIST(C) @@ -365,9 +370,8 @@ Handle<String> Top::StackTraceString() { } -Local<StackTrace> Top::CaptureCurrentStackTrace( +Handle<JSArray> Top::CaptureCurrentStackTrace( int frame_limit, StackTrace::StackTraceOptions options) { - v8::HandleScope scope; // Ensure no negative values. int limit = Max(frame_limit, 0); Handle<JSArray> stack_trace = Factory::NewJSArray(frame_limit); @@ -443,7 +447,7 @@ Local<StackTrace> Top::CaptureCurrentStackTrace( } stack_trace->set_length(Smi::FromInt(frames_seen)); - return scope.Close(Utils::StackTraceToLocal(stack_trace)); + return stack_trace; } @@ -681,10 +685,7 @@ Failure* Top::StackOverflow() { // TODO(1240995): To avoid having to call JavaScript code to compute // the message for stack overflow exceptions which is very likely to // double fault with another stack overflow exception, we use a - // precomputed message. 
This is somewhat problematic in that it - // doesn't use ReportUncaughtException to determine the location - // from where the exception occurred. It should probably be - // reworked. + // precomputed message. DoThrow(*exception, NULL, kStackOverflowMessage); return Failure::Exception(); } @@ -778,25 +779,6 @@ void Top::ComputeLocation(MessageLocation* target) { } -void Top::ReportUncaughtException(Handle<Object> exception, - MessageLocation* location, - Handle<String> stack_trace) { - Handle<Object> message; - if (!Bootstrapper::IsActive()) { - // It's not safe to try to make message objects while the bootstrapper - // is active since the infrastructure may not have been properly - // initialized. - message = - MessageHandler::MakeMessageObject("uncaught_exception", - location, - HandleVector<Object>(&exception, 1), - stack_trace); - } - // Report the uncaught exception. - MessageHandler::ReportMessage(location, message); -} - - bool Top::ShouldReturnException(bool* is_caught_externally, bool catchable_by_javascript) { // Find the top-most try-catch handler. @@ -869,8 +851,15 @@ void Top::DoThrow(Object* exception, // may not have been properly initialized. Handle<String> stack_trace; if (FLAG_trace_exception) stack_trace = StackTraceString(); + Handle<JSArray> stack_trace_object; + if (report_exception && capture_stack_trace_for_uncaught_exceptions) { + stack_trace_object = Top::CaptureCurrentStackTrace( + stack_trace_for_uncaught_exceptions_frame_limit, + stack_trace_for_uncaught_exceptions_options); + } message_obj = MessageHandler::MakeMessageObject("uncaught_exception", - location, HandleVector<Object>(&exception_handle, 1), stack_trace); + location, HandleVector<Object>(&exception_handle, 1), stack_trace, + stack_trace_object); } } @@ -997,6 +986,16 @@ bool Top::OptionalRescheduleException(bool is_bottom_call) { } +void Top::SetCaptureStackTraceForUncaughtExceptions( + bool capture, + int frame_limit, + StackTrace::StackTraceOptions options) { + capture_stack_trace_for_uncaught_exceptions = capture; + stack_trace_for_uncaught_exceptions_frame_limit = frame_limit; + stack_trace_for_uncaught_exceptions_options = options; +} + + bool Top::is_out_of_memory() { if (has_pending_exception()) { Object* e = pending_exception(); diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h index 4a76a7f87d..87333931eb 100644 --- a/deps/v8/src/top.h +++ b/deps/v8/src/top.h @@ -227,6 +227,11 @@ class Top { (try_catch_handler() == thread_local_.catcher_); } + static void SetCaptureStackTraceForUncaughtExceptions( + bool capture, + int frame_limit, + StackTrace::StackTraceOptions options); + // Tells whether the current context has experienced an out of memory // exception. static bool is_out_of_memory(); @@ -266,7 +271,7 @@ class Top { static void PrintStack(StringStream* accumulator); static void PrintStack(); static Handle<String> StackTraceString(); - static Local<StackTrace> CaptureCurrentStackTrace( + static Handle<JSArray> CaptureCurrentStackTrace( int frame_limit, StackTrace::StackTraceOptions options); @@ -302,9 +307,6 @@ class Top { const char* message); static bool ShouldReturnException(bool* is_caught_externally, bool catchable_by_javascript); - static void ReportUncaughtException(Handle<Object> exception, - MessageLocation* location, - Handle<String> stack_trace); // Attempts to compute the current source location, storing the // result in the target out parameter. 
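The top.cc/top.h hunks above add the plumbing for capturing stack traces of uncaught exceptions: Top::SetCaptureStackTraceForUncaughtExceptions() stores the capture flag, frame limit and detail options, and Top::DoThrow() now attaches the JSArray returned by Top::CaptureCurrentStackTrace() to the message object it reports. Below is a minimal embedder-side sketch of exercising that path through the public API; the listener name OnUncaughtMessage and the throwing script are illustrative only, and the StackTrace/StackFrame accessors are assumed to behave as declared in the v8.h shipped with this release.

// Sketch only: OnUncaughtMessage and the sample script are hypothetical;
// the StackTrace/StackFrame accessors are assumed from this release's v8.h.
#include <cstdio>
#include <v8.h>

// Message listener invoked for uncaught exceptions. When capture is enabled,
// Top::DoThrow() attaches the recorded frames, so GetStackTrace() is non-empty.
static void OnUncaughtMessage(v8::Handle<v8::Message> message,
                              v8::Handle<v8::Value> data) {
  v8::HandleScope scope;
  v8::Handle<v8::StackTrace> trace = message->GetStackTrace();
  if (trace.IsEmpty()) return;  // Capture disabled or nothing recorded.
  for (int i = 0; i < trace->GetFrameCount(); i++) {
    v8::Local<v8::StackFrame> frame = trace->GetFrame(i);
    v8::String::Utf8Value script(frame->GetScriptName());
    v8::String::Utf8Value function(frame->GetFunctionName());
    printf("  at %s (%s:%d:%d)\n",
           *function ? *function : "<anonymous>",
           *script ? *script : "<unknown>",
           frame->GetLineNumber(),
           frame->GetColumn());
  }
}

int main() {
  // Off by default; mirror the defaults of the new entry point:
  // ten frames, overview-level detail.
  v8::V8::SetCaptureStackTraceForUncaughtExceptions(
      true, 10, v8::StackTrace::kOverview);
  v8::V8::AddMessageListener(OnUncaughtMessage);

  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  // With no v8::TryCatch on the stack the exception counts as uncaught,
  // so the stack trace is captured and routed to the listener above.
  v8::Handle<v8::Script> script = v8::Script::Compile(
      v8::String::New("function boom() { throw new Error('x'); } boom();"));
  if (!script.IsEmpty()) script->Run();

  context.Dispose();
  return 0;
}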
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index 487faabcd3..ffd881fcf6 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -723,7 +723,7 @@ function ObjectDefineProperty(obj, p, attributes) { // ES5 section 15.2.3.7. function ObjectDefineProperties(obj, properties) { - if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) && + if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_UNDETECTABLE(obj)) throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]); var props = ToObject(properties); @@ -745,6 +745,24 @@ function ObjectDefineProperties(obj, properties) { } +// ES5 section 15.2.3.9. +function ObjectFreeze(obj) { + if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) && + !IS_UNDETECTABLE(obj)) { + throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]); + } + var names = ObjectGetOwnPropertyNames(obj); + for (var key in names) { + var name = names[key]; + var desc = GetOwnProperty(obj, name); + if (IsDataDescriptor(desc)) desc.setWritable(false); + if (desc.isConfigurable()) desc.setConfigurable(false); + DefineOwnProperty(obj, name, desc, true); + } + ObjectPreventExtension(obj); +} + + // ES5 section 15.2.3.10 function ObjectPreventExtension(obj) { if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) && @@ -756,6 +774,26 @@ function ObjectPreventExtension(obj) { } +// ES5 section 15.2.3.12 +function ObjectIsFrozen(obj) { + if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) && + !IS_UNDETECTABLE(obj)) { + throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]); + } + var names = ObjectGetOwnPropertyNames(obj); + for (var key in names) { + var name = names[key]; + var desc = GetOwnProperty(obj, name); + if (IsDataDescriptor(desc) && desc.writable) return false; + if (desc.configurable) return false; + } + if (!ObjectIsExtensible(obj)) { + return true; + } + return false; +} + + // ES5 section 15.2.3.13 function ObjectIsExtensible(obj) { if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) && @@ -799,10 +837,12 @@ function SetupObject() { "create", ObjectCreate, "defineProperty", ObjectDefineProperty, "defineProperties", ObjectDefineProperties, + "freeze", ObjectFreeze, "getPrototypeOf", ObjectGetPrototypeOf, "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor, "getOwnPropertyNames", ObjectGetOwnPropertyNames, "isExtensible", ObjectIsExtensible, + "isFrozen", ObjectIsFrozen, "preventExtensions", ObjectPreventExtension )); } diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index d930c8dec2..0a24a6d7c1 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -34,7 +34,7 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 2 #define MINOR_VERSION 2 -#define BUILD_NUMBER 23 +#define BUILD_NUMBER 24 #define PATCH_LEVEL 0 #define CANDIDATE_VERSION false diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h index 4df2cfda73..aa4cedbb13 100644 --- a/deps/v8/src/vm-state-inl.h +++ b/deps/v8/src/vm-state-inl.h @@ -74,8 +74,10 @@ VMState::VMState(StateTag state) if (state == EXTERNAL) state = OTHER; #endif state_ = state; - previous_ = current_state_; // Save the previous state. - current_state_ = this; // Install the new state. + // Save the previous state. + previous_ = reinterpret_cast<VMState*>(current_state_); + // Install the new state. 
+ OS::ReleaseStore(¤t_state_, reinterpret_cast<AtomicWord>(this)); #ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log_state_changes) { @@ -103,7 +105,8 @@ VMState::VMState(StateTag state) VMState::~VMState() { if (disabled_) return; - current_state_ = previous_; // Return to the previous state. + // Return to the previous state. + OS::ReleaseStore(¤t_state_, reinterpret_cast<AtomicWord>(previous_)); #ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log_state_changes) { diff --git a/deps/v8/src/vm-state.cc b/deps/v8/src/vm-state.cc index 3859efb824..6bd737dfd6 100644 --- a/deps/v8/src/vm-state.cc +++ b/deps/v8/src/vm-state.cc @@ -33,7 +33,7 @@ namespace v8 { namespace internal { #ifdef ENABLE_VMSTATE_TRACKING -VMState* VMState::current_state_ = NULL; +AtomicWord VMState::current_state_ = 0; #endif } } // namespace v8::internal diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h index 241df4c9d4..080eb8ded6 100644 --- a/deps/v8/src/vm-state.h +++ b/deps/v8/src/vm-state.h @@ -44,15 +44,17 @@ class VMState BASE_EMBEDDED { // Used for debug asserts. static bool is_outermost_external() { - return current_state_ == NULL; + return current_state_ == 0; } static StateTag current_state() { - return current_state_ ? current_state_->state() : EXTERNAL; + VMState* state = reinterpret_cast<VMState*>(current_state_); + return state ? state->state() : EXTERNAL; } static Address external_callback() { - return current_state_ ? current_state_->external_callback_ : NULL; + VMState* state = reinterpret_cast<VMState*>(current_state_); + return state ? state->external_callback_ : NULL; } private: @@ -62,7 +64,7 @@ class VMState BASE_EMBEDDED { Address external_callback_; // A stack of VM states. - static VMState* current_state_; + static AtomicWord current_state_; #else public: explicit VMState(StateTag state) {} diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index c19e2ba130..c66666a73a 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -119,7 +119,6 @@ void CpuFeatures::Probe() { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>()); if (!code->IsCode()) return; diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index 716f08bb54..9917481696 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -139,149 +139,6 @@ CodeGenState::~CodeGenState() { } -// ------------------------------------------------------------------------- -// Deferred code objects -// -// These subclasses of DeferredCode add pieces of code to the end of generated -// code. They are branched to from the generated code, and -// keep some slower code out of the main body of the generated code. -// Many of them call a code stub or a runtime function. - -class DeferredInlineSmiAdd: public DeferredCode { - public: - DeferredInlineSmiAdd(Register dst, - Smi* value, - OverwriteMode overwrite_mode) - : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { - set_comment("[ DeferredInlineSmiAdd"); - } - - virtual void Generate(); - - private: - Register dst_; - Smi* value_; - OverwriteMode overwrite_mode_; -}; - - -// The result of value + src is in dst. It either overflowed or was not -// smi tagged. Undo the speculative addition and call the appropriate -// specialized stub for add. The result is left in dst. 
-class DeferredInlineSmiAddReversed: public DeferredCode { - public: - DeferredInlineSmiAddReversed(Register dst, - Smi* value, - OverwriteMode overwrite_mode) - : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { - set_comment("[ DeferredInlineSmiAddReversed"); - } - - virtual void Generate(); - - private: - Register dst_; - Smi* value_; - OverwriteMode overwrite_mode_; -}; - - -class DeferredInlineSmiSub: public DeferredCode { - public: - DeferredInlineSmiSub(Register dst, - Smi* value, - OverwriteMode overwrite_mode) - : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { - set_comment("[ DeferredInlineSmiSub"); - } - - virtual void Generate(); - - private: - Register dst_; - Smi* value_; - OverwriteMode overwrite_mode_; -}; - - -// Call the appropriate binary operation stub to compute src op value -// and leave the result in dst. -class DeferredInlineSmiOperation: public DeferredCode { - public: - DeferredInlineSmiOperation(Token::Value op, - Register dst, - Register src, - Smi* value, - OverwriteMode overwrite_mode) - : op_(op), - dst_(dst), - src_(src), - value_(value), - overwrite_mode_(overwrite_mode) { - set_comment("[ DeferredInlineSmiOperation"); - } - - virtual void Generate(); - - private: - Token::Value op_; - Register dst_; - Register src_; - Smi* value_; - OverwriteMode overwrite_mode_; -}; - - -// Call the appropriate binary operation stub to compute value op src -// and leave the result in dst. -class DeferredInlineSmiOperationReversed: public DeferredCode { - public: - DeferredInlineSmiOperationReversed(Token::Value op, - Register dst, - Smi* value, - Register src, - OverwriteMode overwrite_mode) - : op_(op), - dst_(dst), - value_(value), - src_(src), - overwrite_mode_(overwrite_mode) { - set_comment("[ DeferredInlineSmiOperationReversed"); - } - - virtual void Generate(); - - private: - Token::Value op_; - Register dst_; - Smi* value_; - Register src_; - OverwriteMode overwrite_mode_; -}; - - -class FloatingPointHelper : public AllStatic { - public: - // Load the operands from rdx and rax into xmm0 and xmm1, as doubles. - // If the operands are not both numbers, jump to not_numbers. - // Leaves rdx and rax unchanged. SmiOperands assumes both are smis. - // NumberOperands assumes both are smis or heap numbers. - static void LoadSSE2SmiOperands(MacroAssembler* masm); - static void LoadSSE2NumberOperands(MacroAssembler* masm); - static void LoadSSE2UnknownOperands(MacroAssembler* masm, - Label* not_numbers); - - // Takes the operands in rdx and rax and loads them as integers in rax - // and rcx. - static void LoadAsIntegers(MacroAssembler* masm, - Label* operand_conversion_failure, - Register heap_number_map); - // As above, but we know the operands to be numbers. In that case, - // conversion can't fail. - static void LoadNumbersAsIntegers(MacroAssembler* masm); -}; - - // ----------------------------------------------------------------------------- // CodeGenerator implementation. @@ -298,21 +155,6 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm) } -void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { - // Call the runtime to declare the globals. The inevitable call - // will sync frame elements to memory anyway, so we do it eagerly to - // allow us to push the arguments directly into place. - frame_->SyncRange(0, frame_->element_count() - 1); - - __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT); - frame_->EmitPush(rsi); // The context is the first argument. 
- frame_->EmitPush(kScratchRegister); - frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0)); - Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3); - // Return value is ignored. -} - - void CodeGenerator::Generate(CompilationInfo* info) { // Record the position for debugging purposes. CodeForFunctionPosition(info->function()); @@ -543,209 +385,2077 @@ void CodeGenerator::Generate(CompilationInfo* info) { allocator_ = NULL; } -void CodeGenerator::GenerateReturnSequence(Result* return_value) { - // The return value is a live (but not currently reference counted) - // reference to rax. This is safe because the current frame does not - // contain a reference to rax (it is prepared for the return by spilling - // all registers). - if (FLAG_trace) { - frame_->Push(return_value); - *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1); + +Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { + // Currently, this assertion will fail if we try to assign to + // a constant variable that is constant because it is read-only + // (such as the variable referring to a named function expression). + // We need to implement assignments to read-only variables. + // Ideally, we should do this during AST generation (by converting + // such assignments into expression statements); however, in general + // we may not be able to make the decision until past AST generation, + // that is when the entire program is known. + ASSERT(slot != NULL); + int index = slot->index(); + switch (slot->type()) { + case Slot::PARAMETER: + return frame_->ParameterAt(index); + + case Slot::LOCAL: + return frame_->LocalAt(index); + + case Slot::CONTEXT: { + // Follow the context chain if necessary. + ASSERT(!tmp.is(rsi)); // do not overwrite context register + Register context = rsi; + int chain_length = scope()->ContextChainLength(slot->var()->scope()); + for (int i = 0; i < chain_length; i++) { + // Load the closure. + // (All contexts, even 'with' contexts, have a closure, + // and it is the same for all contexts inside a function. + // There is no need to go to the function context first.) + __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); + // Load the function context (which is the incoming, outer context). + __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset)); + context = tmp; + } + // We may have a 'with' context now. Get the function context. + // (In fact this mov may never be the needed, since the scope analysis + // may not permit a direct context access in this case and thus we are + // always at a function context. However it is safe to dereference be- + // cause the function context of a function context is itself. Before + // deleting this mov we should try to create a counter-example first, + // though...) + __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); + return ContextOperand(tmp, index); + } + + default: + UNREACHABLE(); + return Operand(rsp, 0); } - return_value->ToRegister(rax); +} - // Add a label for checking the size of the code used for returning. + +Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot, + Result tmp, + JumpTarget* slow) { + ASSERT(slot->type() == Slot::CONTEXT); + ASSERT(tmp.is_register()); + Register context = rsi; + + for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. 
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), + Immediate(0)); + slow->Branch(not_equal, not_taken); + } + __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); + __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); + context = tmp.reg(); + } + } + // Check that last extension is NULL. + __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); + slow->Branch(not_equal, not_taken); + __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX)); + return ContextOperand(tmp.reg(), slot->index()); +} + + +// Emit code to load the value of an expression to the top of the +// frame. If the expression is boolean-valued it may be compiled (or +// partially compiled) into control flow to the control destination. +// If force_control is true, control flow is forced. +void CodeGenerator::LoadCondition(Expression* x, + ControlDestination* dest, + bool force_control) { + ASSERT(!in_spilled_code()); + int original_height = frame_->height(); + + { CodeGenState new_state(this, dest); + Visit(x); + + // If we hit a stack overflow, we may not have actually visited + // the expression. In that case, we ensure that we have a + // valid-looking frame state because we will continue to generate + // code as we unwind the C++ stack. + // + // It's possible to have both a stack overflow and a valid frame + // state (eg, a subexpression overflowed, visiting it returned + // with a dummied frame state, and visiting this expression + // returned with a normal-looking state). + if (HasStackOverflow() && + !dest->is_used() && + frame_->height() == original_height) { + dest->Goto(true); + } + } + + if (force_control && !dest->is_used()) { + // Convert the TOS value into flow to the control destination. + // TODO(X64): Make control flow to control destinations work. + ToBoolean(dest); + } + + ASSERT(!(force_control && !dest->is_used())); + ASSERT(dest->is_used() || frame_->height() == original_height + 1); +} + + +void CodeGenerator::LoadAndSpill(Expression* expression) { + // TODO(x64): No architecture specific code. Move to shared location. + ASSERT(in_spilled_code()); + set_in_spilled_code(false); + Load(expression); + frame_->SpillAll(); + set_in_spilled_code(true); +} + + +void CodeGenerator::Load(Expression* expr) { #ifdef DEBUG - Label check_exit_codesize; - masm_->bind(&check_exit_codesize); + int original_height = frame_->height(); #endif + ASSERT(!in_spilled_code()); + JumpTarget true_target; + JumpTarget false_target; + ControlDestination dest(&true_target, &false_target, true); + LoadCondition(expr, &dest, false); - // Leave the frame and return popping the arguments and the - // receiver. - frame_->Exit(); - masm_->ret((scope()->num_parameters() + 1) * kPointerSize); -#ifdef ENABLE_DEBUGGER_SUPPORT - // Add padding that will be overwritten by a debugger breakpoint. - // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k" - // with length 7 (3 + 1 + 3). - const int kPadding = Assembler::kJSReturnSequenceLength - 7; - for (int i = 0; i < kPadding; ++i) { - masm_->int3(); + if (dest.false_was_fall_through()) { + // The false target was just bound. + JumpTarget loaded; + frame_->Push(Factory::false_value()); + // There may be dangling jumps to the true target. + if (true_target.is_linked()) { + loaded.Jump(); + true_target.Bind(); + frame_->Push(Factory::true_value()); + loaded.Bind(); + } + + } else if (dest.is_used()) { + // There is true, and possibly false, control flow (with true as + // the fall through). 
+ JumpTarget loaded; + frame_->Push(Factory::true_value()); + if (false_target.is_linked()) { + loaded.Jump(); + false_target.Bind(); + frame_->Push(Factory::false_value()); + loaded.Bind(); + } + + } else { + // We have a valid value on top of the frame, but we still may + // have dangling jumps to the true and false targets from nested + // subexpressions (eg, the left subexpressions of the + // short-circuited boolean operators). + ASSERT(has_valid_frame()); + if (true_target.is_linked() || false_target.is_linked()) { + JumpTarget loaded; + loaded.Jump(); // Don't lose the current TOS. + if (true_target.is_linked()) { + true_target.Bind(); + frame_->Push(Factory::true_value()); + if (false_target.is_linked()) { + loaded.Jump(); + } + } + if (false_target.is_linked()) { + false_target.Bind(); + frame_->Push(Factory::false_value()); + } + loaded.Bind(); + } } - // Check that the size of the code used for returning matches what is - // expected by the debugger. - ASSERT_EQ(Assembler::kJSReturnSequenceLength, - masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); -#endif - DeleteFrame(); + + ASSERT(has_valid_frame()); + ASSERT(frame_->height() == original_height + 1); } -#ifdef DEBUG -bool CodeGenerator::HasValidEntryRegisters() { - return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0)) - && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0)) - && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0)) - && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0)) - && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0)) - && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0)) - && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0)) - && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0)) - && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0)) - && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0)); +void CodeGenerator::LoadGlobal() { + if (in_spilled_code()) { + frame_->EmitPush(GlobalObject()); + } else { + Result temp = allocator_->Allocate(); + __ movq(temp.reg(), GlobalObject()); + frame_->Push(&temp); + } } -#endif -class DeferredReferenceGetKeyedValue: public DeferredCode { +void CodeGenerator::LoadGlobalReceiver() { + Result temp = allocator_->Allocate(); + Register reg = temp.reg(); + __ movq(reg, GlobalObject()); + __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset)); + frame_->Push(&temp); +} + + +void CodeGenerator::LoadTypeofExpression(Expression* expr) { + // Special handling of identifiers as subexpressions of typeof. + Variable* variable = expr->AsVariableProxy()->AsVariable(); + if (variable != NULL && !variable->is_this() && variable->is_global()) { + // For a global variable we build the property reference + // <global>.<variable> and perform a (regular non-contextual) property + // load to make sure we do not get reference errors. + Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); + Literal key(variable->name()); + Property property(&global, &key, RelocInfo::kNoPosition); + Reference ref(this, &property); + ref.GetValue(); + } else if (variable != NULL && variable->slot() != NULL) { + // For a variable that rewrites to a slot, we signal it is the immediate + // subexpression of a typeof. + LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF); + } else { + // Anything else can be handled normally. 
+ Load(expr); + } +} + + +ArgumentsAllocationMode CodeGenerator::ArgumentsMode() { + if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION; + ASSERT(scope()->arguments_shadow() != NULL); + // We don't want to do lazy arguments allocation for functions that + // have heap-allocated contexts, because it interfers with the + // uninitialized const tracking in the context objects. + return (scope()->num_heap_slots() > 0) + ? EAGER_ARGUMENTS_ALLOCATION + : LAZY_ARGUMENTS_ALLOCATION; +} + + +Result CodeGenerator::StoreArgumentsObject(bool initial) { + ArgumentsAllocationMode mode = ArgumentsMode(); + ASSERT(mode != NO_ARGUMENTS_ALLOCATION); + + Comment cmnt(masm_, "[ store arguments object"); + if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) { + // When using lazy arguments allocation, we store the hole value + // as a sentinel indicating that the arguments object hasn't been + // allocated yet. + frame_->Push(Factory::the_hole_value()); + } else { + ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); + frame_->PushFunction(); + frame_->PushReceiverSlotAddress(); + frame_->Push(Smi::FromInt(scope()->num_parameters())); + Result result = frame_->CallStub(&stub, 3); + frame_->Push(&result); + } + + + Variable* arguments = scope()->arguments()->var(); + Variable* shadow = scope()->arguments_shadow()->var(); + ASSERT(arguments != NULL && arguments->slot() != NULL); + ASSERT(shadow != NULL && shadow->slot() != NULL); + JumpTarget done; + bool skip_arguments = false; + if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) { + // We have to skip storing into the arguments slot if it has + // already been written to. This can happen if the a function + // has a local variable named 'arguments'. + LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF); + Result probe = frame_->Pop(); + if (probe.is_constant()) { + // We have to skip updating the arguments object if it has been + // assigned a proper value. + skip_arguments = !probe.handle()->IsTheHole(); + } else { + __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex); + probe.Unuse(); + done.Branch(not_equal); + } + } + if (!skip_arguments) { + StoreToSlot(arguments->slot(), NOT_CONST_INIT); + if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); + } + StoreToSlot(shadow->slot(), NOT_CONST_INIT); + return frame_->Pop(); +} + +//------------------------------------------------------------------------------ +// CodeGenerator implementation of variables, lookups, and stores. + +//------------------------------------------------------------------------------ +// CodeGenerator implementation of variables, lookups, and stores. + +Reference::Reference(CodeGenerator* cgen, + Expression* expression, + bool persist_after_get) + : cgen_(cgen), + expression_(expression), + type_(ILLEGAL), + persist_after_get_(persist_after_get) { + cgen->LoadReference(this); +} + + +Reference::~Reference() { + ASSERT(is_unloaded() || is_illegal()); +} + + +void CodeGenerator::LoadReference(Reference* ref) { + // References are loaded from both spilled and unspilled code. Set the + // state to unspilled to allow that (and explicitly spill after + // construction at the construction sites). 
+ bool was_in_spilled_code = in_spilled_code_; + in_spilled_code_ = false; + + Comment cmnt(masm_, "[ LoadReference"); + Expression* e = ref->expression(); + Property* property = e->AsProperty(); + Variable* var = e->AsVariableProxy()->AsVariable(); + + if (property != NULL) { + // The expression is either a property or a variable proxy that rewrites + // to a property. + Load(property->obj()); + if (property->key()->IsPropertyName()) { + ref->set_type(Reference::NAMED); + } else { + Load(property->key()); + ref->set_type(Reference::KEYED); + } + } else if (var != NULL) { + // The expression is a variable proxy that does not rewrite to a + // property. Global variables are treated as named property references. + if (var->is_global()) { + // If rax is free, the register allocator prefers it. Thus the code + // generator will load the global object into rax, which is where + // LoadIC wants it. Most uses of Reference call LoadIC directly + // after the reference is created. + frame_->Spill(rax); + LoadGlobal(); + ref->set_type(Reference::NAMED); + } else { + ASSERT(var->slot() != NULL); + ref->set_type(Reference::SLOT); + } + } else { + // Anything else is a runtime error. + Load(e); + frame_->CallRuntime(Runtime::kThrowReferenceError, 1); + } + + in_spilled_code_ = was_in_spilled_code; +} + + +void CodeGenerator::UnloadReference(Reference* ref) { + // Pop a reference from the stack while preserving TOS. + Comment cmnt(masm_, "[ UnloadReference"); + frame_->Nip(ref->size()); + ref->set_unloaded(); +} + + +// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and +// convert it to a boolean in the condition code register or jump to +// 'false_target'/'true_target' as appropriate. +void CodeGenerator::ToBoolean(ControlDestination* dest) { + Comment cmnt(masm_, "[ ToBoolean"); + + // The value to convert should be popped from the frame. + Result value = frame_->Pop(); + value.ToRegister(); + + if (value.is_number()) { + // Fast case if TypeInfo indicates only numbers. + if (FLAG_debug_code) { + __ AbortIfNotNumber(value.reg()); + } + // Smi => false iff zero. + __ SmiCompare(value.reg(), Smi::FromInt(0)); + if (value.is_smi()) { + value.Unuse(); + dest->Split(not_zero); + } else { + dest->false_target()->Branch(equal); + Condition is_smi = masm_->CheckSmi(value.reg()); + dest->true_target()->Branch(is_smi); + __ xorpd(xmm0, xmm0); + __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset)); + value.Unuse(); + dest->Split(not_zero); + } + } else { + // Fast case checks. + // 'false' => false. + __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex); + dest->false_target()->Branch(equal); + + // 'true' => true. + __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex); + dest->true_target()->Branch(equal); + + // 'undefined' => false. + __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex); + dest->false_target()->Branch(equal); + + // Smi => false iff zero. + __ SmiCompare(value.reg(), Smi::FromInt(0)); + dest->false_target()->Branch(equal); + Condition is_smi = masm_->CheckSmi(value.reg()); + dest->true_target()->Branch(is_smi); + + // Call the stub for all other cases. + frame_->Push(&value); // Undo the Pop() from above. + ToBooleanStub stub; + Result temp = frame_->CallStub(&stub, 1); + // Convert the result to a condition code. 
+ __ testq(temp.reg(), temp.reg()); + temp.Unuse(); + dest->Split(not_equal); + } +} + + +class FloatingPointHelper : public AllStatic { public: - explicit DeferredReferenceGetKeyedValue(Register dst, - Register receiver, - Register key) - : dst_(dst), receiver_(receiver), key_(key) { - set_comment("[ DeferredReferenceGetKeyedValue"); + // Load the operands from rdx and rax into xmm0 and xmm1, as doubles. + // If the operands are not both numbers, jump to not_numbers. + // Leaves rdx and rax unchanged. SmiOperands assumes both are smis. + // NumberOperands assumes both are smis or heap numbers. + static void LoadSSE2SmiOperands(MacroAssembler* masm); + static void LoadSSE2NumberOperands(MacroAssembler* masm); + static void LoadSSE2UnknownOperands(MacroAssembler* masm, + Label* not_numbers); + + // Takes the operands in rdx and rax and loads them as integers in rax + // and rcx. + static void LoadAsIntegers(MacroAssembler* masm, + Label* operand_conversion_failure, + Register heap_number_map); + // As above, but we know the operands to be numbers. In that case, + // conversion can't fail. + static void LoadNumbersAsIntegers(MacroAssembler* masm); +}; + + +const char* GenericBinaryOpStub::GetName() { + if (name_ != NULL) return name_; + const int len = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(len); + if (name_ == NULL) return "OOM"; + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; } - virtual void Generate(); + OS::SNPrintF(Vector<char>(name_, len), + "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", + op_name, + overwrite_name, + (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", + args_in_registers_ ? "RegArgs" : "StackArgs", + args_reversed_ ? "_R" : "", + static_operands_type_.ToString(), + BinaryOpIC::GetName(runtime_operands_type_)); + return name_; +} - Label* patch_site() { return &patch_site_; } + +// Call the specialized stub for a binary operation. +class DeferredInlineBinaryOperation: public DeferredCode { + public: + DeferredInlineBinaryOperation(Token::Value op, + Register dst, + Register left, + Register right, + OverwriteMode mode) + : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) { + set_comment("[ DeferredInlineBinaryOperation"); + } + + virtual void Generate(); private: - Label patch_site_; + Token::Value op_; Register dst_; - Register receiver_; - Register key_; + Register left_; + Register right_; + OverwriteMode mode_; }; -void DeferredReferenceGetKeyedValue::Generate() { - if (receiver_.is(rdx)) { - if (!key_.is(rax)) { - __ movq(rax, key_); - } // else do nothing. 
- } else if (receiver_.is(rax)) { - if (key_.is(rdx)) { - __ xchg(rax, rdx); - } else if (key_.is(rax)) { - __ movq(rdx, receiver_); - } else { - __ movq(rdx, receiver_); - __ movq(rax, key_); +void DeferredInlineBinaryOperation::Generate() { + Label done; + if ((op_ == Token::ADD) + || (op_ == Token::SUB) + || (op_ == Token::MUL) + || (op_ == Token::DIV)) { + Label call_runtime; + Label left_smi, right_smi, load_right, do_op; + __ JumpIfSmi(left_, &left_smi); + __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset), + Heap::kHeapNumberMapRootIndex); + __ j(not_equal, &call_runtime); + __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset)); + if (mode_ == OVERWRITE_LEFT) { + __ movq(dst_, left_); } - } else if (key_.is(rax)) { - __ movq(rdx, receiver_); + __ jmp(&load_right); + + __ bind(&left_smi); + __ SmiToInteger32(left_, left_); + __ cvtlsi2sd(xmm0, left_); + __ Integer32ToSmi(left_, left_); + if (mode_ == OVERWRITE_LEFT) { + Label alloc_failure; + __ AllocateHeapNumber(dst_, no_reg, &call_runtime); + } + + __ bind(&load_right); + __ JumpIfSmi(right_, &right_smi); + __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset), + Heap::kHeapNumberMapRootIndex); + __ j(not_equal, &call_runtime); + __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset)); + if (mode_ == OVERWRITE_RIGHT) { + __ movq(dst_, right_); + } else if (mode_ == NO_OVERWRITE) { + Label alloc_failure; + __ AllocateHeapNumber(dst_, no_reg, &call_runtime); + } + __ jmp(&do_op); + + __ bind(&right_smi); + __ SmiToInteger32(right_, right_); + __ cvtlsi2sd(xmm1, right_); + __ Integer32ToSmi(right_, right_); + if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) { + Label alloc_failure; + __ AllocateHeapNumber(dst_, no_reg, &call_runtime); + } + + __ bind(&do_op); + switch (op_) { + case Token::ADD: __ addsd(xmm0, xmm1); break; + case Token::SUB: __ subsd(xmm0, xmm1); break; + case Token::MUL: __ mulsd(xmm0, xmm1); break; + case Token::DIV: __ divsd(xmm0, xmm1); break; + default: UNREACHABLE(); + } + __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0); + __ jmp(&done); + + __ bind(&call_runtime); + } + GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB); + stub.GenerateCall(masm_, left_, right_); + if (!dst_.is(rax)) __ movq(dst_, rax); + __ bind(&done); +} + + +static TypeInfo CalculateTypeInfo(TypeInfo operands_type, + Token::Value op, + const Result& right, + const Result& left) { + // Set TypeInfo of result according to the operation performed. + // We rely on the fact that smis have a 32 bit payload on x64. + STATIC_ASSERT(kSmiValueSize == 32); + switch (op) { + case Token::COMMA: + return right.type_info(); + case Token::OR: + case Token::AND: + // Result type can be either of the two input types. + return operands_type; + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + // Result is always a smi. + return TypeInfo::Smi(); + case Token::SAR: + case Token::SHL: + // Result is always a smi. + return TypeInfo::Smi(); + case Token::SHR: + // Result of x >>> y is always a smi if masked y >= 1, otherwise a number. + return (right.is_constant() && right.handle()->IsSmi() + && (Smi::cast(*right.handle())->value() & 0x1F) >= 1) + ? 
TypeInfo::Smi() + : TypeInfo::Number(); + case Token::ADD: + if (operands_type.IsNumber()) { + return TypeInfo::Number(); + } else if (left.type_info().IsString() || right.type_info().IsString()) { + return TypeInfo::String(); + } else { + return TypeInfo::Unknown(); + } + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: + // Result is always a number. + return TypeInfo::Number(); + default: + UNREACHABLE(); + } + UNREACHABLE(); + return TypeInfo::Unknown(); +} + + +void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, + OverwriteMode overwrite_mode) { + Comment cmnt(masm_, "[ BinaryOperation"); + Token::Value op = expr->op(); + Comment cmnt_token(masm_, Token::String(op)); + + if (op == Token::COMMA) { + // Simply discard left value. + frame_->Nip(1); + return; + } + + Result right = frame_->Pop(); + Result left = frame_->Pop(); + + if (op == Token::ADD) { + const bool left_is_string = left.type_info().IsString(); + const bool right_is_string = right.type_info().IsString(); + // Make sure constant strings have string type info. + ASSERT(!(left.is_constant() && left.handle()->IsString()) || + left_is_string); + ASSERT(!(right.is_constant() && right.handle()->IsString()) || + right_is_string); + if (left_is_string || right_is_string) { + frame_->Push(&left); + frame_->Push(&right); + Result answer; + if (left_is_string) { + if (right_is_string) { + StringAddStub stub(NO_STRING_CHECK_IN_STUB); + answer = frame_->CallStub(&stub, 2); + } else { + answer = + frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2); + } + } else if (right_is_string) { + answer = + frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2); + } + answer.set_type_info(TypeInfo::String()); + frame_->Push(&answer); + return; + } + // Neither operand is known to be a string. + } + + bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi(); + bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi(); + bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi(); + bool right_is_non_smi_constant = + right.is_constant() && !right.handle()->IsSmi(); + + if (left_is_smi_constant && right_is_smi_constant) { + // Compute the constant result at compile time, and leave it on the frame. + int left_int = Smi::cast(*left.handle())->value(); + int right_int = Smi::cast(*right.handle())->value(); + if (FoldConstantSmis(op, left_int, right_int)) return; + } + + // Get number type of left and right sub-expressions. + TypeInfo operands_type = + TypeInfo::Combine(left.type_info(), right.type_info()); + + TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left); + + Result answer; + if (left_is_non_smi_constant || right_is_non_smi_constant) { + // Go straight to the slow case, with no smi code. + GenericBinaryOpStub stub(op, + overwrite_mode, + NO_SMI_CODE_IN_STUB, + operands_type); + answer = stub.GenerateCall(masm_, frame_, &left, &right); + } else if (right_is_smi_constant) { + answer = ConstantSmiBinaryOperation(expr, &left, right.handle(), + false, overwrite_mode); + } else if (left_is_smi_constant) { + answer = ConstantSmiBinaryOperation(expr, &right, left.handle(), + true, overwrite_mode); } else { - __ movq(rax, key_); - __ movq(rdx, receiver_); + // Set the flags based on the operation, type and loop nesting level. + // Bit operations always assume they likely operate on Smis. Still only + // generate the inline Smi check code if this operation is part of a loop. 
+ // For all other operations only inline the Smi check code for likely smis + // if the operation is part of a loop. + if (loop_nesting() > 0 && + (Token::IsBitOp(op) || + operands_type.IsInteger32() || + expr->type()->IsLikelySmi())) { + answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode); + } else { + GenericBinaryOpStub stub(op, + overwrite_mode, + NO_GENERIC_BINARY_FLAGS, + operands_type); + answer = stub.GenerateCall(masm_, frame_, &left, &right); + } } - // Calculate the delta from the IC call instruction to the map check - // movq instruction in the inlined version. This delta is stored in - // a test(rax, delta) instruction after the call so that we can find - // it in the IC initialization code and patch the movq instruction. - // This means that we cannot allow test instructions after calls to - // KeyedLoadIC stubs in other places. - Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); - __ Call(ic, RelocInfo::CODE_TARGET); - // The delta from the start of the map-compare instruction to the - // test instruction. We use masm_-> directly here instead of the __ - // macro because the macro sometimes uses macro expansion to turn - // into something that can't return a value. This is encountered - // when doing generated code coverage tests. - int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); - // Here we use masm_-> instead of the __ macro because this is the - // instruction that gets patched and coverage code gets in the way. - // TODO(X64): Consider whether it's worth switching the test to a - // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't - // be generated normally. - masm_->testl(rax, Immediate(-delta_to_patch_site)); - __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); + answer.set_type_info(result_type); + frame_->Push(&answer); +} + + +bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { + Object* answer_object = Heap::undefined_value(); + switch (op) { + case Token::ADD: + // Use intptr_t to detect overflow of 32-bit int. + if (Smi::IsValid(static_cast<intptr_t>(left) + right)) { + answer_object = Smi::FromInt(left + right); + } + break; + case Token::SUB: + // Use intptr_t to detect overflow of 32-bit int. + if (Smi::IsValid(static_cast<intptr_t>(left) - right)) { + answer_object = Smi::FromInt(left - right); + } + break; + case Token::MUL: { + double answer = static_cast<double>(left) * right; + if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) { + // If the product is zero and the non-zero factor is negative, + // the spec requires us to return floating point negative zero. 
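+ // For example, folding -3 * 0 would have to produce -0, which is not
+ // a smi, so the check below skips folding and the stub or runtime
+ // computes the result instead.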
+ if (answer != 0 || (left + right) >= 0) { + answer_object = Smi::FromInt(static_cast<int>(answer)); + } + } + } + break; + case Token::DIV: + case Token::MOD: + break; + case Token::BIT_OR: + answer_object = Smi::FromInt(left | right); + break; + case Token::BIT_AND: + answer_object = Smi::FromInt(left & right); + break; + case Token::BIT_XOR: + answer_object = Smi::FromInt(left ^ right); + break; + + case Token::SHL: { + int shift_amount = right & 0x1F; + if (Smi::IsValid(left << shift_amount)) { + answer_object = Smi::FromInt(left << shift_amount); + } + break; + } + case Token::SHR: { + int shift_amount = right & 0x1F; + unsigned int unsigned_left = left; + unsigned_left >>= shift_amount; + if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) { + answer_object = Smi::FromInt(unsigned_left); + } + break; + } + case Token::SAR: { + int shift_amount = right & 0x1F; + unsigned int unsigned_left = left; + if (left < 0) { + // Perform arithmetic shift of a negative number by + // complementing number, logical shifting, complementing again. + unsigned_left = ~unsigned_left; + unsigned_left >>= shift_amount; + unsigned_left = ~unsigned_left; + } else { + unsigned_left >>= shift_amount; + } + ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left))); + answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left)); + break; + } + default: + UNREACHABLE(); + break; + } + if (answer_object == Heap::undefined_value()) { + return false; + } + frame_->Push(Handle<Object>(answer_object)); + return true; +} + + +void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg, + TypeInfo type, + DeferredCode* deferred) { + if (!type.IsSmi()) { + __ JumpIfNotSmi(reg, deferred->entry_label()); + } + if (FLAG_debug_code) { + __ AbortIfNotSmi(reg); + } +} + + +void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left, + Register right, + TypeInfo left_info, + TypeInfo right_info, + DeferredCode* deferred) { + if (!left_info.IsSmi() && !right_info.IsSmi()) { + __ JumpIfNotBothSmi(left, right, deferred->entry_label()); + } else if (!left_info.IsSmi()) { + __ JumpIfNotSmi(left, deferred->entry_label()); + } else if (!right_info.IsSmi()) { + __ JumpIfNotSmi(right, deferred->entry_label()); + } + if (FLAG_debug_code) { + __ AbortIfNotSmi(left); + __ AbortIfNotSmi(right); + } +} + + +// Implements a binary operation using a deferred code object and some +// inline code to operate on smis quickly. +Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, + Result* left, + Result* right, + OverwriteMode overwrite_mode) { + // Copy the type info because left and right may be overwritten. + TypeInfo left_type_info = left->type_info(); + TypeInfo right_type_info = right->type_info(); + Token::Value op = expr->op(); + Result answer; + // Special handling of div and mod because they use fixed registers. + if (op == Token::DIV || op == Token::MOD) { + // We need rax as the quotient register, rdx as the remainder + // register, neither left nor right in rax or rdx, and left copied + // to rax. + Result quotient; + Result remainder; + bool left_is_in_rax = false; + // Step 1: get rax for quotient. + if ((left->is_register() && left->reg().is(rax)) || + (right->is_register() && right->reg().is(rax))) { + // One or both is in rax. Use a fresh non-rdx register for + // them. 
+ Result fresh = allocator_->Allocate(); + ASSERT(fresh.is_valid()); + if (fresh.reg().is(rdx)) { + remainder = fresh; + fresh = allocator_->Allocate(); + ASSERT(fresh.is_valid()); + } + if (left->is_register() && left->reg().is(rax)) { + quotient = *left; + *left = fresh; + left_is_in_rax = true; + } + if (right->is_register() && right->reg().is(rax)) { + quotient = *right; + *right = fresh; + } + __ movq(fresh.reg(), rax); + } else { + // Neither left nor right is in rax. + quotient = allocator_->Allocate(rax); + } + ASSERT(quotient.is_register() && quotient.reg().is(rax)); + ASSERT(!(left->is_register() && left->reg().is(rax))); + ASSERT(!(right->is_register() && right->reg().is(rax))); + + // Step 2: get rdx for remainder if necessary. + if (!remainder.is_valid()) { + if ((left->is_register() && left->reg().is(rdx)) || + (right->is_register() && right->reg().is(rdx))) { + Result fresh = allocator_->Allocate(); + ASSERT(fresh.is_valid()); + if (left->is_register() && left->reg().is(rdx)) { + remainder = *left; + *left = fresh; + } + if (right->is_register() && right->reg().is(rdx)) { + remainder = *right; + *right = fresh; + } + __ movq(fresh.reg(), rdx); + } else { + // Neither left nor right is in rdx. + remainder = allocator_->Allocate(rdx); + } + } + ASSERT(remainder.is_register() && remainder.reg().is(rdx)); + ASSERT(!(left->is_register() && left->reg().is(rdx))); + ASSERT(!(right->is_register() && right->reg().is(rdx))); + + left->ToRegister(); + right->ToRegister(); + frame_->Spill(rax); + frame_->Spill(rdx); + + // Check that left and right are smi tagged. + DeferredInlineBinaryOperation* deferred = + new DeferredInlineBinaryOperation(op, + (op == Token::DIV) ? rax : rdx, + left->reg(), + right->reg(), + overwrite_mode); + JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), + left_type_info, right_type_info, deferred); + + if (op == Token::DIV) { + __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label()); + deferred->BindExit(); + left->Unuse(); + right->Unuse(); + answer = quotient; + } else { + ASSERT(op == Token::MOD); + __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label()); + deferred->BindExit(); + left->Unuse(); + right->Unuse(); + answer = remainder; + } + ASSERT(answer.is_valid()); + return answer; + } + + // Special handling of shift operations because they use fixed + // registers. + if (op == Token::SHL || op == Token::SHR || op == Token::SAR) { + // Move left out of rcx if necessary. + if (left->is_register() && left->reg().is(rcx)) { + *left = allocator_->Allocate(); + ASSERT(left->is_valid()); + __ movq(left->reg(), rcx); + } + right->ToRegister(rcx); + left->ToRegister(); + ASSERT(left->is_register() && !left->reg().is(rcx)); + ASSERT(right->is_register() && right->reg().is(rcx)); + + // We will modify right, it must be spilled. + frame_->Spill(rcx); + + // Use a fresh answer register to avoid spilling the left operand. + answer = allocator_->Allocate(); + ASSERT(answer.is_valid()); + // Check that both operands are smis using the answer register as a + // temporary. + DeferredInlineBinaryOperation* deferred = + new DeferredInlineBinaryOperation(op, + answer.reg(), + left->reg(), + rcx, + overwrite_mode); + + Label do_op; + if (right_type_info.IsSmi()) { + if (FLAG_debug_code) { + __ AbortIfNotSmi(right->reg()); + } + __ movq(answer.reg(), left->reg()); + // If left is not known to be a smi, check if it is. + // If left is not known to be a number, and it isn't a smi, check if + // it is a HeapNumber. 
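+ // (cvttsd2si returns 0x80000000 when the double does not fit in an
+ // int32, which is what the overflow check below looks for.)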
+ if (!left_type_info.IsSmi()) { + __ JumpIfSmi(answer.reg(), &do_op); + if (!left_type_info.IsNumber()) { + // Branch if not a heapnumber. + __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset), + Factory::heap_number_map()); + deferred->Branch(not_equal); + } + // Load integer value into answer register using truncation. + __ cvttsd2si(answer.reg(), + FieldOperand(answer.reg(), HeapNumber::kValueOffset)); + // Branch if we might have overflowed. + // (False negative for Smi::kMinValue) + __ cmpq(answer.reg(), Immediate(0x80000000)); + deferred->Branch(equal); + // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging. + __ Integer32ToSmi(answer.reg(), answer.reg()); + } else { + // Fast case - both are actually smis. + if (FLAG_debug_code) { + __ AbortIfNotSmi(left->reg()); + } + } + } else { + JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx, + left_type_info, right_type_info, deferred); + } + __ bind(&do_op); + + // Perform the operation. + switch (op) { + case Token::SAR: + __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx); + break; + case Token::SHR: { + __ SmiShiftLogicalRight(answer.reg(), + left->reg(), + rcx, + deferred->entry_label()); + break; + } + case Token::SHL: { + __ SmiShiftLeft(answer.reg(), + left->reg(), + rcx); + break; + } + default: + UNREACHABLE(); + } + deferred->BindExit(); + left->Unuse(); + right->Unuse(); + ASSERT(answer.is_valid()); + return answer; + } + + // Handle the other binary operations. + left->ToRegister(); + right->ToRegister(); + // A newly allocated register answer is used to hold the answer. The + // registers containing left and right are not modified so they don't + // need to be spilled in the fast case. + answer = allocator_->Allocate(); + ASSERT(answer.is_valid()); + + // Perform the smi tag check. + DeferredInlineBinaryOperation* deferred = + new DeferredInlineBinaryOperation(op, + answer.reg(), + left->reg(), + right->reg(), + overwrite_mode); + JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), + left_type_info, right_type_info, deferred); + + switch (op) { + case Token::ADD: + __ SmiAdd(answer.reg(), + left->reg(), + right->reg(), + deferred->entry_label()); + break; + + case Token::SUB: + __ SmiSub(answer.reg(), + left->reg(), + right->reg(), + deferred->entry_label()); + break; + + case Token::MUL: { + __ SmiMul(answer.reg(), + left->reg(), + right->reg(), + deferred->entry_label()); + break; + } + + case Token::BIT_OR: + __ SmiOr(answer.reg(), left->reg(), right->reg()); + break; + + case Token::BIT_AND: + __ SmiAnd(answer.reg(), left->reg(), right->reg()); + break; + + case Token::BIT_XOR: + __ SmiXor(answer.reg(), left->reg(), right->reg()); + break; + + default: + UNREACHABLE(); + break; + } + deferred->BindExit(); + left->Unuse(); + right->Unuse(); + ASSERT(answer.is_valid()); + return answer; +} + + +// Call the appropriate binary operation stub to compute src op value +// and leave the result in dst. +class DeferredInlineSmiOperation: public DeferredCode { + public: + DeferredInlineSmiOperation(Token::Value op, + Register dst, + Register src, + Smi* value, + OverwriteMode overwrite_mode) + : op_(op), + dst_(dst), + src_(src), + value_(value), + overwrite_mode_(overwrite_mode) { + set_comment("[ DeferredInlineSmiOperation"); + } + + virtual void Generate(); + + private: + Token::Value op_; + Register dst_; + Register src_; + Smi* value_; + OverwriteMode overwrite_mode_; +}; + + +void DeferredInlineSmiOperation::Generate() { + // For mod we don't generate all the Smi code inline. 
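+ // (NO_GENERIC_BINARY_FLAGS keeps the stub's smi path for MOD, while the
+ // other operations can skip it with NO_SMI_CODE_IN_STUB.)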
+ GenericBinaryOpStub stub( + op_, + overwrite_mode_, + (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB); + stub.GenerateCall(masm_, src_, value_); if (!dst_.is(rax)) __ movq(dst_, rax); } -class DeferredReferenceSetKeyedValue: public DeferredCode { +// Call the appropriate binary operation stub to compute value op src +// and leave the result in dst. +class DeferredInlineSmiOperationReversed: public DeferredCode { public: - DeferredReferenceSetKeyedValue(Register value, - Register key, - Register receiver) - : value_(value), key_(key), receiver_(receiver) { - set_comment("[ DeferredReferenceSetKeyedValue"); + DeferredInlineSmiOperationReversed(Token::Value op, + Register dst, + Smi* value, + Register src, + OverwriteMode overwrite_mode) + : op_(op), + dst_(dst), + value_(value), + src_(src), + overwrite_mode_(overwrite_mode) { + set_comment("[ DeferredInlineSmiOperationReversed"); } virtual void Generate(); - Label* patch_site() { return &patch_site_; } + private: + Token::Value op_; + Register dst_; + Smi* value_; + Register src_; + OverwriteMode overwrite_mode_; +}; + + +void DeferredInlineSmiOperationReversed::Generate() { + GenericBinaryOpStub stub( + op_, + overwrite_mode_, + NO_SMI_CODE_IN_STUB); + stub.GenerateCall(masm_, value_, src_); + if (!dst_.is(rax)) __ movq(dst_, rax); +} +class DeferredInlineSmiAdd: public DeferredCode { + public: + DeferredInlineSmiAdd(Register dst, + Smi* value, + OverwriteMode overwrite_mode) + : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { + set_comment("[ DeferredInlineSmiAdd"); + } + + virtual void Generate(); private: - Register value_; - Register key_; - Register receiver_; - Label patch_site_; + Register dst_; + Smi* value_; + OverwriteMode overwrite_mode_; }; -void DeferredReferenceSetKeyedValue::Generate() { - __ IncrementCounter(&Counters::keyed_store_inline_miss, 1); - // Move value, receiver, and key to registers rax, rdx, and rcx, as - // the IC stub expects. - // Move value to rax, using xchg if the receiver or key is in rax. - if (!value_.is(rax)) { - if (!receiver_.is(rax) && !key_.is(rax)) { - __ movq(rax, value_); +void DeferredInlineSmiAdd::Generate() { + GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); + igostub.GenerateCall(masm_, dst_, value_); + if (!dst_.is(rax)) __ movq(dst_, rax); +} + + +// The result of value + src is in dst. It either overflowed or was not +// smi tagged. Undo the speculative addition and call the appropriate +// specialized stub for add. The result is left in dst. 
+class DeferredInlineSmiAddReversed: public DeferredCode { + public: + DeferredInlineSmiAddReversed(Register dst, + Smi* value, + OverwriteMode overwrite_mode) + : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { + set_comment("[ DeferredInlineSmiAddReversed"); + } + + virtual void Generate(); + + private: + Register dst_; + Smi* value_; + OverwriteMode overwrite_mode_; +}; + + +void DeferredInlineSmiAddReversed::Generate() { + GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); + igostub.GenerateCall(masm_, value_, dst_); + if (!dst_.is(rax)) __ movq(dst_, rax); +} + + +class DeferredInlineSmiSub: public DeferredCode { + public: + DeferredInlineSmiSub(Register dst, + Smi* value, + OverwriteMode overwrite_mode) + : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { + set_comment("[ DeferredInlineSmiSub"); + } + + virtual void Generate(); + + private: + Register dst_; + Smi* value_; + OverwriteMode overwrite_mode_; +}; + + + +void DeferredInlineSmiSub::Generate() { + GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB); + igostub.GenerateCall(masm_, dst_, value_); + if (!dst_.is(rax)) __ movq(dst_, rax); +} + + +Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr, + Result* operand, + Handle<Object> value, + bool reversed, + OverwriteMode overwrite_mode) { + // Generate inline code for a binary operation when one of the + // operands is a constant smi. Consumes the argument "operand". + if (IsUnsafeSmi(value)) { + Result unsafe_operand(value); + if (reversed) { + return LikelySmiBinaryOperation(expr, &unsafe_operand, operand, + overwrite_mode); } else { - __ xchg(rax, value_); - // Update receiver_ and key_ if they are affected by the swap. - if (receiver_.is(rax)) { - receiver_ = value_; - } else if (receiver_.is(value_)) { - receiver_ = rax; + return LikelySmiBinaryOperation(expr, operand, &unsafe_operand, + overwrite_mode); + } + } + + // Get the literal value. + Smi* smi_value = Smi::cast(*value); + int int_value = smi_value->value(); + + Token::Value op = expr->op(); + Result answer; + switch (op) { + case Token::ADD: { + operand->ToRegister(); + frame_->Spill(operand->reg()); + DeferredCode* deferred = NULL; + if (reversed) { + deferred = new DeferredInlineSmiAddReversed(operand->reg(), + smi_value, + overwrite_mode); + } else { + deferred = new DeferredInlineSmiAdd(operand->reg(), + smi_value, + overwrite_mode); } - if (key_.is(rax)) { - key_ = value_; - } else if (key_.is(value_)) { - key_ = rax; + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), + deferred); + __ SmiAddConstant(operand->reg(), + operand->reg(), + smi_value, + deferred->entry_label()); + deferred->BindExit(); + answer = *operand; + break; + } + + case Token::SUB: { + if (reversed) { + Result constant_operand(value); + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, + overwrite_mode); + } else { + operand->ToRegister(); + frame_->Spill(operand->reg()); + DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(), + smi_value, + overwrite_mode); + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), + deferred); + // A smi currently fits in a 32-bit Immediate. 
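+ // (which is why the subtraction below can encode smi_value directly.)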
+ __ SmiSubConstant(operand->reg(), + operand->reg(), + smi_value, + deferred->entry_label()); + deferred->BindExit(); + answer = *operand; } + break; + } + + case Token::SAR: + if (reversed) { + Result constant_operand(value); + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, + overwrite_mode); + } else { + // Only the least significant 5 bits of the shift value are used. + // In the slow case, this masking is done inside the runtime call. + int shift_value = int_value & 0x1f; + operand->ToRegister(); + frame_->Spill(operand->reg()); + DeferredInlineSmiOperation* deferred = + new DeferredInlineSmiOperation(op, + operand->reg(), + operand->reg(), + smi_value, + overwrite_mode); + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), + deferred); + __ SmiShiftArithmeticRightConstant(operand->reg(), + operand->reg(), + shift_value); + deferred->BindExit(); + answer = *operand; + } + break; + + case Token::SHR: + if (reversed) { + Result constant_operand(value); + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, + overwrite_mode); + } else { + // Only the least significant 5 bits of the shift value are used. + // In the slow case, this masking is done inside the runtime call. + int shift_value = int_value & 0x1f; + operand->ToRegister(); + answer = allocator()->Allocate(); + ASSERT(answer.is_valid()); + DeferredInlineSmiOperation* deferred = + new DeferredInlineSmiOperation(op, + answer.reg(), + operand->reg(), + smi_value, + overwrite_mode); + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), + deferred); + __ SmiShiftLogicalRightConstant(answer.reg(), + operand->reg(), + shift_value, + deferred->entry_label()); + deferred->BindExit(); + operand->Unuse(); + } + break; + + case Token::SHL: + if (reversed) { + operand->ToRegister(); + + // We need rcx to be available to hold operand, and to be spilled. + // SmiShiftLeft implicitly modifies rcx. + if (operand->reg().is(rcx)) { + frame_->Spill(operand->reg()); + answer = allocator()->Allocate(); + } else { + Result rcx_reg = allocator()->Allocate(rcx); + // answer must not be rcx. + answer = allocator()->Allocate(); + // rcx_reg goes out of scope. + } + + DeferredInlineSmiOperationReversed* deferred = + new DeferredInlineSmiOperationReversed(op, + answer.reg(), + smi_value, + operand->reg(), + overwrite_mode); + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), + deferred); + + __ Move(answer.reg(), smi_value); + __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg()); + operand->Unuse(); + + deferred->BindExit(); + } else { + // Only the least significant 5 bits of the shift value are used. + // In the slow case, this masking is done inside the runtime call. + int shift_value = int_value & 0x1f; + operand->ToRegister(); + if (shift_value == 0) { + // Spill operand so it can be overwritten in the slow case. + frame_->Spill(operand->reg()); + DeferredInlineSmiOperation* deferred = + new DeferredInlineSmiOperation(op, + operand->reg(), + operand->reg(), + smi_value, + overwrite_mode); + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), + deferred); + deferred->BindExit(); + answer = *operand; + } else { + // Use a fresh temporary for nonzero shift values. 
+ answer = allocator()->Allocate(); + ASSERT(answer.is_valid()); + DeferredInlineSmiOperation* deferred = + new DeferredInlineSmiOperation(op, + answer.reg(), + operand->reg(), + smi_value, + overwrite_mode); + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), + deferred); + __ SmiShiftLeftConstant(answer.reg(), + operand->reg(), + shift_value); + deferred->BindExit(); + operand->Unuse(); + } + } + break; + + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: { + operand->ToRegister(); + frame_->Spill(operand->reg()); + if (reversed) { + // Bit operations with a constant smi are commutative. + // We can swap left and right operands with no problem. + // Swap left and right overwrite modes. 0->0, 1->2, 2->1. + overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3); + } + DeferredCode* deferred = new DeferredInlineSmiOperation(op, + operand->reg(), + operand->reg(), + smi_value, + overwrite_mode); + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), + deferred); + if (op == Token::BIT_AND) { + __ SmiAndConstant(operand->reg(), operand->reg(), smi_value); + } else if (op == Token::BIT_XOR) { + if (int_value != 0) { + __ SmiXorConstant(operand->reg(), operand->reg(), smi_value); + } + } else { + ASSERT(op == Token::BIT_OR); + if (int_value != 0) { + __ SmiOrConstant(operand->reg(), operand->reg(), smi_value); + } + } + deferred->BindExit(); + answer = *operand; + break; + } + + // Generate inline code for mod of powers of 2 and negative powers of 2. + case Token::MOD: + if (!reversed && + int_value != 0 && + (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) { + operand->ToRegister(); + frame_->Spill(operand->reg()); + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, + operand->reg(), + operand->reg(), + smi_value, + overwrite_mode); + // Check for negative or non-Smi left hand side. + __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label()); + if (int_value < 0) int_value = -int_value; + if (int_value == 1) { + __ Move(operand->reg(), Smi::FromInt(0)); + } else { + __ SmiAndConstant(operand->reg(), + operand->reg(), + Smi::FromInt(int_value - 1)); + } + deferred->BindExit(); + answer = *operand; + break; // This break only applies if we generated code for MOD. + } + // Fall through if we did not find a power of 2 on the right hand side! + // The next case must be the default. + + default: { + Result constant_operand(value); + if (reversed) { + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, + overwrite_mode); + } else { + answer = LikelySmiBinaryOperation(expr, operand, &constant_operand, + overwrite_mode); + } + break; } } - // Value is now in rax. Its original location is remembered in value_, - // and the value is restored to value_ before returning. - // The variables receiver_ and key_ are not preserved. - // Move receiver and key to rdx and rcx, swapping if necessary. - if (receiver_.is(rdx)) { - if (!key_.is(rcx)) { - __ movq(rcx, key_); - } // Else everything is already in the right place. 
- } else if (receiver_.is(rcx)) { - if (key_.is(rdx)) { - __ xchg(rcx, rdx); - } else if (key_.is(rcx)) { - __ movq(rdx, receiver_); + ASSERT(answer.is_valid()); + return answer; +} + +static bool CouldBeNaN(const Result& result) { + if (result.type_info().IsSmi()) return false; + if (result.type_info().IsInteger32()) return false; + if (!result.is_constant()) return true; + if (!result.handle()->IsHeapNumber()) return false; + return isnan(HeapNumber::cast(*result.handle())->value()); +} + + +// Convert from signed to unsigned comparison to match the way EFLAGS are set +// by FPU and XMM compare instructions. +static Condition DoubleCondition(Condition cc) { + switch (cc) { + case less: return below; + case equal: return equal; + case less_equal: return below_equal; + case greater: return above; + case greater_equal: return above_equal; + default: UNREACHABLE(); + } + UNREACHABLE(); + return equal; +} + + +void CodeGenerator::Comparison(AstNode* node, + Condition cc, + bool strict, + ControlDestination* dest) { + // Strict only makes sense for equality comparisons. + ASSERT(!strict || cc == equal); + + Result left_side; + Result right_side; + // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. + if (cc == greater || cc == less_equal) { + cc = ReverseCondition(cc); + left_side = frame_->Pop(); + right_side = frame_->Pop(); + } else { + right_side = frame_->Pop(); + left_side = frame_->Pop(); + } + ASSERT(cc == less || cc == equal || cc == greater_equal); + + // If either side is a constant smi, optimize the comparison. + bool left_side_constant_smi = false; + bool left_side_constant_null = false; + bool left_side_constant_1_char_string = false; + if (left_side.is_constant()) { + left_side_constant_smi = left_side.handle()->IsSmi(); + left_side_constant_null = left_side.handle()->IsNull(); + left_side_constant_1_char_string = + (left_side.handle()->IsString() && + String::cast(*left_side.handle())->length() == 1 && + String::cast(*left_side.handle())->IsAsciiRepresentation()); + } + bool right_side_constant_smi = false; + bool right_side_constant_null = false; + bool right_side_constant_1_char_string = false; + if (right_side.is_constant()) { + right_side_constant_smi = right_side.handle()->IsSmi(); + right_side_constant_null = right_side.handle()->IsNull(); + right_side_constant_1_char_string = + (right_side.handle()->IsString() && + String::cast(*right_side.handle())->length() == 1 && + String::cast(*right_side.handle())->IsAsciiRepresentation()); + } + + if (left_side_constant_smi || right_side_constant_smi) { + if (left_side_constant_smi && right_side_constant_smi) { + // Trivial case, comparing two constants. + int left_value = Smi::cast(*left_side.handle())->value(); + int right_value = Smi::cast(*right_side.handle())->value(); + switch (cc) { + case less: + dest->Goto(left_value < right_value); + break; + case equal: + dest->Goto(left_value == right_value); + break; + case greater_equal: + dest->Goto(left_value >= right_value); + break; + default: + UNREACHABLE(); + } } else { - __ movq(rdx, receiver_); - __ movq(rcx, key_); + // Only one side is a constant Smi. + // If left side is a constant Smi, reverse the operands. + // Since one side is a constant Smi, conversion order does not matter. + if (left_side_constant_smi) { + Result temp = left_side; + left_side = right_side; + right_side = temp; + cc = ReverseCondition(cc); + // This may re-introduce greater or less_equal as the value of cc. + // CompareStub and the inline code both support all values of cc. 
+ } + // Implement comparison against a constant Smi, inlining the case + // where both sides are Smis. + left_side.ToRegister(); + Register left_reg = left_side.reg(); + Handle<Object> right_val = right_side.handle(); + + // Here we split control flow to the stub call and inlined cases + // before finally splitting it to the control destination. We use + // a jump target and branching to duplicate the virtual frame at + // the first split. We manually handle the off-frame references + // by reconstituting them on the non-fall-through path. + JumpTarget is_smi; + + if (left_side.is_smi()) { + if (FLAG_debug_code) { + __ AbortIfNotSmi(left_side.reg()); + } + } else { + Condition left_is_smi = masm_->CheckSmi(left_side.reg()); + is_smi.Branch(left_is_smi); + + bool is_loop_condition = (node->AsExpression() != NULL) && + node->AsExpression()->is_loop_condition(); + if (!is_loop_condition && right_val->IsSmi()) { + // Right side is a constant smi and left side has been checked + // not to be a smi. + JumpTarget not_number; + __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset), + Factory::heap_number_map()); + not_number.Branch(not_equal, &left_side); + __ movsd(xmm1, + FieldOperand(left_reg, HeapNumber::kValueOffset)); + int value = Smi::cast(*right_val)->value(); + if (value == 0) { + __ xorpd(xmm0, xmm0); + } else { + Result temp = allocator()->Allocate(); + __ movl(temp.reg(), Immediate(value)); + __ cvtlsi2sd(xmm0, temp.reg()); + temp.Unuse(); + } + __ ucomisd(xmm1, xmm0); + // Jump to builtin for NaN. + not_number.Branch(parity_even, &left_side); + left_side.Unuse(); + dest->true_target()->Branch(DoubleCondition(cc)); + dest->false_target()->Jump(); + not_number.Bind(&left_side); + } + + // Setup and call the compare stub. + CompareStub stub(cc, strict, kCantBothBeNaN); + Result result = frame_->CallStub(&stub, &left_side, &right_side); + result.ToRegister(); + __ testq(result.reg(), result.reg()); + result.Unuse(); + dest->true_target()->Branch(cc); + dest->false_target()->Jump(); + + is_smi.Bind(); + } + + left_side = Result(left_reg); + right_side = Result(right_val); + // Test smi equality and comparison by signed int comparison. + // Both sides are smis, so we can use an Immediate. + __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle())); + left_side.Unuse(); + right_side.Unuse(); + dest->Split(cc); + } + } else if (cc == equal && + (left_side_constant_null || right_side_constant_null)) { + // To make null checks efficient, we check if either the left side or + // the right side is the constant 'null'. + // If so, we optimize the code by inlining a null check instead of + // calling the (very) general runtime routine for checking equality. + Result operand = left_side_constant_null ? right_side : left_side; + right_side.Unuse(); + left_side.Unuse(); + operand.ToRegister(); + __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex); + if (strict) { + operand.Unuse(); + dest->Split(equal); + } else { + // The 'null' value is only equal to 'undefined' if using non-strict + // comparisons. + dest->true_target()->Branch(equal); + __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex); + dest->true_target()->Branch(equal); + Condition is_smi = masm_->CheckSmi(operand.reg()); + dest->false_target()->Branch(is_smi); + + // It can be an undetectable object. + // Use a scratch register in preference to spilling operand.reg(). 
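+ // (Undetectable objects, e.g. document.all, compare equal to null and
+ // undefined in non-strict comparisons; the map bit test below catches
+ // them.)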
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ temp.Unuse();
+ operand.Unuse();
+ dest->Split(not_zero);
+ }
+ } else if (left_side_constant_1_char_string ||
+ right_side_constant_1_char_string) {
+ if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
+ // Trivial case, comparing two constants.
+ int left_value = String::cast(*left_side.handle())->Get(0);
+ int right_value = String::cast(*right_side.handle())->Get(0);
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Only one side is a constant 1 character string.
+ // If left side is a constant 1-character string, reverse the operands.
+ // Since one side is a constant string, conversion order does not matter.
+ if (left_side_constant_1_char_string) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may reintroduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant string, inlining the case
+ // where both sides are strings.
+ left_side.ToRegister();
+
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_not_string, is_string;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
+ ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
+ Condition is_smi = masm()->CheckSmi(left_reg);
+ is_not_string.Branch(is_smi, &left_side);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(left_reg, HeapObject::kMapOffset));
+ __ movzxbl(temp.reg(),
+ FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+ // If we are testing for equality then make use of the symbol shortcut.
+ // Check if the left hand side has the same type as the right hand
+ // side (which is always a symbol).
+ if (cc == equal) {
+ Label not_a_symbol;
+ ASSERT(kSymbolTag != 0);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
+ __ j(zero, &not_a_symbol);
+ // They are symbols, so do identity compare.
+ __ Cmp(left_reg, right_side.handle());
+ dest->true_target()->Branch(equal);
+ dest->false_target()->Branch(not_equal);
+ __ bind(&not_a_symbol);
+ }
+ // Call the compare stub if the left side is not a flat ascii string.
+ __ andb(temp.reg(),
+ Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kStringEncodingMask));
+ __ cmpb(temp.reg(),
+ Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ temp.Unuse();
+ is_string.Branch(equal, &left_side);
+
+ // Setup and call the compare stub.
+ is_not_string.Bind(&left_side); + CompareStub stub(cc, strict, kCantBothBeNaN); + Result result = frame_->CallStub(&stub, &left_side, &right_side); + result.ToRegister(); + __ testq(result.reg(), result.reg()); + result.Unuse(); + dest->true_target()->Branch(cc); + dest->false_target()->Jump(); + + is_string.Bind(&left_side); + // left_side is a sequential ASCII string. + ASSERT(left_side.reg().is(left_reg)); + right_side = Result(right_val); + Result temp2 = allocator_->Allocate(); + ASSERT(temp2.is_valid()); + // Test string equality and comparison. + if (cc == equal) { + Label comparison_done; + __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), + Smi::FromInt(1)); + __ j(not_equal, &comparison_done); + uint8_t char_value = + static_cast<uint8_t>(String::cast(*right_val)->Get(0)); + __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize), + Immediate(char_value)); + __ bind(&comparison_done); + } else { + __ movq(temp2.reg(), + FieldOperand(left_side.reg(), String::kLengthOffset)); + __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1)); + Label comparison; + // If the length is 0 then the subtraction gave -1 which compares less + // than any character. + __ j(negative, &comparison); + // Otherwise load the first character. + __ movzxbl(temp2.reg(), + FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize)); + __ bind(&comparison); + // Compare the first character of the string with the + // constant 1-character string. + uint8_t char_value = + static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0)); + __ cmpb(temp2.reg(), Immediate(char_value)); + Label characters_were_different; + __ j(not_equal, &characters_were_different); + // If the first character is the same then the long string sorts after + // the short one. + __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), + Smi::FromInt(1)); + __ bind(&characters_were_different); + } + temp2.Unuse(); + left_side.Unuse(); + right_side.Unuse(); + dest->Split(cc); } - } else if (key_.is(rcx)) { - __ movq(rdx, receiver_); } else { - __ movq(rcx, key_); - __ movq(rdx, receiver_); + // Neither side is a constant Smi, constant 1-char string, or constant null. + // If either side is a non-smi constant, skip the smi check. + bool known_non_smi = + (left_side.is_constant() && !left_side.handle()->IsSmi()) || + (right_side.is_constant() && !right_side.handle()->IsSmi()) || + left_side.type_info().IsDouble() || + right_side.type_info().IsDouble(); + + NaNInformation nan_info = + (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ? + kBothCouldBeNaN : + kCantBothBeNaN; + + // Inline number comparison handling any combination of smi's and heap + // numbers if: + // code is in a loop + // the compare operation is different from equal + // compare is not a for-loop comparison + // The reason for excluding equal is that it will most likely be done + // with smi's (not heap numbers) and the code to comparing smi's is inlined + // separately. The same reason applies for for-loop comparison which will + // also most likely be smi comparisons. + bool is_loop_condition = (node->AsExpression() != NULL) + && node->AsExpression()->is_loop_condition(); + bool inline_number_compare = + loop_nesting() > 0 && cc != equal && !is_loop_condition; + + left_side.ToRegister(); + right_side.ToRegister(); + + if (known_non_smi) { + // Inlined equality check: + // If at least one of the objects is not NaN, then if the objects + // are identical, they are equal. 
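+ // (NaN is the only value that is not equal to itself, so the identity
+ // shortcut is only safe when the operands cannot both be NaN.)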
+ if (nan_info == kCantBothBeNaN && cc == equal) { + __ cmpq(left_side.reg(), right_side.reg()); + dest->true_target()->Branch(equal); + } + + // Inlined number comparison: + if (inline_number_compare) { + GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); + } + + CompareStub stub(cc, strict, nan_info, !inline_number_compare); + Result answer = frame_->CallStub(&stub, &left_side, &right_side); + __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag. + answer.Unuse(); + dest->Split(cc); + } else { + // Here we split control flow to the stub call and inlined cases + // before finally splitting it to the control destination. We use + // a jump target and branching to duplicate the virtual frame at + // the first split. We manually handle the off-frame references + // by reconstituting them on the non-fall-through path. + JumpTarget is_smi; + Register left_reg = left_side.reg(); + Register right_reg = right_side.reg(); + + Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg); + is_smi.Branch(both_smi); + + // Inline the equality check if both operands can't be a NaN. If both + // objects are the same they are equal. + if (nan_info == kCantBothBeNaN && cc == equal) { + __ cmpq(left_side.reg(), right_side.reg()); + dest->true_target()->Branch(equal); + } + + // Inlined number comparison: + if (inline_number_compare) { + GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); + } + + CompareStub stub(cc, strict, nan_info, !inline_number_compare); + Result answer = frame_->CallStub(&stub, &left_side, &right_side); + __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags. + answer.Unuse(); + dest->true_target()->Branch(cc); + dest->false_target()->Jump(); + + is_smi.Bind(); + left_side = Result(left_reg); + right_side = Result(right_reg); + __ SmiCompare(left_side.reg(), right_side.reg()); + right_side.Unuse(); + left_side.Unuse(); + dest->Split(cc); + } } +} - // Call the IC stub. - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); - __ Call(ic, RelocInfo::CODE_TARGET); - // The delta from the start of the map-compare instructions (initial movq) - // to the test instruction. We use masm_-> directly here instead of the - // __ macro because the macro sometimes uses macro expansion to turn - // into something that can't return a value. This is encountered - // when doing generated code coverage tests. - int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); - // Here we use masm_-> instead of the __ macro because this is the - // instruction that gets patched and coverage code gets in the way. - masm_->testl(rax, Immediate(-delta_to_patch_site)); - // Restore value (returned from store IC). - if (!value_.is(rax)) __ movq(value_, rax); + +// Load a comparison operand into into a XMM register. Jump to not_numbers jump +// target passing the left and right result if the operand is not a number. +static void LoadComparisonOperand(MacroAssembler* masm_, + Result* operand, + XMMRegister xmm_reg, + Result* left_side, + Result* right_side, + JumpTarget* not_numbers) { + Label done; + if (operand->type_info().IsDouble()) { + // Operand is known to be a heap number, just load it. + __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); + } else if (operand->type_info().IsSmi()) { + // Operand is known to be a smi. Convert it to double and keep the original + // smi. 
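+ // (The untagged value only lives in kScratchRegister, so operand->reg()
+ // still holds the smi afterwards.)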
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ } else {
+ // Operand type not known, check for smi or heap number.
+ Label smi;
+ __ JumpIfSmi(operand->reg(), &smi);
+ if (!operand->type_info().IsNumber()) {
+ __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ not_numbers->Branch(not_equal, left_side, right_side, taken);
+ }
+ __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&smi);
+ // Convert smi to float and keep the original smi.
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ __ jmp(&done);
+ }
+ __ bind(&done);
+}
+
+
+void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
+ Result* right_side,
+ Condition cc,
+ ControlDestination* dest) {
+ ASSERT(left_side->is_register());
+ ASSERT(right_side->is_register());
+
+ JumpTarget not_numbers;
+ // Load left and right operand into registers xmm0 and xmm1 and compare.
+ LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
+ &not_numbers);
+ LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
+ &not_numbers);
+ __ ucomisd(xmm0, xmm1);
+ // Bail out if a NaN is involved.
+ not_numbers.Branch(parity_even, left_side, right_side);
+
+ // Split to destination targets based on comparison.
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
+
+ not_numbers.Bind(left_side, right_side);
+}
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ CallFunctionFlags flags,
+ int position) {
+ // Push the arguments ("left-to-right") on the stack.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
+
+ // Record the position for debugging purposes.
+ CodeForSourcePosition(position);
+
+ // Use the shared code stub to call the function.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop, flags);
+ Result answer = frame_->CallStub(&call_function, arg_count + 1);
+ // Restore context and replace function on the stack with the
+ // result of the stub invocation.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
 }
@@ -1010,6 +2720,21 @@ void CodeGenerator::VisitBlock(Block* node) {
 }
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals. The inevitable call
+ // will sync frame elements to memory anyway, so we do it eagerly to
+ // allow us to push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(rsi); // The context is the first argument.
+ frame_->EmitPush(kScratchRegister);
+ frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+} + + void CodeGenerator::VisitDeclaration(Declaration* node) { Comment cmnt(masm_, "[ Declaration"); Variable* var = node->proxy()->var(); @@ -1230,6 +2955,44 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { } +void CodeGenerator::GenerateReturnSequence(Result* return_value) { + // The return value is a live (but not currently reference counted) + // reference to rax. This is safe because the current frame does not + // contain a reference to rax (it is prepared for the return by spilling + // all registers). + if (FLAG_trace) { + frame_->Push(return_value); + *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1); + } + return_value->ToRegister(rax); + + // Add a label for checking the size of the code used for returning. +#ifdef DEBUG + Label check_exit_codesize; + masm_->bind(&check_exit_codesize); +#endif + + // Leave the frame and return popping the arguments and the + // receiver. + frame_->Exit(); + masm_->ret((scope()->num_parameters() + 1) * kPointerSize); +#ifdef ENABLE_DEBUGGER_SUPPORT + // Add padding that will be overwritten by a debugger breakpoint. + // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k" + // with length 7 (3 + 1 + 3). + const int kPadding = Assembler::kJSReturnSequenceLength - 7; + for (int i = 0; i < kPadding; ++i) { + masm_->int3(); + } + // Check that the size of the code used for returning matches what is + // expected by the debugger. + ASSERT_EQ(Assembler::kJSReturnSequenceLength, + masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); +#endif + DeleteFrame(); +} + + void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) { ASSERT(!in_spilled_code()); Comment cmnt(masm_, "[ WithEnterStatement"); @@ -2531,6 +4294,349 @@ void CodeGenerator::VisitConditional(Conditional* node) { } +void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { + if (slot->type() == Slot::LOOKUP) { + ASSERT(slot->var()->is_dynamic()); + + JumpTarget slow; + JumpTarget done; + Result value; + + // Generate fast case for loading from slots that correspond to + // local/global variables or arguments unless they are shadowed by + // eval-introduced bindings. + EmitDynamicLoadFromSlotFastCase(slot, + typeof_state, + &value, + &slow, + &done); + + slow.Bind(); + // A runtime call is inevitable. We eagerly sync frame elements + // to memory so that we can push the arguments directly into place + // on top of the frame. + frame_->SyncRange(0, frame_->element_count() - 1); + frame_->EmitPush(rsi); + __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT); + frame_->EmitPush(kScratchRegister); + if (typeof_state == INSIDE_TYPEOF) { + value = + frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + } else { + value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2); + } + + done.Bind(&value); + frame_->Push(&value); + + } else if (slot->var()->mode() == Variable::CONST) { + // Const slots may contain 'the hole' value (the constant hasn't been + // initialized yet) which needs to be converted into the 'undefined' + // value. + // + // We currently spill the virtual frame because constants use the + // potentially unsafe direct-frame access of SlotOperand. 
+ VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ Load const"); + JumpTarget exit; + __ movq(rcx, SlotOperand(slot, rcx)); + __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); + exit.Branch(not_equal); + __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex); + exit.Bind(); + frame_->EmitPush(rcx); + + } else if (slot->type() == Slot::PARAMETER) { + frame_->PushParameterAt(slot->index()); + + } else if (slot->type() == Slot::LOCAL) { + frame_->PushLocalAt(slot->index()); + + } else { + // The other remaining slot types (LOOKUP and GLOBAL) cannot reach + // here. + // + // The use of SlotOperand below is safe for an unspilled frame + // because it will always be a context slot. + ASSERT(slot->type() == Slot::CONTEXT); + Result temp = allocator_->Allocate(); + ASSERT(temp.is_valid()); + __ movq(temp.reg(), SlotOperand(slot, temp.reg())); + frame_->Push(&temp); + } +} + + +void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot, + TypeofState state) { + LoadFromSlot(slot, state); + + // Bail out quickly if we're not using lazy arguments allocation. + if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return; + + // ... or if the slot isn't a non-parameter arguments slot. + if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return; + + // Pop the loaded value from the stack. + Result value = frame_->Pop(); + + // If the loaded value is a constant, we know if the arguments + // object has been lazily loaded yet. + if (value.is_constant()) { + if (value.handle()->IsTheHole()) { + Result arguments = StoreArgumentsObject(false); + frame_->Push(&arguments); + } else { + frame_->Push(&value); + } + return; + } + + // The loaded value is in a register. If it is the sentinel that + // indicates that we haven't loaded the arguments object yet, we + // need to do it now. + JumpTarget exit; + __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex); + frame_->Push(&value); + exit.Branch(not_equal); + Result arguments = StoreArgumentsObject(false); + frame_->SetElementAt(0, &arguments); + exit.Bind(); +} + + +Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( + Slot* slot, + TypeofState typeof_state, + JumpTarget* slow) { + // Check that no extension objects have been created by calls to + // eval from the current scope to the global scope. + Register context = rsi; + Result tmp = allocator_->Allocate(); + ASSERT(tmp.is_valid()); // All non-reserved registers were available. + + Scope* s = scope(); + while (s != NULL) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. + __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), + Immediate(0)); + slow->Branch(not_equal, not_taken); + } + // Load next context in chain. + __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); + __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); + context = tmp.reg(); + } + // If no outer scope calls eval, we do not need to check more + // context extensions. If we have reached an eval scope, we check + // all extensions from this point. + if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; + s = s->outer_scope(); + } + + if (s->is_eval_scope()) { + // Loop up the context chain. There is no frame effect so it is + // safe to use raw labels here. + Label next, fast; + if (!context.is(tmp.reg())) { + __ movq(tmp.reg(), context); + } + // Load map for comparison into register, outside loop. 
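Aside on the const-slot load above: a const slot holds 'the hole' sentinel until its initializer has run, and the CompareRoot/LoadRoot pair rewrites that sentinel to undefined before the value is pushed. A small C++ analogue of the sentinel-then-default pattern (made-up types, not V8 code):

    // Illustrative only: reading a not-yet-initialized const yields 'undefined'
    // rather than leaking the internal hole sentinel.
    struct Value { int payload; };

    const Value kTheHole{-1};    // hypothetical stand-in for Heap::kTheHoleValue
    const Value kUndefined{0};   // hypothetical stand-in for 'undefined'

    Value LoadConstSlot(const Value& slot) {
      return (slot.payload == kTheHole.payload) ? kUndefined : slot;
    }
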
+ __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex); + __ bind(&next); + // Terminate at global context. + __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset)); + __ j(equal, &fast); + // Check that extension is NULL. + __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0)); + slow->Branch(not_equal); + // Load next context in chain. + __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX)); + __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); + __ jmp(&next); + __ bind(&fast); + } + tmp.Unuse(); + + // All extension objects were empty and it is safe to use a global + // load IC call. + LoadGlobal(); + frame_->Push(slot->var()->name()); + RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) + ? RelocInfo::CODE_TARGET + : RelocInfo::CODE_TARGET_CONTEXT; + Result answer = frame_->CallLoadIC(mode); + // A test rax instruction following the call signals that the inobject + // property case was inlined. Ensure that there is not a test rax + // instruction here. + masm_->nop(); + return answer; +} + + +void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot, + TypeofState typeof_state, + Result* result, + JumpTarget* slow, + JumpTarget* done) { + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. + if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { + *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow); + done->Jump(result); + + } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { + Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); + Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); + if (potential_slot != NULL) { + // Generate fast case for locals that rewrite to slots. + // Allocate a fresh register to use as a temp in + // ContextSlotOperandCheckExtensions and to hold the result + // value. + *result = allocator_->Allocate(); + ASSERT(result->is_valid()); + __ movq(result->reg(), + ContextSlotOperandCheckExtensions(potential_slot, + *result, + slow)); + if (potential_slot->var()->mode() == Variable::CONST) { + __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex); + done->Branch(not_equal, result); + __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex); + } + done->Jump(result); + } else if (rewrite != NULL) { + // Generate fast case for argument loads. + Property* property = rewrite->AsProperty(); + if (property != NULL) { + VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); + Literal* key_literal = property->key()->AsLiteral(); + if (obj_proxy != NULL && + key_literal != NULL && + obj_proxy->IsArguments() && + key_literal->handle()->IsSmi()) { + // Load arguments object if there are no eval-introduced + // variables. Then load the argument from the arguments + // object using keyed load. 
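Aside on LoadFromGlobalSlotCheckExtensions above: the generated code walks the context chain and, for every scope that calls eval, verifies that the context's EXTENSION slot is still empty (no eval-introduced bindings); only then is the inline-cached global load safe. A hedged C++ sketch of the same walk over a made-up context structure:

    // Illustrative only: bail out to the slow runtime path if any eval-calling
    // scope on the chain has grown an extension object, mirroring the
    // cmpq(ContextOperand(..., EXTENSION_INDEX), Immediate(0)) checks above.
    struct Context {
      Context* previous;      // next context toward the global context
      void* extension;        // non-null once eval has introduced bindings
      bool scope_calls_eval;
      bool is_global;
    };

    bool CanUseGlobalLoadIC(const Context* ctx) {
      for (const Context* c = ctx; c != nullptr; c = c->previous) {
        if (c->scope_calls_eval && c->extension != nullptr) {
          return false;       // corresponds to slow->Branch(not_equal)
        }
        if (c->is_global) break;  // terminate at the global context
      }
      return true;
    }
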
+ Result arguments = allocator()->Allocate(); + ASSERT(arguments.is_valid()); + __ movq(arguments.reg(), + ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), + arguments, + slow)); + frame_->Push(&arguments); + frame_->Push(key_literal->handle()); + *result = EmitKeyedLoad(); + done->Jump(result); + } + } + } + } +} + + +void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { + if (slot->type() == Slot::LOOKUP) { + ASSERT(slot->var()->is_dynamic()); + + // For now, just do a runtime call. Since the call is inevitable, + // we eagerly sync the virtual frame so we can directly push the + // arguments into place. + frame_->SyncRange(0, frame_->element_count() - 1); + + frame_->EmitPush(rsi); + frame_->EmitPush(slot->var()->name()); + + Result value; + if (init_state == CONST_INIT) { + // Same as the case for a normal store, but ignores attribute + // (e.g. READ_ONLY) of context slot so that we can initialize const + // properties (introduced via eval("const foo = (some expr);")). Also, + // uses the current function context instead of the top context. + // + // Note that we must declare the foo upon entry of eval(), via a + // context slot declaration, but we cannot initialize it at the same + // time, because the const declaration may be at the end of the eval + // code (sigh...) and the const variable may have been used before + // (where its value is 'undefined'). Thus, we can only do the + // initialization when we actually encounter the expression and when + // the expression operands are defined and valid, and thus we need the + // split into 2 operations: declaration of the context slot followed + // by initialization. + value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); + } else { + value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3); + } + // Storing a variable must keep the (new) value on the expression + // stack. This is necessary for compiling chained assignment + // expressions. + frame_->Push(&value); + } else { + ASSERT(!slot->var()->is_dynamic()); + + JumpTarget exit; + if (init_state == CONST_INIT) { + ASSERT(slot->var()->mode() == Variable::CONST); + // Only the first const initialization must be executed (the slot + // still contains 'the hole' value). When the assignment is executed, + // the code is identical to a normal store (see below). + // + // We spill the frame in the code below because the direct-frame + // access of SlotOperand is potentially unsafe with an unspilled + // frame. + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ Init const"); + __ movq(rcx, SlotOperand(slot, rcx)); + __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); + exit.Branch(not_equal); + } + + // We must execute the store. Storing a variable must keep the (new) + // value on the stack. This is necessary for compiling assignment + // expressions. + // + // Note: We will reach here even with slot->var()->mode() == + // Variable::CONST because of const declarations which will initialize + // consts to 'the hole' value and by doing so, end up calling this code. + if (slot->type() == Slot::PARAMETER) { + frame_->StoreToParameterAt(slot->index()); + } else if (slot->type() == Slot::LOCAL) { + frame_->StoreToLocalAt(slot->index()); + } else { + // The other slot types (LOOKUP and GLOBAL) cannot reach here. + // + // The use of SlotOperand below is safe for an unspilled frame + // because the slot is a context slot. 
+ ASSERT(slot->type() == Slot::CONTEXT); + frame_->Dup(); + Result value = frame_->Pop(); + value.ToRegister(); + Result start = allocator_->Allocate(); + ASSERT(start.is_valid()); + __ movq(SlotOperand(slot, start.reg()), value.reg()); + // RecordWrite may destroy the value registers. + // + // TODO(204): Avoid actually spilling when the value is not + // needed (probably the common case). + frame_->Spill(value.reg()); + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + Result temp = allocator_->Allocate(); + ASSERT(temp.is_valid()); + __ RecordWrite(start.reg(), offset, value.reg(), temp.reg()); + // The results start, value, and temp are unused by going out of + // scope. + } + + exit.Bind(); + } +} + + void CodeGenerator::VisitSlot(Slot* node) { Comment cmnt(masm_, "[ Slot"); LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF); @@ -2557,6 +4663,17 @@ void CodeGenerator::VisitLiteral(Literal* node) { } +void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) { + UNIMPLEMENTED(); + // TODO(X64): Implement security policy for loads of smis. +} + + +bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { + return false; +} + + // Materialize the regexp literal 'node' in the literals array // 'literals' of the function. Leave the regexp boilerplate in // 'boilerplate'. @@ -3245,905 +5362,48 @@ void CodeGenerator::VisitCallNew(CallNew* node) { } -void CodeGenerator::VisitCallRuntime(CallRuntime* node) { - if (CheckForInlineRuntimeCall(node)) { - return; - } - - ZoneList<Expression*>* args = node->arguments(); - Comment cmnt(masm_, "[ CallRuntime"); - Runtime::Function* function = node->function(); - - if (function == NULL) { - // Push the builtins object found in the current global object. - Result temp = allocator()->Allocate(); - ASSERT(temp.is_valid()); - __ movq(temp.reg(), GlobalObject()); - __ movq(temp.reg(), - FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset)); - frame_->Push(&temp); - } - - // Push the arguments ("left-to-right"). - int arg_count = args->length(); - for (int i = 0; i < arg_count; i++) { - Load(args->at(i)); - } - - if (function == NULL) { - // Call the JS runtime function. - frame_->Push(node->name()); - Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET, - arg_count, - loop_nesting_); - frame_->RestoreContextRegister(); - frame_->Push(&answer); - } else { - // Call the C runtime function. - Result answer = frame_->CallRuntime(function, arg_count); - frame_->Push(&answer); - } -} - - -void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { - Comment cmnt(masm_, "[ UnaryOperation"); - - Token::Value op = node->op(); - - if (op == Token::NOT) { - // Swap the true and false targets but keep the same actual label - // as the fall through. - destination()->Invert(); - LoadCondition(node->expression(), destination(), true); - // Swap the labels back. 
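Aside on the context-slot store at the top of this hunk: the raw movq into the slot is followed by RecordWrite, the write barrier that tells the garbage collector a pointer field of a heap object changed. Conceptually the barrier records the written slot so old-to-new pointers can be found without scanning all of old space; a simplified C++ sketch (not the V8 implementation):

    #include <cstdint>
    #include <unordered_set>

    // Illustrative write-barrier sketch. After a pointer store into a heap
    // object, remember the slot when it may now point from old space into new
    // space; the GC later scans only the remembered slots.
    struct RememberedSet {
      std::unordered_set<uintptr_t> slots;

      void RecordWrite(void* object, int offset, bool value_in_new_space) {
        if (value_in_new_space) {
          slots.insert(reinterpret_cast<uintptr_t>(object) + offset);
        }
      }
    };
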
- destination()->Invert(); - - } else if (op == Token::DELETE) { - Property* property = node->expression()->AsProperty(); - if (property != NULL) { - Load(property->obj()); - Load(property->key()); - Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2); - frame_->Push(&answer); - return; - } - - Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); - if (variable != NULL) { - Slot* slot = variable->slot(); - if (variable->is_global()) { - LoadGlobal(); - frame_->Push(variable->name()); - Result answer = frame_->InvokeBuiltin(Builtins::DELETE, - CALL_FUNCTION, 2); - frame_->Push(&answer); - return; - - } else if (slot != NULL && slot->type() == Slot::LOOKUP) { - // Call the runtime to look up the context holding the named - // variable. Sync the virtual frame eagerly so we can push the - // arguments directly into place. - frame_->SyncRange(0, frame_->element_count() - 1); - frame_->EmitPush(rsi); - frame_->EmitPush(variable->name()); - Result context = frame_->CallRuntime(Runtime::kLookupContext, 2); - ASSERT(context.is_register()); - frame_->EmitPush(context.reg()); - context.Unuse(); - frame_->EmitPush(variable->name()); - Result answer = frame_->InvokeBuiltin(Builtins::DELETE, - CALL_FUNCTION, 2); - frame_->Push(&answer); - return; - } - - // Default: Result of deleting non-global, not dynamically - // introduced variables is false. - frame_->Push(Factory::false_value()); - - } else { - // Default: Result of deleting expressions is true. - Load(node->expression()); // may have side-effects - frame_->SetElementAt(0, Factory::true_value()); - } - - } else if (op == Token::TYPEOF) { - // Special case for loading the typeof expression; see comment on - // LoadTypeofExpression(). - LoadTypeofExpression(node->expression()); - Result answer = frame_->CallRuntime(Runtime::kTypeof, 1); - frame_->Push(&answer); - - } else if (op == Token::VOID) { - Expression* expression = node->expression(); - if (expression && expression->AsLiteral() && ( - expression->AsLiteral()->IsTrue() || - expression->AsLiteral()->IsFalse() || - expression->AsLiteral()->handle()->IsNumber() || - expression->AsLiteral()->handle()->IsString() || - expression->AsLiteral()->handle()->IsJSRegExp() || - expression->AsLiteral()->IsNull())) { - // Omit evaluating the value of the primitive literal. - // It will be discarded anyway, and can have no side effect. - frame_->Push(Factory::undefined_value()); - } else { - Load(node->expression()); - frame_->SetElementAt(0, Factory::undefined_value()); - } - - } else { - bool can_overwrite = - (node->expression()->AsBinaryOperation() != NULL && - node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); - UnaryOverwriteMode overwrite = - can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - bool no_negative_zero = node->expression()->no_negative_zero(); - Load(node->expression()); - switch (op) { - case Token::NOT: - case Token::DELETE: - case Token::TYPEOF: - UNREACHABLE(); // handled above - break; - - case Token::SUB: { - GenericUnaryOpStub stub( - Token::SUB, - overwrite, - no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero); - Result operand = frame_->Pop(); - Result answer = frame_->CallStub(&stub, &operand); - answer.set_type_info(TypeInfo::Number()); - frame_->Push(&answer); - break; - } - - case Token::BIT_NOT: { - // Smi check. 
- JumpTarget smi_label; - JumpTarget continue_label; - Result operand = frame_->Pop(); - operand.ToRegister(); - - Condition is_smi = masm_->CheckSmi(operand.reg()); - smi_label.Branch(is_smi, &operand); - - GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); - Result answer = frame_->CallStub(&stub, &operand); - continue_label.Jump(&answer); - - smi_label.Bind(&answer); - answer.ToRegister(); - frame_->Spill(answer.reg()); - __ SmiNot(answer.reg(), answer.reg()); - continue_label.Bind(&answer); - answer.set_type_info(TypeInfo::Smi()); - frame_->Push(&answer); - break; - } - - case Token::ADD: { - // Smi check. - JumpTarget continue_label; - Result operand = frame_->Pop(); - TypeInfo operand_info = operand.type_info(); - operand.ToRegister(); - Condition is_smi = masm_->CheckSmi(operand.reg()); - continue_label.Branch(is_smi, &operand); - frame_->Push(&operand); - Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER, - CALL_FUNCTION, 1); - - continue_label.Bind(&answer); - if (operand_info.IsSmi()) { - answer.set_type_info(TypeInfo::Smi()); - } else if (operand_info.IsInteger32()) { - answer.set_type_info(TypeInfo::Integer32()); - } else { - answer.set_type_info(TypeInfo::Number()); - } - frame_->Push(&answer); - break; - } - default: - UNREACHABLE(); - } - } -} - - -// The value in dst was optimistically incremented or decremented. -// The result overflowed or was not smi tagged. Call into the runtime -// to convert the argument to a number, and call the specialized add -// or subtract stub. The result is left in dst. -class DeferredPrefixCountOperation: public DeferredCode { - public: - DeferredPrefixCountOperation(Register dst, - bool is_increment, - TypeInfo input_type) - : dst_(dst), is_increment_(is_increment), input_type_(input_type) { - set_comment("[ DeferredCountOperation"); - } - - virtual void Generate(); - - private: - Register dst_; - bool is_increment_; - TypeInfo input_type_; -}; - - -void DeferredPrefixCountOperation::Generate() { - Register left; - if (input_type_.IsNumber()) { - left = dst_; - } else { - __ push(dst_); - __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); - left = rax; - } - - GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, - NO_OVERWRITE, - NO_GENERIC_BINARY_FLAGS, - TypeInfo::Number()); - stub.GenerateCall(masm_, left, Smi::FromInt(1)); - - if (!dst_.is(rax)) __ movq(dst_, rax); -} - - -// The value in dst was optimistically incremented or decremented. -// The result overflowed or was not smi tagged. Call into the runtime -// to convert the argument to a number. Update the original value in -// old. Call the specialized add or subtract stub. The result is -// left in dst. -class DeferredPostfixCountOperation: public DeferredCode { - public: - DeferredPostfixCountOperation(Register dst, - Register old, - bool is_increment, - TypeInfo input_type) - : dst_(dst), - old_(old), - is_increment_(is_increment), - input_type_(input_type) { - set_comment("[ DeferredCountOperation"); - } - - virtual void Generate(); - - private: - Register dst_; - Register old_; - bool is_increment_; - TypeInfo input_type_; -}; - - -void DeferredPostfixCountOperation::Generate() { - Register left; - if (input_type_.IsNumber()) { - __ push(dst_); // Save the input to use as the old value. - left = dst_; - } else { - __ push(dst_); - __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); - __ push(rax); // Save the result of ToNumber to use as the old value. - left = rax; - } - - GenericBinaryOpStub stub(is_increment_ ? 
Token::ADD : Token::SUB, - NO_OVERWRITE, - NO_GENERIC_BINARY_FLAGS, - TypeInfo::Number()); - stub.GenerateCall(masm_, left, Smi::FromInt(1)); - - if (!dst_.is(rax)) __ movq(dst_, rax); - __ pop(old_); -} - - -void CodeGenerator::VisitCountOperation(CountOperation* node) { - Comment cmnt(masm_, "[ CountOperation"); - - bool is_postfix = node->is_postfix(); - bool is_increment = node->op() == Token::INC; - - Variable* var = node->expression()->AsVariableProxy()->AsVariable(); - bool is_const = (var != NULL && var->mode() == Variable::CONST); - - // Postfix operations need a stack slot under the reference to hold - // the old value while the new value is being stored. This is so that - // in the case that storing the new value requires a call, the old - // value will be in the frame to be spilled. - if (is_postfix) frame_->Push(Smi::FromInt(0)); - - // A constant reference is not saved to, so the reference is not a - // compound assignment reference. - { Reference target(this, node->expression(), !is_const); - if (target.is_illegal()) { - // Spoof the virtual frame to have the expected height (one higher - // than on entry). - if (!is_postfix) frame_->Push(Smi::FromInt(0)); - return; - } - target.TakeValue(); - - Result new_value = frame_->Pop(); - new_value.ToRegister(); - - Result old_value; // Only allocated in the postfix case. - if (is_postfix) { - // Allocate a temporary to preserve the old value. - old_value = allocator_->Allocate(); - ASSERT(old_value.is_valid()); - __ movq(old_value.reg(), new_value.reg()); - - // The return value for postfix operations is ToNumber(input). - // Keep more precise type info if the input is some kind of - // number already. If the input is not a number we have to wait - // for the deferred code to convert it. - if (new_value.type_info().IsNumber()) { - old_value.set_type_info(new_value.type_info()); - } - } - // Ensure the new value is writable. - frame_->Spill(new_value.reg()); - - DeferredCode* deferred = NULL; - if (is_postfix) { - deferred = new DeferredPostfixCountOperation(new_value.reg(), - old_value.reg(), - is_increment, - new_value.type_info()); - } else { - deferred = new DeferredPrefixCountOperation(new_value.reg(), - is_increment, - new_value.type_info()); - } - - if (new_value.is_smi()) { - if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); } - } else { - __ JumpIfNotSmi(new_value.reg(), deferred->entry_label()); - } - if (is_increment) { - __ SmiAddConstant(new_value.reg(), - new_value.reg(), - Smi::FromInt(1), - deferred->entry_label()); - } else { - __ SmiSubConstant(new_value.reg(), - new_value.reg(), - Smi::FromInt(1), - deferred->entry_label()); - } - deferred->BindExit(); - - // Postfix count operations return their input converted to - // number. The case when the input is already a number is covered - // above in the allocation code for old_value. - if (is_postfix && !new_value.type_info().IsNumber()) { - old_value.set_type_info(TypeInfo::Number()); - } - - new_value.set_type_info(TypeInfo::Number()); - - // Postfix: store the old value in the allocated slot under the - // reference. - if (is_postfix) frame_->SetElementAt(target.size(), &old_value); - - frame_->Push(&new_value); - // Non-constant: update the reference. - if (!is_const) target.SetValue(NOT_CONST_INIT); - } - - // Postfix: drop the new value and use the old. 
- if (is_postfix) frame_->Drop(); -} - - -void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) { - // According to ECMA-262 section 11.11, page 58, the binary logical - // operators must yield the result of one of the two expressions - // before any ToBoolean() conversions. This means that the value - // produced by a && or || operator is not necessarily a boolean. - - // NOTE: If the left hand side produces a materialized value (not - // control flow), we force the right hand side to do the same. This - // is necessary because we assume that if we get control flow on the - // last path out of an expression we got it on all paths. - if (node->op() == Token::AND) { - JumpTarget is_true; - ControlDestination dest(&is_true, destination()->false_target(), true); - LoadCondition(node->left(), &dest, false); - - if (dest.false_was_fall_through()) { - // The current false target was used as the fall-through. If - // there are no dangling jumps to is_true then the left - // subexpression was unconditionally false. Otherwise we have - // paths where we do have to evaluate the right subexpression. - if (is_true.is_linked()) { - // We need to compile the right subexpression. If the jump to - // the current false target was a forward jump then we have a - // valid frame, we have just bound the false target, and we - // have to jump around the code for the right subexpression. - if (has_valid_frame()) { - destination()->false_target()->Unuse(); - destination()->false_target()->Jump(); - } - is_true.Bind(); - // The left subexpression compiled to control flow, so the - // right one is free to do so as well. - LoadCondition(node->right(), destination(), false); - } else { - // We have actually just jumped to or bound the current false - // target but the current control destination is not marked as - // used. - destination()->Use(false); - } - - } else if (dest.is_used()) { - // The left subexpression compiled to control flow (and is_true - // was just bound), so the right is free to do so as well. - LoadCondition(node->right(), destination(), false); - - } else { - // We have a materialized value on the frame, so we exit with - // one on all paths. There are possibly also jumps to is_true - // from nested subexpressions. - JumpTarget pop_and_continue; - JumpTarget exit; - - // Avoid popping the result if it converts to 'false' using the - // standard ToBoolean() conversion as described in ECMA-262, - // section 9.2, page 30. - // - // Duplicate the TOS value. The duplicate will be popped by - // ToBoolean. - frame_->Dup(); - ControlDestination dest(&pop_and_continue, &exit, true); - ToBoolean(&dest); - - // Pop the result of evaluating the first part. - frame_->Drop(); - - // Compile right side expression. - is_true.Bind(); - Load(node->right()); - - // Exit (always with a materialized value). - exit.Bind(); - } - - } else { - ASSERT(node->op() == Token::OR); - JumpTarget is_false; - ControlDestination dest(destination()->true_target(), &is_false, false); - LoadCondition(node->left(), &dest, false); - - if (dest.true_was_fall_through()) { - // The current true target was used as the fall-through. If - // there are no dangling jumps to is_false then the left - // subexpression was unconditionally true. Otherwise we have - // paths where we do have to evaluate the right subexpression. - if (is_false.is_linked()) { - // We need to compile the right subexpression. 
If the jump to - // the current true target was a forward jump then we have a - // valid frame, we have just bound the true target, and we - // have to jump around the code for the right subexpression. - if (has_valid_frame()) { - destination()->true_target()->Unuse(); - destination()->true_target()->Jump(); - } - is_false.Bind(); - // The left subexpression compiled to control flow, so the - // right one is free to do so as well. - LoadCondition(node->right(), destination(), false); - } else { - // We have just jumped to or bound the current true target but - // the current control destination is not marked as used. - destination()->Use(true); - } - - } else if (dest.is_used()) { - // The left subexpression compiled to control flow (and is_false - // was just bound), so the right is free to do so as well. - LoadCondition(node->right(), destination(), false); - - } else { - // We have a materialized value on the frame, so we exit with - // one on all paths. There are possibly also jumps to is_false - // from nested subexpressions. - JumpTarget pop_and_continue; - JumpTarget exit; - - // Avoid popping the result if it converts to 'true' using the - // standard ToBoolean() conversion as described in ECMA-262, - // section 9.2, page 30. - // - // Duplicate the TOS value. The duplicate will be popped by - // ToBoolean. - frame_->Dup(); - ControlDestination dest(&exit, &pop_and_continue, false); - ToBoolean(&dest); - - // Pop the result of evaluating the first part. - frame_->Drop(); - - // Compile right side expression. - is_false.Bind(); - Load(node->right()); - - // Exit (always with a materialized value). - exit.Bind(); - } - } -} - -void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { - Comment cmnt(masm_, "[ BinaryOperation"); - - if (node->op() == Token::AND || node->op() == Token::OR) { - GenerateLogicalBooleanOperation(node); - } else { - // NOTE: The code below assumes that the slow cases (calls to runtime) - // never return a constant/immutable object. - OverwriteMode overwrite_mode = NO_OVERWRITE; - if (node->left()->AsBinaryOperation() != NULL && - node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { - overwrite_mode = OVERWRITE_LEFT; - } else if (node->right()->AsBinaryOperation() != NULL && - node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { - overwrite_mode = OVERWRITE_RIGHT; - } - - if (node->left()->IsTrivial()) { - Load(node->right()); - Result right = frame_->Pop(); - frame_->Push(node->left()); - frame_->Push(&right); - } else { - Load(node->left()); - Load(node->right()); - } - GenericBinaryOperation(node, overwrite_mode); - } -} - - - -void CodeGenerator::VisitCompareOperation(CompareOperation* node) { - Comment cmnt(masm_, "[ CompareOperation"); - - // Get the expressions from the node. - Expression* left = node->left(); - Expression* right = node->right(); - Token::Value op = node->op(); - // To make typeof testing for natives implemented in JavaScript really - // efficient, we generate special code for expressions of the form: - // 'typeof <expression> == <string>'. - UnaryOperation* operation = left->AsUnaryOperation(); - if ((op == Token::EQ || op == Token::EQ_STRICT) && - (operation != NULL && operation->op() == Token::TYPEOF) && - (right->AsLiteral() != NULL && - right->AsLiteral()->handle()->IsString())) { - Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle())); - - // Load the operand and move it to a register. 
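Aside on GenerateLogicalBooleanOperation above: as the ECMA-262 comment notes, && and || yield the value of one of their operands, not a coerced boolean, which is why the left value is duplicated before the ToBoolean test and only dropped on the path that evaluates the right operand. A small C++ sketch of that rule over a hypothetical tagged value type (illustrative only):

    #include <functional>

    // Illustrative only: JS-style short-circuit semantics. The result is one
    // of the operand values, never a freshly created boolean.
    struct Value {
      double number;
      bool ToBoolean() const { return number != 0; }  // toy coercion rule
    };

    Value LogicalAnd(const Value& left, const std::function<Value()>& right) {
      return left.ToBoolean() ? right() : left;   // '&&' keeps a falsy left
    }

    Value LogicalOr(const Value& left, const std::function<Value()>& right) {
      return left.ToBoolean() ? left : right();   // '||' keeps a truthy left
    }
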
- LoadTypeofExpression(operation->expression()); - Result answer = frame_->Pop(); - answer.ToRegister(); - - if (check->Equals(Heap::number_symbol())) { - Condition is_smi = masm_->CheckSmi(answer.reg()); - destination()->true_target()->Branch(is_smi); - frame_->Spill(answer.reg()); - __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); - __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex); - answer.Unuse(); - destination()->Split(equal); - - } else if (check->Equals(Heap::string_symbol())) { - Condition is_smi = masm_->CheckSmi(answer.reg()); - destination()->false_target()->Branch(is_smi); - - // It can be an undetectable string object. - __ movq(kScratchRegister, - FieldOperand(answer.reg(), HeapObject::kMapOffset)); - __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - destination()->false_target()->Branch(not_zero); - __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE); - answer.Unuse(); - destination()->Split(below); // Unsigned byte comparison needed. - - } else if (check->Equals(Heap::boolean_symbol())) { - __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex); - destination()->true_target()->Branch(equal); - __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex); - answer.Unuse(); - destination()->Split(equal); - - } else if (check->Equals(Heap::undefined_symbol())) { - __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex); - destination()->true_target()->Branch(equal); - - Condition is_smi = masm_->CheckSmi(answer.reg()); - destination()->false_target()->Branch(is_smi); - - // It can be an undetectable object. - __ movq(kScratchRegister, - FieldOperand(answer.reg(), HeapObject::kMapOffset)); - __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - answer.Unuse(); - destination()->Split(not_zero); - - } else if (check->Equals(Heap::function_symbol())) { - Condition is_smi = masm_->CheckSmi(answer.reg()); - destination()->false_target()->Branch(is_smi); - frame_->Spill(answer.reg()); - __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg()); - destination()->true_target()->Branch(equal); - // Regular expressions are callable so typeof == 'function'. - __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE); - answer.Unuse(); - destination()->Split(equal); - - } else if (check->Equals(Heap::object_symbol())) { - Condition is_smi = masm_->CheckSmi(answer.reg()); - destination()->false_target()->Branch(is_smi); - __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex); - destination()->true_target()->Branch(equal); - - // Regular expressions are typeof == 'function', not 'object'. - __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister); - destination()->false_target()->Branch(equal); - - // It can be an undetectable object. - __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - destination()->false_target()->Branch(not_zero); - __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE); - destination()->false_target()->Branch(below); - __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); - answer.Unuse(); - destination()->Split(below_equal); - } else { - // Uncommon case: typeof testing against a string literal that is - // never returned from the typeof operator. 
- answer.Unuse(); - destination()->Goto(false); - } - return; - } - - Condition cc = no_condition; - bool strict = false; - switch (op) { - case Token::EQ_STRICT: - strict = true; - // Fall through - case Token::EQ: - cc = equal; - break; - case Token::LT: - cc = less; - break; - case Token::GT: - cc = greater; - break; - case Token::LTE: - cc = less_equal; - break; - case Token::GTE: - cc = greater_equal; - break; - case Token::IN: { - Load(left); - Load(right); - Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2); - frame_->Push(&answer); // push the result - return; - } - case Token::INSTANCEOF: { - Load(left); - Load(right); - InstanceofStub stub; - Result answer = frame_->CallStub(&stub, 2); - answer.ToRegister(); - __ testq(answer.reg(), answer.reg()); - answer.Unuse(); - destination()->Split(zero); - return; - } - default: - UNREACHABLE(); - } - - if (left->IsTrivial()) { - Load(right); - Result right_result = frame_->Pop(); - frame_->Push(left); - frame_->Push(&right_result); - } else { - Load(left); - Load(right); - } - - Comparison(node, cc, strict, destination()); -} - - -void CodeGenerator::VisitThisFunction(ThisFunction* node) { - frame_->PushFunction(); -} - - -void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) { - ASSERT(args->length() == 1); - - // ArgumentsAccessStub expects the key in rdx and the formal - // parameter count in rax. - Load(args->at(0)); - Result key = frame_->Pop(); - // Explicitly create a constant result. - Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters()))); - // Call the shared stub to get to arguments[key]. - ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); - Result result = frame_->CallStub(&stub, &key, &count); - frame_->Push(&result); -} - - -void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { +void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); Load(args->at(0)); Result value = frame_->Pop(); value.ToRegister(); ASSERT(value.is_valid()); Condition is_smi = masm_->CheckSmi(value.reg()); - destination()->false_target()->Branch(is_smi); - // It is a heap object - get map. - // Check if the object is a JS array or not. - __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister); value.Unuse(); - destination()->Split(equal); + destination()->Split(is_smi); } -void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) { +void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) { + // Conditionally generate a log call. + // Args: + // 0 (literal string): The type of logging (corresponds to the flags). + // This is used to determine whether or not to generate the log call. + // 1 (string): Format string. Access the string at argument index 2 + // with '%2s' (see Logger::LogRuntime for all the formats). + // 2 (array): Arguments to the format string. + ASSERT_EQ(args->length(), 3); +#ifdef ENABLE_LOGGING_AND_PROFILING + if (ShouldGenerateLog(args->at(0))) { + Load(args->at(1)); + Load(args->at(2)); + frame_->CallRuntime(Runtime::kLog, 2); + } +#endif + // Finally, we're expected to leave a value on the top of the stack. + frame_->Push(Factory::undefined_value()); +} + + +void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); Load(args->at(0)); Result value = frame_->Pop(); value.ToRegister(); ASSERT(value.is_valid()); - Condition is_smi = masm_->CheckSmi(value.reg()); - destination()->false_target()->Branch(is_smi); - // It is a heap object - get map. 
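Aside on GenerateIsSmi and GenerateIsNonNegativeSmi above: both reduce to cheap bit tests on the tagged word, because small integers (smis) carry a zero tag while heap pointers carry a one tag, and negativity is just the sign bit. A hedged C++ sketch of the idea (the constants are illustrative; the exact smi layout differs between ia32 and x64):

    #include <cstdint>

    // Illustrative tagging sketch, not V8's exact x64 layout: bit 0 clear means
    // smi, bit 0 set means heap object pointer.
    constexpr uintptr_t kSmiTagMask = 1;

    inline bool IsSmi(uintptr_t tagged) {
      return (tagged & kSmiTagMask) == 0;           // CheckSmi
    }

    inline bool IsNonNegativeSmi(uintptr_t tagged) {
      const uintptr_t kSignBit = uintptr_t{1} << (sizeof(uintptr_t) * 8 - 1);
      return IsSmi(tagged) && (tagged & kSignBit) == 0;  // CheckPositiveSmi
    }
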
- // Check if the object is a regexp. - __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister); + Condition positive_smi = masm_->CheckPositiveSmi(value.reg()); value.Unuse(); - destination()->Split(equal); -} - - -void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) { - // This generates a fast version of: - // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp') - ASSERT(args->length() == 1); - Load(args->at(0)); - Result obj = frame_->Pop(); - obj.ToRegister(); - Condition is_smi = masm_->CheckSmi(obj.reg()); - destination()->false_target()->Branch(is_smi); - - __ Move(kScratchRegister, Factory::null_value()); - __ cmpq(obj.reg(), kScratchRegister); - destination()->true_target()->Branch(equal); - - __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); - // Undetectable objects behave like undefined when tested with typeof. - __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - destination()->false_target()->Branch(not_zero); - __ movzxbq(kScratchRegister, - FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); - __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE)); - destination()->false_target()->Branch(below); - __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE)); - obj.Unuse(); - destination()->Split(below_equal); -} - - -void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) { - // This generates a fast version of: - // (%_ClassOf(arg) === 'Function') - ASSERT(args->length() == 1); - Load(args->at(0)); - Result obj = frame_->Pop(); - obj.ToRegister(); - Condition is_smi = masm_->CheckSmi(obj.reg()); - destination()->false_target()->Branch(is_smi); - __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); - obj.Unuse(); - destination()->Split(equal); -} - - -void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) { - ASSERT(args->length() == 1); - Load(args->at(0)); - Result obj = frame_->Pop(); - obj.ToRegister(); - Condition is_smi = masm_->CheckSmi(obj.reg()); - destination()->false_target()->Branch(is_smi); - __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); - __ movzxbl(kScratchRegister, - FieldOperand(kScratchRegister, Map::kBitFieldOffset)); - __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable)); - obj.Unuse(); - destination()->Split(not_zero); -} - - -void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { - ASSERT(args->length() == 0); - - // Get the frame pointer for the calling frame. - Result fp = allocator()->Allocate(); - __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - - // Skip the arguments adaptor frame if it exists. - Label check_frame_marker; - __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset), - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ j(not_equal, &check_frame_marker); - __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset)); - - // Check the marker in the calling frame. - __ bind(&check_frame_marker); - __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset), - Smi::FromInt(StackFrame::CONSTRUCT)); - fp.Unuse(); - destination()->Split(equal); -} - - -void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { - ASSERT(args->length() == 0); - - Result fp = allocator_->Allocate(); - Result result = allocator_->Allocate(); - ASSERT(fp.is_valid() && result.is_valid()); - - Label exit; - - // Get the number of formal parameters. 
- __ Move(result.reg(), Smi::FromInt(scope()->num_parameters())); - - // Check if the calling frame is an arguments adaptor frame. - __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset), - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ j(not_equal, &exit); - - // Arguments adaptor case: Read the arguments length from the - // adaptor frame. - __ movq(result.reg(), - Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset)); - - __ bind(&exit); - result.set_type_info(TypeInfo::Smi()); - if (FLAG_debug_code) { - __ AbortIfNotSmi(result.reg()); - } - frame_->Push(&result); + destination()->Split(positive_smi); } @@ -4352,275 +5612,293 @@ void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) { } -void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { +void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); Load(args->at(0)); Result value = frame_->Pop(); value.ToRegister(); ASSERT(value.is_valid()); - Condition positive_smi = masm_->CheckPositiveSmi(value.reg()); + Condition is_smi = masm_->CheckSmi(value.reg()); + destination()->false_target()->Branch(is_smi); + // It is a heap object - get map. + // Check if the object is a JS array or not. + __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister); value.Unuse(); - destination()->Split(positive_smi); + destination()->Split(equal); } -// Generates the Math.pow method. Only handles special cases and -// branches to the runtime system for everything else. Please note -// that this function assumes that the callsite has executed ToNumber -// on both arguments. -void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { - ASSERT(args->length() == 2); +void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); Load(args->at(0)); - Load(args->at(1)); + Result value = frame_->Pop(); + value.ToRegister(); + ASSERT(value.is_valid()); + Condition is_smi = masm_->CheckSmi(value.reg()); + destination()->false_target()->Branch(is_smi); + // It is a heap object - get map. + // Check if the object is a regexp. + __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister); + value.Unuse(); + destination()->Split(equal); +} - Label allocate_return; - // Load the two operands while leaving the values on the frame. - frame()->Dup(); - Result exponent = frame()->Pop(); - exponent.ToRegister(); - frame()->Spill(exponent.reg()); - frame()->PushElementAt(1); - Result base = frame()->Pop(); - base.ToRegister(); - frame()->Spill(base.reg()); - Result answer = allocator()->Allocate(); - ASSERT(answer.is_valid()); - ASSERT(!exponent.reg().is(base.reg())); - JumpTarget call_runtime; +void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) { + // This generates a fast version of: + // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp') + ASSERT(args->length() == 1); + Load(args->at(0)); + Result obj = frame_->Pop(); + obj.ToRegister(); + Condition is_smi = masm_->CheckSmi(obj.reg()); + destination()->false_target()->Branch(is_smi); - // Save 1 in xmm3 - we need this several times later on. - __ movl(answer.reg(), Immediate(1)); - __ cvtlsi2sd(xmm3, answer.reg()); + __ Move(kScratchRegister, Factory::null_value()); + __ cmpq(obj.reg(), kScratchRegister); + destination()->true_target()->Branch(equal); - Label exponent_nonsmi; - Label base_nonsmi; - // If the exponent is a heap number go to that specific case. 
- __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi); - __ JumpIfNotSmi(base.reg(), &base_nonsmi); + __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); + // Undetectable objects behave like undefined when tested with typeof. + __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); + destination()->false_target()->Branch(not_zero); + __ movzxbq(kScratchRegister, + FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); + __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE)); + destination()->false_target()->Branch(below); + __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE)); + obj.Unuse(); + destination()->Split(below_equal); +} - // Optimized version when y is an integer. - Label powi; - __ SmiToInteger32(base.reg(), base.reg()); - __ cvtlsi2sd(xmm0, base.reg()); - __ jmp(&powi); - // exponent is smi and base is a heapnumber. - __ bind(&base_nonsmi); - __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - call_runtime.Branch(not_equal); - __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); +void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) { + // This generates a fast version of: + // (%_ClassOf(arg) === 'Function') + ASSERT(args->length() == 1); + Load(args->at(0)); + Result obj = frame_->Pop(); + obj.ToRegister(); + Condition is_smi = masm_->CheckSmi(obj.reg()); + destination()->false_target()->Branch(is_smi); + __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); + obj.Unuse(); + destination()->Split(equal); +} - // Optimized version of pow if y is an integer. - __ bind(&powi); - __ SmiToInteger32(exponent.reg(), exponent.reg()); - // Save exponent in base as we need to check if exponent is negative later. - // We know that base and exponent are in different registers. - __ movl(base.reg(), exponent.reg()); +void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Result obj = frame_->Pop(); + obj.ToRegister(); + Condition is_smi = masm_->CheckSmi(obj.reg()); + destination()->false_target()->Branch(is_smi); + __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); + __ movzxbl(kScratchRegister, + FieldOperand(kScratchRegister, Map::kBitFieldOffset)); + __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable)); + obj.Unuse(); + destination()->Split(not_zero); +} - // Get absolute value of exponent. - Label no_neg; - __ cmpl(exponent.reg(), Immediate(0)); - __ j(greater_equal, &no_neg); - __ negl(exponent.reg()); - __ bind(&no_neg); - // Load xmm1 with 1. - __ movsd(xmm1, xmm3); - Label while_true; - Label no_multiply; +void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { + ASSERT(args->length() == 0); - __ bind(&while_true); - __ shrl(exponent.reg(), Immediate(1)); - __ j(not_carry, &no_multiply); - __ mulsd(xmm1, xmm0); - __ bind(&no_multiply); - __ testl(exponent.reg(), exponent.reg()); - __ mulsd(xmm0, xmm0); - __ j(not_zero, &while_true); + // Get the frame pointer for the calling frame. + Result fp = allocator()->Allocate(); + __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - // x has the original value of y - if y is negative return 1/result. - __ testl(base.reg(), base.reg()); - __ j(positive, &allocate_return); - // Special case if xmm1 has reached infinity. 
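Aside on the Math.pow fast path above: the powi loop (shrl on the exponent, a conditional mulsd into the accumulator when the shifted-out bit is set, then mulsd squaring the base) is exponentiation by squaring, with a reciprocal at the end for negative exponents. A plain C++ rendering of the same algorithm (illustrative; the generated code additionally bails out to the runtime when the intermediate result reaches infinity):

    // Exponentiation by squaring for an integer exponent, mirroring the powi
    // loop: inspect the low exponent bit, multiply the accumulator in when it
    // is set, square the base, and repeat until the exponent is exhausted.
    double PowInteger(double base, int exponent) {
      double acc = 1.0;                              // xmm1 starts at 1.0
      long long e =
          exponent < 0 ? -static_cast<long long>(exponent) : exponent;
      while (e != 0) {
        if (e & 1) acc *= base;                      // mulsd xmm1, xmm0
        base *= base;                                // mulsd xmm0, xmm0
        e >>= 1;                                     // shrl exponent, 1
      }
      return exponent < 0 ? 1.0 / acc : acc;         // divsd for negative y
    }
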
- __ movl(answer.reg(), Immediate(0x7FB00000)); - __ movd(xmm0, answer.reg()); - __ cvtss2sd(xmm0, xmm0); - __ ucomisd(xmm0, xmm1); - call_runtime.Branch(equal); - __ divsd(xmm3, xmm1); - __ movsd(xmm1, xmm3); - __ jmp(&allocate_return); + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset), + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ j(not_equal, &check_frame_marker); + __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset)); - // exponent (or both) is a heapnumber - no matter what we should now work - // on doubles. - __ bind(&exponent_nonsmi); - __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - call_runtime.Branch(not_equal); - __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset)); - // Test if exponent is nan. - __ ucomisd(xmm1, xmm1); - call_runtime.Branch(parity_even); + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset), + Smi::FromInt(StackFrame::CONSTRUCT)); + fp.Unuse(); + destination()->Split(equal); +} - Label base_not_smi; - Label handle_special_cases; - __ JumpIfNotSmi(base.reg(), &base_not_smi); - __ SmiToInteger32(base.reg(), base.reg()); - __ cvtlsi2sd(xmm0, base.reg()); - __ jmp(&handle_special_cases); - __ bind(&base_not_smi); - __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - call_runtime.Branch(not_equal); - __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset)); - __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask)); - __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask)); - // base is NaN or +/-Infinity - call_runtime.Branch(greater_equal); - __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); - // base is in xmm0 and exponent is in xmm1. - __ bind(&handle_special_cases); - Label not_minus_half; - // Test for -0.5. - // Load xmm2 with -0.5. - __ movl(answer.reg(), Immediate(0xBF000000)); - __ movd(xmm2, answer.reg()); - __ cvtss2sd(xmm2, xmm2); - // xmm2 now has -0.5. - __ ucomisd(xmm2, xmm1); - __ j(not_equal, ¬_minus_half); +void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { + ASSERT(args->length() == 0); - // Calculates reciprocal of square root. - // Note that 1/sqrt(x) = sqrt(1/x)) - __ divsd(xmm3, xmm0); - __ movsd(xmm1, xmm3); - __ sqrtsd(xmm1, xmm1); - __ jmp(&allocate_return); + Result fp = allocator_->Allocate(); + Result result = allocator_->Allocate(); + ASSERT(fp.is_valid() && result.is_valid()); - // Test for 0.5. - __ bind(¬_minus_half); - // Load xmm2 with 0.5. - // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3. - __ addsd(xmm2, xmm3); - // xmm2 now has 0.5. - __ ucomisd(xmm2, xmm1); - call_runtime.Branch(not_equal); + Label exit; - // Calculates square root. - __ movsd(xmm1, xmm0); - __ sqrtsd(xmm1, xmm1); + // Get the number of formal parameters. + __ Move(result.reg(), Smi::FromInt(scope()->num_parameters())); - JumpTarget done; - Label failure, success; - __ bind(&allocate_return); - // Make a copy of the frame to enable us to handle allocation - // failure after the JumpTarget jump. 
- VirtualFrame* clone = new VirtualFrame(frame()); - __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure); - __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1); - // Remove the two original values from the frame - we only need those - // in the case where we branch to runtime. - frame()->Drop(2); - exponent.Unuse(); - base.Unuse(); - done.Jump(&answer); - // Use the copy of the original frame as our current frame. - RegisterFile empty_regs; - SetFrame(clone, &empty_regs); - // If we experience an allocation failure we branch to runtime. - __ bind(&failure); - call_runtime.Bind(); - answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2); + // Check if the calling frame is an arguments adaptor frame. + __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset), + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ j(not_equal, &exit); - done.Bind(&answer); - frame()->Push(&answer); + // Arguments adaptor case: Read the arguments length from the + // adaptor frame. + __ movq(result.reg(), + Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset)); + + __ bind(&exit); + result.set_type_info(TypeInfo::Smi()); + if (FLAG_debug_code) { + __ AbortIfNotSmi(result.reg()); + } + frame_->Push(&result); } -// Generates the Math.sqrt method. Please note - this function assumes that -// the callsite has executed ToNumber on the argument. -void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) { +void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); - Load(args->at(0)); + JumpTarget leave, null, function, non_function_constructor; + Load(args->at(0)); // Load the object. + Result obj = frame_->Pop(); + obj.ToRegister(); + frame_->Spill(obj.reg()); - // Leave original value on the frame if we need to call runtime. - frame()->Dup(); - Result result = frame()->Pop(); - result.ToRegister(); - frame()->Spill(result.reg()); - Label runtime; - Label non_smi; - Label load_done; - JumpTarget end; + // If the object is a smi, we return null. + Condition is_smi = masm_->CheckSmi(obj.reg()); + null.Branch(is_smi); - __ JumpIfNotSmi(result.reg(), &non_smi); - __ SmiToInteger32(result.reg(), result.reg()); - __ cvtlsi2sd(xmm0, result.reg()); - __ jmp(&load_done); - __ bind(&non_smi); - __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - __ j(not_equal, &runtime); - __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset)); + // Check that the object is a JS object but take special care of JS + // functions to make sure they have 'Function' as their class. - __ bind(&load_done); - __ sqrtsd(xmm0, xmm0); - // A copy of the virtual frame to allow us to go to runtime after the - // JumpTarget jump. - Result scratch = allocator()->Allocate(); - VirtualFrame* clone = new VirtualFrame(frame()); - __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime); + __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg()); + null.Branch(below); - __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0); - frame()->Drop(1); - scratch.Unuse(); - end.Jump(&result); - // We only branch to runtime if we have an allocation error. - // Use the copy of the original frame as our current frame. 
- RegisterFile empty_regs; - SetFrame(clone, &empty_regs); - __ bind(&runtime); - result = frame()->CallRuntime(Runtime::kMath_sqrt, 1); + // As long as JS_FUNCTION_TYPE is the last instance type and it is + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for + // LAST_JS_OBJECT_TYPE. + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); + __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE); + function.Branch(equal); - end.Bind(&result); - frame()->Push(&result); + // Check if the constructor in the map is a function. + __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset)); + __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); + non_function_constructor.Branch(not_equal); + + // The obj register now contains the constructor function. Grab the + // instance class name from there. + __ movq(obj.reg(), + FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset)); + __ movq(obj.reg(), + FieldOperand(obj.reg(), + SharedFunctionInfo::kInstanceClassNameOffset)); + frame_->Push(&obj); + leave.Jump(); + + // Functions have class 'Function'. + function.Bind(); + frame_->Push(Factory::function_class_symbol()); + leave.Jump(); + + // Objects with a non-function constructor have class 'Object'. + non_function_constructor.Bind(); + frame_->Push(Factory::Object_symbol()); + leave.Jump(); + + // Non-JS objects have class null. + null.Bind(); + frame_->Push(Factory::null_value()); + + // All done. + leave.Bind(); } -void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) { +void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); - Load(args->at(0)); + JumpTarget leave; + Load(args->at(0)); // Load the object. + frame_->Dup(); + Result object = frame_->Pop(); + object.ToRegister(); + ASSERT(object.is_valid()); + // if (object->IsSmi()) return object. + Condition is_smi = masm_->CheckSmi(object.reg()); + leave.Branch(is_smi); + // It is a heap object - get map. + Result temp = allocator()->Allocate(); + ASSERT(temp.is_valid()); + // if (!object->IsJSValue()) return object. + __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg()); + leave.Branch(not_equal); + __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset)); + object.Unuse(); + frame_->SetElementAt(0, &temp); + leave.Bind(); +} + + +void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) { + ASSERT(args->length() == 2); + JumpTarget leave; + Load(args->at(0)); // Load the object. + Load(args->at(1)); // Load the value. Result value = frame_->Pop(); + Result object = frame_->Pop(); value.ToRegister(); - ASSERT(value.is_valid()); - Condition is_smi = masm_->CheckSmi(value.reg()); - value.Unuse(); - destination()->Split(is_smi); + object.ToRegister(); + + // if (object->IsSmi()) return value. + Condition is_smi = masm_->CheckSmi(object.reg()); + leave.Branch(is_smi, &value); + + // It is a heap object - get its map. + Result scratch = allocator_->Allocate(); + ASSERT(scratch.is_valid()); + // if (!object->IsJSValue()) return value. + __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg()); + leave.Branch(not_equal, &value); + + // Store the value. + __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg()); + // Update the write barrier. Save the value as it will be + // overwritten by the write barrier code and is needed afterward. 
+ Result duplicate_value = allocator_->Allocate(); + ASSERT(duplicate_value.is_valid()); + __ movq(duplicate_value.reg(), value.reg()); + // The object register is also overwritten by the write barrier and + // possibly aliased in the frame. + frame_->Spill(object.reg()); + __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(), + scratch.reg()); + object.Unuse(); + scratch.Unuse(); + duplicate_value.Unuse(); + + // Leave. + leave.Bind(&value); + frame_->Push(&value); } -void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) { - // Conditionally generate a log call. - // Args: - // 0 (literal string): The type of logging (corresponds to the flags). - // This is used to determine whether or not to generate the log call. - // 1 (string): Format string. Access the string at argument index 2 - // with '%2s' (see Logger::LogRuntime for all the formats). - // 2 (array): Arguments to the format string. - ASSERT_EQ(args->length(), 3); -#ifdef ENABLE_LOGGING_AND_PROFILING - if (ShouldGenerateLog(args->at(0))) { - Load(args->at(1)); - Load(args->at(2)); - frame_->CallRuntime(Runtime::kLog, 2); - } -#endif - // Finally, we're expected to leave a value on the top of the stack. - frame_->Push(Factory::undefined_value()); +void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + + // ArgumentsAccessStub expects the key in rdx and the formal + // parameter count in rax. + Load(args->at(0)); + Result key = frame_->Pop(); + // Explicitly create a constant result. + Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters()))); + // Call the shared stub to get to arguments[key]. + ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); + Result result = frame_->CallStub(&stub, &key, &count); + frame_->Push(&result); } @@ -4664,11 +5942,8 @@ void CodeGenerator::GenerateRandomHeapNumber( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - // To allocate a heap number, and ensure that it is not a smi, we - // call the runtime function FUnaryMinus on 0, returning the double - // -0.0. A new, distinct heap number is returned each time. - __ Push(Smi::FromInt(0)); - __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + // Allocate a heap number. 
+ __ CallRuntime(Runtime::kNumberAlloc, 0); __ movq(rbx, rax); __ bind(&heapnumber_allocated); @@ -4695,6 +5970,43 @@ void CodeGenerator::GenerateRandomHeapNumber( } +void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) { + ASSERT_EQ(2, args->length()); + + Load(args->at(0)); + Load(args->at(1)); + + StringAddStub stub(NO_STRING_ADD_FLAGS); + Result answer = frame_->CallStub(&stub, 2); + frame_->Push(&answer); +} + + +void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) { + ASSERT_EQ(3, args->length()); + + Load(args->at(0)); + Load(args->at(1)); + Load(args->at(2)); + + SubStringStub stub; + Result answer = frame_->CallStub(&stub, 3); + frame_->Push(&answer); +} + + +void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) { + ASSERT_EQ(2, args->length()); + + Load(args->at(0)); + Load(args->at(1)); + + StringCompareStub stub; + Result answer = frame_->CallStub(&stub, 2); + frame_->Push(&answer); +} + + void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) { ASSERT_EQ(args->length(), 4); @@ -5136,1723 +6448,1004 @@ void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) { } -void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { - ASSERT_EQ(args->length(), 1); - Load(args->at(0)); - TranscendentalCacheStub stub(TranscendentalCache::SIN); - Result result = frame_->CallStub(&stub, 1); - frame_->Push(&result); -} - - -void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { - ASSERT_EQ(args->length(), 1); - Load(args->at(0)); - TranscendentalCacheStub stub(TranscendentalCache::COS); - Result result = frame_->CallStub(&stub, 1); - frame_->Push(&result); -} - - -void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) { - ASSERT_EQ(2, args->length()); - +// Generates the Math.pow method. Only handles special cases and +// branches to the runtime system for everything else. Please note +// that this function assumes that the callsite has executed ToNumber +// on both arguments. +void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { + ASSERT(args->length() == 2); Load(args->at(0)); Load(args->at(1)); - StringAddStub stub(NO_STRING_ADD_FLAGS); - Result answer = frame_->CallStub(&stub, 2); - frame_->Push(&answer); -} - + Label allocate_return; + // Load the two operands while leaving the values on the frame. + frame()->Dup(); + Result exponent = frame()->Pop(); + exponent.ToRegister(); + frame()->Spill(exponent.reg()); + frame()->PushElementAt(1); + Result base = frame()->Pop(); + base.ToRegister(); + frame()->Spill(base.reg()); -void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) { - ASSERT_EQ(3, args->length()); + Result answer = allocator()->Allocate(); + ASSERT(answer.is_valid()); + ASSERT(!exponent.reg().is(base.reg())); + JumpTarget call_runtime; - Load(args->at(0)); - Load(args->at(1)); - Load(args->at(2)); + // Save 1 in xmm3 - we need this several times later on. + __ movl(answer.reg(), Immediate(1)); + __ cvtlsi2sd(xmm3, answer.reg()); - SubStringStub stub; - Result answer = frame_->CallStub(&stub, 3); - frame_->Push(&answer); -} + Label exponent_nonsmi; + Label base_nonsmi; + // If the exponent is a heap number go to that specific case. + __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi); + __ JumpIfNotSmi(base.reg(), &base_nonsmi); + // Optimized version when y is an integer. + Label powi; + __ SmiToInteger32(base.reg(), base.reg()); + __ cvtlsi2sd(xmm0, base.reg()); + __ jmp(&powi); + // exponent is smi and base is a heapnumber. 
+ __ bind(&base_nonsmi); + __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset), + Heap::kHeapNumberMapRootIndex); + call_runtime.Branch(not_equal); -void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) { - ASSERT_EQ(2, args->length()); + __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); - Load(args->at(0)); - Load(args->at(1)); + // Optimized version of pow if y is an integer. + __ bind(&powi); + __ SmiToInteger32(exponent.reg(), exponent.reg()); - StringCompareStub stub; - Result answer = frame_->CallStub(&stub, 2); - frame_->Push(&answer); -} + // Save exponent in base as we need to check if exponent is negative later. + // We know that base and exponent are in different registers. + __ movl(base.reg(), exponent.reg()); + // Get absolute value of exponent. + Label no_neg; + __ cmpl(exponent.reg(), Immediate(0)); + __ j(greater_equal, &no_neg); + __ negl(exponent.reg()); + __ bind(&no_neg); -void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { - ASSERT(args->length() == 1); - JumpTarget leave, null, function, non_function_constructor; - Load(args->at(0)); // Load the object. - Result obj = frame_->Pop(); - obj.ToRegister(); - frame_->Spill(obj.reg()); + // Load xmm1 with 1. + __ movsd(xmm1, xmm3); + Label while_true; + Label no_multiply; - // If the object is a smi, we return null. - Condition is_smi = masm_->CheckSmi(obj.reg()); - null.Branch(is_smi); + __ bind(&while_true); + __ shrl(exponent.reg(), Immediate(1)); + __ j(not_carry, &no_multiply); + __ mulsd(xmm1, xmm0); + __ bind(&no_multiply); + __ testl(exponent.reg(), exponent.reg()); + __ mulsd(xmm0, xmm0); + __ j(not_zero, &while_true); - // Check that the object is a JS object but take special care of JS - // functions to make sure they have 'Function' as their class. + // x has the original value of y - if y is negative return 1/result. + __ testl(base.reg(), base.reg()); + __ j(positive, &allocate_return); + // Special case if xmm1 has reached infinity. + __ movl(answer.reg(), Immediate(0x7FB00000)); + __ movd(xmm0, answer.reg()); + __ cvtss2sd(xmm0, xmm0); + __ ucomisd(xmm0, xmm1); + call_runtime.Branch(equal); + __ divsd(xmm3, xmm1); + __ movsd(xmm1, xmm3); + __ jmp(&allocate_return); - __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg()); - null.Branch(below); + // exponent (or both) is a heapnumber - no matter what we should now work + // on doubles. + __ bind(&exponent_nonsmi); + __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset), + Heap::kHeapNumberMapRootIndex); + call_runtime.Branch(not_equal); + __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset)); + // Test if exponent is nan. + __ ucomisd(xmm1, xmm1); + call_runtime.Branch(parity_even); - // As long as JS_FUNCTION_TYPE is the last instance type and it is - // right after LAST_JS_OBJECT_TYPE, we can avoid checking for - // LAST_JS_OBJECT_TYPE. 
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
+ Label base_not_smi;
+ Label handle_special_cases;
+ __ JumpIfNotSmi(base.reg(), &base_not_smi);
+ __ SmiToInteger32(base.reg(), base.reg());
+ __ cvtlsi2sd(xmm0, base.reg());
+ __ jmp(&handle_special_cases);
+ __ bind(&base_not_smi);
+ __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+ __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
+ __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ // base is NaN or +/-Infinity
+ call_runtime.Branch(greater_equal);
+ __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
- // Check if the constructor in the map is a function.
- __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- non_function_constructor.Branch(not_equal);
+ // base is in xmm0 and exponent is in xmm1.
+ __ bind(&handle_special_cases);
+ Label not_minus_half;
+ // Test for -0.5.
+ // Load xmm2 with -0.5.
+ __ movl(answer.reg(), Immediate(0xBF000000));
+ __ movd(xmm2, answer.reg());
+ __ cvtss2sd(xmm2, xmm2);
+ // xmm2 now has -0.5.
+ __ ucomisd(xmm2, xmm1);
+ __ j(not_equal, &not_minus_half);
- // The obj register now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ movq(obj.reg(),
- FieldOperand(obj.reg(),
- SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
+ // Calculates reciprocal of square root.
+ // Note that 1/sqrt(x) = sqrt(1/x))
+ __ divsd(xmm3, xmm0);
+ __ movsd(xmm1, xmm3);
+ __ sqrtsd(xmm1, xmm1);
+ __ jmp(&allocate_return);
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(Factory::function_class_symbol());
- leave.Jump();
+ // Test for 0.5.
+ __ bind(&not_minus_half);
+ // Load xmm2 with 0.5.
+ // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+ __ addsd(xmm2, xmm3);
+ // xmm2 now has 0.5.
+ __ ucomisd(xmm2, xmm1);
+ call_runtime.Branch(not_equal);
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
- leave.Jump();
+ // Calculates square root.
+ __ movsd(xmm1, xmm0);
+ __ sqrtsd(xmm1, xmm1);
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(Factory::null_value());
+ JumpTarget done;
+ Label failure, success;
+ __ bind(&allocate_return);
+ // Make a copy of the frame to enable us to handle allocation
+ // failure after the JumpTarget jump.
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
+ __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
+ // Remove the two original values from the frame - we only need those
+ // in the case where we branch to runtime.
+ frame()->Drop(2);
+ exponent.Unuse();
+ base.Unuse();
+ done.Jump(&answer);
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ // If we experience an allocation failure we branch to runtime.
+ __ bind(&failure);
+ call_runtime.Bind();
+ answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
- // All done.
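As an aside on the GenerateMathPow code in this hunk: its fast paths amount to square-and-multiply for integral exponents (with a reciprocal for negative ones) and sqrt-based shortcuts for exponents of +/-0.5, with everything else handed to the runtime. The following standalone C++ sketch shows the same strategy at the value level; it is illustrative only, not V8 code, and the 1024 range guard is a simplification of the real smi-exponent handling.

#include <cmath>
#include <cstdio>

static double MathPowSketch(double base, double exponent) {
  if (exponent == 0.5) return std::sqrt(base);          // pow(x, 0.5) -> sqrt(x)
  if (exponent == -0.5) return std::sqrt(1.0 / base);   // 1/sqrt(x) == sqrt(1/x)
  if (std::floor(exponent) == exponent && std::fabs(exponent) <= 1024.0) {
    // Square-and-multiply over |n|, then a reciprocal for negative n,
    // mirroring the shrl/mulsd loop and the final divsd above.
    int n = static_cast<int>(exponent);
    double result = 1.0;
    double x = base;
    for (int m = (n < 0) ? -n : n; m != 0; m >>= 1) {
      if (m & 1) result *= x;
      x *= x;
    }
    return (n < 0) ? 1.0 / result : result;
  }
  return std::pow(base, exponent);  // everything else goes to the runtime
}

int main() {
  std::printf("%g %g %g\n",
              MathPowSketch(2.0, 10.0),   // 1024
              MathPowSketch(2.0, -2.0),   // 0.25
              MathPowSketch(9.0, 0.5));   // 3
  return 0;
}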
- leave.Bind(); + done.Bind(&answer); + frame()->Push(&answer); } -void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) { - ASSERT(args->length() == 2); - JumpTarget leave; - Load(args->at(0)); // Load the object. - Load(args->at(1)); // Load the value. - Result value = frame_->Pop(); - Result object = frame_->Pop(); - value.ToRegister(); - object.ToRegister(); - - // if (object->IsSmi()) return value. - Condition is_smi = masm_->CheckSmi(object.reg()); - leave.Branch(is_smi, &value); - - // It is a heap object - get its map. - Result scratch = allocator_->Allocate(); - ASSERT(scratch.is_valid()); - // if (!object->IsJSValue()) return value. - __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg()); - leave.Branch(not_equal, &value); - - // Store the value. - __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg()); - // Update the write barrier. Save the value as it will be - // overwritten by the write barrier code and is needed afterward. - Result duplicate_value = allocator_->Allocate(); - ASSERT(duplicate_value.is_valid()); - __ movq(duplicate_value.reg(), value.reg()); - // The object register is also overwritten by the write barrier and - // possibly aliased in the frame. - frame_->Spill(object.reg()); - __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(), - scratch.reg()); - object.Unuse(); - scratch.Unuse(); - duplicate_value.Unuse(); - - // Leave. - leave.Bind(&value); - frame_->Push(&value); +void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { + ASSERT_EQ(args->length(), 1); + Load(args->at(0)); + TranscendentalCacheStub stub(TranscendentalCache::SIN); + Result result = frame_->CallStub(&stub, 1); + frame_->Push(&result); } -void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { - ASSERT(args->length() == 1); - JumpTarget leave; - Load(args->at(0)); // Load the object. - frame_->Dup(); - Result object = frame_->Pop(); - object.ToRegister(); - ASSERT(object.is_valid()); - // if (object->IsSmi()) return object. - Condition is_smi = masm_->CheckSmi(object.reg()); - leave.Branch(is_smi); - // It is a heap object - get map. - Result temp = allocator()->Allocate(); - ASSERT(temp.is_valid()); - // if (!object->IsJSValue()) return object. - __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg()); - leave.Branch(not_equal); - __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset)); - object.Unuse(); - frame_->SetElementAt(0, &temp); - leave.Bind(); +void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { + ASSERT_EQ(args->length(), 1); + Load(args->at(0)); + TranscendentalCacheStub stub(TranscendentalCache::COS); + Result result = frame_->CallStub(&stub, 1); + frame_->Push(&result); } -// ----------------------------------------------------------------------------- -// CodeGenerator implementation of Expressions - -void CodeGenerator::LoadAndSpill(Expression* expression) { - // TODO(x64): No architecture specific code. Move to shared location. - ASSERT(in_spilled_code()); - set_in_spilled_code(false); - Load(expression); - frame_->SpillAll(); - set_in_spilled_code(true); -} - +// Generates the Math.sqrt method. Please note - this function assumes that +// the callsite has executed ToNumber on the argument. 
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); -void CodeGenerator::Load(Expression* expr) { -#ifdef DEBUG - int original_height = frame_->height(); -#endif - ASSERT(!in_spilled_code()); - JumpTarget true_target; - JumpTarget false_target; - ControlDestination dest(&true_target, &false_target, true); - LoadCondition(expr, &dest, false); + // Leave original value on the frame if we need to call runtime. + frame()->Dup(); + Result result = frame()->Pop(); + result.ToRegister(); + frame()->Spill(result.reg()); + Label runtime; + Label non_smi; + Label load_done; + JumpTarget end; - if (dest.false_was_fall_through()) { - // The false target was just bound. - JumpTarget loaded; - frame_->Push(Factory::false_value()); - // There may be dangling jumps to the true target. - if (true_target.is_linked()) { - loaded.Jump(); - true_target.Bind(); - frame_->Push(Factory::true_value()); - loaded.Bind(); - } + __ JumpIfNotSmi(result.reg(), &non_smi); + __ SmiToInteger32(result.reg(), result.reg()); + __ cvtlsi2sd(xmm0, result.reg()); + __ jmp(&load_done); + __ bind(&non_smi); + __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset), + Heap::kHeapNumberMapRootIndex); + __ j(not_equal, &runtime); + __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset)); - } else if (dest.is_used()) { - // There is true, and possibly false, control flow (with true as - // the fall through). - JumpTarget loaded; - frame_->Push(Factory::true_value()); - if (false_target.is_linked()) { - loaded.Jump(); - false_target.Bind(); - frame_->Push(Factory::false_value()); - loaded.Bind(); - } + __ bind(&load_done); + __ sqrtsd(xmm0, xmm0); + // A copy of the virtual frame to allow us to go to runtime after the + // JumpTarget jump. + Result scratch = allocator()->Allocate(); + VirtualFrame* clone = new VirtualFrame(frame()); + __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime); - } else { - // We have a valid value on top of the frame, but we still may - // have dangling jumps to the true and false targets from nested - // subexpressions (eg, the left subexpressions of the - // short-circuited boolean operators). - ASSERT(has_valid_frame()); - if (true_target.is_linked() || false_target.is_linked()) { - JumpTarget loaded; - loaded.Jump(); // Don't lose the current TOS. - if (true_target.is_linked()) { - true_target.Bind(); - frame_->Push(Factory::true_value()); - if (false_target.is_linked()) { - loaded.Jump(); - } - } - if (false_target.is_linked()) { - false_target.Bind(); - frame_->Push(Factory::false_value()); - } - loaded.Bind(); - } - } + __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0); + frame()->Drop(1); + scratch.Unuse(); + end.Jump(&result); + // We only branch to runtime if we have an allocation error. + // Use the copy of the original frame as our current frame. + RegisterFile empty_regs; + SetFrame(clone, &empty_regs); + __ bind(&runtime); + result = frame()->CallRuntime(Runtime::kMath_sqrt, 1); - ASSERT(has_valid_frame()); - ASSERT(frame_->height() == original_height + 1); + end.Bind(&result); + frame()->Push(&result); } -// Emit code to load the value of an expression to the top of the -// frame. If the expression is boolean-valued it may be compiled (or -// partially compiled) into control flow to the control destination. -// If force_control is true, control flow is forced. 
-void CodeGenerator::LoadCondition(Expression* x, - ControlDestination* dest, - bool force_control) { - ASSERT(!in_spilled_code()); - int original_height = frame_->height(); +void CodeGenerator::VisitCallRuntime(CallRuntime* node) { + if (CheckForInlineRuntimeCall(node)) { + return; + } - { CodeGenState new_state(this, dest); - Visit(x); + ZoneList<Expression*>* args = node->arguments(); + Comment cmnt(masm_, "[ CallRuntime"); + Runtime::Function* function = node->function(); - // If we hit a stack overflow, we may not have actually visited - // the expression. In that case, we ensure that we have a - // valid-looking frame state because we will continue to generate - // code as we unwind the C++ stack. - // - // It's possible to have both a stack overflow and a valid frame - // state (eg, a subexpression overflowed, visiting it returned - // with a dummied frame state, and visiting this expression - // returned with a normal-looking state). - if (HasStackOverflow() && - !dest->is_used() && - frame_->height() == original_height) { - dest->Goto(true); - } + if (function == NULL) { + // Push the builtins object found in the current global object. + Result temp = allocator()->Allocate(); + ASSERT(temp.is_valid()); + __ movq(temp.reg(), GlobalObject()); + __ movq(temp.reg(), + FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset)); + frame_->Push(&temp); } - if (force_control && !dest->is_used()) { - // Convert the TOS value into flow to the control destination. - // TODO(X64): Make control flow to control destinations work. - ToBoolean(dest); + // Push the arguments ("left-to-right"). + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Load(args->at(i)); } - ASSERT(!(force_control && !dest->is_used())); - ASSERT(dest->is_used() || frame_->height() == original_height + 1); -} - - -// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and -// convert it to a boolean in the condition code register or jump to -// 'false_target'/'true_target' as appropriate. -void CodeGenerator::ToBoolean(ControlDestination* dest) { - Comment cmnt(masm_, "[ ToBoolean"); - - // The value to convert should be popped from the frame. - Result value = frame_->Pop(); - value.ToRegister(); - - if (value.is_number()) { - // Fast case if TypeInfo indicates only numbers. - if (FLAG_debug_code) { - __ AbortIfNotNumber(value.reg()); - } - // Smi => false iff zero. - __ SmiCompare(value.reg(), Smi::FromInt(0)); - if (value.is_smi()) { - value.Unuse(); - dest->Split(not_zero); - } else { - dest->false_target()->Branch(equal); - Condition is_smi = masm_->CheckSmi(value.reg()); - dest->true_target()->Branch(is_smi); - __ xorpd(xmm0, xmm0); - __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset)); - value.Unuse(); - dest->Split(not_zero); - } + if (function == NULL) { + // Call the JS runtime function. + frame_->Push(node->name()); + Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET, + arg_count, + loop_nesting_); + frame_->RestoreContextRegister(); + frame_->Push(&answer); } else { - // Fast case checks. - // 'false' => false. - __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex); - dest->false_target()->Branch(equal); - - // 'true' => true. - __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex); - dest->true_target()->Branch(equal); - - // 'undefined' => false. - __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex); - dest->false_target()->Branch(equal); - - // Smi => false iff zero. 
- __ SmiCompare(value.reg(), Smi::FromInt(0)); - dest->false_target()->Branch(equal); - Condition is_smi = masm_->CheckSmi(value.reg()); - dest->true_target()->Branch(is_smi); - - // Call the stub for all other cases. - frame_->Push(&value); // Undo the Pop() from above. - ToBooleanStub stub; - Result temp = frame_->CallStub(&stub, 1); - // Convert the result to a condition code. - __ testq(temp.reg(), temp.reg()); - temp.Unuse(); - dest->Split(not_equal); + // Call the C runtime function. + Result answer = frame_->CallRuntime(function, arg_count); + frame_->Push(&answer); } } -void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) { - UNIMPLEMENTED(); - // TODO(X64): Implement security policy for loads of smis. -} - - -bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { - return false; -} - -//------------------------------------------------------------------------------ -// CodeGenerator implementation of variables, lookups, and stores. +void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { + Comment cmnt(masm_, "[ UnaryOperation"); -Reference::Reference(CodeGenerator* cgen, - Expression* expression, - bool persist_after_get) - : cgen_(cgen), - expression_(expression), - type_(ILLEGAL), - persist_after_get_(persist_after_get) { - cgen->LoadReference(this); -} + Token::Value op = node->op(); + if (op == Token::NOT) { + // Swap the true and false targets but keep the same actual label + // as the fall through. + destination()->Invert(); + LoadCondition(node->expression(), destination(), true); + // Swap the labels back. + destination()->Invert(); -Reference::~Reference() { - ASSERT(is_unloaded() || is_illegal()); -} + } else if (op == Token::DELETE) { + Property* property = node->expression()->AsProperty(); + if (property != NULL) { + Load(property->obj()); + Load(property->key()); + Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2); + frame_->Push(&answer); + return; + } + Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); + if (variable != NULL) { + Slot* slot = variable->slot(); + if (variable->is_global()) { + LoadGlobal(); + frame_->Push(variable->name()); + Result answer = frame_->InvokeBuiltin(Builtins::DELETE, + CALL_FUNCTION, 2); + frame_->Push(&answer); + return; -void CodeGenerator::LoadReference(Reference* ref) { - // References are loaded from both spilled and unspilled code. Set the - // state to unspilled to allow that (and explicitly spill after - // construction at the construction sites). - bool was_in_spilled_code = in_spilled_code_; - in_spilled_code_ = false; + } else if (slot != NULL && slot->type() == Slot::LOOKUP) { + // Call the runtime to look up the context holding the named + // variable. Sync the virtual frame eagerly so we can push the + // arguments directly into place. + frame_->SyncRange(0, frame_->element_count() - 1); + frame_->EmitPush(rsi); + frame_->EmitPush(variable->name()); + Result context = frame_->CallRuntime(Runtime::kLookupContext, 2); + ASSERT(context.is_register()); + frame_->EmitPush(context.reg()); + context.Unuse(); + frame_->EmitPush(variable->name()); + Result answer = frame_->InvokeBuiltin(Builtins::DELETE, + CALL_FUNCTION, 2); + frame_->Push(&answer); + return; + } - Comment cmnt(masm_, "[ LoadReference"); - Expression* e = ref->expression(); - Property* property = e->AsProperty(); - Variable* var = e->AsVariableProxy()->AsVariable(); + // Default: Result of deleting non-global, not dynamically + // introduced variables is false. 
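The ToBoolean helper removed above inlines the common cases of ECMA-262 section 9.2 and defers the rest to ToBooleanStub. A standalone sketch of the underlying semantics follows; it is not V8 code, the TaggedSketch type is an invented stand-in for a tagged value, and V8's special "undetectable" objects are ignored here.

#include <cmath>

enum TypeSketch { kUndefined, kNull, kBoolean, kNumber, kString, kObject };

struct TaggedSketch {
  TypeSketch type;
  bool boolean_value;
  double number_value;
  const char* string_value;
};

static bool ToBooleanSketch(const TaggedSketch& v) {
  switch (v.type) {
    case kUndefined:
    case kNull:    return false;
    case kBoolean: return v.boolean_value;
    case kNumber:  return v.number_value != 0.0 && !std::isnan(v.number_value);
    case kString:  return v.string_value[0] != '\0';  // only "" is false
    case kObject:  return true;  // ignoring undetectable objects
  }
  return true;
}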
+ frame_->Push(Factory::false_value()); - if (property != NULL) { - // The expression is either a property or a variable proxy that rewrites - // to a property. - Load(property->obj()); - if (property->key()->IsPropertyName()) { - ref->set_type(Reference::NAMED); } else { - Load(property->key()); - ref->set_type(Reference::KEYED); + // Default: Result of deleting expressions is true. + Load(node->expression()); // may have side-effects + frame_->SetElementAt(0, Factory::true_value()); } - } else if (var != NULL) { - // The expression is a variable proxy that does not rewrite to a - // property. Global variables are treated as named property references. - if (var->is_global()) { - // If rax is free, the register allocator prefers it. Thus the code - // generator will load the global object into rax, which is where - // LoadIC wants it. Most uses of Reference call LoadIC directly - // after the reference is created. - frame_->Spill(rax); - LoadGlobal(); - ref->set_type(Reference::NAMED); + + } else if (op == Token::TYPEOF) { + // Special case for loading the typeof expression; see comment on + // LoadTypeofExpression(). + LoadTypeofExpression(node->expression()); + Result answer = frame_->CallRuntime(Runtime::kTypeof, 1); + frame_->Push(&answer); + + } else if (op == Token::VOID) { + Expression* expression = node->expression(); + if (expression && expression->AsLiteral() && ( + expression->AsLiteral()->IsTrue() || + expression->AsLiteral()->IsFalse() || + expression->AsLiteral()->handle()->IsNumber() || + expression->AsLiteral()->handle()->IsString() || + expression->AsLiteral()->handle()->IsJSRegExp() || + expression->AsLiteral()->IsNull())) { + // Omit evaluating the value of the primitive literal. + // It will be discarded anyway, and can have no side effect. + frame_->Push(Factory::undefined_value()); } else { - ASSERT(var->slot() != NULL); - ref->set_type(Reference::SLOT); + Load(node->expression()); + frame_->SetElementAt(0, Factory::undefined_value()); } - } else { - // Anything else is a runtime error. - Load(e); - frame_->CallRuntime(Runtime::kThrowReferenceError, 1); - } - - in_spilled_code_ = was_in_spilled_code; -} + } else { + bool can_overwrite = + (node->expression()->AsBinaryOperation() != NULL && + node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); + UnaryOverwriteMode overwrite = + can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; + bool no_negative_zero = node->expression()->no_negative_zero(); + Load(node->expression()); + switch (op) { + case Token::NOT: + case Token::DELETE: + case Token::TYPEOF: + UNREACHABLE(); // handled above + break; -void CodeGenerator::UnloadReference(Reference* ref) { - // Pop a reference from the stack while preserving TOS. - Comment cmnt(masm_, "[ UnloadReference"); - frame_->Nip(ref->size()); - ref->set_unloaded(); -} + case Token::SUB: { + GenericUnaryOpStub stub( + Token::SUB, + overwrite, + no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero); + Result operand = frame_->Pop(); + Result answer = frame_->CallStub(&stub, &operand); + answer.set_type_info(TypeInfo::Number()); + frame_->Push(&answer); + break; + } + case Token::BIT_NOT: { + // Smi check. 
+ JumpTarget smi_label; + JumpTarget continue_label; + Result operand = frame_->Pop(); + operand.ToRegister(); -Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { - // Currently, this assertion will fail if we try to assign to - // a constant variable that is constant because it is read-only - // (such as the variable referring to a named function expression). - // We need to implement assignments to read-only variables. - // Ideally, we should do this during AST generation (by converting - // such assignments into expression statements); however, in general - // we may not be able to make the decision until past AST generation, - // that is when the entire program is known. - ASSERT(slot != NULL); - int index = slot->index(); - switch (slot->type()) { - case Slot::PARAMETER: - return frame_->ParameterAt(index); + Condition is_smi = masm_->CheckSmi(operand.reg()); + smi_label.Branch(is_smi, &operand); - case Slot::LOCAL: - return frame_->LocalAt(index); + GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); + Result answer = frame_->CallStub(&stub, &operand); + continue_label.Jump(&answer); - case Slot::CONTEXT: { - // Follow the context chain if necessary. - ASSERT(!tmp.is(rsi)); // do not overwrite context register - Register context = rsi; - int chain_length = scope()->ContextChainLength(slot->var()->scope()); - for (int i = 0; i < chain_length; i++) { - // Load the closure. - // (All contexts, even 'with' contexts, have a closure, - // and it is the same for all contexts inside a function. - // There is no need to go to the function context first.) - __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); - // Load the function context (which is the incoming, outer context). - __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset)); - context = tmp; + smi_label.Bind(&answer); + answer.ToRegister(); + frame_->Spill(answer.reg()); + __ SmiNot(answer.reg(), answer.reg()); + continue_label.Bind(&answer); + answer.set_type_info(TypeInfo::Smi()); + frame_->Push(&answer); + break; } - // We may have a 'with' context now. Get the function context. - // (In fact this mov may never be the needed, since the scope analysis - // may not permit a direct context access in this case and thus we are - // always at a function context. However it is safe to dereference be- - // cause the function context of a function context is itself. Before - // deleting this mov we should try to create a counter-example first, - // though...) - __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); - return ContextOperand(tmp, index); - } - default: - UNREACHABLE(); - return Operand(rsp, 0); - } -} - - -Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot, - Result tmp, - JumpTarget* slow) { - ASSERT(slot->type() == Slot::CONTEXT); - ASSERT(tmp.is_register()); - Register context = rsi; + case Token::ADD: { + // Smi check. + JumpTarget continue_label; + Result operand = frame_->Pop(); + TypeInfo operand_info = operand.type_info(); + operand.ToRegister(); + Condition is_smi = masm_->CheckSmi(operand.reg()); + continue_label.Branch(is_smi, &operand); + frame_->Push(&operand); + Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER, + CALL_FUNCTION, 1); - for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { - if (s->num_heap_slots() > 0) { - if (s->calls_eval()) { - // Check that extension is NULL. 
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), - Immediate(0)); - slow->Branch(not_equal, not_taken); + continue_label.Bind(&answer); + if (operand_info.IsSmi()) { + answer.set_type_info(TypeInfo::Smi()); + } else if (operand_info.IsInteger32()) { + answer.set_type_info(TypeInfo::Integer32()); + } else { + answer.set_type_info(TypeInfo::Number()); + } + frame_->Push(&answer); + break; } - __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); - __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); - context = tmp.reg(); + default: + UNREACHABLE(); } } - // Check that last extension is NULL. - __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); - slow->Branch(not_equal, not_taken); - __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX)); - return ContextOperand(tmp.reg(), slot->index()); } -void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { - if (slot->type() == Slot::LOOKUP) { - ASSERT(slot->var()->is_dynamic()); - - JumpTarget slow; - JumpTarget done; - Result value; - - // Generate fast case for loading from slots that correspond to - // local/global variables or arguments unless they are shadowed by - // eval-introduced bindings. - EmitDynamicLoadFromSlotFastCase(slot, - typeof_state, - &value, - &slow, - &done); - - slow.Bind(); - // A runtime call is inevitable. We eagerly sync frame elements - // to memory so that we can push the arguments directly into place - // on top of the frame. - frame_->SyncRange(0, frame_->element_count() - 1); - frame_->EmitPush(rsi); - __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT); - frame_->EmitPush(kScratchRegister); - if (typeof_state == INSIDE_TYPEOF) { - value = - frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); - } else { - value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2); - } - - done.Bind(&value); - frame_->Push(&value); +// The value in dst was optimistically incremented or decremented. +// The result overflowed or was not smi tagged. Call into the runtime +// to convert the argument to a number, and call the specialized add +// or subtract stub. The result is left in dst. +class DeferredPrefixCountOperation: public DeferredCode { + public: + DeferredPrefixCountOperation(Register dst, + bool is_increment, + TypeInfo input_type) + : dst_(dst), is_increment_(is_increment), input_type_(input_type) { + set_comment("[ DeferredCountOperation"); + } - } else if (slot->var()->mode() == Variable::CONST) { - // Const slots may contain 'the hole' value (the constant hasn't been - // initialized yet) which needs to be converted into the 'undefined' - // value. - // - // We currently spill the virtual frame because constants use the - // potentially unsafe direct-frame access of SlotOperand. 
- VirtualFrame::SpilledScope spilled_scope; - Comment cmnt(masm_, "[ Load const"); - JumpTarget exit; - __ movq(rcx, SlotOperand(slot, rcx)); - __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); - exit.Branch(not_equal); - __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex); - exit.Bind(); - frame_->EmitPush(rcx); + virtual void Generate(); - } else if (slot->type() == Slot::PARAMETER) { - frame_->PushParameterAt(slot->index()); + private: + Register dst_; + bool is_increment_; + TypeInfo input_type_; +}; - } else if (slot->type() == Slot::LOCAL) { - frame_->PushLocalAt(slot->index()); +void DeferredPrefixCountOperation::Generate() { + Register left; + if (input_type_.IsNumber()) { + left = dst_; } else { - // The other remaining slot types (LOOKUP and GLOBAL) cannot reach - // here. - // - // The use of SlotOperand below is safe for an unspilled frame - // because it will always be a context slot. - ASSERT(slot->type() == Slot::CONTEXT); - Result temp = allocator_->Allocate(); - ASSERT(temp.is_valid()); - __ movq(temp.reg(), SlotOperand(slot, temp.reg())); - frame_->Push(&temp); + __ push(dst_); + __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); + left = rax; } + + GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, + NO_OVERWRITE, + NO_GENERIC_BINARY_FLAGS, + TypeInfo::Number()); + stub.GenerateCall(masm_, left, Smi::FromInt(1)); + + if (!dst_.is(rax)) __ movq(dst_, rax); } -void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot, - TypeofState state) { - LoadFromSlot(slot, state); +// The value in dst was optimistically incremented or decremented. +// The result overflowed or was not smi tagged. Call into the runtime +// to convert the argument to a number. Update the original value in +// old. Call the specialized add or subtract stub. The result is +// left in dst. +class DeferredPostfixCountOperation: public DeferredCode { + public: + DeferredPostfixCountOperation(Register dst, + Register old, + bool is_increment, + TypeInfo input_type) + : dst_(dst), + old_(old), + is_increment_(is_increment), + input_type_(input_type) { + set_comment("[ DeferredCountOperation"); + } - // Bail out quickly if we're not using lazy arguments allocation. - if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return; + virtual void Generate(); - // ... or if the slot isn't a non-parameter arguments slot. - if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return; + private: + Register dst_; + Register old_; + bool is_increment_; + TypeInfo input_type_; +}; - // Pop the loaded value from the stack. - Result value = frame_->Pop(); - // If the loaded value is a constant, we know if the arguments - // object has been lazily loaded yet. - if (value.is_constant()) { - if (value.handle()->IsTheHole()) { - Result arguments = StoreArgumentsObject(false); - frame_->Push(&arguments); - } else { - frame_->Push(&value); - } - return; +void DeferredPostfixCountOperation::Generate() { + Register left; + if (input_type_.IsNumber()) { + __ push(dst_); // Save the input to use as the old value. + left = dst_; + } else { + __ push(dst_); + __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); + __ push(rax); // Save the result of ToNumber to use as the old value. + left = rax; } - // The loaded value is in a register. If it is the sentinel that - // indicates that we haven't loaded the arguments object yet, we - // need to do it now. 
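The two deferred classes above follow V8's usual pattern for ++/--: do the smi add or subtract optimistically inline and fall back to a slower ToNumber-plus-generic-stub path only when the operand is not a smi or the inline operation overflows. A standalone C++ sketch of that control flow (not V8 code; the names and the 32-bit smi payload are illustrative simplifications):

#include <cstdint>
#include <limits>

// Fast path: models the SmiAddConstant/SmiSubConstant calls, which bail out
// to the deferred code's entry label on overflow or a non-smi input.
static bool TryInlineCount(int32_t smi_payload, bool is_increment, int32_t* out) {
  int64_t wide = static_cast<int64_t>(smi_payload) + (is_increment ? 1 : -1);
  if (wide < std::numeric_limits<int32_t>::min() ||
      wide > std::numeric_limits<int32_t>::max()) {
    return false;  // take the deferred (slow) path instead
  }
  *out = static_cast<int32_t>(wide);
  return true;
}

// Slow path: models DeferredPrefixCountOperation::Generate, which runs
// ToNumber on the input and then adds or subtracts 1 via the generic stub.
static double DeferredCount(double to_number_result, bool is_increment) {
  return to_number_result + (is_increment ? 1.0 : -1.0);
}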
- JumpTarget exit; - __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex); - frame_->Push(&value); - exit.Branch(not_equal); - Result arguments = StoreArgumentsObject(false); - frame_->SetElementAt(0, &arguments); - exit.Bind(); + GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, + NO_OVERWRITE, + NO_GENERIC_BINARY_FLAGS, + TypeInfo::Number()); + stub.GenerateCall(masm_, left, Smi::FromInt(1)); + + if (!dst_.is(rax)) __ movq(dst_, rax); + __ pop(old_); } -void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { - if (slot->type() == Slot::LOOKUP) { - ASSERT(slot->var()->is_dynamic()); - - // For now, just do a runtime call. Since the call is inevitable, - // we eagerly sync the virtual frame so we can directly push the - // arguments into place. - frame_->SyncRange(0, frame_->element_count() - 1); +void CodeGenerator::VisitCountOperation(CountOperation* node) { + Comment cmnt(masm_, "[ CountOperation"); - frame_->EmitPush(rsi); - frame_->EmitPush(slot->var()->name()); + bool is_postfix = node->is_postfix(); + bool is_increment = node->op() == Token::INC; - Result value; - if (init_state == CONST_INIT) { - // Same as the case for a normal store, but ignores attribute - // (e.g. READ_ONLY) of context slot so that we can initialize const - // properties (introduced via eval("const foo = (some expr);")). Also, - // uses the current function context instead of the top context. - // - // Note that we must declare the foo upon entry of eval(), via a - // context slot declaration, but we cannot initialize it at the same - // time, because the const declaration may be at the end of the eval - // code (sigh...) and the const variable may have been used before - // (where its value is 'undefined'). Thus, we can only do the - // initialization when we actually encounter the expression and when - // the expression operands are defined and valid, and thus we need the - // split into 2 operations: declaration of the context slot followed - // by initialization. - value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); - } else { - value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3); - } - // Storing a variable must keep the (new) value on the expression - // stack. This is necessary for compiling chained assignment - // expressions. - frame_->Push(&value); - } else { - ASSERT(!slot->var()->is_dynamic()); + Variable* var = node->expression()->AsVariableProxy()->AsVariable(); + bool is_const = (var != NULL && var->mode() == Variable::CONST); - JumpTarget exit; - if (init_state == CONST_INIT) { - ASSERT(slot->var()->mode() == Variable::CONST); - // Only the first const initialization must be executed (the slot - // still contains 'the hole' value). When the assignment is executed, - // the code is identical to a normal store (see below). - // - // We spill the frame in the code below because the direct-frame - // access of SlotOperand is potentially unsafe with an unspilled - // frame. - VirtualFrame::SpilledScope spilled_scope; - Comment cmnt(masm_, "[ Init const"); - __ movq(rcx, SlotOperand(slot, rcx)); - __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); - exit.Branch(not_equal); - } + // Postfix operations need a stack slot under the reference to hold + // the old value while the new value is being stored. This is so that + // in the case that storing the new value requires a call, the old + // value will be in the frame to be spilled. + if (is_postfix) frame_->Push(Smi::FromInt(0)); - // We must execute the store. 
Storing a variable must keep the (new) - // value on the stack. This is necessary for compiling assignment - // expressions. - // - // Note: We will reach here even with slot->var()->mode() == - // Variable::CONST because of const declarations which will initialize - // consts to 'the hole' value and by doing so, end up calling this code. - if (slot->type() == Slot::PARAMETER) { - frame_->StoreToParameterAt(slot->index()); - } else if (slot->type() == Slot::LOCAL) { - frame_->StoreToLocalAt(slot->index()); - } else { - // The other slot types (LOOKUP and GLOBAL) cannot reach here. - // - // The use of SlotOperand below is safe for an unspilled frame - // because the slot is a context slot. - ASSERT(slot->type() == Slot::CONTEXT); - frame_->Dup(); - Result value = frame_->Pop(); - value.ToRegister(); - Result start = allocator_->Allocate(); - ASSERT(start.is_valid()); - __ movq(SlotOperand(slot, start.reg()), value.reg()); - // RecordWrite may destroy the value registers. - // - // TODO(204): Avoid actually spilling when the value is not - // needed (probably the common case). - frame_->Spill(value.reg()); - int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; - Result temp = allocator_->Allocate(); - ASSERT(temp.is_valid()); - __ RecordWrite(start.reg(), offset, value.reg(), temp.reg()); - // The results start, value, and temp are unused by going out of - // scope. + // A constant reference is not saved to, so the reference is not a + // compound assignment reference. + { Reference target(this, node->expression(), !is_const); + if (target.is_illegal()) { + // Spoof the virtual frame to have the expected height (one higher + // than on entry). + if (!is_postfix) frame_->Push(Smi::FromInt(0)); + return; } + target.TakeValue(); - exit.Bind(); - } -} - + Result new_value = frame_->Pop(); + new_value.ToRegister(); -Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( - Slot* slot, - TypeofState typeof_state, - JumpTarget* slow) { - // Check that no extension objects have been created by calls to - // eval from the current scope to the global scope. - Register context = rsi; - Result tmp = allocator_->Allocate(); - ASSERT(tmp.is_valid()); // All non-reserved registers were available. + Result old_value; // Only allocated in the postfix case. + if (is_postfix) { + // Allocate a temporary to preserve the old value. + old_value = allocator_->Allocate(); + ASSERT(old_value.is_valid()); + __ movq(old_value.reg(), new_value.reg()); - Scope* s = scope(); - while (s != NULL) { - if (s->num_heap_slots() > 0) { - if (s->calls_eval()) { - // Check that extension is NULL. - __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), - Immediate(0)); - slow->Branch(not_equal, not_taken); + // The return value for postfix operations is ToNumber(input). + // Keep more precise type info if the input is some kind of + // number already. If the input is not a number we have to wait + // for the deferred code to convert it. + if (new_value.type_info().IsNumber()) { + old_value.set_type_info(new_value.type_info()); } - // Load next context in chain. - __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); - __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); - context = tmp.reg(); } - // If no outer scope calls eval, we do not need to check more - // context extensions. If we have reached an eval scope, we check - // all extensions from this point. 
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; - s = s->outer_scope(); - } + // Ensure the new value is writable. + frame_->Spill(new_value.reg()); - if (s->is_eval_scope()) { - // Loop up the context chain. There is no frame effect so it is - // safe to use raw labels here. - Label next, fast; - if (!context.is(tmp.reg())) { - __ movq(tmp.reg(), context); + DeferredCode* deferred = NULL; + if (is_postfix) { + deferred = new DeferredPostfixCountOperation(new_value.reg(), + old_value.reg(), + is_increment, + new_value.type_info()); + } else { + deferred = new DeferredPrefixCountOperation(new_value.reg(), + is_increment, + new_value.type_info()); } - // Load map for comparison into register, outside loop. - __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex); - __ bind(&next); - // Terminate at global context. - __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset)); - __ j(equal, &fast); - // Check that extension is NULL. - __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0)); - slow->Branch(not_equal); - // Load next context in chain. - __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX)); - __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); - __ jmp(&next); - __ bind(&fast); - } - tmp.Unuse(); - // All extension objects were empty and it is safe to use a global - // load IC call. - LoadGlobal(); - frame_->Push(slot->var()->name()); - RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) - ? RelocInfo::CODE_TARGET - : RelocInfo::CODE_TARGET_CONTEXT; - Result answer = frame_->CallLoadIC(mode); - // A test rax instruction following the call signals that the inobject - // property case was inlined. Ensure that there is not a test rax - // instruction here. - masm_->nop(); - return answer; -} - - -void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot, - TypeofState typeof_state, - Result* result, - JumpTarget* slow, - JumpTarget* done) { - // Generate fast-case code for variables that might be shadowed by - // eval-introduced variables. Eval is used a lot without - // introducing variables. In those cases, we do not want to - // perform a runtime call for all variables in the scope - // containing the eval. - if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { - *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow); - done->Jump(result); - - } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { - Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); - Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); - if (potential_slot != NULL) { - // Generate fast case for locals that rewrite to slots. - // Allocate a fresh register to use as a temp in - // ContextSlotOperandCheckExtensions and to hold the result - // value. - *result = allocator_->Allocate(); - ASSERT(result->is_valid()); - __ movq(result->reg(), - ContextSlotOperandCheckExtensions(potential_slot, - *result, - slow)); - if (potential_slot->var()->mode() == Variable::CONST) { - __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex); - done->Branch(not_equal, result); - __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex); - } - done->Jump(result); - } else if (rewrite != NULL) { - // Generate fast case for argument loads. 
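The removed LoadFromGlobalSlotCheckExtensions above walks the scope chain (and, inside eval scopes, the runtime context chain) to prove that no eval- or with-introduced extension object can shadow the global before it uses the global load IC. A standalone sketch of the idea, collapsed into a single runtime walk for illustration (not V8 code; the CtxSketch type is an invented stand-in):

#include <cstddef>

struct CtxSketch {
  CtxSketch* previous;   // enclosing context
  bool has_extension;    // a non-NULL EXTENSION_INDEX slot in the real code
  bool is_global;        // terminates the walk
};

// Returns true when the fast global load IC may be used; false means the
// generated code would branch to the slow (runtime lookup) path instead.
static bool CanUseGlobalLoadICSketch(const CtxSketch* ctx) {
  for (; ctx != NULL; ctx = ctx->previous) {
    if (ctx->has_extension) return false;
    if (ctx->is_global) break;
  }
  return true;
}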
- Property* property = rewrite->AsProperty(); - if (property != NULL) { - VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); - Literal* key_literal = property->key()->AsLiteral(); - if (obj_proxy != NULL && - key_literal != NULL && - obj_proxy->IsArguments() && - key_literal->handle()->IsSmi()) { - // Load arguments object if there are no eval-introduced - // variables. Then load the argument from the arguments - // object using keyed load. - Result arguments = allocator()->Allocate(); - ASSERT(arguments.is_valid()); - __ movq(arguments.reg(), - ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), - arguments, - slow)); - frame_->Push(&arguments); - frame_->Push(key_literal->handle()); - *result = EmitKeyedLoad(); - done->Jump(result); - } - } + if (new_value.is_smi()) { + if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); } + } else { + __ JumpIfNotSmi(new_value.reg(), deferred->entry_label()); } - } -} - - -void CodeGenerator::LoadGlobal() { - if (in_spilled_code()) { - frame_->EmitPush(GlobalObject()); - } else { - Result temp = allocator_->Allocate(); - __ movq(temp.reg(), GlobalObject()); - frame_->Push(&temp); - } -} - - -void CodeGenerator::LoadGlobalReceiver() { - Result temp = allocator_->Allocate(); - Register reg = temp.reg(); - __ movq(reg, GlobalObject()); - __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset)); - frame_->Push(&temp); -} - - -ArgumentsAllocationMode CodeGenerator::ArgumentsMode() { - if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION; - ASSERT(scope()->arguments_shadow() != NULL); - // We don't want to do lazy arguments allocation for functions that - // have heap-allocated contexts, because it interfers with the - // uninitialized const tracking in the context objects. - return (scope()->num_heap_slots() > 0) - ? EAGER_ARGUMENTS_ALLOCATION - : LAZY_ARGUMENTS_ALLOCATION; -} - - -Result CodeGenerator::StoreArgumentsObject(bool initial) { - ArgumentsAllocationMode mode = ArgumentsMode(); - ASSERT(mode != NO_ARGUMENTS_ALLOCATION); - - Comment cmnt(masm_, "[ store arguments object"); - if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) { - // When using lazy arguments allocation, we store the hole value - // as a sentinel indicating that the arguments object hasn't been - // allocated yet. - frame_->Push(Factory::the_hole_value()); - } else { - ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); - frame_->PushFunction(); - frame_->PushReceiverSlotAddress(); - frame_->Push(Smi::FromInt(scope()->num_parameters())); - Result result = frame_->CallStub(&stub, 3); - frame_->Push(&result); - } - - - Variable* arguments = scope()->arguments()->var(); - Variable* shadow = scope()->arguments_shadow()->var(); - ASSERT(arguments != NULL && arguments->slot() != NULL); - ASSERT(shadow != NULL && shadow->slot() != NULL); - JumpTarget done; - bool skip_arguments = false; - if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) { - // We have to skip storing into the arguments slot if it has - // already been written to. This can happen if the a function - // has a local variable named 'arguments'. - LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF); - Result probe = frame_->Pop(); - if (probe.is_constant()) { - // We have to skip updating the arguments object if it has been - // assigned a proper value. 
- skip_arguments = !probe.handle()->IsTheHole(); + if (is_increment) { + __ SmiAddConstant(new_value.reg(), + new_value.reg(), + Smi::FromInt(1), + deferred->entry_label()); } else { - __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex); - probe.Unuse(); - done.Branch(not_equal); + __ SmiSubConstant(new_value.reg(), + new_value.reg(), + Smi::FromInt(1), + deferred->entry_label()); } - } - if (!skip_arguments) { - StoreToSlot(arguments->slot(), NOT_CONST_INIT); - if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); - } - StoreToSlot(shadow->slot(), NOT_CONST_INIT); - return frame_->Pop(); -} - - -void CodeGenerator::LoadTypeofExpression(Expression* expr) { - // Special handling of identifiers as subexpressions of typeof. - Variable* variable = expr->AsVariableProxy()->AsVariable(); - if (variable != NULL && !variable->is_this() && variable->is_global()) { - // For a global variable we build the property reference - // <global>.<variable> and perform a (regular non-contextual) property - // load to make sure we do not get reference errors. - Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); - Literal key(variable->name()); - Property property(&global, &key, RelocInfo::kNoPosition); - Reference ref(this, &property); - ref.GetValue(); - } else if (variable != NULL && variable->slot() != NULL) { - // For a variable that rewrites to a slot, we signal it is the immediate - // subexpression of a typeof. - LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF); - } else { - // Anything else can be handled normally. - Load(expr); - } -} + deferred->BindExit(); + // Postfix count operations return their input converted to + // number. The case when the input is already a number is covered + // above in the allocation code for old_value. + if (is_postfix && !new_value.type_info().IsNumber()) { + old_value.set_type_info(TypeInfo::Number()); + } -static bool CouldBeNaN(const Result& result) { - if (result.type_info().IsSmi()) return false; - if (result.type_info().IsInteger32()) return false; - if (!result.is_constant()) return true; - if (!result.handle()->IsHeapNumber()) return false; - return isnan(HeapNumber::cast(*result.handle())->value()); -} + new_value.set_type_info(TypeInfo::Number()); + // Postfix: store the old value in the allocated slot under the + // reference. + if (is_postfix) frame_->SetElementAt(target.size(), &old_value); -// Convert from signed to unsigned comparison to match the way EFLAGS are set -// by FPU and XMM compare instructions. -static Condition DoubleCondition(Condition cc) { - switch (cc) { - case less: return below; - case equal: return equal; - case less_equal: return below_equal; - case greater: return above; - case greater_equal: return above_equal; - default: UNREACHABLE(); + frame_->Push(&new_value); + // Non-constant: update the reference. + if (!is_const) target.SetValue(NOT_CONST_INIT); } - UNREACHABLE(); - return equal; -} - -void CodeGenerator::Comparison(AstNode* node, - Condition cc, - bool strict, - ControlDestination* dest) { - // Strict only makes sense for equality comparisons. - ASSERT(!strict || cc == equal); + // Postfix: drop the new value and use the old. + if (is_postfix) frame_->Drop(); +} - Result left_side; - Result right_side; - // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. 
- if (cc == greater || cc == less_equal) { - cc = ReverseCondition(cc); - left_side = frame_->Pop(); - right_side = frame_->Pop(); - } else { - right_side = frame_->Pop(); - left_side = frame_->Pop(); - } - ASSERT(cc == less || cc == equal || cc == greater_equal); - // If either side is a constant smi, optimize the comparison. - bool left_side_constant_smi = false; - bool left_side_constant_null = false; - bool left_side_constant_1_char_string = false; - if (left_side.is_constant()) { - left_side_constant_smi = left_side.handle()->IsSmi(); - left_side_constant_null = left_side.handle()->IsNull(); - left_side_constant_1_char_string = - (left_side.handle()->IsString() && - String::cast(*left_side.handle())->length() == 1 && - String::cast(*left_side.handle())->IsAsciiRepresentation()); - } - bool right_side_constant_smi = false; - bool right_side_constant_null = false; - bool right_side_constant_1_char_string = false; - if (right_side.is_constant()) { - right_side_constant_smi = right_side.handle()->IsSmi(); - right_side_constant_null = right_side.handle()->IsNull(); - right_side_constant_1_char_string = - (right_side.handle()->IsString() && - String::cast(*right_side.handle())->length() == 1 && - String::cast(*right_side.handle())->IsAsciiRepresentation()); - } - - if (left_side_constant_smi || right_side_constant_smi) { - if (left_side_constant_smi && right_side_constant_smi) { - // Trivial case, comparing two constants. - int left_value = Smi::cast(*left_side.handle())->value(); - int right_value = Smi::cast(*right_side.handle())->value(); - switch (cc) { - case less: - dest->Goto(left_value < right_value); - break; - case equal: - dest->Goto(left_value == right_value); - break; - case greater_equal: - dest->Goto(left_value >= right_value); - break; - default: - UNREACHABLE(); - } - } else { - // Only one side is a constant Smi. - // If left side is a constant Smi, reverse the operands. - // Since one side is a constant Smi, conversion order does not matter. - if (left_side_constant_smi) { - Result temp = left_side; - left_side = right_side; - right_side = temp; - cc = ReverseCondition(cc); - // This may re-introduce greater or less_equal as the value of cc. - // CompareStub and the inline code both support all values of cc. - } - // Implement comparison against a constant Smi, inlining the case - // where both sides are Smis. - left_side.ToRegister(); - Register left_reg = left_side.reg(); - Handle<Object> right_val = right_side.handle(); +void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) { + // According to ECMA-262 section 11.11, page 58, the binary logical + // operators must yield the result of one of the two expressions + // before any ToBoolean() conversions. This means that the value + // produced by a && or || operator is not necessarily a boolean. - // Here we split control flow to the stub call and inlined cases - // before finally splitting it to the control destination. We use - // a jump target and branching to duplicate the virtual frame at - // the first split. We manually handle the off-frame references - // by reconstituting them on the non-fall-through path. - JumpTarget is_smi; + // NOTE: If the left hand side produces a materialized value (not + // control flow), we force the right hand side to do the same. This + // is necessary because we assume that if we get control flow on the + // last path out of an expression we got it on all paths. 
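As the comment above notes (ECMA-262 section 11.11), '&&' and '||' yield one of the operand values rather than a boolean, which is why GenerateLogicalBooleanOperation threads control flow instead of materialising true/false. A standalone sketch of the '&&' case (not V8 code; the ValueSketch type and the callback are illustrative):

struct ValueSketch {
  bool truthy;       // the result of ToBoolean on the value
  const char* repr;  // stands in for the actual JS value
};

// 'left && right': if ToBoolean(left) is false the result is left itself and
// the right-hand side is never evaluated; otherwise the result is right's value.
static ValueSketch LogicalAndSketch(const ValueSketch& left,
                                    ValueSketch (*evaluate_right)()) {
  if (!left.truthy) return left;
  return evaluate_right();
}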
+ if (node->op() == Token::AND) { + JumpTarget is_true; + ControlDestination dest(&is_true, destination()->false_target(), true); + LoadCondition(node->left(), &dest, false); - if (left_side.is_smi()) { - if (FLAG_debug_code) { - __ AbortIfNotSmi(left_side.reg()); + if (dest.false_was_fall_through()) { + // The current false target was used as the fall-through. If + // there are no dangling jumps to is_true then the left + // subexpression was unconditionally false. Otherwise we have + // paths where we do have to evaluate the right subexpression. + if (is_true.is_linked()) { + // We need to compile the right subexpression. If the jump to + // the current false target was a forward jump then we have a + // valid frame, we have just bound the false target, and we + // have to jump around the code for the right subexpression. + if (has_valid_frame()) { + destination()->false_target()->Unuse(); + destination()->false_target()->Jump(); } + is_true.Bind(); + // The left subexpression compiled to control flow, so the + // right one is free to do so as well. + LoadCondition(node->right(), destination(), false); } else { - Condition left_is_smi = masm_->CheckSmi(left_side.reg()); - is_smi.Branch(left_is_smi); - - bool is_loop_condition = (node->AsExpression() != NULL) && - node->AsExpression()->is_loop_condition(); - if (!is_loop_condition && right_val->IsSmi()) { - // Right side is a constant smi and left side has been checked - // not to be a smi. - JumpTarget not_number; - __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset), - Factory::heap_number_map()); - not_number.Branch(not_equal, &left_side); - __ movsd(xmm1, - FieldOperand(left_reg, HeapNumber::kValueOffset)); - int value = Smi::cast(*right_val)->value(); - if (value == 0) { - __ xorpd(xmm0, xmm0); - } else { - Result temp = allocator()->Allocate(); - __ movl(temp.reg(), Immediate(value)); - __ cvtlsi2sd(xmm0, temp.reg()); - temp.Unuse(); - } - __ ucomisd(xmm1, xmm0); - // Jump to builtin for NaN. - not_number.Branch(parity_even, &left_side); - left_side.Unuse(); - dest->true_target()->Branch(DoubleCondition(cc)); - dest->false_target()->Jump(); - not_number.Bind(&left_side); - } - - // Setup and call the compare stub. - CompareStub stub(cc, strict, kCantBothBeNaN); - Result result = frame_->CallStub(&stub, &left_side, &right_side); - result.ToRegister(); - __ testq(result.reg(), result.reg()); - result.Unuse(); - dest->true_target()->Branch(cc); - dest->false_target()->Jump(); - - is_smi.Bind(); + // We have actually just jumped to or bound the current false + // target but the current control destination is not marked as + // used. + destination()->Use(false); } - left_side = Result(left_reg); - right_side = Result(right_val); - // Test smi equality and comparison by signed int comparison. - // Both sides are smis, so we can use an Immediate. - __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle())); - left_side.Unuse(); - right_side.Unuse(); - dest->Split(cc); - } - } else if (cc == equal && - (left_side_constant_null || right_side_constant_null)) { - // To make null checks efficient, we check if either the left side or - // the right side is the constant 'null'. - // If so, we optimize the code by inlining a null check instead of - // calling the (very) general runtime routine for checking equality. - Result operand = left_side_constant_null ? 
right_side : left_side; - right_side.Unuse(); - left_side.Unuse(); - operand.ToRegister(); - __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex); - if (strict) { - operand.Unuse(); - dest->Split(equal); - } else { - // The 'null' value is only equal to 'undefined' if using non-strict - // comparisons. - dest->true_target()->Branch(equal); - __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex); - dest->true_target()->Branch(equal); - Condition is_smi = masm_->CheckSmi(operand.reg()); - dest->false_target()->Branch(is_smi); + } else if (dest.is_used()) { + // The left subexpression compiled to control flow (and is_true + // was just bound), so the right is free to do so as well. + LoadCondition(node->right(), destination(), false); - // It can be an undetectable object. - // Use a scratch register in preference to spilling operand.reg(). - Result temp = allocator()->Allocate(); - ASSERT(temp.is_valid()); - __ movq(temp.reg(), - FieldOperand(operand.reg(), HeapObject::kMapOffset)); - __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - temp.Unuse(); - operand.Unuse(); - dest->Split(not_zero); - } - } else if (left_side_constant_1_char_string || - right_side_constant_1_char_string) { - if (left_side_constant_1_char_string && right_side_constant_1_char_string) { - // Trivial case, comparing two constants. - int left_value = String::cast(*left_side.handle())->Get(0); - int right_value = String::cast(*right_side.handle())->Get(0); - switch (cc) { - case less: - dest->Goto(left_value < right_value); - break; - case equal: - dest->Goto(left_value == right_value); - break; - case greater_equal: - dest->Goto(left_value >= right_value); - break; - default: - UNREACHABLE(); - } } else { - // Only one side is a constant 1 character string. - // If left side is a constant 1-character string, reverse the operands. - // Since one side is a constant string, conversion order does not matter. - if (left_side_constant_1_char_string) { - Result temp = left_side; - left_side = right_side; - right_side = temp; - cc = ReverseCondition(cc); - // This may reintroduce greater or less_equal as the value of cc. - // CompareStub and the inline code both support all values of cc. - } - // Implement comparison against a constant string, inlining the case - // where both sides are strings. - left_side.ToRegister(); - - // Here we split control flow to the stub call and inlined cases - // before finally splitting it to the control destination. We use - // a jump target and branching to duplicate the virtual frame at - // the first split. We manually handle the off-frame references - // by reconstituting them on the non-fall-through path. - JumpTarget is_not_string, is_string; - Register left_reg = left_side.reg(); - Handle<Object> right_val = right_side.handle(); - ASSERT(StringShape(String::cast(*right_val)).IsSymbol()); - Condition is_smi = masm()->CheckSmi(left_reg); - is_not_string.Branch(is_smi, &left_side); - Result temp = allocator_->Allocate(); - ASSERT(temp.is_valid()); - __ movq(temp.reg(), - FieldOperand(left_reg, HeapObject::kMapOffset)); - __ movzxbl(temp.reg(), - FieldOperand(temp.reg(), Map::kInstanceTypeOffset)); - // If we are testing for equality then make use of the symbol shortcut. - // Check if the left hand side has the same type as the right hand - // side (which is always a symbol). - if (cc == equal) { - Label not_a_symbol; - ASSERT(kSymbolTag != 0); - // Ensure that no non-strings have the symbol bit set. 
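The inlined null check in the removed code above accepts more than the literal null value when the comparison is non-strict. Written out as plain C++ for clarity, with a purely illustrative Kind enum standing in for the object checks:

#include <cstdio>

// Illustrative only: the dispatch the inlined 'x == null' code performs.
enum class Kind { kNull, kUndefined, kUndetectableObject, kSmi, kOther };

bool EqualsNull(Kind k, bool strict) {
  if (strict) return k == Kind::kNull;                 // 'x === null'
  return k == Kind::kNull || k == Kind::kUndefined ||  // 'x == null' also
         k == Kind::kUndetectableObject;               // matches these
}

int main() {
  std::printf("%d %d\n", EqualsNull(Kind::kUndefined, false),
              EqualsNull(Kind::kUndefined, true));     // 1 0
  return 0;
}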
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); - __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit. - __ j(zero, ¬_a_symbol); - // They are symbols, so do identity compare. - __ Cmp(left_reg, right_side.handle()); - dest->true_target()->Branch(equal); - dest->false_target()->Branch(not_equal); - __ bind(¬_a_symbol); - } - // Call the compare stub if the left side is not a flat ascii string. - __ andb(temp.reg(), - Immediate(kIsNotStringMask | - kStringRepresentationMask | - kStringEncodingMask)); - __ cmpb(temp.reg(), - Immediate(kStringTag | kSeqStringTag | kAsciiStringTag)); - temp.Unuse(); - is_string.Branch(equal, &left_side); + // We have a materialized value on the frame, so we exit with + // one on all paths. There are possibly also jumps to is_true + // from nested subexpressions. + JumpTarget pop_and_continue; + JumpTarget exit; - // Setup and call the compare stub. - is_not_string.Bind(&left_side); - CompareStub stub(cc, strict, kCantBothBeNaN); - Result result = frame_->CallStub(&stub, &left_side, &right_side); - result.ToRegister(); - __ testq(result.reg(), result.reg()); - result.Unuse(); - dest->true_target()->Branch(cc); - dest->false_target()->Jump(); + // Avoid popping the result if it converts to 'false' using the + // standard ToBoolean() conversion as described in ECMA-262, + // section 9.2, page 30. + // + // Duplicate the TOS value. The duplicate will be popped by + // ToBoolean. + frame_->Dup(); + ControlDestination dest(&pop_and_continue, &exit, true); + ToBoolean(&dest); - is_string.Bind(&left_side); - // left_side is a sequential ASCII string. - ASSERT(left_side.reg().is(left_reg)); - right_side = Result(right_val); - Result temp2 = allocator_->Allocate(); - ASSERT(temp2.is_valid()); - // Test string equality and comparison. - if (cc == equal) { - Label comparison_done; - __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), - Smi::FromInt(1)); - __ j(not_equal, &comparison_done); - uint8_t char_value = - static_cast<uint8_t>(String::cast(*right_val)->Get(0)); - __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize), - Immediate(char_value)); - __ bind(&comparison_done); - } else { - __ movq(temp2.reg(), - FieldOperand(left_side.reg(), String::kLengthOffset)); - __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1)); - Label comparison; - // If the length is 0 then the subtraction gave -1 which compares less - // than any character. - __ j(negative, &comparison); - // Otherwise load the first character. - __ movzxbl(temp2.reg(), - FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize)); - __ bind(&comparison); - // Compare the first character of the string with the - // constant 1-character string. - uint8_t char_value = - static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0)); - __ cmpb(temp2.reg(), Immediate(char_value)); - Label characters_were_different; - __ j(not_equal, &characters_were_different); - // If the first character is the same then the long string sorts after - // the short one. - __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), - Smi::FromInt(1)); - __ bind(&characters_were_different); - } - temp2.Unuse(); - left_side.Unuse(); - right_side.Unuse(); - dest->Split(cc); - } - } else { - // Neither side is a constant Smi, constant 1-char string, or constant null. - // If either side is a non-smi constant, skip the smi check. 
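The removed inline code above compares a string against a constant one-character ASCII string using only its length and first byte, with an empty string sorting below everything and a longer string sorting after the one-character constant when the first bytes tie. The same ordering rule in portable C++, as a sketch rather than the engine code:

#include <cstdio>
#include <string>

// Sketch of the inlined ordering rule for a one-character ASCII constant.
int CompareWithOneCharConstant(const std::string& s, char constant) {
  if (s.empty()) return -1;                        // empty sorts below everything
  if (s[0] != constant) return s[0] < constant ? -1 : 1;
  return s.size() == 1 ? 0 : 1;                    // same first char: longer is greater
}

int main() {
  std::printf("%d %d %d\n",
              CompareWithOneCharConstant("a", 'b'),    // -1
              CompareWithOneCharConstant("a", 'a'),    //  0
              CompareWithOneCharConstant("ab", 'a'));  //  1
  return 0;
}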
- bool known_non_smi = - (left_side.is_constant() && !left_side.handle()->IsSmi()) || - (right_side.is_constant() && !right_side.handle()->IsSmi()) || - left_side.type_info().IsDouble() || - right_side.type_info().IsDouble(); + // Pop the result of evaluating the first part. + frame_->Drop(); - NaNInformation nan_info = - (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ? - kBothCouldBeNaN : - kCantBothBeNaN; + // Compile right side expression. + is_true.Bind(); + Load(node->right()); - // Inline number comparison handling any combination of smi's and heap - // numbers if: - // code is in a loop - // the compare operation is different from equal - // compare is not a for-loop comparison - // The reason for excluding equal is that it will most likely be done - // with smi's (not heap numbers) and the code to comparing smi's is inlined - // separately. The same reason applies for for-loop comparison which will - // also most likely be smi comparisons. - bool is_loop_condition = (node->AsExpression() != NULL) - && node->AsExpression()->is_loop_condition(); - bool inline_number_compare = - loop_nesting() > 0 && cc != equal && !is_loop_condition; + // Exit (always with a materialized value). + exit.Bind(); + } - left_side.ToRegister(); - right_side.ToRegister(); + } else { + ASSERT(node->op() == Token::OR); + JumpTarget is_false; + ControlDestination dest(destination()->true_target(), &is_false, false); + LoadCondition(node->left(), &dest, false); - if (known_non_smi) { - // Inlined equality check: - // If at least one of the objects is not NaN, then if the objects - // are identical, they are equal. - if (nan_info == kCantBothBeNaN && cc == equal) { - __ cmpq(left_side.reg(), right_side.reg()); - dest->true_target()->Branch(equal); + if (dest.true_was_fall_through()) { + // The current true target was used as the fall-through. If + // there are no dangling jumps to is_false then the left + // subexpression was unconditionally true. Otherwise we have + // paths where we do have to evaluate the right subexpression. + if (is_false.is_linked()) { + // We need to compile the right subexpression. If the jump to + // the current true target was a forward jump then we have a + // valid frame, we have just bound the true target, and we + // have to jump around the code for the right subexpression. + if (has_valid_frame()) { + destination()->true_target()->Unuse(); + destination()->true_target()->Jump(); + } + is_false.Bind(); + // The left subexpression compiled to control flow, so the + // right one is free to do so as well. + LoadCondition(node->right(), destination(), false); + } else { + // We have just jumped to or bound the current true target but + // the current control destination is not marked as used. + destination()->Use(true); } - // Inlined number comparison: - if (inline_number_compare) { - GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); - } + } else if (dest.is_used()) { + // The left subexpression compiled to control flow (and is_false + // was just bound), so the right is free to do so as well. + LoadCondition(node->right(), destination(), false); - CompareStub stub(cc, strict, nan_info, !inline_number_compare); - Result answer = frame_->CallStub(&stub, &left_side, &right_side); - __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag. - answer.Unuse(); - dest->Split(cc); } else { - // Here we split control flow to the stub call and inlined cases - // before finally splitting it to the control destination. 
We use - // a jump target and branching to duplicate the virtual frame at - // the first split. We manually handle the off-frame references - // by reconstituting them on the non-fall-through path. - JumpTarget is_smi; - Register left_reg = left_side.reg(); - Register right_reg = right_side.reg(); - - Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg); - is_smi.Branch(both_smi); + // We have a materialized value on the frame, so we exit with + // one on all paths. There are possibly also jumps to is_false + // from nested subexpressions. + JumpTarget pop_and_continue; + JumpTarget exit; - // Inline the equality check if both operands can't be a NaN. If both - // objects are the same they are equal. - if (nan_info == kCantBothBeNaN && cc == equal) { - __ cmpq(left_side.reg(), right_side.reg()); - dest->true_target()->Branch(equal); - } + // Avoid popping the result if it converts to 'true' using the + // standard ToBoolean() conversion as described in ECMA-262, + // section 9.2, page 30. + // + // Duplicate the TOS value. The duplicate will be popped by + // ToBoolean. + frame_->Dup(); + ControlDestination dest(&exit, &pop_and_continue, false); + ToBoolean(&dest); - // Inlined number comparison: - if (inline_number_compare) { - GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); - } + // Pop the result of evaluating the first part. + frame_->Drop(); - CompareStub stub(cc, strict, nan_info, !inline_number_compare); - Result answer = frame_->CallStub(&stub, &left_side, &right_side); - __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags. - answer.Unuse(); - dest->true_target()->Branch(cc); - dest->false_target()->Jump(); + // Compile right side expression. + is_false.Bind(); + Load(node->right()); - is_smi.Bind(); - left_side = Result(left_reg); - right_side = Result(right_reg); - __ SmiCompare(left_side.reg(), right_side.reg()); - right_side.Unuse(); - left_side.Unuse(); - dest->Split(cc); + // Exit (always with a materialized value). + exit.Bind(); } } } +void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { + Comment cmnt(masm_, "[ BinaryOperation"); -// Load a comparison operand into into a XMM register. Jump to not_numbers jump -// target passing the left and right result if the operand is not a number. -static void LoadComparisonOperand(MacroAssembler* masm_, - Result* operand, - XMMRegister xmm_reg, - Result* left_side, - Result* right_side, - JumpTarget* not_numbers) { - Label done; - if (operand->type_info().IsDouble()) { - // Operand is known to be a heap number, just load it. - __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); - } else if (operand->type_info().IsSmi()) { - // Operand is known to be a smi. Convert it to double and keep the original - // smi. - __ SmiToInteger32(kScratchRegister, operand->reg()); - __ cvtlsi2sd(xmm_reg, kScratchRegister); + if (node->op() == Token::AND || node->op() == Token::OR) { + GenerateLogicalBooleanOperation(node); } else { - // Operand type not known, check for smi or heap number. - Label smi; - __ JumpIfSmi(operand->reg(), &smi); - if (!operand->type_info().IsNumber()) { - __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex); - __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset), - kScratchRegister); - not_numbers->Branch(not_equal, left_side, right_side, taken); + // NOTE: The code below assumes that the slow cases (calls to runtime) + // never return a constant/immutable object. 
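The removed LoadComparisonOperand converts an untagged smi straight to a double with cvtlsi2sd; that is lossless because every 32-bit integer fits in the 53-bit significand of an IEEE double. A quick standalone check of that fact:

#include <cstdint>
#include <cstdio>

// Every 32-bit integer round-trips exactly through double, which is why
// converting a smi payload to double for the comparison loses nothing.
int main() {
  const int32_t values[] = {INT32_MIN, -1, 0, INT32_MAX};
  bool exact = true;
  for (int32_t v : values) {
    double d = static_cast<double>(v);
    exact = exact && (static_cast<int32_t>(d) == v);
  }
  std::printf("round-trips exactly: %d\n", exact);  // 1
  return 0;
}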
+ OverwriteMode overwrite_mode = NO_OVERWRITE; + if (node->left()->AsBinaryOperation() != NULL && + node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { + overwrite_mode = OVERWRITE_LEFT; + } else if (node->right()->AsBinaryOperation() != NULL && + node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { + overwrite_mode = OVERWRITE_RIGHT; } - __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); - __ jmp(&done); - __ bind(&smi); - // Comvert smi to float and keep the original smi. - __ SmiToInteger32(kScratchRegister, operand->reg()); - __ cvtlsi2sd(xmm_reg, kScratchRegister); - __ jmp(&done); + if (node->left()->IsTrivial()) { + Load(node->right()); + Result right = frame_->Pop(); + frame_->Push(node->left()); + frame_->Push(&right); + } else { + Load(node->left()); + Load(node->right()); + } + GenericBinaryOperation(node, overwrite_mode); } - __ bind(&done); } -void CodeGenerator::GenerateInlineNumberComparison(Result* left_side, - Result* right_side, - Condition cc, - ControlDestination* dest) { - ASSERT(left_side->is_register()); - ASSERT(right_side->is_register()); - - JumpTarget not_numbers; - // Load left and right operand into registers xmm0 and xmm1 and compare. - LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side, - ¬_numbers); - LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side, - ¬_numbers); - __ ucomisd(xmm0, xmm1); - // Bail out if a NaN is involved. - not_numbers.Branch(parity_even, left_side, right_side); - - // Split to destination targets based on comparison. - left_side->Unuse(); - right_side->Unuse(); - dest->true_target()->Branch(DoubleCondition(cc)); - dest->false_target()->Jump(); - - not_numbers.Bind(left_side, right_side); +void CodeGenerator::VisitThisFunction(ThisFunction* node) { + frame_->PushFunction(); } -class DeferredInlineBinaryOperation: public DeferredCode { - public: - DeferredInlineBinaryOperation(Token::Value op, - Register dst, - Register left, - Register right, - OverwriteMode mode) - : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) { - set_comment("[ DeferredInlineBinaryOperation"); - } - - virtual void Generate(); +void CodeGenerator::VisitCompareOperation(CompareOperation* node) { + Comment cmnt(masm_, "[ CompareOperation"); - private: - Token::Value op_; - Register dst_; - Register left_; - Register right_; - OverwriteMode mode_; -}; + // Get the expressions from the node. + Expression* left = node->left(); + Expression* right = node->right(); + Token::Value op = node->op(); + // To make typeof testing for natives implemented in JavaScript really + // efficient, we generate special code for expressions of the form: + // 'typeof <expression> == <string>'. + UnaryOperation* operation = left->AsUnaryOperation(); + if ((op == Token::EQ || op == Token::EQ_STRICT) && + (operation != NULL && operation->op() == Token::TYPEOF) && + (right->AsLiteral() != NULL && + right->AsLiteral()->handle()->IsString())) { + Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle())); + // Load the operand and move it to a register. 
+ LoadTypeofExpression(operation->expression()); + Result answer = frame_->Pop(); + answer.ToRegister(); -void DeferredInlineBinaryOperation::Generate() { - Label done; - if ((op_ == Token::ADD) - || (op_ == Token::SUB) - || (op_ == Token::MUL) - || (op_ == Token::DIV)) { - Label call_runtime; - Label left_smi, right_smi, load_right, do_op; - __ JumpIfSmi(left_, &left_smi); - __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - __ j(not_equal, &call_runtime); - __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset)); - if (mode_ == OVERWRITE_LEFT) { - __ movq(dst_, left_); - } - __ jmp(&load_right); + if (check->Equals(Heap::number_symbol())) { + Condition is_smi = masm_->CheckSmi(answer.reg()); + destination()->true_target()->Branch(is_smi); + frame_->Spill(answer.reg()); + __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); + __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex); + answer.Unuse(); + destination()->Split(equal); - __ bind(&left_smi); - __ SmiToInteger32(left_, left_); - __ cvtlsi2sd(xmm0, left_); - __ Integer32ToSmi(left_, left_); - if (mode_ == OVERWRITE_LEFT) { - Label alloc_failure; - __ AllocateHeapNumber(dst_, no_reg, &call_runtime); - } + } else if (check->Equals(Heap::string_symbol())) { + Condition is_smi = masm_->CheckSmi(answer.reg()); + destination()->false_target()->Branch(is_smi); - __ bind(&load_right); - __ JumpIfSmi(right_, &right_smi); - __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - __ j(not_equal, &call_runtime); - __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset)); - if (mode_ == OVERWRITE_RIGHT) { - __ movq(dst_, right_); - } else if (mode_ == NO_OVERWRITE) { - Label alloc_failure; - __ AllocateHeapNumber(dst_, no_reg, &call_runtime); - } - __ jmp(&do_op); + // It can be an undetectable string object. + __ movq(kScratchRegister, + FieldOperand(answer.reg(), HeapObject::kMapOffset)); + __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); + destination()->false_target()->Branch(not_zero); + __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE); + answer.Unuse(); + destination()->Split(below); // Unsigned byte comparison needed. 
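The "Unsigned byte comparison needed" note above reflects that instance types are a single unsigned byte with string types at the low end of the range, so "is a string" is one unsigned compare (hence branching on below rather than a signed less). A minimal sketch with a made-up boundary value:

#include <cstdint>
#include <cstdio>

// Sketch only: the boundary constant is invented for illustration, not V8's
// value, but the shape matches the generated unsigned byte comparison.
constexpr uint8_t kFirstNonstringType = 0x80;  // assumed value

bool IsStringType(uint8_t instance_type) {
  return instance_type < kFirstNonstringType;  // unsigned compare
}

int main() {
  std::printf("%d %d\n", IsStringType(0x07), IsStringType(0xC0));  // 1 0
  return 0;
}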
- __ bind(&right_smi); - __ SmiToInteger32(right_, right_); - __ cvtlsi2sd(xmm1, right_); - __ Integer32ToSmi(right_, right_); - if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) { - Label alloc_failure; - __ AllocateHeapNumber(dst_, no_reg, &call_runtime); - } + } else if (check->Equals(Heap::boolean_symbol())) { + __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex); + destination()->true_target()->Branch(equal); + __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex); + answer.Unuse(); + destination()->Split(equal); - __ bind(&do_op); - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0); - __ jmp(&done); + } else if (check->Equals(Heap::undefined_symbol())) { + __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex); + destination()->true_target()->Branch(equal); - __ bind(&call_runtime); - } - GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB); - stub.GenerateCall(masm_, left_, right_); - if (!dst_.is(rax)) __ movq(dst_, rax); - __ bind(&done); -} + Condition is_smi = masm_->CheckSmi(answer.reg()); + destination()->false_target()->Branch(is_smi); + // It can be an undetectable object. + __ movq(kScratchRegister, + FieldOperand(answer.reg(), HeapObject::kMapOffset)); + __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); + answer.Unuse(); + destination()->Split(not_zero); -static TypeInfo CalculateTypeInfo(TypeInfo operands_type, - Token::Value op, - const Result& right, - const Result& left) { - // Set TypeInfo of result according to the operation performed. - // We rely on the fact that smis have a 32 bit payload on x64. - STATIC_ASSERT(kSmiValueSize == 32); - switch (op) { - case Token::COMMA: - return right.type_info(); - case Token::OR: - case Token::AND: - // Result type can be either of the two input types. - return operands_type; - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: - // Result is always a smi. - return TypeInfo::Smi(); - case Token::SAR: - case Token::SHL: - // Result is always a smi. - return TypeInfo::Smi(); - case Token::SHR: - // Result of x >>> y is always a smi if masked y >= 1, otherwise a number. - return (right.is_constant() && right.handle()->IsSmi() - && (Smi::cast(*right.handle())->value() & 0x1F) >= 1) - ? TypeInfo::Smi() - : TypeInfo::Number(); - case Token::ADD: - if (operands_type.IsNumber()) { - return TypeInfo::Number(); - } else if (left.type_info().IsString() || right.type_info().IsString()) { - return TypeInfo::String(); - } else { - return TypeInfo::Unknown(); - } - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: - // Result is always a number. - return TypeInfo::Number(); - default: - UNREACHABLE(); - } - UNREACHABLE(); - return TypeInfo::Unknown(); -} + } else if (check->Equals(Heap::function_symbol())) { + Condition is_smi = masm_->CheckSmi(answer.reg()); + destination()->false_target()->Branch(is_smi); + frame_->Spill(answer.reg()); + __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg()); + destination()->true_target()->Branch(equal); + // Regular expressions are callable so typeof == 'function'. 
+ __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE); + answer.Unuse(); + destination()->Split(equal); + } else if (check->Equals(Heap::object_symbol())) { + Condition is_smi = masm_->CheckSmi(answer.reg()); + destination()->false_target()->Branch(is_smi); + __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex); + destination()->true_target()->Branch(equal); -void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, - OverwriteMode overwrite_mode) { - Comment cmnt(masm_, "[ BinaryOperation"); - Token::Value op = expr->op(); - Comment cmnt_token(masm_, Token::String(op)); + // Regular expressions are typeof == 'function', not 'object'. + __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister); + destination()->false_target()->Branch(equal); - if (op == Token::COMMA) { - // Simply discard left value. - frame_->Nip(1); + // It can be an undetectable object. + __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); + destination()->false_target()->Branch(not_zero); + __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE); + destination()->false_target()->Branch(below); + __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); + answer.Unuse(); + destination()->Split(below_equal); + } else { + // Uncommon case: typeof testing against a string literal that is + // never returned from the typeof operator. + answer.Unuse(); + destination()->Goto(false); + } return; } - Result right = frame_->Pop(); - Result left = frame_->Pop(); - - if (op == Token::ADD) { - const bool left_is_string = left.type_info().IsString(); - const bool right_is_string = right.type_info().IsString(); - // Make sure constant strings have string type info. - ASSERT(!(left.is_constant() && left.handle()->IsString()) || - left_is_string); - ASSERT(!(right.is_constant() && right.handle()->IsString()) || - right_is_string); - if (left_is_string || right_is_string) { - frame_->Push(&left); - frame_->Push(&right); - Result answer; - if (left_is_string) { - if (right_is_string) { - StringAddStub stub(NO_STRING_CHECK_IN_STUB); - answer = frame_->CallStub(&stub, 2); - } else { - answer = - frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2); - } - } else if (right_is_string) { - answer = - frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2); - } - answer.set_type_info(TypeInfo::String()); - frame_->Push(&answer); + Condition cc = no_condition; + bool strict = false; + switch (op) { + case Token::EQ_STRICT: + strict = true; + // Fall through + case Token::EQ: + cc = equal; + break; + case Token::LT: + cc = less; + break; + case Token::GT: + cc = greater; + break; + case Token::LTE: + cc = less_equal; + break; + case Token::GTE: + cc = greater_equal; + break; + case Token::IN: { + Load(left); + Load(right); + Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2); + frame_->Push(&answer); // push the result return; } - // Neither operand is known to be a string. 
+ case Token::INSTANCEOF: { + Load(left); + Load(right); + InstanceofStub stub; + Result answer = frame_->CallStub(&stub, 2); + answer.ToRegister(); + __ testq(answer.reg(), answer.reg()); + answer.Unuse(); + destination()->Split(zero); + return; + } + default: + UNREACHABLE(); } - bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi(); - bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi(); - bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi(); - bool right_is_non_smi_constant = - right.is_constant() && !right.handle()->IsSmi(); - - if (left_is_smi_constant && right_is_smi_constant) { - // Compute the constant result at compile time, and leave it on the frame. - int left_int = Smi::cast(*left.handle())->value(); - int right_int = Smi::cast(*right.handle())->value(); - if (FoldConstantSmis(op, left_int, right_int)) return; + if (left->IsTrivial()) { + Load(right); + Result right_result = frame_->Pop(); + frame_->Push(left); + frame_->Push(&right_result); + } else { + Load(left); + Load(right); } - // Get number type of left and right sub-expressions. - TypeInfo operands_type = - TypeInfo::Combine(left.type_info(), right.type_info()); - - TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left); + Comparison(node, cc, strict, destination()); +} - Result answer; - if (left_is_non_smi_constant || right_is_non_smi_constant) { - // Go straight to the slow case, with no smi code. - GenericBinaryOpStub stub(op, - overwrite_mode, - NO_SMI_CODE_IN_STUB, - operands_type); - answer = stub.GenerateCall(masm_, frame_, &left, &right); - } else if (right_is_smi_constant) { - answer = ConstantSmiBinaryOperation(expr, &left, right.handle(), - false, overwrite_mode); - } else if (left_is_smi_constant) { - answer = ConstantSmiBinaryOperation(expr, &right, left.handle(), - true, overwrite_mode); - } else { - // Set the flags based on the operation, type and loop nesting level. - // Bit operations always assume they likely operate on Smis. Still only - // generate the inline Smi check code if this operation is part of a loop. - // For all other operations only inline the Smi check code for likely smis - // if the operation is part of a loop. - if (loop_nesting() > 0 && - (Token::IsBitOp(op) || - operands_type.IsInteger32() || - expr->type()->IsLikelySmi())) { - answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode); - } else { - GenericBinaryOpStub stub(op, - overwrite_mode, - NO_GENERIC_BINARY_FLAGS, - operands_type); - answer = stub.GenerateCall(masm_, frame_, &left, &right); - } - } - answer.set_type_info(result_type); - frame_->Push(&answer); +#ifdef DEBUG +bool CodeGenerator::HasValidEntryRegisters() { + return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0)) + && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0)) + && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0)) + && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0)) + && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0)) + && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0)) + && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0)) + && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0)) + && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0)) + && (allocator()->count(r12) == (frame()->is_used(r12) ? 
1 : 0)); } +#endif + // Emit a LoadIC call to get the value from receiver and leave it in @@ -6901,623 +7494,155 @@ void DeferredReferenceGetNamedValue::Generate() { } -void DeferredInlineSmiAdd::Generate() { - GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); - igostub.GenerateCall(masm_, dst_, value_); - if (!dst_.is(rax)) __ movq(dst_, rax); -} - - -void DeferredInlineSmiAddReversed::Generate() { - GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); - igostub.GenerateCall(masm_, value_, dst_); - if (!dst_.is(rax)) __ movq(dst_, rax); -} - - -void DeferredInlineSmiSub::Generate() { - GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB); - igostub.GenerateCall(masm_, dst_, value_); - if (!dst_.is(rax)) __ movq(dst_, rax); -} - - -void DeferredInlineSmiOperation::Generate() { - // For mod we don't generate all the Smi code inline. - GenericBinaryOpStub stub( - op_, - overwrite_mode_, - (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB); - stub.GenerateCall(masm_, src_, value_); - if (!dst_.is(rax)) __ movq(dst_, rax); -} - - -void DeferredInlineSmiOperationReversed::Generate() { - GenericBinaryOpStub stub( - op_, - overwrite_mode_, - NO_SMI_CODE_IN_STUB); - stub.GenerateCall(masm_, value_, src_); - if (!dst_.is(rax)) __ movq(dst_, rax); -} - - -Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr, - Result* operand, - Handle<Object> value, - bool reversed, - OverwriteMode overwrite_mode) { - // Generate inline code for a binary operation when one of the - // operands is a constant smi. Consumes the argument "operand". - if (IsUnsafeSmi(value)) { - Result unsafe_operand(value); - if (reversed) { - return LikelySmiBinaryOperation(expr, &unsafe_operand, operand, - overwrite_mode); - } else { - return LikelySmiBinaryOperation(expr, operand, &unsafe_operand, - overwrite_mode); - } +class DeferredReferenceGetKeyedValue: public DeferredCode { + public: + explicit DeferredReferenceGetKeyedValue(Register dst, + Register receiver, + Register key) + : dst_(dst), receiver_(receiver), key_(key) { + set_comment("[ DeferredReferenceGetKeyedValue"); } - // Get the literal value. - Smi* smi_value = Smi::cast(*value); - int int_value = smi_value->value(); - - Token::Value op = expr->op(); - Result answer; - switch (op) { - case Token::ADD: { - operand->ToRegister(); - frame_->Spill(operand->reg()); - DeferredCode* deferred = NULL; - if (reversed) { - deferred = new DeferredInlineSmiAddReversed(operand->reg(), - smi_value, - overwrite_mode); - } else { - deferred = new DeferredInlineSmiAdd(operand->reg(), - smi_value, - overwrite_mode); - } - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), - deferred); - __ SmiAddConstant(operand->reg(), - operand->reg(), - smi_value, - deferred->entry_label()); - deferred->BindExit(); - answer = *operand; - break; - } - - case Token::SUB: { - if (reversed) { - Result constant_operand(value); - answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, - overwrite_mode); - } else { - operand->ToRegister(); - frame_->Spill(operand->reg()); - DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(), - smi_value, - overwrite_mode); - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), - deferred); - // A smi currently fits in a 32-bit Immediate. 
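For context on the constant-smi handling above: the removed CalculateTypeInfo earlier in this hunk notes that smis carry a 32-bit payload on x64. A hedged sketch of that encoding, with the payload kept in the upper half of the tagged word; the exact shift is an assumption made for illustration:

#include <cstdint>
#include <cstdio>

// Sketch of an x64-style smi encoding (32-bit payload in the upper half,
// zero tag bits below); the layout details are assumptions, not V8 headers.
constexpr int kSmiShift = 32;  // assumed shift width

int64_t TagSmi(int32_t value) {
  return static_cast<int64_t>(static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift);
}

int32_t UntagSmi(int64_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);  // relies on arithmetic shift
}

int main() {
  std::printf("%d %d\n", UntagSmi(TagSmi(7)), UntagSmi(TagSmi(-7)));  // 7 -7
  return 0;
}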
- __ SmiSubConstant(operand->reg(), - operand->reg(), - smi_value, - deferred->entry_label()); - deferred->BindExit(); - answer = *operand; - } - break; - } - - case Token::SAR: - if (reversed) { - Result constant_operand(value); - answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, - overwrite_mode); - } else { - // Only the least significant 5 bits of the shift value are used. - // In the slow case, this masking is done inside the runtime call. - int shift_value = int_value & 0x1f; - operand->ToRegister(); - frame_->Spill(operand->reg()); - DeferredInlineSmiOperation* deferred = - new DeferredInlineSmiOperation(op, - operand->reg(), - operand->reg(), - smi_value, - overwrite_mode); - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), - deferred); - __ SmiShiftArithmeticRightConstant(operand->reg(), - operand->reg(), - shift_value); - deferred->BindExit(); - answer = *operand; - } - break; - - case Token::SHR: - if (reversed) { - Result constant_operand(value); - answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, - overwrite_mode); - } else { - // Only the least significant 5 bits of the shift value are used. - // In the slow case, this masking is done inside the runtime call. - int shift_value = int_value & 0x1f; - operand->ToRegister(); - answer = allocator()->Allocate(); - ASSERT(answer.is_valid()); - DeferredInlineSmiOperation* deferred = - new DeferredInlineSmiOperation(op, - answer.reg(), - operand->reg(), - smi_value, - overwrite_mode); - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), - deferred); - __ SmiShiftLogicalRightConstant(answer.reg(), - operand->reg(), - shift_value, - deferred->entry_label()); - deferred->BindExit(); - operand->Unuse(); - } - break; - - case Token::SHL: - if (reversed) { - operand->ToRegister(); - - // We need rcx to be available to hold operand, and to be spilled. - // SmiShiftLeft implicitly modifies rcx. - if (operand->reg().is(rcx)) { - frame_->Spill(operand->reg()); - answer = allocator()->Allocate(); - } else { - Result rcx_reg = allocator()->Allocate(rcx); - // answer must not be rcx. - answer = allocator()->Allocate(); - // rcx_reg goes out of scope. - } - - DeferredInlineSmiOperationReversed* deferred = - new DeferredInlineSmiOperationReversed(op, - answer.reg(), - smi_value, - operand->reg(), - overwrite_mode); - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), - deferred); - - __ Move(answer.reg(), smi_value); - __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg()); - operand->Unuse(); + virtual void Generate(); - deferred->BindExit(); - } else { - // Only the least significant 5 bits of the shift value are used. - // In the slow case, this masking is done inside the runtime call. - int shift_value = int_value & 0x1f; - operand->ToRegister(); - if (shift_value == 0) { - // Spill operand so it can be overwritten in the slow case. - frame_->Spill(operand->reg()); - DeferredInlineSmiOperation* deferred = - new DeferredInlineSmiOperation(op, - operand->reg(), - operand->reg(), - smi_value, - overwrite_mode); - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), - deferred); - deferred->BindExit(); - answer = *operand; - } else { - // Use a fresh temporary for nonzero shift values. 
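The "only the least significant 5 bits of the shift value are used" masking above mirrors what both ECMAScript and 32-bit hardware shifts do with the count, which is why the constant can be masked once at code-generation time. A small standalone version of the same rule:

#include <cstdint>
#include <cstdio>

// Same masking rule: only the low 5 bits of the shift count matter.
int32_t ShiftLeftJsStyle(int32_t value, int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << (count & 0x1f));
}

int main() {
  std::printf("%d\n", ShiftLeftJsStyle(1, 33));  // 2, just like (1 << 33) in JavaScript
  return 0;
}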
- answer = allocator()->Allocate(); - ASSERT(answer.is_valid()); - DeferredInlineSmiOperation* deferred = - new DeferredInlineSmiOperation(op, - answer.reg(), - operand->reg(), - smi_value, - overwrite_mode); - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), - deferred); - __ SmiShiftLeftConstant(answer.reg(), - operand->reg(), - shift_value); - deferred->BindExit(); - operand->Unuse(); - } - } - break; + Label* patch_site() { return &patch_site_; } - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: { - operand->ToRegister(); - frame_->Spill(operand->reg()); - if (reversed) { - // Bit operations with a constant smi are commutative. - // We can swap left and right operands with no problem. - // Swap left and right overwrite modes. 0->0, 1->2, 2->1. - overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3); - } - DeferredCode* deferred = new DeferredInlineSmiOperation(op, - operand->reg(), - operand->reg(), - smi_value, - overwrite_mode); - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), - deferred); - if (op == Token::BIT_AND) { - __ SmiAndConstant(operand->reg(), operand->reg(), smi_value); - } else if (op == Token::BIT_XOR) { - if (int_value != 0) { - __ SmiXorConstant(operand->reg(), operand->reg(), smi_value); - } - } else { - ASSERT(op == Token::BIT_OR); - if (int_value != 0) { - __ SmiOrConstant(operand->reg(), operand->reg(), smi_value); - } - } - deferred->BindExit(); - answer = *operand; - break; - } + private: + Label patch_site_; + Register dst_; + Register receiver_; + Register key_; +}; - // Generate inline code for mod of powers of 2 and negative powers of 2. - case Token::MOD: - if (!reversed && - int_value != 0 && - (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) { - operand->ToRegister(); - frame_->Spill(operand->reg()); - DeferredCode* deferred = - new DeferredInlineSmiOperation(op, - operand->reg(), - operand->reg(), - smi_value, - overwrite_mode); - // Check for negative or non-Smi left hand side. - __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label()); - if (int_value < 0) int_value = -int_value; - if (int_value == 1) { - __ Move(operand->reg(), Smi::FromInt(0)); - } else { - __ SmiAndConstant(operand->reg(), - operand->reg(), - Smi::FromInt(int_value - 1)); - } - deferred->BindExit(); - answer = *operand; - break; // This break only applies if we generated code for MOD. - } - // Fall through if we did not find a power of 2 on the right hand side! - // The next case must be the default. - default: { - Result constant_operand(value); - if (reversed) { - answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, - overwrite_mode); - } else { - answer = LikelySmiBinaryOperation(expr, operand, &constant_operand, - overwrite_mode); - } - break; +void DeferredReferenceGetKeyedValue::Generate() { + if (receiver_.is(rdx)) { + if (!key_.is(rax)) { + __ movq(rax, key_); + } // else do nothing. + } else if (receiver_.is(rax)) { + if (key_.is(rdx)) { + __ xchg(rax, rdx); + } else if (key_.is(rax)) { + __ movq(rdx, receiver_); + } else { + __ movq(rdx, receiver_); + __ movq(rax, key_); } + } else if (key_.is(rax)) { + __ movq(rdx, receiver_); + } else { + __ movq(rax, key_); + __ movq(rdx, receiver_); } - ASSERT(answer.is_valid()); - return answer; -} - + // Calculate the delta from the IC call instruction to the map check + // movq instruction in the inlined version. 
This delta is stored in + // a test(rax, delta) instruction after the call so that we can find + // it in the IC initialization code and patch the movq instruction. + // This means that we cannot allow test instructions after calls to + // KeyedLoadIC stubs in other places. + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + // The delta from the start of the map-compare instruction to the + // test instruction. We use masm_-> directly here instead of the __ + // macro because the macro sometimes uses macro expansion to turn + // into something that can't return a value. This is encountered + // when doing generated code coverage tests. + int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); + // Here we use masm_-> instead of the __ macro because this is the + // instruction that gets patched and coverage code gets in the way. + // TODO(X64): Consider whether it's worth switching the test to a + // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't + // be generated normally. + masm_->testl(rax, Immediate(-delta_to_patch_site)); + __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); -void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg, - TypeInfo type, - DeferredCode* deferred) { - if (!type.IsSmi()) { - __ JumpIfNotSmi(reg, deferred->entry_label()); - } - if (FLAG_debug_code) { - __ AbortIfNotSmi(reg); - } + if (!dst_.is(rax)) __ movq(dst_, rax); } -void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left, - Register right, - TypeInfo left_info, - TypeInfo right_info, - DeferredCode* deferred) { - if (!left_info.IsSmi() && !right_info.IsSmi()) { - __ JumpIfNotBothSmi(left, right, deferred->entry_label()); - } else if (!left_info.IsSmi()) { - __ JumpIfNotSmi(left, deferred->entry_label()); - } else if (!right_info.IsSmi()) { - __ JumpIfNotSmi(right, deferred->entry_label()); - } - if (FLAG_debug_code) { - __ AbortIfNotSmi(left); - __ AbortIfNotSmi(right); +class DeferredReferenceSetKeyedValue: public DeferredCode { + public: + DeferredReferenceSetKeyedValue(Register value, + Register key, + Register receiver) + : value_(value), key_(key), receiver_(receiver) { + set_comment("[ DeferredReferenceSetKeyedValue"); } -} - - -// Implements a binary operation using a deferred code object and some -// inline code to operate on smis quickly. -Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, - Result* left, - Result* right, - OverwriteMode overwrite_mode) { - // Copy the type info because left and right may be overwritten. - TypeInfo left_type_info = left->type_info(); - TypeInfo right_type_info = right->type_info(); - Token::Value op = expr->op(); - Result answer; - // Special handling of div and mod because they use fixed registers. - if (op == Token::DIV || op == Token::MOD) { - // We need rax as the quotient register, rdx as the remainder - // register, neither left nor right in rax or rdx, and left copied - // to rax. - Result quotient; - Result remainder; - bool left_is_in_rax = false; - // Step 1: get rax for quotient. - if ((left->is_register() && left->reg().is(rax)) || - (right->is_register() && right->reg().is(rax))) { - // One or both is in rax. Use a fresh non-rdx register for - // them. 
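A toy model of the patch-site trick described in the comment above: a marker emitted right after the call records how far back the patchable instruction starts, so a later pass can locate it without a side table. The buffer and byte values below are stand-ins, not V8's actual instruction encoding:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Toy code buffer; the opcodes are placeholders, not real x64 encodings.
struct CodeBuffer {
  std::vector<uint8_t> bytes;
  size_t pc() const { return bytes.size(); }
  void EmitByte(uint8_t b) { bytes.push_back(b); }
  void EmitInt32(int32_t v) {
    uint8_t raw[4];
    std::memcpy(raw, &v, 4);
    bytes.insert(bytes.end(), raw, raw + 4);
  }
};

int main() {
  CodeBuffer buf;
  const size_t patch_site = buf.pc();
  buf.EmitInt32(0x11111111);  // placeholder for the patchable map-check operand
  buf.EmitByte(0xE8);         // pretend "call IC stub"
  buf.EmitInt32(0);           // pretend call target

  // Marker right after the call: encodes the negative delta back to the site.
  const size_t marker = buf.pc();
  buf.EmitInt32(-static_cast<int32_t>(marker - patch_site));

  // A later pass reads the marker following the call and recovers the site.
  int32_t stored;
  std::memcpy(&stored, buf.bytes.data() + marker, 4);
  const size_t found = marker - static_cast<size_t>(-stored);
  std::printf("patch site at %zu, recovered %zu\n", patch_site, found);  // equal
  return 0;
}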
- Result fresh = allocator_->Allocate(); - ASSERT(fresh.is_valid()); - if (fresh.reg().is(rdx)) { - remainder = fresh; - fresh = allocator_->Allocate(); - ASSERT(fresh.is_valid()); - } - if (left->is_register() && left->reg().is(rax)) { - quotient = *left; - *left = fresh; - left_is_in_rax = true; - } - if (right->is_register() && right->reg().is(rax)) { - quotient = *right; - *right = fresh; - } - __ movq(fresh.reg(), rax); - } else { - // Neither left nor right is in rax. - quotient = allocator_->Allocate(rax); - } - ASSERT(quotient.is_register() && quotient.reg().is(rax)); - ASSERT(!(left->is_register() && left->reg().is(rax))); - ASSERT(!(right->is_register() && right->reg().is(rax))); - - // Step 2: get rdx for remainder if necessary. - if (!remainder.is_valid()) { - if ((left->is_register() && left->reg().is(rdx)) || - (right->is_register() && right->reg().is(rdx))) { - Result fresh = allocator_->Allocate(); - ASSERT(fresh.is_valid()); - if (left->is_register() && left->reg().is(rdx)) { - remainder = *left; - *left = fresh; - } - if (right->is_register() && right->reg().is(rdx)) { - remainder = *right; - *right = fresh; - } - __ movq(fresh.reg(), rdx); - } else { - // Neither left nor right is in rdx. - remainder = allocator_->Allocate(rdx); - } - } - ASSERT(remainder.is_register() && remainder.reg().is(rdx)); - ASSERT(!(left->is_register() && left->reg().is(rdx))); - ASSERT(!(right->is_register() && right->reg().is(rdx))); - - left->ToRegister(); - right->ToRegister(); - frame_->Spill(rax); - frame_->Spill(rdx); - // Check that left and right are smi tagged. - DeferredInlineBinaryOperation* deferred = - new DeferredInlineBinaryOperation(op, - (op == Token::DIV) ? rax : rdx, - left->reg(), - right->reg(), - overwrite_mode); - JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), - left_type_info, right_type_info, deferred); - - if (op == Token::DIV) { - __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label()); - deferred->BindExit(); - left->Unuse(); - right->Unuse(); - answer = quotient; - } else { - ASSERT(op == Token::MOD); - __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label()); - deferred->BindExit(); - left->Unuse(); - right->Unuse(); - answer = remainder; - } - ASSERT(answer.is_valid()); - return answer; - } + virtual void Generate(); - // Special handling of shift operations because they use fixed - // registers. - if (op == Token::SHL || op == Token::SHR || op == Token::SAR) { - // Move left out of rcx if necessary. - if (left->is_register() && left->reg().is(rcx)) { - *left = allocator_->Allocate(); - ASSERT(left->is_valid()); - __ movq(left->reg(), rcx); - } - right->ToRegister(rcx); - left->ToRegister(); - ASSERT(left->is_register() && !left->reg().is(rcx)); - ASSERT(right->is_register() && right->reg().is(rcx)); + Label* patch_site() { return &patch_site_; } - // We will modify right, it must be spilled. - frame_->Spill(rcx); + private: + Register value_; + Register key_; + Register receiver_; + Label patch_site_; +}; - // Use a fresh answer register to avoid spilling the left operand. - answer = allocator_->Allocate(); - ASSERT(answer.is_valid()); - // Check that both operands are smis using the answer register as a - // temporary. 
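The reason DIV and MOD are pinned to rax and rdx in the removed code above is that the x86 divide instruction produces the quotient and the remainder together in those two registers. std::div is the portable picture of that "one operation, two results" shape:

#include <cstdio>
#include <cstdlib>

int main() {
  // One division yields both results, mirroring the quotient-in-rax,
  // remainder-in-rdx convention the register allocation works around.
  std::div_t r = std::div(17, 5);
  std::printf("quotient=%d remainder=%d\n", r.quot, r.rem);  // 3 2
  return 0;
}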
- DeferredInlineBinaryOperation* deferred = - new DeferredInlineBinaryOperation(op, - answer.reg(), - left->reg(), - rcx, - overwrite_mode); - Label do_op; - if (right_type_info.IsSmi()) { - if (FLAG_debug_code) { - __ AbortIfNotSmi(right->reg()); - } - __ movq(answer.reg(), left->reg()); - // If left is not known to be a smi, check if it is. - // If left is not known to be a number, and it isn't a smi, check if - // it is a HeapNumber. - if (!left_type_info.IsSmi()) { - __ JumpIfSmi(answer.reg(), &do_op); - if (!left_type_info.IsNumber()) { - // Branch if not a heapnumber. - __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset), - Factory::heap_number_map()); - deferred->Branch(not_equal); - } - // Load integer value into answer register using truncation. - __ cvttsd2si(answer.reg(), - FieldOperand(answer.reg(), HeapNumber::kValueOffset)); - // Branch if we might have overflowed. - // (False negative for Smi::kMinValue) - __ cmpq(answer.reg(), Immediate(0x80000000)); - deferred->Branch(equal); - // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging. - __ Integer32ToSmi(answer.reg(), answer.reg()); - } else { - // Fast case - both are actually smis. - if (FLAG_debug_code) { - __ AbortIfNotSmi(left->reg()); - } - } +void DeferredReferenceSetKeyedValue::Generate() { + __ IncrementCounter(&Counters::keyed_store_inline_miss, 1); + // Move value, receiver, and key to registers rax, rdx, and rcx, as + // the IC stub expects. + // Move value to rax, using xchg if the receiver or key is in rax. + if (!value_.is(rax)) { + if (!receiver_.is(rax) && !key_.is(rax)) { + __ movq(rax, value_); } else { - JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx, - left_type_info, right_type_info, deferred); - } - __ bind(&do_op); - - // Perform the operation. - switch (op) { - case Token::SAR: - __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx); - break; - case Token::SHR: { - __ SmiShiftLogicalRight(answer.reg(), - left->reg(), - rcx, - deferred->entry_label()); - break; + __ xchg(rax, value_); + // Update receiver_ and key_ if they are affected by the swap. + if (receiver_.is(rax)) { + receiver_ = value_; + } else if (receiver_.is(value_)) { + receiver_ = rax; } - case Token::SHL: { - __ SmiShiftLeft(answer.reg(), - left->reg(), - rcx); - break; + if (key_.is(rax)) { + key_ = value_; + } else if (key_.is(value_)) { + key_ = rax; } - default: - UNREACHABLE(); } - deferred->BindExit(); - left->Unuse(); - right->Unuse(); - ASSERT(answer.is_valid()); - return answer; } - - // Handle the other binary operations. - left->ToRegister(); - right->ToRegister(); - // A newly allocated register answer is used to hold the answer. The - // registers containing left and right are not modified so they don't - // need to be spilled in the fast case. - answer = allocator_->Allocate(); - ASSERT(answer.is_valid()); - - // Perform the smi tag check. - DeferredInlineBinaryOperation* deferred = - new DeferredInlineBinaryOperation(op, - answer.reg(), - left->reg(), - right->reg(), - overwrite_mode); - JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), - left_type_info, right_type_info, deferred); - - switch (op) { - case Token::ADD: - __ SmiAdd(answer.reg(), - left->reg(), - right->reg(), - deferred->entry_label()); - break; - - case Token::SUB: - __ SmiSub(answer.reg(), - left->reg(), - right->reg(), - deferred->entry_label()); - break; - - case Token::MUL: { - __ SmiMul(answer.reg(), - left->reg(), - right->reg(), - deferred->entry_label()); - break; + // Value is now in rax. 
Its original location is remembered in value_, + // and the value is restored to value_ before returning. + // The variables receiver_ and key_ are not preserved. + // Move receiver and key to rdx and rcx, swapping if necessary. + if (receiver_.is(rdx)) { + if (!key_.is(rcx)) { + __ movq(rcx, key_); + } // Else everything is already in the right place. + } else if (receiver_.is(rcx)) { + if (key_.is(rdx)) { + __ xchg(rcx, rdx); + } else if (key_.is(rcx)) { + __ movq(rdx, receiver_); + } else { + __ movq(rdx, receiver_); + __ movq(rcx, key_); } - - case Token::BIT_OR: - __ SmiOr(answer.reg(), left->reg(), right->reg()); - break; - - case Token::BIT_AND: - __ SmiAnd(answer.reg(), left->reg(), right->reg()); - break; - - case Token::BIT_XOR: - __ SmiXor(answer.reg(), left->reg(), right->reg()); - break; - - default: - UNREACHABLE(); - break; + } else if (key_.is(rcx)) { + __ movq(rdx, receiver_); + } else { + __ movq(rcx, key_); + __ movq(rdx, receiver_); } - deferred->BindExit(); - left->Unuse(); - right->Unuse(); - ASSERT(answer.is_valid()); - return answer; + + // Call the IC stub. + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + // The delta from the start of the map-compare instructions (initial movq) + // to the test instruction. We use masm_-> directly here instead of the + // __ macro because the macro sometimes uses macro expansion to turn + // into something that can't return a value. This is encountered + // when doing generated code coverage tests. + int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); + // Here we use masm_-> instead of the __ macro because this is the + // instruction that gets patched and coverage code gets in the way. + masm_->testl(rax, Immediate(-delta_to_patch_site)); + // Restore value (returned from store IC). + if (!value_.is(rax)) __ movq(value_, rax); } @@ -8143,90 +8268,701 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { } -bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { - Object* answer_object = Heap::undefined_value(); - switch (op) { - case Token::ADD: - // Use intptr_t to detect overflow of 32-bit int. - if (Smi::IsValid(static_cast<intptr_t>(left) + right)) { - answer_object = Smi::FromInt(left + right); +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Register left, + Register right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(left); + __ push(right); + } else { + // The calling convention with registers is left in rdx and right in rax. + Register left_arg = rdx; + Register right_arg = rax; + if (!(left.is(left_arg) && right.is(right_arg))) { + if (left.is(right_arg) && right.is(left_arg)) { + if (IsOperationCommutative()) { + SetArgsReversed(); + } else { + __ xchg(left, right); + } + } else if (left.is(left_arg)) { + __ movq(right_arg, right); + } else if (right.is(right_arg)) { + __ movq(left_arg, left); + } else if (left.is(right_arg)) { + if (IsOperationCommutative()) { + __ movq(left_arg, right); + SetArgsReversed(); + } else { + // Order of moves important to avoid destroying left argument. + __ movq(left_arg, left); + __ movq(right_arg, right); + } + } else if (right.is(left_arg)) { + if (IsOperationCommutative()) { + __ movq(right_arg, left); + SetArgsReversed(); + } else { + // Order of moves important to avoid destroying right argument. + __ movq(right_arg, right); + __ movq(left_arg, left); + } + } else { + // Order of moves is not important. 
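The move ordering and the xchg in the deferred keyed-store code above guard against the classic shuffle hazard: when the values already sit in the target registers but crossed over, plain ordered moves clobber one of them. In miniature, with local ints standing in for registers:

#include <cstdio>
#include <utility>

int main() {
  // Pretend rdx/rcx are the fixed slots the IC expects, but receiver and key
  // arrived crossed. "rdx = rcx; rcx = rdx;" would leave both equal to 9,
  // so a swap (the xchg case) or a carefully ordered sequence is needed.
  int rdx = 7, rcx = 9;
  std::swap(rdx, rcx);
  std::printf("rdx=%d rcx=%d\n", rdx, rcx);  // 9 7
  return 0;
}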
+ __ movq(left_arg, left); + __ movq(right_arg, right); } + } + + // Update flags to indicate that arguments are in registers. + SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Register left, + Smi* right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(left); + __ Push(right); + } else { + // The calling convention with registers is left in rdx and right in rax. + Register left_arg = rdx; + Register right_arg = rax; + if (left.is(left_arg)) { + __ Move(right_arg, right); + } else if (left.is(right_arg) && IsOperationCommutative()) { + __ Move(left_arg, right); + SetArgsReversed(); + } else { + // For non-commutative operations, left and right_arg might be + // the same register. Therefore, the order of the moves is + // important here in order to not overwrite left before moving + // it to left_arg. + __ movq(left_arg, left); + __ Move(right_arg, right); + } + + // Update flags to indicate that arguments are in registers. + SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Smi* left, + Register right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ Push(left); + __ push(right); + } else { + // The calling convention with registers is left in rdx and right in rax. + Register left_arg = rdx; + Register right_arg = rax; + if (right.is(right_arg)) { + __ Move(left_arg, left); + } else if (right.is(left_arg) && IsOperationCommutative()) { + __ Move(right_arg, left); + SetArgsReversed(); + } else { + // For non-commutative operations, right and left_arg might be + // the same register. Therefore, the order of the moves is + // important here in order to not overwrite right before moving + // it to right_arg. + __ movq(right_arg, right); + __ Move(left_arg, left); + } + // Update flags to indicate that arguments are in registers. + SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, + VirtualFrame* frame, + Result* left, + Result* right) { + if (ArgsInRegistersSupported()) { + SetArgsInRegisters(); + return frame->CallStub(this, left, right); + } else { + frame->Push(left); + frame->Push(right); + return frame->CallStub(this, 2); + } +} + + +void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { + // 1. Move arguments into rdx, rax except for DIV and MOD, which need the + // dividend in rax and rdx free for the division. Use rax, rbx for those. + Comment load_comment(masm, "-- Load arguments"); + Register left = rdx; + Register right = rax; + if (op_ == Token::DIV || op_ == Token::MOD) { + left = rax; + right = rbx; + if (HasArgsInRegisters()) { + __ movq(rbx, rax); + __ movq(rax, rdx); + } + } + if (!HasArgsInRegisters()) { + __ movq(right, Operand(rsp, 1 * kPointerSize)); + __ movq(left, Operand(rsp, 2 * kPointerSize)); + } + + Label not_smis; + // 2. Smi check both operands. + if (static_operands_type_.IsSmi()) { + // Skip smi check if we know that both arguments are smis. 
+ if (FLAG_debug_code) { + __ AbortIfNotSmi(left); + __ AbortIfNotSmi(right); + } + if (op_ == Token::BIT_OR) { + // Handle OR here, since we do extra smi-checking in the or code below. + __ SmiOr(right, right, left); + GenerateReturn(masm); + return; + } + } else { + if (op_ != Token::BIT_OR) { + // Skip the check for OR as it is better combined with the + // actual operation. + Comment smi_check_comment(masm, "-- Smi check arguments"); + __ JumpIfNotBothSmi(left, right, ¬_smis); + } + } + + // 3. Operands are both smis (except for OR), perform the operation leaving + // the result in rax and check the result if necessary. + Comment perform_smi(masm, "-- Perform smi operation"); + Label use_fp_on_smis; + switch (op_) { + case Token::ADD: { + ASSERT(right.is(rax)); + __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. break; - case Token::SUB: - // Use intptr_t to detect overflow of 32-bit int. - if (Smi::IsValid(static_cast<intptr_t>(left) - right)) { - answer_object = Smi::FromInt(left - right); - } + } + + case Token::SUB: { + __ SmiSub(left, left, right, &use_fp_on_smis); + __ movq(rax, left); break; - case Token::MUL: { - double answer = static_cast<double>(left) * right; - if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) { - // If the product is zero and the non-zero factor is negative, - // the spec requires us to return floating point negative zero. - if (answer != 0 || (left + right) >= 0) { - answer_object = Smi::FromInt(static_cast<int>(answer)); - } - } - } + } + + case Token::MUL: + ASSERT(right.is(rax)); + __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. break; + case Token::DIV: + ASSERT(left.is(rax)); + __ SmiDiv(left, left, right, &use_fp_on_smis); + break; + case Token::MOD: + ASSERT(left.is(rax)); + __ SmiMod(left, left, right, slow); break; + case Token::BIT_OR: - answer_object = Smi::FromInt(left | right); + ASSERT(right.is(rax)); + __ movq(rcx, right); // Save the right operand. + __ SmiOr(right, right, left); // BIT_OR is commutative. + __ testb(right, Immediate(kSmiTagMask)); + __ j(not_zero, ¬_smis); break; + case Token::BIT_AND: - answer_object = Smi::FromInt(left & right); + ASSERT(right.is(rax)); + __ SmiAnd(right, right, left); // BIT_AND is commutative. break; + case Token::BIT_XOR: - answer_object = Smi::FromInt(left ^ right); + ASSERT(right.is(rax)); + __ SmiXor(right, right, left); // BIT_XOR is commutative. break; - case Token::SHL: { - int shift_amount = right & 0x1F; - if (Smi::IsValid(left << shift_amount)) { - answer_object = Smi::FromInt(left << shift_amount); + case Token::SHL: + case Token::SHR: + case Token::SAR: + switch (op_) { + case Token::SAR: + __ SmiShiftArithmeticRight(left, left, right); + break; + case Token::SHR: + __ SmiShiftLogicalRight(left, left, right, slow); + break; + case Token::SHL: + __ SmiShiftLeft(left, left, right); + break; + default: + UNREACHABLE(); + } + __ movq(rax, left); + break; + + default: + UNREACHABLE(); + break; + } + + // 4. Emit return of result in rax. + GenerateReturn(masm); + + // 5. For some operations emit inline code to perform floating point + // operations on known smis (e.g., if the result of the operation + // overflowed the smi range). + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: { + ASSERT(use_fp_on_smis.is_linked()); + __ bind(&use_fp_on_smis); + if (op_ == Token::DIV) { + __ movq(rdx, rax); + __ movq(rax, rbx); + } + // left is rdx, right is rax. 
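The smi checks throughout this code rely on the x64 tagging scheme this port assumes: the 32-bit payload lives in the upper half of the 64-bit word and the low bit (kSmiTagMask) is clear for smis and set for heap-object pointers. That is also why the BIT_OR case can OR the raw words first and test the low bit afterwards: the result is only smi-tagged if both inputs were. A minimal sketch of that encoding (not V8 code):

#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;        // kSmiValueSize == 32 on x64
constexpr uint64_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

uint64_t Integer32ToSmi(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShift;
}

int32_t SmiToInteger32(uint64_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);
}

int main() {
  uint64_t a = Integer32ToSmi(-7);
  uint64_t b = Integer32ToSmi(5);
  assert((a & kSmiTagMask) == 0 && SmiToInteger32(a) == -7);
  assert(((a | b) & kSmiTagMask) == 0);   // OR of two smis is still smi-tagged
  return 0;
}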
+ __ AllocateHeapNumber(rbx, rcx, slow); + FloatingPointHelper::LoadSSE2SmiOperands(masm); + switch (op_) { + case Token::ADD: __ addsd(xmm0, xmm1); break; + case Token::SUB: __ subsd(xmm0, xmm1); break; + case Token::MUL: __ mulsd(xmm0, xmm1); break; + case Token::DIV: __ divsd(xmm0, xmm1); break; + default: UNREACHABLE(); + } + __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0); + __ movq(rax, rbx); + GenerateReturn(masm); + } + default: + break; + } + + // 6. Non-smi operands, fall out to the non-smi code with the operands in + // rdx and rax. + Comment done_comment(masm, "-- Enter non-smi code"); + __ bind(¬_smis); + + switch (op_) { + case Token::DIV: + case Token::MOD: + // Operands are in rax, rbx at this point. + __ movq(rdx, rax); + __ movq(rax, rbx); + break; + + case Token::BIT_OR: + // Right operand is saved in rcx and rax was destroyed by the smi + // operation. + __ movq(rax, rcx); + break; + + default: + break; + } +} + + +void GenericBinaryOpStub::Generate(MacroAssembler* masm) { + Label call_runtime; + + if (ShouldGenerateSmiCode()) { + GenerateSmiCode(masm, &call_runtime); + } else if (op_ != Token::MOD) { + if (!HasArgsInRegisters()) { + GenerateLoadArguments(masm); + } + } + // Floating point case. + if (ShouldGenerateFPCode()) { + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: { + if (runtime_operands_type_ == BinaryOpIC::DEFAULT && + HasSmiCodeInStub()) { + // Execution reaches this point when the first non-smi argument occurs + // (and only if smi code is generated). This is the right moment to + // patch to HEAP_NUMBERS state. The transition is attempted only for + // the four basic operations. The stub stays in the DEFAULT state + // forever for all other operations (also if smi code is skipped). + GenerateTypeTransition(masm); + break; + } + + Label not_floats; + // rax: y + // rdx: x + if (static_operands_type_.IsNumber()) { + if (FLAG_debug_code) { + // Assert at runtime that inputs are only numbers. + __ AbortIfNotNumber(rdx); + __ AbortIfNotNumber(rax); + } + FloatingPointHelper::LoadSSE2NumberOperands(masm); + } else { + FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime); + } + + switch (op_) { + case Token::ADD: __ addsd(xmm0, xmm1); break; + case Token::SUB: __ subsd(xmm0, xmm1); break; + case Token::MUL: __ mulsd(xmm0, xmm1); break; + case Token::DIV: __ divsd(xmm0, xmm1); break; + default: UNREACHABLE(); + } + // Allocate a heap number, if needed. + Label skip_allocation; + OverwriteMode mode = mode_; + if (HasArgsReversed()) { + if (mode == OVERWRITE_RIGHT) { + mode = OVERWRITE_LEFT; + } else if (mode == OVERWRITE_LEFT) { + mode = OVERWRITE_RIGHT; + } + } + switch (mode) { + case OVERWRITE_LEFT: + __ JumpIfNotSmi(rdx, &skip_allocation); + __ AllocateHeapNumber(rbx, rcx, &call_runtime); + __ movq(rdx, rbx); + __ bind(&skip_allocation); + __ movq(rax, rdx); + break; + case OVERWRITE_RIGHT: + // If the argument in rax is already an object, we skip the + // allocation of a heap number. + __ JumpIfNotSmi(rax, &skip_allocation); + // Fall through! + case NO_OVERWRITE: + // Allocate a heap number for the result. Keep rax and rdx intact + // for the possible runtime call. 
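The use_fp_on_smis blocks above cover the case where the smi arithmetic overflows 32 bits: the operands are reloaded as doubles via LoadSSE2SmiOperands, the operation is redone in SSE2, and the result is boxed in a freshly allocated HeapNumber. A sketch of that decision for ADD (SmiAddOrDouble is an illustrative name, not V8 API):

#include <cassert>
#include <cstdint>

bool SmiAddOrDouble(int32_t left, int32_t right, int32_t* smi, double* boxed) {
  int64_t wide = static_cast<int64_t>(left) + right;
  if (wide >= INT32_MIN && wide <= INT32_MAX) {   // still fits the smi payload
    *smi = static_cast<int32_t>(wide);
    return true;
  }
  *boxed = static_cast<double>(left) + static_cast<double>(right);  // addsd
  return false;
}

int main() {
  int32_t s; double d;
  assert(SmiAddOrDouble(1, 2, &s, &d) && s == 3);
  assert(!SmiAddOrDouble(INT32_MAX, 1, &s, &d) && d == 2147483648.0);
  return 0;
}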
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime); + __ movq(rax, rbx); + __ bind(&skip_allocation); + break; + default: UNREACHABLE(); + } + __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); + GenerateReturn(masm); + __ bind(¬_floats); + if (runtime_operands_type_ == BinaryOpIC::DEFAULT && + !HasSmiCodeInStub()) { + // Execution reaches this point when the first non-number argument + // occurs (and only if smi code is skipped from the stub, otherwise + // the patching has already been done earlier in this case branch). + // A perfect moment to try patching to STRINGS for ADD operation. + if (op_ == Token::ADD) { + GenerateTypeTransition(masm); + } } break; } - case Token::SHR: { - int shift_amount = right & 0x1F; - unsigned int unsigned_left = left; - unsigned_left >>= shift_amount; - if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) { - answer_object = Smi::FromInt(unsigned_left); - } + case Token::MOD: { + // For MOD we go directly to runtime in the non-smi case. break; } - case Token::SAR: { - int shift_amount = right & 0x1F; - unsigned int unsigned_left = left; - if (left < 0) { - // Perform arithmetic shift of a negative number by - // complementing number, logical shifting, complementing again. - unsigned_left = ~unsigned_left; - unsigned_left >>= shift_amount; - unsigned_left = ~unsigned_left; + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SAR: + case Token::SHL: + case Token::SHR: { + Label skip_allocation, non_smi_shr_result; + Register heap_number_map = r9; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + if (static_operands_type_.IsNumber()) { + if (FLAG_debug_code) { + // Assert at runtime that inputs are only numbers. + __ AbortIfNotNumber(rdx); + __ AbortIfNotNumber(rax); + } + FloatingPointHelper::LoadNumbersAsIntegers(masm); } else { - unsigned_left >>= shift_amount; + FloatingPointHelper::LoadAsIntegers(masm, + &call_runtime, + heap_number_map); } - ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left))); - answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left)); + switch (op_) { + case Token::BIT_OR: __ orl(rax, rcx); break; + case Token::BIT_AND: __ andl(rax, rcx); break; + case Token::BIT_XOR: __ xorl(rax, rcx); break; + case Token::SAR: __ sarl_cl(rax); break; + case Token::SHL: __ shll_cl(rax); break; + case Token::SHR: { + __ shrl_cl(rax); + // Check if result is negative. This can only happen for a shift + // by zero. + __ testl(rax, rax); + __ j(negative, &non_smi_shr_result); + break; + } + default: UNREACHABLE(); + } + + STATIC_ASSERT(kSmiValueSize == 32); + // Tag smi result and return. + __ Integer32ToSmi(rax, rax); + GenerateReturn(masm); + + // All bit-ops except SHR return a signed int32 that can be + // returned immediately as a smi. + // We might need to allocate a HeapNumber if we shift a negative + // number right by zero (i.e., convert to UInt32). + if (op_ == Token::SHR) { + ASSERT(non_smi_shr_result.is_linked()); + __ bind(&non_smi_shr_result); + // Allocate a heap number if needed. + __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). + switch (mode_) { + case OVERWRITE_LEFT: + case OVERWRITE_RIGHT: + // If the operand was an object, we skip the + // allocation of a heap number. + __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ? + 1 * kPointerSize : 2 * kPointerSize)); + __ JumpIfNotSmi(rax, &skip_allocation); + // Fall through! + case NO_OVERWRITE: + // Allocate heap number in new space. 
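The non_smi_shr_result path exists because a logical right shift produces an unsigned 32-bit value; only a shift count of zero can leave bit 31 set, and such a value no longer fits the signed 32-bit smi payload, so it has to be boxed as a HeapNumber by the allocation code that follows. A small sketch of the check (ShrFitsSmi is an illustrative name, not V8 API):

#include <cassert>
#include <cstdint>

// Returns true if the unsigned shift result can be returned as a smi (int32).
bool ShrFitsSmi(int32_t value, uint32_t shift, uint32_t* result) {
  *result = static_cast<uint32_t>(value) >> (shift & 0x1f);
  return static_cast<int32_t>(*result) >= 0;   // negative => needs a HeapNumber
}

int main() {
  uint32_t r;
  assert(ShrFitsSmi(-1, 1, &r) && r == 0x7fffffffu);   // shift >= 1 always fits
  assert(!ShrFitsSmi(-1, 0, &r) && r == 0xffffffffu);  // shift by zero may not
  return 0;
}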
+ // Not using AllocateHeapNumber macro in order to reuse + // already loaded heap_number_map. + __ AllocateInNewSpace(HeapNumber::kSize, + rax, + rcx, + no_reg, + &call_runtime, + TAG_OBJECT); + // Set the map. + if (FLAG_debug_code) { + __ AbortIfNotRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + } + __ movq(FieldOperand(rax, HeapObject::kMapOffset), + heap_number_map); + __ bind(&skip_allocation); + break; + default: UNREACHABLE(); + } + // Store the result in the HeapNumber and return. + __ cvtqsi2sd(xmm0, rbx); + __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); + GenerateReturn(masm); + } + break; } + default: UNREACHABLE(); break; + } + } + + // If all else fails, use the runtime system to get the correct + // result. If arguments was passed in registers now place them on the + // stack in the correct order below the return address. + __ bind(&call_runtime); + + if (HasArgsInRegisters()) { + GenerateRegisterArgsPush(masm); + } + + switch (op_) { + case Token::ADD: { + // Registers containing left and right operands respectively. + Register lhs, rhs; + + if (HasArgsReversed()) { + lhs = rax; + rhs = rdx; + } else { + lhs = rdx; + rhs = rax; + } + + // Test for string arguments before calling runtime. + Label not_strings, both_strings, not_string1, string1, string1_smi2; + + // If this stub has already generated FP-specific code then the arguments + // are already in rdx and rax. + if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { + GenerateLoadArguments(masm); + } + + Condition is_smi; + is_smi = masm->CheckSmi(lhs); + __ j(is_smi, ¬_string1); + __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8); + __ j(above_equal, ¬_string1); + + // First argument is a a string, test second. + is_smi = masm->CheckSmi(rhs); + __ j(is_smi, &string1_smi2); + __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9); + __ j(above_equal, &string1); + + // First and second argument are strings. + StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + __ TailCallStub(&string_add_stub); + + __ bind(&string1_smi2); + // First argument is a string, second is a smi. Try to lookup the number + // string for the smi in the number string cache. + NumberToStringStub::GenerateLookupNumberStringCache( + masm, rhs, rbx, rcx, r8, true, &string1); + + // Replace second argument on stack and tailcall string add stub to make + // the result. + __ movq(Operand(rsp, 1 * kPointerSize), rbx); + __ TailCallStub(&string_add_stub); + + // Only first argument is a string. + __ bind(&string1); + __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); + + // First argument was not a string, test second. + __ bind(¬_string1); + is_smi = masm->CheckSmi(rhs); + __ j(is_smi, ¬_strings); + __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs); + __ j(above_equal, ¬_strings); + + // Only second argument is a string. + __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); + + __ bind(¬_strings); + // Neither argument is a string. 
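Before falling back to the generic ADD builtin, the Token::ADD runtime case above sorts the operands by type: a string on the left wins, a smi on the right is first converted through the number-string cache, and a lone string on either side goes through the STRING_ADD_LEFT / STRING_ADD_RIGHT builtins. A rough decision-table sketch; the enum and return strings are illustrative stand-ins, not V8 API:

#include <string>

enum Kind { kSmi, kString, kOther };

std::string AddFallback(Kind lhs, Kind rhs) {
  if (lhs == kString) {
    if (rhs == kSmi) return "number-string cache, then StringAddStub";
    if (rhs == kString) return "StringAddStub, checks already done";
    return "STRING_ADD_LEFT builtin";
  }
  if (rhs == kString) return "STRING_ADD_RIGHT builtin";
  return "generic ADD builtin";
}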
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + break; + } + case Token::SUB: + __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); + break; + case Token::MUL: + __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + break; + case Token::DIV: + __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + break; + case Token::MOD: + __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + break; + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + break; default: UNREACHABLE(); - break; } - if (answer_object == Heap::undefined_value()) { - return false; +} + + +void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { + ASSERT(!HasArgsInRegisters()); + __ movq(rax, Operand(rsp, 1 * kPointerSize)); + __ movq(rdx, Operand(rsp, 2 * kPointerSize)); +} + + +void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { + // If arguments are not passed in registers remove them from the stack before + // returning. + if (!HasArgsInRegisters()) { + __ ret(2 * kPointerSize); // Remove both operands + } else { + __ ret(0); } - frame_->Push(Handle<Object>(answer_object)); - return true; } -// End of CodeGenerator implementation. +void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { + ASSERT(HasArgsInRegisters()); + __ pop(rcx); + if (HasArgsReversed()) { + __ push(rax); + __ push(rdx); + } else { + __ push(rdx); + __ push(rax); + } + __ push(rcx); +} + + +void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { + Label get_result; + + // Ensure the operands are on the stack. + if (HasArgsInRegisters()) { + GenerateRegisterArgsPush(masm); + } + + // Left and right arguments are already on stack. + __ pop(rcx); // Save the return address. + + // Push this stub's key. + __ Push(Smi::FromInt(MinorKey())); + + // Although the operation and the type info are encoded into the key, + // the encoding is opaque, so push them too. + __ Push(Smi::FromInt(op_)); + + __ Push(Smi::FromInt(runtime_operands_type_)); + + __ push(rcx); // The return address. + + // Perform patching to an appropriate fast case and return the result. + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), + 5, + 1); +} + + +Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { + GenericBinaryOpStub stub(key, type_info); + return stub.GetCode(); +} + void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Input on stack: @@ -8504,6 +9240,148 @@ void IntegerConvert(MacroAssembler* masm, } +// Input: rdx, rax are the left and right objects of a bit op. +// Output: rax, rcx are left and right integers for a bit op. +void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { + // Check float operands. + Label done; + Label rax_is_smi; + Label rax_is_object; + Label rdx_is_object; + + __ JumpIfNotSmi(rdx, &rdx_is_object); + __ SmiToInteger32(rdx, rdx); + __ JumpIfSmi(rax, &rax_is_smi); + + __ bind(&rax_is_object); + IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx. + __ jmp(&done); + + __ bind(&rdx_is_object); + IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx. 
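GenerateRegisterArgsPush above has to get register-held operands underneath the return address before a runtime call: it pops the return address into rcx, pushes the two operands (respecting a possible reversal), and pushes the return address back on top. A sketch with an explicit stack, where the back of the vector is the top of the stack (not V8 code):

#include <cassert>
#include <cstdint>
#include <vector>

void RegisterArgsPush(std::vector<uint64_t>& stack,   // back() is top of stack
                      uint64_t rdx, uint64_t rax, bool reversed) {
  uint64_t ret_addr = stack.back(); stack.pop_back(); // pop rcx <- return addr
  if (reversed) { stack.push_back(rax); stack.push_back(rdx); }
  else          { stack.push_back(rdx); stack.push_back(rax); }
  stack.push_back(ret_addr);                          // push return addr back
}

int main() {
  std::vector<uint64_t> stack = {0xBEEF};             // just the return address
  RegisterArgsPush(stack, /*rdx=*/1, /*rax=*/2, /*reversed=*/false);
  assert(stack.size() == 3 && stack.back() == 0xBEEF);
  assert(stack[0] == 1 && stack[1] == 2);             // left below right below ret
  return 0;
}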
+ __ JumpIfNotSmi(rax, &rax_is_object); + __ bind(&rax_is_smi); + __ SmiToInteger32(rcx, rax); + + __ bind(&done); + __ movl(rax, rdx); +} + + +// Input: rdx, rax are the left and right objects of a bit op. +// Output: rax, rcx are left and right integers for a bit op. +void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, + Label* conversion_failure, + Register heap_number_map) { + // Check float operands. + Label arg1_is_object, check_undefined_arg1; + Label arg2_is_object, check_undefined_arg2; + Label load_arg2, done; + + __ JumpIfNotSmi(rdx, &arg1_is_object); + __ SmiToInteger32(rdx, rdx); + __ jmp(&load_arg2); + + // If the argument is undefined it converts to zero (ECMA-262, section 9.5). + __ bind(&check_undefined_arg1); + __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); + __ j(not_equal, conversion_failure); + __ movl(rdx, Immediate(0)); + __ jmp(&load_arg2); + + __ bind(&arg1_is_object); + __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); + __ j(not_equal, &check_undefined_arg1); + // Get the untagged integer version of the edx heap number in rcx. + IntegerConvert(masm, rdx, rdx); + + // Here rdx has the untagged integer, rax has a Smi or a heap number. + __ bind(&load_arg2); + // Test if arg2 is a Smi. + __ JumpIfNotSmi(rax, &arg2_is_object); + __ SmiToInteger32(rax, rax); + __ movl(rcx, rax); + __ jmp(&done); + + // If the argument is undefined it converts to zero (ECMA-262, section 9.5). + __ bind(&check_undefined_arg2); + __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); + __ j(not_equal, conversion_failure); + __ movl(rcx, Immediate(0)); + __ jmp(&done); + + __ bind(&arg2_is_object); + __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); + __ j(not_equal, &check_undefined_arg2); + // Get the untagged integer version of the eax heap number in ecx. + IntegerConvert(masm, rcx, rax); + __ bind(&done); + __ movl(rax, rdx); +} + + +void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { + __ SmiToInteger32(kScratchRegister, rdx); + __ cvtlsi2sd(xmm0, kScratchRegister); + __ SmiToInteger32(kScratchRegister, rax); + __ cvtlsi2sd(xmm1, kScratchRegister); +} + + +void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) { + Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done; + // Load operand in rdx into xmm0. + __ JumpIfSmi(rdx, &load_smi_rdx); + __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); + // Load operand in rax into xmm1. + __ JumpIfSmi(rax, &load_smi_rax); + __ bind(&load_nonsmi_rax); + __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi_rdx); + __ SmiToInteger32(kScratchRegister, rdx); + __ cvtlsi2sd(xmm0, kScratchRegister); + __ JumpIfNotSmi(rax, &load_nonsmi_rax); + + __ bind(&load_smi_rax); + __ SmiToInteger32(kScratchRegister, rax); + __ cvtlsi2sd(xmm1, kScratchRegister); + + __ bind(&done); +} + + +void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, + Label* not_numbers) { + Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done; + // Load operand in rdx into xmm0, or branch to not_numbers. + __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex); + __ JumpIfSmi(rdx, &load_smi_rdx); + __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx); + __ j(not_equal, not_numbers); // Argument in rdx is not a number. + __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); + // Load operand in rax into xmm1, or branch to not_numbers. 
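LoadAsIntegers and LoadNumbersAsIntegers feed the bit-op code, so heap numbers go through IntegerConvert, i.e. the ToInt32 conversion of ECMA-262 section 9.5 (with undefined mapping to zero, handled separately above). A rough sketch of that conversion for finite and non-finite doubles (not V8 code):

#include <cmath>
#include <cstdint>

int32_t ToInt32(double d) {
  if (!std::isfinite(d)) return 0;                    // NaN, +/-Infinity -> 0
  double truncated = std::trunc(d);
  // Reduce modulo 2^32, then reinterpret as a signed 32-bit value.
  double m = std::fmod(truncated, 4294967296.0);
  uint32_t bits = static_cast<uint32_t>(m < 0 ? m + 4294967296.0 : m);
  return static_cast<int32_t>(bits);
}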
+ __ JumpIfSmi(rax, &load_smi_rax); + + __ bind(&load_nonsmi_rax); + __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx); + __ j(not_equal, not_numbers); + __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi_rdx); + __ SmiToInteger32(kScratchRegister, rdx); + __ cvtlsi2sd(xmm0, kScratchRegister); + __ JumpIfNotSmi(rax, &load_nonsmi_rax); + + __ bind(&load_smi_rax); + __ SmiToInteger32(kScratchRegister, rax); + __ cvtlsi2sd(xmm1, kScratchRegister); + __ bind(&done); +} + + void GenericUnaryOpStub::Generate(MacroAssembler* masm) { Label slow, done; @@ -8588,6 +9466,172 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { } +void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { + // The key is in rdx and the parameter count is in rax. + + // The displacement is used for skipping the frame pointer on the + // stack. It is the offset of the last parameter (if any) relative + // to the frame pointer. + static const int kDisplacement = 1 * kPointerSize; + + // Check that the key is a smi. + Label slow; + __ JumpIfNotSmi(rdx, &slow); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor; + __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset), + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ j(equal, &adaptor); + + // Check index against formal parameters count limit passed in + // through register rax. Use unsigned comparison to get negative + // check for free. + __ cmpq(rdx, rax); + __ j(above_equal, &slow); + + // Read the argument from the stack and return it. + SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2); + __ lea(rbx, Operand(rbp, index.reg, index.scale, 0)); + index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); + __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); + __ Ret(); + + // Arguments adaptor case: Check index against actual arguments + // limit found in the arguments adaptor frame. Use unsigned + // comparison to get negative check for free. + __ bind(&adaptor); + __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ cmpq(rdx, rcx); + __ j(above_equal, &slow); + + // Read the argument from the stack and return it. + index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2); + __ lea(rbx, Operand(rbx, index.reg, index.scale, 0)); + index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); + __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); + __ Ret(); + + // Slow-case: Handle non-smi or out-of-bounds access to arguments + // by calling the runtime system. + __ bind(&slow); + __ pop(rbx); // Return address. + __ push(rdx); + __ push(rbx); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); +} + + +void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { + // rsp[0] : return address + // rsp[8] : number of parameters + // rsp[16] : receiver displacement + // rsp[24] : function + + // The displacement is used for skipping the return address and the + // frame pointer on the stack. It is the offset of the last + // parameter (if any) relative to the frame pointer. + static const int kDisplacement = 2 * kPointerSize; + + // Check if the calling frame is an arguments adaptor frame. 
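GenerateReadElement above uses an unsigned comparison of the key against the parameter count "to get the negative check for free": reinterpreting a signed index as unsigned makes every negative value larger than any valid count, so a single above-or-equal branch rejects both out-of-range and negative keys. A small sketch (not V8 code):

#include <cassert>
#include <cstdint>

bool InBounds(int32_t key, uint32_t count) {
  return static_cast<uint32_t>(key) < count;   // one compare, two checks
}

int main() {
  assert(InBounds(0, 3) && InBounds(2, 3));
  assert(!InBounds(3, 3));
  assert(!InBounds(-1, 3));                    // -1 wraps to 0xffffffff
  return 0;
}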
+ Label adaptor_frame, try_allocate, runtime; + __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset), + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ j(equal, &adaptor_frame); + + // Get the length from the frame. + __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize)); + __ jmp(&try_allocate); + + // Patch the arguments.length and the parameters pointer. + __ bind(&adaptor_frame); + __ SmiToInteger32(rcx, + Operand(rdx, + ArgumentsAdaptorFrameConstants::kLengthOffset)); + // Space on stack must already hold a smi. + __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx); + // Do not clobber the length index for the indexing operation since + // it is used compute the size for allocation later. + __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement)); + __ movq(Operand(rsp, 2 * kPointerSize), rdx); + + // Try the new space allocation. Start out with computing the size of + // the arguments object and the elements array. + Label add_arguments_object; + __ bind(&try_allocate); + __ testl(rcx, rcx); + __ j(zero, &add_arguments_object); + __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); + __ bind(&add_arguments_object); + __ addl(rcx, Immediate(Heap::kArgumentsObjectSize)); + + // Do the allocation of both objects in one go. + __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); + + // Get the arguments boilerplate from the current (global) context. + int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); + __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset)); + __ movq(rdi, Operand(rdi, offset)); + + // Copy the JS object part. + STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); + __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize)); + __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize)); + __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize)); + __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister); + __ movq(FieldOperand(rax, 1 * kPointerSize), rdx); + __ movq(FieldOperand(rax, 2 * kPointerSize), rbx); + + // Setup the callee in-object property. + ASSERT(Heap::arguments_callee_index == 0); + __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize)); + __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister); + + // Get the length (smi tagged) and set that as an in-object property too. + ASSERT(Heap::arguments_length_index == 1); + __ movq(rcx, Operand(rsp, 1 * kPointerSize)); + __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); + + // If there are no actual arguments, we're done. + Label done; + __ SmiTest(rcx); + __ j(zero, &done); + + // Get the parameters pointer from the stack and untag the length. + __ movq(rdx, Operand(rsp, 2 * kPointerSize)); + + // Setup the elements pointer in the allocated arguments object and + // initialize the header in the elements fixed array. + __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); + __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); + __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); + __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); + __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); + __ SmiToInteger32(rcx, rcx); // Untag length for the loop below. + + // Copy the fixed array slots. + Label loop; + __ bind(&loop); + __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. 
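The try_allocate block above computes one allocation that covers both the arguments object and, when there is at least one argument, its FixedArray backing store. A back-of-the-envelope sketch of that size computation; the concrete constants below are assumptions for illustration, not V8's actual values:

#include <cassert>
#include <cstddef>

constexpr size_t kPointerSize = 8;                         // x64
constexpr size_t kArgumentsObjectSize = 4 * kPointerSize;  // assumed value
constexpr size_t kFixedArrayHeaderSize = 2 * kPointerSize; // assumed value

size_t ArgumentsAllocationSize(size_t argc) {
  size_t size = 0;
  if (argc > 0) {                        // no elements array for zero arguments
    size = kFixedArrayHeaderSize + argc * kPointerSize;
  }
  return size + kArgumentsObjectSize;    // the JSObject part is always allocated
}

int main() {
  assert(ArgumentsAllocationSize(0) == kArgumentsObjectSize);
  assert(ArgumentsAllocationSize(2) ==
         kArgumentsObjectSize + kFixedArrayHeaderSize + 2 * kPointerSize);
  return 0;
}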
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); + __ addq(rdi, Immediate(kPointerSize)); + __ subq(rdx, Immediate(kPointerSize)); + __ decl(rcx); + __ j(not_zero, &loop); + + // Return and remove the on-stack parameters. + __ bind(&done); + __ ret(3 * kPointerSize); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); +} + + void RegExpExecStub::Generate(MacroAssembler* masm) { // Just jump directly to runtime if native RegExp is not selected at compile // time or if regexp entry in generated code is turned off runtime switch or @@ -8935,18 +9979,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { } -void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm, - Register hash, - Register mask) { - __ and_(hash, mask); - // Each entry in string cache consists of two pointer sized fields, - // but times_twice_pointer_size (multiplication by 16) scale factor - // is not supported by addrmode on x64 platform. - // So we have to premultiply entry index before lookup. - __ shl(hash, Immediate(kPointerSizeLog2 + 1)); -} - - void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, Register object, Register result, @@ -9026,6 +10058,18 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, } +void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm, + Register hash, + Register mask) { + __ and_(hash, mask); + // Each entry in string cache consists of two pointer sized fields, + // but times_twice_pointer_size (multiplication by 16) scale factor + // is not supported by addrmode on x64 platform. + // So we have to premultiply entry index before lookup. + __ shl(hash, Immediate(kPointerSizeLog2 + 1)); +} + + void NumberToStringStub::Generate(MacroAssembler* masm) { Label runtime; @@ -9041,12 +10085,6 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { } -void RecordWriteStub::Generate(MacroAssembler* masm) { - masm->RecordWriteHelper(object_, addr_, scratch_); - masm->ret(0); -} - - static int NegativeComparisonResult(Condition cc) { ASSERT(cc != equal); ASSERT((cc == less) || (cc == less_equal) @@ -9257,7 +10295,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // A smi plus a heap object has the low bit set, a heap object plus // a heap object has the low bit clear. ASSERT_EQ(0, kSmiTag); - //ASSERT_EQ(V8_UINT64_C(1), kSmiTagMask); + ASSERT_EQ(static_cast<int64_t>(1), kSmiTagMask); __ lea(rcx, Operand(rax, rdx, times_1, 0)); __ testb(rcx, Immediate(kSmiTagMask)); __ j(not_zero, ¬_both_objects); @@ -9322,280 +10360,73 @@ void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, } -// Call the function just below TOS on the stack with the given -// arguments. The receiver is the TOS. -void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, - CallFunctionFlags flags, - int position) { - // Push the arguments ("left-to-right") on the stack. - int arg_count = args->length(); - for (int i = 0; i < arg_count; i++) { - Load(args->at(i)); - frame_->SpillTop(); - } - - // Record the position for debugging purposes. - CodeForSourcePosition(position); +void StackCheckStub::Generate(MacroAssembler* masm) { + // Because builtins always remove the receiver from the stack, we + // have to fake one to avoid underflowing the stack. The receiver + // must be inserted below the return address on the stack so we + // temporarily store that in a register. 
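The not_both_objects check in CompareStub above relies on a tag-parity trick, which is what the re-enabled ASSERT on kSmiTagMask documents: with kSmiTag == 0 and heap objects carrying a low tag bit of 1, adding the two raw words leaves the low bit set exactly when one operand is a smi and the other a heap object, so a single lea plus testb covers both cases. A small sketch (not V8 code):

#include <cassert>
#include <cstdint>

constexpr uint64_t kSmiTagMask = 1;
constexpr uint64_t kHeapObjectTag = 1;

bool ExactlyOneSmi(uint64_t a, uint64_t b) {
  return ((a + b) & kSmiTagMask) != 0;   // lea rcx, [rax + rdx]; testb rcx, 1
}

int main() {
  uint64_t smi  = 42ull << 32;                 // smis have a clear low bit
  uint64_t obj1 = 0x1000 | kHeapObjectTag;     // heap pointers are tagged
  uint64_t obj2 = 0x2000 | kHeapObjectTag;
  assert(ExactlyOneSmi(smi, obj1));
  assert(!ExactlyOneSmi(obj1, obj2));
  assert(!ExactlyOneSmi(smi, smi));
  return 0;
}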
+ __ pop(rax); + __ Push(Smi::FromInt(0)); + __ push(rax); - // Use the shared code stub to call the function. - InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; - CallFunctionStub call_function(arg_count, in_loop, flags); - Result answer = frame_->CallStub(&call_function, arg_count + 1); - // Restore context and replace function on the stack with the - // result of the stub invocation. - frame_->RestoreContextRegister(); - frame_->SetElementAt(0, &answer); + // Do tail-call to runtime routine. + __ TailCallRuntime(Runtime::kStackGuard, 1, 1); } -void InstanceofStub::Generate(MacroAssembler* masm) { - // Implements "value instanceof function" operator. - // Expected input state: - // rsp[0] : return address - // rsp[1] : function pointer - // rsp[2] : value - // Returns a bitwise zero to indicate that the value - // is and instance of the function and anything else to - // indicate that the value is not an instance. - - // Get the object - go slow case if it's a smi. +void CallFunctionStub::Generate(MacroAssembler* masm) { Label slow; - __ movq(rax, Operand(rsp, 2 * kPointerSize)); - __ JumpIfSmi(rax, &slow); - - // Check that the left hand is a JS object. Leave its map in rax. - __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); - __ j(below, &slow); - __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE); - __ j(above, &slow); - - // Get the prototype of the function. - __ movq(rdx, Operand(rsp, 1 * kPointerSize)); - // rdx is function, rax is map. - - // Look up the function and the map in the instanceof cache. - Label miss; - __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); - __ j(not_equal, &miss); - __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex); - __ j(not_equal, &miss); - __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); - __ ret(2 * kPointerSize); - - __ bind(&miss); - __ TryGetFunctionPrototype(rdx, rbx, &slow); - - // Check that the function prototype is a JS object. - __ JumpIfSmi(rbx, &slow); - __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister); - __ j(below, &slow); - __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); - __ j(above, &slow); - - // Register mapping: - // rax is object map. - // rdx is function. - // rbx is function prototype. - __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); - __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); - - __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset)); - - // Loop through the prototype chain looking for the function prototype. - Label loop, is_instance, is_not_instance; - __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex); - __ bind(&loop); - __ cmpq(rcx, rbx); - __ j(equal, &is_instance); - __ cmpq(rcx, kScratchRegister); - // The code at is_not_instance assumes that kScratchRegister contains a - // non-zero GCable value (the null object in this case). - __ j(equal, &is_not_instance); - __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset)); - __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset)); - __ jmp(&loop); - - __ bind(&is_instance); - __ xorl(rax, rax); - // Store bitwise zero in the cache. This is a Smi in GC terms. - ASSERT_EQ(0, kSmiTag); - __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); - __ ret(2 * kPointerSize); - - __ bind(&is_not_instance); - // We have to store a non-zero value in the cache. - __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); - __ ret(2 * kPointerSize); - - // Slow-case: Go through the JavaScript implementation. 
- __ bind(&slow); - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); -} - - -void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { - // rsp[0] : return address - // rsp[8] : number of parameters - // rsp[16] : receiver displacement - // rsp[24] : function - - // The displacement is used for skipping the return address and the - // frame pointer on the stack. It is the offset of the last - // parameter (if any) relative to the frame pointer. - static const int kDisplacement = 2 * kPointerSize; - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor_frame, try_allocate, runtime; - __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset), - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ j(equal, &adaptor_frame); - - // Get the length from the frame. - __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize)); - __ jmp(&try_allocate); - - // Patch the arguments.length and the parameters pointer. - __ bind(&adaptor_frame); - __ SmiToInteger32(rcx, - Operand(rdx, - ArgumentsAdaptorFrameConstants::kLengthOffset)); - // Space on stack must already hold a smi. - __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx); - // Do not clobber the length index for the indexing operation since - // it is used compute the size for allocation later. - __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement)); - __ movq(Operand(rsp, 2 * kPointerSize), rdx); - - // Try the new space allocation. Start out with computing the size of - // the arguments object and the elements array. - Label add_arguments_object; - __ bind(&try_allocate); - __ testl(rcx, rcx); - __ j(zero, &add_arguments_object); - __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); - __ bind(&add_arguments_object); - __ addl(rcx, Immediate(Heap::kArgumentsObjectSize)); - - // Do the allocation of both objects in one go. - __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); - - // Get the arguments boilerplate from the current (global) context. - int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); - __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset)); - __ movq(rdi, Operand(rdi, offset)); - - // Copy the JS object part. - STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); - __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize)); - __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize)); - __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize)); - __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister); - __ movq(FieldOperand(rax, 1 * kPointerSize), rdx); - __ movq(FieldOperand(rax, 2 * kPointerSize), rbx); - - // Setup the callee in-object property. - ASSERT(Heap::arguments_callee_index == 0); - __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize)); - __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister); - - // Get the length (smi tagged) and set that as an in-object property too. - ASSERT(Heap::arguments_length_index == 1); - __ movq(rcx, Operand(rsp, 1 * kPointerSize)); - __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); - - // If there are no actual arguments, we're done. - Label done; - __ SmiTest(rcx); - __ j(zero, &done); - - // Get the parameters pointer from the stack and untag the length. 
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); - // Setup the elements pointer in the allocated arguments object and - // initialize the header in the elements fixed array. - __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); - __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); - __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); - __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); - __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); - __ SmiToInteger32(rcx, rcx); // Untag length for the loop below. - - // Copy the fixed array slots. - Label loop; - __ bind(&loop); - __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. - __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); - __ addq(rdi, Immediate(kPointerSize)); - __ subq(rdx, Immediate(kPointerSize)); - __ decl(rcx); - __ j(not_zero, &loop); - - // Return and remove the on-stack parameters. - __ bind(&done); - __ ret(3 * kPointerSize); - - // Do the runtime call to allocate the arguments object. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); -} - - -void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { - // The key is in rdx and the parameter count is in rax. + // If the receiver might be a value (string, number or boolean) check for this + // and box it if it is. + if (ReceiverMightBeValue()) { + // Get the receiver from the stack. + // +1 ~ return address + Label receiver_is_value, receiver_is_js_object; + __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize)); - // The displacement is used for skipping the frame pointer on the - // stack. It is the offset of the last parameter (if any) relative - // to the frame pointer. - static const int kDisplacement = 1 * kPointerSize; + // Check if receiver is a smi (which is a number value). + __ JumpIfSmi(rax, &receiver_is_value); - // Check that the key is a smi. - Label slow; - __ JumpIfNotSmi(rdx, &slow); + // Check if the receiver is a valid JS object. + __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi); + __ j(above_equal, &receiver_is_js_object); - // Check if the calling frame is an arguments adaptor frame. - Label adaptor; - __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset), - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ j(equal, &adaptor); + // Call the runtime to box the value. + __ bind(&receiver_is_value); + __ EnterInternalFrame(); + __ push(rax); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ LeaveInternalFrame(); + __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax); - // Check index against formal parameters count limit passed in - // through register rax. Use unsigned comparison to get negative - // check for free. - __ cmpq(rdx, rax); - __ j(above_equal, &slow); + __ bind(&receiver_is_js_object); + } - // Read the argument from the stack and return it. - SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2); - __ lea(rbx, Operand(rbp, index.reg, index.scale, 0)); - index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); - __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); - __ Ret(); + // Get the function to call from the stack. + // +2 ~ receiver, return address + __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize)); - // Arguments adaptor case: Check index against actual arguments - // limit found in the arguments adaptor frame. Use unsigned - // comparison to get negative check for free. 
- __ bind(&adaptor); - __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ cmpq(rdx, rcx); - __ j(above_equal, &slow); + // Check that the function really is a JavaScript function. + __ JumpIfSmi(rdi, &slow); + // Goto slow case if we do not have a function. + __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); + __ j(not_equal, &slow); - // Read the argument from the stack and return it. - index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2); - __ lea(rbx, Operand(rbx, index.reg, index.scale, 0)); - index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); - __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); - __ Ret(); + // Fast-case: Just invoke the function. + ParameterCount actual(argc_); + __ InvokeFunction(rdi, actual, JUMP_FUNCTION); - // Slow-case: Handle non-smi or out-of-bounds access to arguments - // by calling the runtime system. + // Slow-case: Non-function called. __ bind(&slow); - __ pop(rbx); // Return address. - __ push(rdx); - __ push(rbx); - __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi); + __ Set(rax, argc_); + __ Set(rbx, 0); + __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION); + Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); + __ Jump(adaptor, RelocInfo::CODE_TARGET); } @@ -9628,6 +10459,11 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { } +void ApiGetterEntryStub::Generate(MacroAssembler* masm) { + UNREACHABLE(); +} + + void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, @@ -9818,62 +10654,6 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, } -void CallFunctionStub::Generate(MacroAssembler* masm) { - Label slow; - - // If the receiver might be a value (string, number or boolean) check for this - // and box it if it is. - if (ReceiverMightBeValue()) { - // Get the receiver from the stack. - // +1 ~ return address - Label receiver_is_value, receiver_is_js_object; - __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize)); - - // Check if receiver is a smi (which is a number value). - __ JumpIfSmi(rax, &receiver_is_value); - - // Check if the receiver is a valid JS object. - __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi); - __ j(above_equal, &receiver_is_js_object); - - // Call the runtime to box the value. - __ bind(&receiver_is_value); - __ EnterInternalFrame(); - __ push(rax); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ LeaveInternalFrame(); - __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax); - - __ bind(&receiver_is_js_object); - } - - // Get the function to call from the stack. - // +2 ~ receiver, return address - __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize)); - - // Check that the function really is a JavaScript function. - __ JumpIfSmi(rdi, &slow); - // Goto slow case if we do not have a function. - __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); - __ j(not_equal, &slow); - - // Fast-case: Just invoke the function. - ParameterCount actual(argc_); - __ InvokeFunction(rdi, actual, JUMP_FUNCTION); - - // Slow-case: Non-function called. - __ bind(&slow); - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead - // of the original receiver from the call site). 
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi); - __ Set(rax, argc_); - __ Set(rbx, 0); - __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION); - Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); - __ Jump(adaptor, RelocInfo::CODE_TARGET); -} - - void CEntryStub::Generate(MacroAssembler* masm) { // rax: number of arguments including receiver // rbx: pointer to C function (C callee-saved) @@ -9942,11 +10722,6 @@ void CEntryStub::Generate(MacroAssembler* masm) { } -void ApiGetterEntryStub::Generate(MacroAssembler* masm) { - UNREACHABLE(); -} - - void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { Label invoke, exit; #ifdef ENABLE_LOGGING_AND_PROFILING @@ -10078,887 +10853,88 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { } -// ----------------------------------------------------------------------------- -// Implementation of stubs. - -// Stub classes have public member named masm, not masm_. - -void StackCheckStub::Generate(MacroAssembler* masm) { - // Because builtins always remove the receiver from the stack, we - // have to fake one to avoid underflowing the stack. The receiver - // must be inserted below the return address on the stack so we - // temporarily store that in a register. - __ pop(rax); - __ Push(Smi::FromInt(0)); - __ push(rax); - - // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kStackGuard, 1, 1); -} - - -void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { - __ SmiToInteger32(kScratchRegister, rdx); - __ cvtlsi2sd(xmm0, kScratchRegister); - __ SmiToInteger32(kScratchRegister, rax); - __ cvtlsi2sd(xmm1, kScratchRegister); -} - - -void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) { - Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done; - // Load operand in rdx into xmm0. - __ JumpIfSmi(rdx, &load_smi_rdx); - __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); - // Load operand in rax into xmm1. - __ JumpIfSmi(rax, &load_smi_rax); - __ bind(&load_nonsmi_rax); - __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi_rdx); - __ SmiToInteger32(kScratchRegister, rdx); - __ cvtlsi2sd(xmm0, kScratchRegister); - __ JumpIfNotSmi(rax, &load_nonsmi_rax); - - __ bind(&load_smi_rax); - __ SmiToInteger32(kScratchRegister, rax); - __ cvtlsi2sd(xmm1, kScratchRegister); - - __ bind(&done); -} - - -void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, - Label* not_numbers) { - Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done; - // Load operand in rdx into xmm0, or branch to not_numbers. - __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex); - __ JumpIfSmi(rdx, &load_smi_rdx); - __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx); - __ j(not_equal, not_numbers); // Argument in rdx is not a number. - __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); - // Load operand in rax into xmm1, or branch to not_numbers. 
- __ JumpIfSmi(rax, &load_smi_rax); - - __ bind(&load_nonsmi_rax); - __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx); - __ j(not_equal, not_numbers); - __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi_rdx); - __ SmiToInteger32(kScratchRegister, rdx); - __ cvtlsi2sd(xmm0, kScratchRegister); - __ JumpIfNotSmi(rax, &load_nonsmi_rax); - - __ bind(&load_smi_rax); - __ SmiToInteger32(kScratchRegister, rax); - __ cvtlsi2sd(xmm1, kScratchRegister); - __ bind(&done); -} - - -// Input: rdx, rax are the left and right objects of a bit op. -// Output: rax, rcx are left and right integers for a bit op. -void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, - Label* conversion_failure, - Register heap_number_map) { - // Check float operands. - Label arg1_is_object, check_undefined_arg1; - Label arg2_is_object, check_undefined_arg2; - Label load_arg2, done; - - __ JumpIfNotSmi(rdx, &arg1_is_object); - __ SmiToInteger32(rdx, rdx); - __ jmp(&load_arg2); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg1); - __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); - __ j(not_equal, conversion_failure); - __ movl(rdx, Immediate(0)); - __ jmp(&load_arg2); - - __ bind(&arg1_is_object); - __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, &check_undefined_arg1); - // Get the untagged integer version of the edx heap number in rcx. - IntegerConvert(masm, rdx, rdx); - - // Here rdx has the untagged integer, rax has a Smi or a heap number. - __ bind(&load_arg2); - // Test if arg2 is a Smi. - __ JumpIfNotSmi(rax, &arg2_is_object); - __ SmiToInteger32(rax, rax); - __ movl(rcx, rax); - __ jmp(&done); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg2); - __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); - __ j(not_equal, conversion_failure); - __ movl(rcx, Immediate(0)); - __ jmp(&done); - - __ bind(&arg2_is_object); - __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, &check_undefined_arg2); - // Get the untagged integer version of the eax heap number in ecx. - IntegerConvert(masm, rcx, rax); - __ bind(&done); - __ movl(rax, rdx); -} - - -// Input: rdx, rax are the left and right objects of a bit op. -// Output: rax, rcx are left and right integers for a bit op. -void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { - // Check float operands. - Label done; - Label rax_is_smi; - Label rax_is_object; - Label rdx_is_object; - - __ JumpIfNotSmi(rdx, &rdx_is_object); - __ SmiToInteger32(rdx, rdx); - __ JumpIfSmi(rax, &rax_is_smi); - - __ bind(&rax_is_object); - IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx. - __ jmp(&done); - - __ bind(&rdx_is_object); - IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx. 
- __ JumpIfNotSmi(rax, &rax_is_object); - __ bind(&rax_is_smi); - __ SmiToInteger32(rcx, rax); - - __ bind(&done); - __ movl(rax, rdx); -} - - -const char* GenericBinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int len = 100; - name_ = Bootstrapper::AllocateAutoDeletedArray(len); - if (name_ == NULL) return "OOM"; - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - - OS::SNPrintF(Vector<char>(name_, len), - "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", - op_name, - overwrite_name, - (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", - args_in_registers_ ? "RegArgs" : "StackArgs", - args_reversed_ ? "_R" : "", - static_operands_type_.ToString(), - BinaryOpIC::GetName(runtime_operands_type_)); - return name_; -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Register left, - Register right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ push(left); - __ push(right); - } else { - // The calling convention with registers is left in rdx and right in rax. - Register left_arg = rdx; - Register right_arg = rax; - if (!(left.is(left_arg) && right.is(right_arg))) { - if (left.is(right_arg) && right.is(left_arg)) { - if (IsOperationCommutative()) { - SetArgsReversed(); - } else { - __ xchg(left, right); - } - } else if (left.is(left_arg)) { - __ movq(right_arg, right); - } else if (right.is(right_arg)) { - __ movq(left_arg, left); - } else if (left.is(right_arg)) { - if (IsOperationCommutative()) { - __ movq(left_arg, right); - SetArgsReversed(); - } else { - // Order of moves important to avoid destroying left argument. - __ movq(left_arg, left); - __ movq(right_arg, right); - } - } else if (right.is(left_arg)) { - if (IsOperationCommutative()) { - __ movq(right_arg, left); - SetArgsReversed(); - } else { - // Order of moves important to avoid destroying right argument. - __ movq(right_arg, right); - __ movq(left_arg, left); - } - } else { - // Order of moves is not important. - __ movq(left_arg, left); - __ movq(right_arg, right); - } - } - - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Register left, - Smi* right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ push(left); - __ Push(right); - } else { - // The calling convention with registers is left in rdx and right in rax. - Register left_arg = rdx; - Register right_arg = rax; - if (left.is(left_arg)) { - __ Move(right_arg, right); - } else if (left.is(right_arg) && IsOperationCommutative()) { - __ Move(left_arg, right); - SetArgsReversed(); - } else { - // For non-commutative operations, left and right_arg might be - // the same register. Therefore, the order of the moves is - // important here in order to not overwrite left before moving - // it to left_arg. - __ movq(left_arg, left); - __ Move(right_arg, right); - } - - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. 
- __ CallStub(this); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Smi* left, - Register right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ Push(left); - __ push(right); - } else { - // The calling convention with registers is left in rdx and right in rax. - Register left_arg = rdx; - Register right_arg = rax; - if (right.is(right_arg)) { - __ Move(left_arg, left); - } else if (right.is(left_arg) && IsOperationCommutative()) { - __ Move(right_arg, left); - SetArgsReversed(); - } else { - // For non-commutative operations, right and left_arg might be - // the same register. Therefore, the order of the moves is - // important here in order to not overwrite right before moving - // it to right_arg. - __ movq(right_arg, right); - __ Move(left_arg, left); - } - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - -Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, - VirtualFrame* frame, - Result* left, - Result* right) { - if (ArgsInRegistersSupported()) { - SetArgsInRegisters(); - return frame->CallStub(this, left, right); - } else { - frame->Push(left); - frame->Push(right); - return frame->CallStub(this, 2); - } -} - - -void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { - // 1. Move arguments into rdx, rax except for DIV and MOD, which need the - // dividend in rax and rdx free for the division. Use rax, rbx for those. - Comment load_comment(masm, "-- Load arguments"); - Register left = rdx; - Register right = rax; - if (op_ == Token::DIV || op_ == Token::MOD) { - left = rax; - right = rbx; - if (HasArgsInRegisters()) { - __ movq(rbx, rax); - __ movq(rax, rdx); - } - } - if (!HasArgsInRegisters()) { - __ movq(right, Operand(rsp, 1 * kPointerSize)); - __ movq(left, Operand(rsp, 2 * kPointerSize)); - } - - Label not_smis; - // 2. Smi check both operands. - if (static_operands_type_.IsSmi()) { - // Skip smi check if we know that both arguments are smis. - if (FLAG_debug_code) { - __ AbortIfNotSmi(left); - __ AbortIfNotSmi(right); - } - if (op_ == Token::BIT_OR) { - // Handle OR here, since we do extra smi-checking in the or code below. - __ SmiOr(right, right, left); - GenerateReturn(masm); - return; - } - } else { - if (op_ != Token::BIT_OR) { - // Skip the check for OR as it is better combined with the - // actual operation. - Comment smi_check_comment(masm, "-- Smi check arguments"); - __ JumpIfNotBothSmi(left, right, ¬_smis); - } - } - - // 3. Operands are both smis (except for OR), perform the operation leaving - // the result in rax and check the result if necessary. - Comment perform_smi(masm, "-- Perform smi operation"); - Label use_fp_on_smis; - switch (op_) { - case Token::ADD: { - ASSERT(right.is(rax)); - __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. - break; - } - - case Token::SUB: { - __ SmiSub(left, left, right, &use_fp_on_smis); - __ movq(rax, left); - break; - } - - case Token::MUL: - ASSERT(right.is(rax)); - __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. - break; - - case Token::DIV: - ASSERT(left.is(rax)); - __ SmiDiv(left, left, right, &use_fp_on_smis); - break; - - case Token::MOD: - ASSERT(left.is(rax)); - __ SmiMod(left, left, right, slow); - break; - - case Token::BIT_OR: - ASSERT(right.is(rax)); - __ movq(rcx, right); // Save the right operand. 
- __ SmiOr(right, right, left); // BIT_OR is commutative. - __ testb(right, Immediate(kSmiTagMask)); - __ j(not_zero, ¬_smis); - break; - - case Token::BIT_AND: - ASSERT(right.is(rax)); - __ SmiAnd(right, right, left); // BIT_AND is commutative. - break; - - case Token::BIT_XOR: - ASSERT(right.is(rax)); - __ SmiXor(right, right, left); // BIT_XOR is commutative. - break; - - case Token::SHL: - case Token::SHR: - case Token::SAR: - switch (op_) { - case Token::SAR: - __ SmiShiftArithmeticRight(left, left, right); - break; - case Token::SHR: - __ SmiShiftLogicalRight(left, left, right, slow); - break; - case Token::SHL: - __ SmiShiftLeft(left, left, right); - break; - default: - UNREACHABLE(); - } - __ movq(rax, left); - break; - - default: - UNREACHABLE(); - break; - } - - // 4. Emit return of result in rax. - GenerateReturn(masm); - - // 5. For some operations emit inline code to perform floating point - // operations on known smis (e.g., if the result of the operation - // overflowed the smi range). - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - ASSERT(use_fp_on_smis.is_linked()); - __ bind(&use_fp_on_smis); - if (op_ == Token::DIV) { - __ movq(rdx, rax); - __ movq(rax, rbx); - } - // left is rdx, right is rax. - __ AllocateHeapNumber(rbx, rcx, slow); - FloatingPointHelper::LoadSSE2SmiOperands(masm); - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0); - __ movq(rax, rbx); - GenerateReturn(masm); - } - default: - break; - } - - // 6. Non-smi operands, fall out to the non-smi code with the operands in - // rdx and rax. - Comment done_comment(masm, "-- Enter non-smi code"); - __ bind(¬_smis); - - switch (op_) { - case Token::DIV: - case Token::MOD: - // Operands are in rax, rbx at this point. - __ movq(rdx, rax); - __ movq(rax, rbx); - break; - - case Token::BIT_OR: - // Right operand is saved in rcx and rax was destroyed by the smi - // operation. - __ movq(rax, rcx); - break; - - default: - break; - } -} - - -void GenericBinaryOpStub::Generate(MacroAssembler* masm) { - Label call_runtime; - - if (ShouldGenerateSmiCode()) { - GenerateSmiCode(masm, &call_runtime); - } else if (op_ != Token::MOD) { - if (!HasArgsInRegisters()) { - GenerateLoadArguments(masm); - } - } - // Floating point case. - if (ShouldGenerateFPCode()) { - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - if (runtime_operands_type_ == BinaryOpIC::DEFAULT && - HasSmiCodeInStub()) { - // Execution reaches this point when the first non-smi argument occurs - // (and only if smi code is generated). This is the right moment to - // patch to HEAP_NUMBERS state. The transition is attempted only for - // the four basic operations. The stub stays in the DEFAULT state - // forever for all other operations (also if smi code is skipped). - GenerateTypeTransition(masm); - break; - } - - Label not_floats; - // rax: y - // rdx: x - if (static_operands_type_.IsNumber()) { - if (FLAG_debug_code) { - // Assert at runtime that inputs are only numbers. 
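An aside on the smi fast path deleted above: the use_fp_on_smis label is what keeps overflowing integer arithmetic exact. A plain-JavaScript illustration (nothing here is V8-specific; 2147483647 is the largest value a 32-bit smi payload can hold, matching the STATIC_ASSERT(kSmiValueSize == 32) further down):

// Largest value representable on the 32-bit smi fast path.
var maxSmi = 2147483647;
// This addition overflows the smi range, so the stub redoes it in double
// precision (addsd) and boxes the result in a freshly allocated HeapNumber.
var sum = maxSmi + 1;
// sum === 2147483648 exactly; only the heap representation differs.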
- __ AbortIfNotNumber(rdx); - __ AbortIfNotNumber(rax); - } - FloatingPointHelper::LoadSSE2NumberOperands(masm); - } else { - FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime); - } - - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - // Allocate a heap number, if needed. - Label skip_allocation; - OverwriteMode mode = mode_; - if (HasArgsReversed()) { - if (mode == OVERWRITE_RIGHT) { - mode = OVERWRITE_LEFT; - } else if (mode == OVERWRITE_LEFT) { - mode = OVERWRITE_RIGHT; - } - } - switch (mode) { - case OVERWRITE_LEFT: - __ JumpIfNotSmi(rdx, &skip_allocation); - __ AllocateHeapNumber(rbx, rcx, &call_runtime); - __ movq(rdx, rbx); - __ bind(&skip_allocation); - __ movq(rax, rdx); - break; - case OVERWRITE_RIGHT: - // If the argument in rax is already an object, we skip the - // allocation of a heap number. - __ JumpIfNotSmi(rax, &skip_allocation); - // Fall through! - case NO_OVERWRITE: - // Allocate a heap number for the result. Keep rax and rdx intact - // for the possible runtime call. - __ AllocateHeapNumber(rbx, rcx, &call_runtime); - __ movq(rax, rbx); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } - __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); - GenerateReturn(masm); - __ bind(¬_floats); - if (runtime_operands_type_ == BinaryOpIC::DEFAULT && - !HasSmiCodeInStub()) { - // Execution reaches this point when the first non-number argument - // occurs (and only if smi code is skipped from the stub, otherwise - // the patching has already been done earlier in this case branch). - // A perfect moment to try patching to STRINGS for ADD operation. - if (op_ == Token::ADD) { - GenerateTypeTransition(masm); - } - } - break; - } - case Token::MOD: { - // For MOD we go directly to runtime in the non-smi case. - break; - } - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: { - Label skip_allocation, non_smi_shr_result; - Register heap_number_map = r9; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - if (static_operands_type_.IsNumber()) { - if (FLAG_debug_code) { - // Assert at runtime that inputs are only numbers. - __ AbortIfNotNumber(rdx); - __ AbortIfNotNumber(rax); - } - FloatingPointHelper::LoadNumbersAsIntegers(masm); - } else { - FloatingPointHelper::LoadAsIntegers(masm, - &call_runtime, - heap_number_map); - } - switch (op_) { - case Token::BIT_OR: __ orl(rax, rcx); break; - case Token::BIT_AND: __ andl(rax, rcx); break; - case Token::BIT_XOR: __ xorl(rax, rcx); break; - case Token::SAR: __ sarl_cl(rax); break; - case Token::SHL: __ shll_cl(rax); break; - case Token::SHR: { - __ shrl_cl(rax); - // Check if result is negative. This can only happen for a shift - // by zero. - __ testl(rax, rax); - __ j(negative, &non_smi_shr_result); - break; - } - default: UNREACHABLE(); - } - - STATIC_ASSERT(kSmiValueSize == 32); - // Tag smi result and return. - __ Integer32ToSmi(rax, rax); - GenerateReturn(masm); - - // All bit-ops except SHR return a signed int32 that can be - // returned immediately as a smi. - // We might need to allocate a HeapNumber if we shift a negative - // number right by zero (i.e., convert to UInt32). - if (op_ == Token::SHR) { - ASSERT(non_smi_shr_result.is_linked()); - __ bind(&non_smi_shr_result); - // Allocate a heap number if needed. 
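The SHR handling above has a direct JavaScript counterpart: every bitwise operator produces a signed 32-bit result except the unsigned shift, and only an unsigned shift by zero can push a value outside the int32 (hence smi) range, which is the case the non_smi_shr_result path boxes in a HeapNumber. A small plain-JS illustration:

var a = -1 | 0;     // -1, an int32, stays a smi
var b = -1 >> 0;    // -1, arithmetic shift preserves the sign
var c = -1 >>> 0;   // 4294967295, does not fit in an int32, needs a HeapNumber
var d = -1 >>> 1;   // 2147483647, any non-zero unsigned shift clears the top bit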
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). - switch (mode_) { - case OVERWRITE_LEFT: - case OVERWRITE_RIGHT: - // If the operand was an object, we skip the - // allocation of a heap number. - __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ? - 1 * kPointerSize : 2 * kPointerSize)); - __ JumpIfNotSmi(rax, &skip_allocation); - // Fall through! - case NO_OVERWRITE: - // Allocate heap number in new space. - // Not using AllocateHeapNumber macro in order to reuse - // already loaded heap_number_map. - __ AllocateInNewSpace(HeapNumber::kSize, - rax, - rcx, - no_reg, - &call_runtime, - TAG_OBJECT); - // Set the map. - if (FLAG_debug_code) { - __ AbortIfNotRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - } - __ movq(FieldOperand(rax, HeapObject::kMapOffset), - heap_number_map); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } - // Store the result in the HeapNumber and return. - __ cvtqsi2sd(xmm0, rbx); - __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); - GenerateReturn(masm); - } - - break; - } - default: UNREACHABLE(); break; - } - } - - // If all else fails, use the runtime system to get the correct - // result. If arguments was passed in registers now place them on the - // stack in the correct order below the return address. - __ bind(&call_runtime); - - if (HasArgsInRegisters()) { - GenerateRegisterArgsPush(masm); - } - - switch (op_) { - case Token::ADD: { - // Registers containing left and right operands respectively. - Register lhs, rhs; - - if (HasArgsReversed()) { - lhs = rax; - rhs = rdx; - } else { - lhs = rdx; - rhs = rax; - } - - // Test for string arguments before calling runtime. - Label not_strings, both_strings, not_string1, string1, string1_smi2; - - // If this stub has already generated FP-specific code then the arguments - // are already in rdx and rax. - if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { - GenerateLoadArguments(masm); - } - - Condition is_smi; - is_smi = masm->CheckSmi(lhs); - __ j(is_smi, ¬_string1); - __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8); - __ j(above_equal, ¬_string1); - - // First argument is a a string, test second. - is_smi = masm->CheckSmi(rhs); - __ j(is_smi, &string1_smi2); - __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9); - __ j(above_equal, &string1); - - // First and second argument are strings. - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); - __ TailCallStub(&string_add_stub); - - __ bind(&string1_smi2); - // First argument is a string, second is a smi. Try to lookup the number - // string for the smi in the number string cache. - NumberToStringStub::GenerateLookupNumberStringCache( - masm, rhs, rbx, rcx, r8, true, &string1); - - // Replace second argument on stack and tailcall string add stub to make - // the result. - __ movq(Operand(rsp, 1 * kPointerSize), rbx); - __ TailCallStub(&string_add_stub); - - // Only first argument is a string. - __ bind(&string1); - __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); - - // First argument was not a string, test second. - __ bind(¬_string1); - is_smi = masm->CheckSmi(rhs); - __ j(is_smi, ¬_strings); - __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs); - __ j(above_equal, ¬_strings); - - // Only second argument is a string. - __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); - - __ bind(¬_strings); - // Neither argument is a string. 
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; - } - case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; - case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; - case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); - break; - case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - break; - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - -void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { - ASSERT(!HasArgsInRegisters()); - __ movq(rax, Operand(rsp, 1 * kPointerSize)); - __ movq(rdx, Operand(rsp, 2 * kPointerSize)); -} - - -void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { - // If arguments are not passed in registers remove them from the stack before - // returning. - if (!HasArgsInRegisters()) { - __ ret(2 * kPointerSize); // Remove both operands - } else { - __ ret(0); - } -} - +void InstanceofStub::Generate(MacroAssembler* masm) { + // Implements "value instanceof function" operator. + // Expected input state: + // rsp[0] : return address + // rsp[1] : function pointer + // rsp[2] : value + // Returns a bitwise zero to indicate that the value + // is and instance of the function and anything else to + // indicate that the value is not an instance. -void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { - ASSERT(HasArgsInRegisters()); - __ pop(rcx); - if (HasArgsReversed()) { - __ push(rax); - __ push(rdx); - } else { - __ push(rdx); - __ push(rax); - } - __ push(rcx); -} + // Get the object - go slow case if it's a smi. + Label slow; + __ movq(rax, Operand(rsp, 2 * kPointerSize)); + __ JumpIfSmi(rax, &slow); + // Check that the left hand is a JS object. Leave its map in rax. + __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); + __ j(below, &slow); + __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE); + __ j(above, &slow); -void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - Label get_result; + // Get the prototype of the function. + __ movq(rdx, Operand(rsp, 1 * kPointerSize)); + // rdx is function, rax is map. - // Ensure the operands are on the stack. - if (HasArgsInRegisters()) { - GenerateRegisterArgsPush(masm); - } + // Look up the function and the map in the instanceof cache. + Label miss; + __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); + __ j(not_equal, &miss); + __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex); + __ j(not_equal, &miss); + __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); + __ ret(2 * kPointerSize); - // Left and right arguments are already on stack. - __ pop(rcx); // Save the return address. + __ bind(&miss); + __ TryGetFunctionPrototype(rdx, rbx, &slow); - // Push this stub's key. - __ Push(Smi::FromInt(MinorKey())); + // Check that the function prototype is a JS object. 
+ __ JumpIfSmi(rbx, &slow); + __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister); + __ j(below, &slow); + __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); + __ j(above, &slow); - // Although the operation and the type info are encoded into the key, - // the encoding is opaque, so push them too. - __ Push(Smi::FromInt(op_)); + // Register mapping: + // rax is object map. + // rdx is function. + // rbx is function prototype. + __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); + __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); - __ Push(Smi::FromInt(runtime_operands_type_)); + __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset)); - __ push(rcx); // The return address. + // Loop through the prototype chain looking for the function prototype. + Label loop, is_instance, is_not_instance; + __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex); + __ bind(&loop); + __ cmpq(rcx, rbx); + __ j(equal, &is_instance); + __ cmpq(rcx, kScratchRegister); + // The code at is_not_instance assumes that kScratchRegister contains a + // non-zero GCable value (the null object in this case). + __ j(equal, &is_not_instance); + __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset)); + __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset)); + __ jmp(&loop); - // Perform patching to an appropriate fast case and return the result. - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), - 5, - 1); -} + __ bind(&is_instance); + __ xorl(rax, rax); + // Store bitwise zero in the cache. This is a Smi in GC terms. + ASSERT_EQ(0, kSmiTag); + __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); + __ ret(2 * kPointerSize); + __ bind(&is_not_instance); + // We have to store a non-zero value in the cache. + __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); + __ ret(2 * kPointerSize); -Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { - GenericBinaryOpStub stub(key, type_info); - return stub.GetCode(); + // Slow-case: Go through the JavaScript implementation. + __ bind(&slow); + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } @@ -12067,6 +12043,11 @@ ModuloFunction CreateModuloFunction() { #undef __ +void RecordWriteStub::Generate(MacroAssembler* masm) { + masm->RecordWriteHelper(object_, addr_, scratch_); + masm->ret(0); +} + } } // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index c6be503366..da13ee298a 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -2243,11 +2243,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) { __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - // To allocate a heap number, and ensure that it is not a smi, we - // call the runtime function FUnaryMinus on 0, returning the double - // -0.0. A new, distinct heap number is returned each time. - __ Push(Smi::FromInt(0)); - __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + // Allocate a heap number. 
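For reference, the check implemented by the new InstanceofStub above corresponds to the following JavaScript-level sketch. The name isInstanceOf is illustrative only, and the corner cases the stub routes to the INSTANCE_OF builtin (access-checked objects, a non-object function prototype, which should throw a TypeError) are omitted:

function isInstanceOf(value, fn) {
  // Primitives are never instances; the stub sends smis to the slow case,
  // where the builtin ends up yielding false for them.
  if (value === null || (typeof value !== 'object' && typeof value !== 'function')) {
    return false;
  }
  var target = fn.prototype;                  // TryGetFunctionPrototype
  var proto = Object.getPrototypeOf(value);   // Map::kPrototypeOffset
  while (proto !== null) {                    // the stub compares against the null root to stop
    if (proto === target) return true;        // is_instance: a zero answer is cached
    proto = Object.getPrototypeOf(proto);
  }
  return false;                               // is_not_instance: a non-zero answer is cached
}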
+ __ CallRuntime(Runtime::kNumberAlloc, 0); __ movq(rbx, rax); __ bind(&heapnumber_allocated); diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index 76200d7e54..a5634a794d 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -2322,101 +2322,6 @@ void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) { } -Register MacroAssembler::CheckMaps(JSObject* object, - Register object_reg, - JSObject* holder, - Register holder_reg, - Register scratch, - int save_at_depth, - Label* miss) { - // Make sure there's no overlap between scratch and the other - // registers. - ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg)); - - // Keep track of the current object in register reg. On the first - // iteration, reg is an alias for object_reg, on later iterations, - // it is an alias for holder_reg. - Register reg = object_reg; - int depth = 0; - - if (save_at_depth == depth) { - movq(Operand(rsp, kPointerSize), object_reg); - } - - // Check the maps in the prototype chain. - // Traverse the prototype chain from the object and do map checks. - while (object != holder) { - depth++; - - // Only global objects and objects that do not require access - // checks are allowed in stubs. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - - JSObject* prototype = JSObject::cast(object->GetPrototype()); - if (Heap::InNewSpace(prototype)) { - // Get the map of the current object. - movq(scratch, FieldOperand(reg, HeapObject::kMapOffset)); - Cmp(scratch, Handle<Map>(object->map())); - // Branch on the result of the map check. - j(not_equal, miss); - // Check access rights to the global object. This has to happen - // after the map check so that we know that the object is - // actually a global object. - if (object->IsJSGlobalProxy()) { - CheckAccessGlobalProxy(reg, scratch, miss); - - // Restore scratch register to be the map of the object. - // We load the prototype from the map in the scratch register. - movq(scratch, FieldOperand(reg, HeapObject::kMapOffset)); - } - // The prototype is in new space; we cannot store a reference - // to it in the code. Load it from the map. - reg = holder_reg; // from now the object is in holder_reg - movq(reg, FieldOperand(scratch, Map::kPrototypeOffset)); - - } else { - // Check the map of the current object. - Cmp(FieldOperand(reg, HeapObject::kMapOffset), - Handle<Map>(object->map())); - // Branch on the result of the map check. - j(not_equal, miss); - // Check access rights to the global object. This has to happen - // after the map check so that we know that the object is - // actually a global object. - if (object->IsJSGlobalProxy()) { - CheckAccessGlobalProxy(reg, scratch, miss); - } - // The prototype is in old space; load it directly. - reg = holder_reg; // from now the object is in holder_reg - Move(reg, Handle<JSObject>(prototype)); - } - - if (save_at_depth == depth) { - movq(Operand(rsp, kPointerSize), reg); - } - - // Go to the next object in the prototype chain. - object = prototype; - } - - // Check the holder map. - Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map())); - j(not_equal, miss); - - // Log the check depth. - LOG(IntEvent("check-maps-depth", depth + 1)); - - // Perform security check for access to the global object and return - // the holder register. 
- ASSERT(object == holder); - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - if (object->IsJSGlobalProxy()) { - CheckAccessGlobalProxy(reg, scratch, miss); - } - return reg; -} - - void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label* miss) { diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index a256ab82bd..64f35e10de 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -596,24 +596,6 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Inline caching support - // Generates code that verifies that the maps of objects in the - // prototype chain of object hasn't changed since the code was - // generated and branches to the miss label if any map has. If - // necessary the function also generates code for security check - // in case of global object holders. The scratch and holder - // registers are always clobbered, but the object register is only - // clobbered if it the same as the holder register. The function - // returns a register containing the holder - either object_reg or - // holder_reg. - // The function can optionally (when save_at_depth != - // kInvalidProtoDepth) save the object at the given depth by moving - // it to [rsp + kPointerSize]. - Register CheckMaps(JSObject* object, Register object_reg, - JSObject* holder, Register holder_reg, - Register scratch, - int save_at_depth, - Label* miss); - // Generate code for checking access rights - used for security checks // on access to global objects across environments. The holder register // is left untouched, but the scratch register and kScratchRegister, diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc index 383399ea60..80318648ea 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc @@ -960,7 +960,6 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { CodeDesc code_desc; masm_->GetCode(&code_desc); Handle<Code> code = Factory::NewCode(code_desc, - NULL, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject()); PROFILE(RegExpCodeCreateEvent(*code, *source)); diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index ab75b96857..53301cc461 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -81,6 +81,100 @@ static void ProbeTable(MacroAssembler* masm, } +// Helper function used to check that the dictionary doesn't contain +// the property. This function may return false negatives, so miss_label +// must always call a backup property check that is complete. +// This function is safe to call if the receiver has fast properties. +// Name must be a symbol and receiver must be a heap object. +static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, + Label* miss_label, + Register receiver, + String* name, + Register r0, + Register r1) { + ASSERT(name->IsSymbol()); + __ IncrementCounter(&Counters::negative_lookups, 1); + __ IncrementCounter(&Counters::negative_lookups_miss, 1); + + Label done; + __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset)); + + const int kInterceptorOrAccessCheckNeededMask = + (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); + + // Bail out if the receiver has a named interceptor or requires access checks. 
+ __ testb(FieldOperand(r0, Map::kBitFieldOffset), + Immediate(kInterceptorOrAccessCheckNeededMask)); + __ j(not_zero, miss_label); + + // Check that receiver is a JSObject. + __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE); + __ j(below, miss_label); + + // Load properties array. + Register properties = r0; + __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset)); + + // Check that the properties array is a dictionary. + __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset), + Heap::kHashTableMapRootIndex); + __ j(not_equal, miss_label); + + // Compute the capacity mask. + const int kCapacityOffset = + StringDictionary::kHeaderSize + + StringDictionary::kCapacityIndex * kPointerSize; + + // Generate an unrolled loop that performs a few probes before + // giving up. + static const int kProbes = 4; + const int kElementsStartOffset = + StringDictionary::kHeaderSize + + StringDictionary::kElementsStartIndex * kPointerSize; + + // If names of slots in range from 1 to kProbes - 1 for the hash value are + // not equal to the name and kProbes-th slot is not used (its name is the + // undefined value), it guarantees the hash table doesn't contain the + // property. It's true even if some slots represent deleted properties + // (their names are the null value). + for (int i = 0; i < kProbes; i++) { + // r0 points to properties hash. + // Compute the masked index: (hash + i + i * i) & mask. + Register index = r1; + // Capacity is smi 2^n. + __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset)); + __ decl(index); + __ and_(index, + Immediate(name->Hash() + StringDictionary::GetProbeOffset(i))); + + // Scale the index by multiplying by the entry size. + ASSERT(StringDictionary::kEntrySize == 3); + __ lea(index, Operand(index, index, times_2, 0)); // index *= 3. + + Register entity_name = r1; + // Having undefined at this place means the name is not contained. + ASSERT_EQ(kSmiTagSize, 1); + __ movq(entity_name, Operand(properties, index, times_pointer_size, + kElementsStartOffset - kHeapObjectTag)); + __ Cmp(entity_name, Factory::undefined_value()); + // __ jmp(miss_label); + if (i != kProbes - 1) { + __ j(equal, &done); + + // Stop if found the property. + __ Cmp(entity_name, Handle<String>(name)); + __ j(equal, miss_label); + } else { + // Give up probing if still not found the undefined value. 
+ __ j(not_equal, miss_label); + } + } + + __ bind(&done); + __ DecrementCounter(&Counters::negative_lookups_miss, 1); +} + + void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC); Code* code = NULL; @@ -497,6 +591,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, Label* miss) { ASSERT(holder->HasNamedInterceptor()); ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); @@ -512,6 +607,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { receiver, scratch1, scratch2, + scratch3, holder, lookup, name, @@ -523,6 +619,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { receiver, scratch1, scratch2, + scratch3, name, holder, miss); @@ -535,6 +632,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, JSObject* interceptor_holder, LookupResult* lookup, String* name, @@ -574,7 +672,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register holder = stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, scratch1, - scratch2, name, depth1, miss); + scratch2, scratch3, name, depth1, miss); // Invoke an interceptor and if it provides a value, // branch to |regular_invoke|. @@ -590,7 +688,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { if (interceptor_holder != lookup->holder()) { stub_compiler_->CheckPrototypes(interceptor_holder, receiver, lookup->holder(), scratch1, - scratch2, name, depth2, miss); + scratch2, scratch3, name, depth2, miss); } else { // CheckPrototypes has a side effect of fetching a 'holder' // for API (object which is instanceof for the signature). It's @@ -626,12 +724,13 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, String* name, JSObject* interceptor_holder, Label* miss_label) { Register holder = stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, + scratch1, scratch2, scratch3, name, miss_label); __ EnterInternalFrame(); @@ -784,7 +883,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, // Check that the maps haven't changed. CheckPrototypes(JSObject::cast(object), rdx, holder, - rbx, rax, name, depth, &miss); + rbx, rax, rdi, name, depth, &miss); // Patch the receiver on the stack with the global proxy if // necessary. 
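GenerateDictionaryNegativeLookup above unrolls the first few probes of the string dictionary to prove that a slow-mode object on the prototype chain does not define the property, so the stub can avoid a full lookup. A JavaScript model of that decision, under stated assumptions: probeOffset stands in for StringDictionary::GetProbeOffset, and the table is modelled as a flat array of keys rather than (key, value, details) triples, which is why the real code scales the index by kEntrySize == 3:

// Returns true only when the property is provably absent. Returning false is
// allowed to be a false negative; the caller (the stub's miss_label path) then
// performs a complete lookup.
function provablyAbsent(keys, capacity, name, hash) {
  var mask = capacity - 1;                                      // capacity is a smi power of two
  var probeOffset = function (i) { return (i + i * i) >> 1; };  // assumed quadratic probe sequence
  var kProbes = 4;
  for (var i = 0; i < kProbes; i++) {
    var index = (hash + probeOffset(i)) & mask;
    var key = keys[index];
    if (i !== kProbes - 1) {
      if (key === undefined) return true;   // a free slot before any match: absent
      if (key === name) return false;       // the name is present: take the miss path
      // a different name or a deleted (null) entry: keep probing
    } else {
      if (key !== undefined) return false;  // still occupied on the last probe: give up
    }
  }
  return true;                              // the last probe hit a free slot
}

False negatives only cost a slower lookup, never a wrong answer, which is why four unrolled probes are enough.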
@@ -807,7 +906,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::STRING_FUNCTION_INDEX, rax); CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder, - rbx, rdx, name, &miss); + rbx, rdx, rdi, name, &miss); } break; @@ -826,7 +925,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::NUMBER_FUNCTION_INDEX, rax); CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder, - rbx, rdx, name, &miss); + rbx, rdx, rdi, name, &miss); } break; } @@ -847,7 +946,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::BOOLEAN_FUNCTION_INDEX, rax); CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder, - rbx, rdx, name, &miss); + rbx, rdx, rdi, name, &miss); } break; } @@ -902,7 +1001,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object, __ JumpIfSmi(rdx, &miss); // Do the right check and compute the holder register. - Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss); + Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi, + name, &miss); GenerateFastPropertyLoad(masm(), rdi, reg, holder, index); @@ -965,6 +1065,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, holder, rbx, rax, + rdi, name, &miss); @@ -1119,7 +1220,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, - rax, name, &miss); + rax, rdi, name, &miss); // Get the elements array of the object. __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset)); @@ -1226,6 +1327,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object, rdx, rbx, rdi, + rax, &miss); // Restore receiver. @@ -1288,7 +1390,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, } // Check that the maps haven't changed. - CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss); + CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, &miss); // Get the value from the cell. __ Move(rdi, Handle<JSGlobalPropertyCell>(cell)); @@ -1353,7 +1455,7 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name, Label miss; Failure* failure = Failure::InternalError(); - bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx, + bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx, rdi, callback, name, &miss, &failure); if (!success) return failure; @@ -1376,7 +1478,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object, // ----------------------------------- Label miss; - GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss); + GenerateLoadConstant(object, holder, rax, rbx, rdx, rdi, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1401,7 +1503,7 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name, // Check the maps of the full prototype chain. Also check that // global property cells up to (but not including) the last object // in the prototype chain are empty. - CheckPrototypes(object, rax, last, rbx, rdx, name, &miss); + CheckPrototypes(object, rax, last, rbx, rdx, rdi, name, &miss); // If the last object in the prototype chain is a global object, // check that the global property cell is empty. 
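The trailing context above is the reason the rewritten CheckPrototypes further down walks the entire chain and, for global objects, also verifies that the relevant property cells are still empty: a cached "found at index i" or "nonexistent" answer is invalidated by a change anywhere on the prototype chain, not only on the receiver. A plain-JavaScript illustration of that invalidation:

var proto = {};
var o = Object.create(proto);  // chain: o -> proto -> Object.prototype -> null
var before = o.x;              // undefined: a genuinely nonexistent property
proto.x = 42;                  // mutate the prototype only; o itself is untouched
var after = o.x;               // 42: the old "not found" answer is now stale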
@@ -1438,7 +1540,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object, // ----------------------------------- Label miss; - GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss); + GenerateLoadField(object, holder, rax, rbx, rdx, rdi, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1469,6 +1571,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, rcx, rdx, rbx, + rdi, name, &miss); @@ -1500,7 +1603,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, } // Check that the maps haven't changed. - CheckPrototypes(object, rax, holder, rbx, rdx, name, &miss); + CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss); // Get the value from the cell. __ Move(rbx, Handle<JSGlobalPropertyCell>(cell)); @@ -1546,7 +1649,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, __ j(not_equal, &miss); Failure* failure = Failure::InternalError(); - bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, + bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback, name, &miss, &failure); if (!success) return failure; @@ -1600,7 +1703,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ Cmp(rax, Handle<String>(name)); __ j(not_equal, &miss); - GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, + GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi, value, name, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_constant_function, 1); @@ -1660,6 +1763,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, rax, rcx, rbx, + rdi, name, &miss); __ bind(&miss); @@ -1875,7 +1979,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, __ Cmp(rax, Handle<String>(name)); __ j(not_equal, &miss); - GenerateLoadField(receiver, holder, rdx, rbx, rcx, index, name, &miss); + GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_field, 1); @@ -1954,6 +2058,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, Register name_reg, Register scratch1, Register scratch2, + Register scratch3, String* name, Label* miss) { ASSERT(interceptor_holder->HasNamedInterceptor()); @@ -1981,7 +2086,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, + name, miss); ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1)); // Save necessary data before invoking an interceptor. @@ -2029,6 +2135,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, lookup->holder(), scratch1, scratch2, + scratch3, name, miss); } @@ -2068,7 +2175,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Call the runtime system to load the interceptor. // Check that the maps haven't changed. 
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, + name, miss); __ pop(scratch2); // save old return address PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, interceptor_holder); @@ -2087,6 +2195,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, Register name_reg, Register scratch1, Register scratch2, + Register scratch3, AccessorInfo* callback, String* name, Label* miss, @@ -2097,7 +2206,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, // Check that the maps haven't changed. Register reg = CheckPrototypes(object, receiver, holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, name, miss); // Push the arguments on the JS stack of the caller. __ pop(scratch2); // remove return address @@ -2122,41 +2231,143 @@ Register StubCompiler::CheckPrototypes(JSObject* object, Register object_reg, JSObject* holder, Register holder_reg, - Register scratch, + Register scratch1, + Register scratch2, String* name, int save_at_depth, - Label* miss, - Register extra) { - // Check that the maps haven't changed. - Register result = - masm()->CheckMaps(object, - object_reg, - holder, - holder_reg, - scratch, - save_at_depth, - miss); + Label* miss) { + // Make sure there's no overlap between holder and object registers. + ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + && !scratch2.is(scratch1)); + + // Keep track of the current object in register reg. On the first + // iteration, reg is an alias for object_reg, on later iterations, + // it is an alias for holder_reg. + Register reg = object_reg; + int depth = 0; + + if (save_at_depth == depth) { + __ movq(Operand(rsp, kPointerSize), object_reg); + } + + // Check the maps in the prototype chain. + // Traverse the prototype chain from the object and do map checks. + JSObject* current = object; + while (current != holder) { + depth++; + + // Only global objects and objects that do not require access + // checks are allowed in stubs. + ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); + + JSObject* prototype = JSObject::cast(current->GetPrototype()); + if (!current->HasFastProperties() && + !current->IsJSGlobalObject() && + !current->IsJSGlobalProxy()) { + if (!name->IsSymbol()) { + Object* lookup_result = Heap::LookupSymbol(name); + if (lookup_result->IsFailure()) { + set_failure(Failure::cast(lookup_result)); + return reg; + } else { + name = String::cast(lookup_result); + } + } + ASSERT(current->property_dictionary()->FindEntry(name) == + StringDictionary::kNotFound); + + GenerateDictionaryNegativeLookup(masm(), + miss, + reg, + name, + scratch1, + scratch2); + __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); + reg = holder_reg; // from now the object is in holder_reg + __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); + } else if (Heap::InNewSpace(prototype)) { + // Get the map of the current object. + __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); + __ Cmp(scratch1, Handle<Map>(current->map())); + // Branch on the result of the map check. + __ j(not_equal, miss); + // Check access rights to the global object. This has to happen + // after the map check so that we know that the object is + // actually a global object. + if (current->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(reg, scratch1, miss); + + // Restore scratch register to be the map of the object. 
+ // We load the prototype from the map in the scratch register. + __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); + } + // The prototype is in new space; we cannot store a reference + // to it in the code. Load it from the map. + reg = holder_reg; // from now the object is in holder_reg + __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); + + } else { + // Check the map of the current object. + __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), + Handle<Map>(current->map())); + // Branch on the result of the map check. + __ j(not_equal, miss); + // Check access rights to the global object. This has to happen + // after the map check so that we know that the object is + // actually a global object. + if (current->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(reg, scratch1, miss); + } + // The prototype is in old space; load it directly. + reg = holder_reg; // from now the object is in holder_reg + __ Move(reg, Handle<JSObject>(prototype)); + } + + if (save_at_depth == depth) { + __ movq(Operand(rsp, kPointerSize), reg); + } + + // Go to the next object in the prototype chain. + current = prototype; + } + + // Check the holder map. + __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map())); + __ j(not_equal, miss); + + // Log the check depth. + LOG(IntEvent("check-maps-depth", depth + 1)); + + // Perform security check for access to the global object and return + // the holder register. + ASSERT(current == holder); + ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); + if (current->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(reg, scratch1, miss); + } // If we've skipped any global objects, it's not enough to verify // that their maps haven't changed. We also need to check that the // property cell for the property is still empty. - while (object != holder) { - if (object->IsGlobalObject()) { + current = object; + while (current != holder) { + if (current->IsGlobalObject()) { Object* cell = GenerateCheckPropertyCell(masm(), - GlobalObject::cast(object), + GlobalObject::cast(current), name, - scratch, + scratch1, miss); if (cell->IsFailure()) { set_failure(Failure::cast(cell)); - return result; + return reg; } } - object = JSObject::cast(object->GetPrototype()); + current = JSObject::cast(current->GetPrototype()); } // Return the register containing the holder. - return result; + return reg; } @@ -2165,6 +2376,7 @@ void StubCompiler::GenerateLoadField(JSObject* object, Register receiver, Register scratch1, Register scratch2, + Register scratch3, int index, String* name, Label* miss) { @@ -2174,7 +2386,7 @@ void StubCompiler::GenerateLoadField(JSObject* object, // Check the prototype chain. Register reg = CheckPrototypes(object, receiver, holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, name, miss); // Get the value from the properties. GenerateFastPropertyLoad(masm(), rax, reg, holder, index); @@ -2187,6 +2399,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, Register receiver, Register scratch1, Register scratch2, + Register scratch3, Object* value, String* name, Label* miss) { @@ -2196,7 +2409,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, // Check that the maps haven't changed. Register reg = CheckPrototypes(object, receiver, holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, name, miss); // Return the constant value. 
__ Move(rax, Handle<Object>(value)); diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index 330ca5bcd1..bd6108c0a0 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -470,7 +470,10 @@ TEST(MakingExternalStringConditions) { i::Heap::CollectGarbage(0, i::NEW_SPACE); i::Heap::CollectGarbage(0, i::NEW_SPACE); - Local<String> small_string = String::New(AsciiToTwoByteString("small")); + uint16_t* two_byte_string = AsciiToTwoByteString("small"); + Local<String> small_string = String::New(two_byte_string); + i::DeleteArray(two_byte_string); + // We should refuse to externalize newly created small string. CHECK(!small_string->CanMakeExternal()); // Trigger GCs so that the newly allocated string moves to old gen. @@ -479,7 +482,10 @@ TEST(MakingExternalStringConditions) { // Old space strings should be accepted. CHECK(small_string->CanMakeExternal()); - small_string = String::New(AsciiToTwoByteString("small 2")); + two_byte_string = AsciiToTwoByteString("small 2"); + small_string = String::New(two_byte_string); + i::DeleteArray(two_byte_string); + // We should refuse externalizing newly created small string. CHECK(!small_string->CanMakeExternal()); for (int i = 0; i < 100; i++) { @@ -492,8 +498,11 @@ TEST(MakingExternalStringConditions) { char* buf = i::NewArray<char>(buf_size); memset(buf, 'a', buf_size); buf[buf_size - 1] = '\0'; - Local<String> large_string = String::New(AsciiToTwoByteString(buf)); + + two_byte_string = AsciiToTwoByteString(buf); + Local<String> large_string = String::New(two_byte_string); i::DeleteArray(buf); + i::DeleteArray(two_byte_string); // Large strings should be immediately accepted. CHECK(large_string->CanMakeExternal()); } @@ -688,7 +697,11 @@ THREADED_TEST(StringConcat) { const char* two_byte_string_2 = "a_times_two_plus_b(4, 8) + "; const char* two_byte_extern_2 = "a_times_two_plus_b(1, 2);"; Local<String> left = v8_str(one_byte_string_1); - Local<String> right = String::New(AsciiToTwoByteString(two_byte_string_1)); + + uint16_t* two_byte_source = AsciiToTwoByteString(two_byte_string_1); + Local<String> right = String::New(two_byte_source); + i::DeleteArray(two_byte_source); + Local<String> source = String::Concat(left, right); right = String::NewExternal( new TestAsciiResource(i::StrDup(one_byte_extern_1))); @@ -698,7 +711,11 @@ THREADED_TEST(StringConcat) { source = String::Concat(source, right); right = v8_str(one_byte_string_2); source = String::Concat(source, right); - right = String::New(AsciiToTwoByteString(two_byte_string_2)); + + two_byte_source = AsciiToTwoByteString(two_byte_string_2); + right = String::New(two_byte_source); + i::DeleteArray(two_byte_source); + source = String::Concat(source, right); right = String::NewExternal( new TestResource(AsciiToTwoByteString(two_byte_extern_2))); @@ -3821,9 +3838,10 @@ v8::Handle<Value> WhammyPropertyGetter(Local<String> name, THREADED_TEST(WeakReference) { v8::HandleScope handle_scope; v8::Handle<v8::ObjectTemplate> templ= v8::ObjectTemplate::New(); + Whammy* whammy = new Whammy(); templ->SetNamedPropertyHandler(WhammyPropertyGetter, 0, 0, 0, 0, - v8::External::New(new Whammy())); + v8::External::New(whammy)); const char* extension_list[] = { "v8/gc" }; v8::ExtensionConfiguration extensions(1, extension_list); v8::Persistent<Context> context = Context::New(&extensions); @@ -3842,7 +3860,7 @@ THREADED_TEST(WeakReference) { "4"; v8::Handle<Value> result = CompileRun(code); CHECK_EQ(4.0, result->NumberValue()); - + delete whammy; context.Dispose(); } @@ 
-8612,20 +8630,31 @@ TEST(PreCompileAPIVariationsAreSame) { v8::HandleScope scope; const char* cstring = "function foo(a) { return a+1; }"; + v8::ScriptData* sd_from_cstring = v8::ScriptData::PreCompile(cstring, i::StrLength(cstring)); TestAsciiResource* resource = new TestAsciiResource(cstring); - v8::ScriptData* sd_from_istring = v8::ScriptData::PreCompile( + v8::ScriptData* sd_from_external_string = v8::ScriptData::PreCompile( v8::String::NewExternal(resource)); - CHECK_EQ(sd_from_cstring->Length(), sd_from_istring->Length()); + v8::ScriptData* sd_from_string = v8::ScriptData::PreCompile( + v8::String::New(cstring)); + + CHECK_EQ(sd_from_cstring->Length(), sd_from_external_string->Length()); + CHECK_EQ(0, memcmp(sd_from_cstring->Data(), + sd_from_external_string->Data(), + sd_from_cstring->Length())); + + CHECK_EQ(sd_from_cstring->Length(), sd_from_string->Length()); CHECK_EQ(0, memcmp(sd_from_cstring->Data(), - sd_from_istring->Data(), + sd_from_string->Data(), sd_from_cstring->Length())); + delete sd_from_cstring; - delete sd_from_istring; + delete sd_from_external_string; + delete sd_from_string; } @@ -9049,6 +9078,7 @@ THREADED_TEST(MorphCompositeStringTest) { CHECK_EQ(String::New(expected_slice_on_cons), env->Global()->Get(v8_str("slice_on_cons"))); } + i::DeleteArray(two_byte_string); } @@ -9073,6 +9103,7 @@ TEST(CompileExternalTwoByteSource) { i::StrLength(ascii_sources[i]))); v8::Local<v8::String> source = v8::String::NewExternal(&uc16_resource); v8::Script::Compile(source); + i::DeleteArray(two_byte_string); } } @@ -10350,6 +10381,40 @@ THREADED_TEST(CaptureStackTrace) { } +static void StackTraceForUncaughtExceptionListener( + v8::Handle<v8::Message> message, + v8::Handle<Value>) { + v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace(); + CHECK_EQ(2, stack_trace->GetFrameCount()); + checkStackFrame("origin", "foo", 2, 3, false, false, + stack_trace->GetFrame(0)); + checkStackFrame("origin", "bar", 5, 3, false, false, + stack_trace->GetFrame(1)); +} + +TEST(CaptureStackTraceForUncaughtException) { + report_count = 0; + v8::HandleScope scope; + LocalContext env; + v8::V8::AddMessageListener(StackTraceForUncaughtExceptionListener); + v8::V8::SetCaptureStackTraceForUncaughtExceptions(true); + + Script::Compile(v8_str("function foo() {\n" + " throw 1;\n" + "};\n" + "function bar() {\n" + " foo();\n" + "};"), + v8_str("origin"))->Run(); + v8::Local<v8::Object> global = env->Global(); + Local<Value> trouble = global->Get(v8_str("bar")); + CHECK(trouble->IsFunction()); + Function::Cast(*trouble)->Call(global, 0, NULL); + v8::V8::SetCaptureStackTraceForUncaughtExceptions(false); + v8::V8::RemoveMessageListeners(StackTraceForUncaughtExceptionListener); +} + + // Test that idle notification can be handled and eventually returns true. 
THREADED_TEST(IdleNotification) { bool rv = false; diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc index 3058c6f86a..5e49c0cdad 100644 --- a/deps/v8/test/cctest/test-assembler-arm.cc +++ b/deps/v8/test/cctest/test-assembler-arm.cc @@ -70,7 +70,6 @@ TEST(0) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); @@ -107,7 +106,6 @@ TEST(1) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); @@ -153,7 +151,6 @@ TEST(2) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); @@ -201,7 +198,6 @@ TEST(3) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); @@ -261,7 +257,6 @@ TEST(4) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); @@ -301,7 +296,6 @@ TEST(5) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc index e499c6fa85..b60865de45 100644 --- a/deps/v8/test/cctest/test-assembler-ia32.cc +++ b/deps/v8/test/cctest/test-assembler-ia32.cc @@ -70,7 +70,6 @@ TEST(AssemblerIa320) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); @@ -108,7 +107,6 @@ TEST(AssemblerIa321) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); @@ -150,7 +148,6 @@ TEST(AssemblerIa322) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); @@ -185,7 +182,6 @@ TEST(AssemblerIa323) { assm.GetCode(&desc); Code* code = Code::cast(Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value()))); // don't print the code - our disassembler can't handle cvttss2si @@ -220,7 +216,6 @@ TEST(AssemblerIa324) { assm.GetCode(&desc); Code* code = Code::cast(Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value()))); // don't print the code - our disassembler can't handle cvttsd2si @@ -250,7 +245,6 @@ TEST(AssemblerIa325) { assm.GetCode(&desc); Code* code = Code::cast(Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value()))); F0 f = FUNCTION_CAST<F0>(code->entry()); @@ -288,7 +282,6 @@ TEST(AssemblerIa326) { assm.GetCode(&desc); Code* code = Code::cast(Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value()))); #ifdef DEBUG @@ -329,7 +322,6 @@ TEST(AssemblerIa328) { assm.GetCode(&desc); Code* code = Code::cast(Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), 
Handle<Object>(Heap::undefined_value()))); CHECK(code->IsCode()); @@ -385,7 +377,6 @@ TEST(AssemblerIa329) { assm.GetCode(&desc); Code* code = Code::cast(Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value()))); CHECK(code->IsCode()); diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc index f890fc10a0..2bb32e7f5f 100644 --- a/deps/v8/test/cctest/test-disasm-arm.cc +++ b/deps/v8/test/cctest/test-disasm-arm.cc @@ -437,6 +437,11 @@ TEST(Vfp) { "eeb10bc0 vsqrt.f64 d0, d0"); COMPARE(vsqrt(d2, d3, ne), "1eb12bc3 vsqrt.f64ne d2, d3"); + + COMPARE(vmov(d0, 1.0), + "eeb70b00 vmov.f64 d0, #1"); + COMPARE(vmov(d2, -13.0), + "eeba2b0a vmov.f64 d2, #-13"); } VERIFY_RUN(); diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc index e51bfabd4f..40fadd8ef2 100644 --- a/deps/v8/test/cctest/test-disasm-ia32.cc +++ b/deps/v8/test/cctest/test-disasm-ia32.cc @@ -415,7 +415,6 @@ TEST(DisasmIa320) { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc index 7f1e3d8062..a4fcee10ff 100644 --- a/deps/v8/test/cctest/test-heap-profiler.cc +++ b/deps/v8/test/cctest/test-heap-profiler.cc @@ -598,12 +598,13 @@ TEST(HeapSnapshotCodeObjects) { CHECK_NE(NULL, lazy_code); // Verify that non-compiled code doesn't contain references to "x" - // literal, while compiled code does. + // literal, while compiled code does. The scope info is stored in FixedArray + // objects attached to the SharedFunctionInfo. bool compiled_references_x = false, lazy_references_x = false; for (int i = 0, count = compiled_code->GetChildrenCount(); i < count; ++i) { const v8::HeapGraphEdge* prop = compiled_code->GetChild(i); const v8::HeapGraphNode* node = prop->GetToNode(); - if (node->GetType() == v8::HeapGraphNode::CODE) { + if (node->GetType() == v8::HeapGraphNode::ARRAY) { if (HasString(node, "x")) { compiled_references_x = true; break; @@ -613,7 +614,7 @@ TEST(HeapSnapshotCodeObjects) { for (int i = 0, count = lazy_code->GetChildrenCount(); i < count; ++i) { const v8::HeapGraphEdge* prop = lazy_code->GetChild(i); const v8::HeapGraphNode* node = prop->GetToNode(); - if (node->GetType() == v8::HeapGraphNode::CODE) { + if (node->GetType() == v8::HeapGraphNode::ARRAY) { if (HasString(node, "x")) { lazy_references_x = true; break; diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc index 195fef49b8..01f23aa133 100644 --- a/deps/v8/test/cctest/test-heap.cc +++ b/deps/v8/test/cctest/test-heap.cc @@ -77,7 +77,6 @@ static void CheckFindCodeObject() { CodeDesc desc; assm.GetCode(&desc); Object* code = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(code->IsCode()); @@ -91,7 +90,6 @@ static void CheckFindCodeObject() { } Object* copy = Heap::CreateCode(desc, - NULL, Code::ComputeFlags(Code::STUB), Handle<Object>(Heap::undefined_value())); CHECK(copy->IsCode()); diff --git a/deps/v8/test/es5conform/es5conform.status b/deps/v8/test/es5conform/es5conform.status index e461349f26..4fb8f7bf17 100644 --- a/deps/v8/test/es5conform/es5conform.status +++ b/deps/v8/test/es5conform/es5conform.status @@ -49,25 +49,15 @@ chapter15/15.1: FAIL_OK # NOT IMPLEMENTED: seal chapter15/15.2/15.2.3/15.2.3.8: UNIMPLEMENTED -# NOT 
IMPLEMENTED: freeze -chapter15/15.2/15.2.3/15.2.3.9: UNIMPLEMENTED # NOT IMPLEMENTED: isSealed chapter15/15.2/15.2.3/15.2.3.11: UNIMPLEMENTED -# NOT IMPLEMENTED: isFrozen -chapter15/15.2/15.2.3/15.2.3.12: UNIMPLEMENTED # NOT IMPLEMENTED: seal chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-20: UNIMPLEMENTED -# NOT IMPLEMENTED: freeze -chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-21: UNIMPLEMENTED - # NOT IMPLEMENTED: isSealed chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-23: UNIMPLEMENTED -# NOT IMPLEMENTED: isFrozen -chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-24: UNIMPLEMENTED - # NOT IMPLEMENTED: bind chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: UNIMPLEMENTED diff --git a/deps/v8/test/mjsunit/apply.js b/deps/v8/test/mjsunit/apply.js index cab7eb82f7..613d37d9e3 100644 --- a/deps/v8/test/mjsunit/apply.js +++ b/deps/v8/test/mjsunit/apply.js @@ -94,7 +94,7 @@ function f() { } return doo; } - + assertEquals("42foofishhorse", f.apply(this, arr), "apply to this"); function s() { @@ -112,28 +112,13 @@ function al() { return arguments.length + arguments[arguments.length - 1]; } -var stack_corner_case_failure = false; - for (var j = 1; j < 0x40000000; j <<= 1) { try { var a = new Array(j); a[j - 1] = 42; assertEquals(42 + j, al.apply(345, a)); } catch (e) { - if (e.toString().indexOf("Maximum call stack size exceeded") != -1) { - // For some combinations of build settings, it may be the case that the - // stack here is just tall enough to contain the array whose size is - // specified by j but is not tall enough to contain the activation - // record for the apply call. Allow one such corner case through, - // checking that the length check will do the right thing for an array - // the next size up. - assertEquals(false, stack_corner_case_failure); - stack_corner_case_failure = true; - continue; - } - assertTrue(e.toString().indexOf("Function.prototype.apply") != -1, - "exception does not contain Function.prototype.apply: " + - e.toString()); + assertTrue(e.toString().indexOf("Maximum call stack size exceeded") != -1); for (; j < 0x40000000; j <<= 1) { var caught = false; try { @@ -143,9 +128,7 @@ for (var j = 1; j < 0x40000000; j <<= 1) { assertUnreachable("Apply of array with length " + a.length + " should have thrown"); } catch (e) { - assertTrue(e.toString().indexOf("Function.prototype.apply") != -1, - "exception does not contain Function.prototype.apply [" + - "length = " + j + "]: " + e.toString()); + assertTrue(e.toString().indexOf("Maximum call stack size exceeded") != -1); caught = true; } assertTrue(caught, "exception not caught"); diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js index 85457cd6e1..945b66249b 100644 --- a/deps/v8/test/mjsunit/json.js +++ b/deps/v8/test/mjsunit/json.js @@ -85,7 +85,7 @@ n4.toISOString = function () { }; assertEquals(null, n4.toJSON()); -assertEquals(Object.prototype, JSON.__proto__); +assertTrue(Object.prototype === JSON.__proto__); assertEquals("[object JSON]", Object.prototype.toString.call(JSON)); // DontEnum @@ -313,3 +313,7 @@ TestInvalid('1); throw "foo"; (1'); var x = 0; eval("(1); x++; (1)"); TestInvalid('1); x++; (1'); + +// Test string conversion of argument. +var o = { toString: function() { return "42"; } }; +assertEquals(42, JSON.parse(o)); diff --git a/deps/v8/test/mjsunit/object-freeze.js b/deps/v8/test/mjsunit/object-freeze.js new file mode 100644 index 0000000000..0ac617762a --- /dev/null +++ b/deps/v8/test/mjsunit/object-freeze.js @@ -0,0 +1,174 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the Object.freeze and Object.isFrozen methods - ES 15.2.3.9 and
+// ES 15.2.3.12
+
+
+// Test that we throw an error if an object is not passed as argument.
+var non_objects = new Array(undefined, null, 1, -1, 0, 42.43);
+for (var key in non_objects) {
+  try {
+    Object.freeze(non_objects[key]);
+    assertUnreachable();
+  } catch(e) {
+    assertTrue(/Object.freeze called on non-object/.test(e));
+  }
+}
+
+for (var key in non_objects) {
+  try {
+    Object.isFrozen(non_objects[key]);
+    assertUnreachable();
+  } catch(e) {
+    assertTrue(/Object.isFrozen called on non-object/.test(e));
+  }
+}
+
+// Test normal data properties.
+var obj = { x: 42, z: 'foobar' };
+var desc = Object.getOwnPropertyDescriptor(obj, 'x');
+assertTrue(desc.writable);
+assertTrue(desc.configurable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(obj, 'z');
+assertTrue(desc.writable);
+assertTrue(desc.configurable);
+assertEquals('foobar', desc.value);
+
+assertTrue(Object.isExtensible(obj));
+assertFalse(Object.isFrozen(obj));
+
+Object.freeze(obj);
+
+// Make sure we are no longer extensible.
+assertFalse(Object.isExtensible(obj));
+assertTrue(Object.isFrozen(obj));
+
+try {
+  obj.foo = 42;
+  assertUnreachable();
+} catch(e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+
+desc = Object.getOwnPropertyDescriptor(obj, 'x');
+assertFalse(desc.writable);
+assertFalse(desc.configurable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(obj, 'z');
+assertFalse(desc.writable);
+assertFalse(desc.configurable);
+assertEquals("foobar", desc.value);
+
+// Make sure that even if we try to overwrite a value that is not writable, it
+// is not changed.
+obj.x = "tete";
+assertEquals(42, obj.x);
+obj.x = { get: function() {return 43}, set: function() {} };
+assertEquals(42, obj.x);
+
+// Test on accessors.
+var obj2 = {};
+function get() { return 43; };
+function set() {};
+Object.defineProperty(obj2, 'x', { get: get, set: set, configurable: true });
+
+desc = Object.getOwnPropertyDescriptor(obj2, 'x');
+assertTrue(desc.configurable);
+assertEquals(undefined, desc.value);
+assertEquals(set, desc.set);
+assertEquals(get, desc.get);
+
+assertTrue(Object.isExtensible(obj2));
+assertFalse(Object.isFrozen(obj2));
+Object.freeze(obj2);
+assertTrue(Object.isFrozen(obj2));
+assertFalse(Object.isExtensible(obj2));
+
+desc = Object.getOwnPropertyDescriptor(obj2, 'x');
+assertFalse(desc.configurable);
+assertEquals(undefined, desc.value);
+assertEquals(set, desc.set);
+assertEquals(get, desc.get);
+
+try {
+  obj2.foo = 42;
+  assertUnreachable();
+} catch(e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+
+
+// Test freeze on arrays.
+var arr = new Array(42,43);
+
+desc = Object.getOwnPropertyDescriptor(arr, '0');
+assertTrue(desc.configurable);
+assertTrue(desc.writable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(arr, '1');
+assertTrue(desc.configurable);
+assertTrue(desc.writable);
+assertEquals(43, desc.value);
+
+assertTrue(Object.isExtensible(arr));
+assertFalse(Object.isFrozen(arr));
+Object.freeze(arr);
+assertTrue(Object.isFrozen(arr));
+assertFalse(Object.isExtensible(arr));
+
+desc = Object.getOwnPropertyDescriptor(arr, '0');
+assertFalse(desc.configurable);
+assertFalse(desc.writable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(arr, '1');
+assertFalse(desc.configurable);
+assertFalse(desc.writable);
+assertEquals(43, desc.value);
+
+arr[0] = 'foo';
+
+assertEquals(42, arr[0]);
+
+
+// Test that isFrozen returns the correct value even if configurable has been
+// set to false on all properties manually and the extensible flag has also
+// been set to false manually.
+var obj3 = { x: 42, y: 'foo' };
+
+assertFalse(Object.isFrozen(obj3));
+
+Object.defineProperty(obj3, 'x', {configurable: false, writable: false});
+Object.defineProperty(obj3, 'y', {configurable: false, writable: false});
+Object.preventExtensions(obj3);
+
+assertTrue(Object.isFrozen(obj3));
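
Taken together, the new json.js and object-freeze.js assertions pin down two behaviours this upgrade introduces: JSON.parse first converts its argument to a string, and an object is frozen exactly when it is non-extensible and every own property is non-configurable (and, for data properties, non-writable). The short sketch below restates those expectations in plain JavaScript as a reading aid only; it is not part of the patch, and the names isFrozenLike, assertSame, sampleObject, and jsonArg are illustrative helpers introduced here, assuming any ES5-capable shell.

// Reading aid only -- approximates the semantics exercised by the new tests;
// none of this code is taken from the V8 tree.
function assertSame(expected, found) {
  if (expected !== found) {
    throw new Error('expected <' + expected + '> but found <' + found + '>');
  }
}

// An object counts as frozen when it is not extensible and every own
// property is non-configurable and, for data properties, non-writable
// (accessor properties carry no writable attribute).
function isFrozenLike(obj) {
  if (Object.isExtensible(obj)) return false;
  var names = Object.getOwnPropertyNames(obj);
  for (var i = 0; i < names.length; i++) {
    var desc = Object.getOwnPropertyDescriptor(obj, names[i]);
    if (desc.configurable) return false;
    if ('value' in desc && desc.writable) return false;
  }
  return true;
}

var sampleObject = { x: 42 };        // hypothetical example object
Object.freeze(sampleObject);
sampleObject.x = 'changed';          // silently ignored outside strict mode
assertSame(42, sampleObject.x);
assertSame(true, isFrozenLike(sampleObject));
assertSame(Object.isFrozen(sampleObject), isFrozenLike(sampleObject));

// JSON.parse converts its argument to a string first, so an object whose
// toString returns "42" parses as the number 42.
var jsonArg = { toString: function() { return "42"; } };
assertSame(42, JSON.parse(jsonArg));

Run against a shell built from this tree (or any later ES5-capable engine), the sketch should complete without throwing.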